Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4 | 2 | /* |
a49976d1 | 3 | * Copyright (C) 1991,1992 Linus Torvalds |
1da177e4 | 4 | * |
a49976d1 | 5 | * entry_32.S contains the system-call and low-level fault and trap handling routines. |
1da177e4 | 6 | * |
39e8701f | 7 | * Stack layout while running C code: |
a49976d1 IM |
8 | * ptrace needs to have all registers on the stack. |
9 | * If the order here is changed, it needs to be | |
10 | * updated in fork.c:copy_process(), signal.c:do_signal(), | |
1da177e4 LT |
11 | * ptrace.c and ptrace.h |
12 | * | |
13 | * 0(%esp) - %ebx | |
14 | * 4(%esp) - %ecx | |
15 | * 8(%esp) - %edx | |
9b47feb7 | 16 | * C(%esp) - %esi |
1da177e4 LT |
17 | * 10(%esp) - %edi |
18 | * 14(%esp) - %ebp | |
19 | * 18(%esp) - %eax | |
20 | * 1C(%esp) - %ds | |
21 | * 20(%esp) - %es | |
464d1a78 | 22 | * 24(%esp) - %fs |
ccbeed3a TH |
23 | * 28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS |
24 | * 2C(%esp) - orig_eax | |
25 | * 30(%esp) - %eip | |
26 | * 34(%esp) - %cs | |
27 | * 38(%esp) - %eflags | |
28 | * 3C(%esp) - %oldesp | |
29 | * 40(%esp) - %oldss | |
1da177e4 LT |
30 | */ |
31 | ||
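The stack layout above is simply the 32-bit `struct pt_regs`, built up by the SAVE_ALL pushes further down. A C sketch of that layout (field names as in `asm/ptrace.h`; each 16-bit segment field carries an explicit padding half-word so every slot stays 4 bytes wide, and the offsets assume a 32-bit build):

```c
struct pt_regs_sketch {
	unsigned long  bx;              /* 0x00 */
	unsigned long  cx;              /* 0x04 */
	unsigned long  dx;              /* 0x08 */
	unsigned long  si;              /* 0x0c */
	unsigned long  di;              /* 0x10 */
	unsigned long  bp;              /* 0x14 */
	unsigned long  ax;              /* 0x18 */
	unsigned short ds, __dsh;       /* 0x1c */
	unsigned short es, __esh;       /* 0x20 */
	unsigned short fs, __fsh;       /* 0x24 */
	unsigned short gs, __gsh;       /* 0x28 */
	unsigned long  orig_ax;         /* 0x2c */
	unsigned long  ip;              /* 0x30 */
	unsigned short cs, __csh;       /* 0x34 - the spare __csh bits carry CS_FROM_* flags later */
	unsigned long  flags;           /* 0x38 */
	unsigned long  sp;              /* 0x3c */
	unsigned short ss, __ssh;       /* 0x40 */
};
```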
1da177e4 | 32 | #include <linux/linkage.h> |
d7e7528b | 33 | #include <linux/err.h> |
1da177e4 | 34 | #include <asm/thread_info.h> |
55f327fa | 35 | #include <asm/irqflags.h> |
1da177e4 LT |
36 | #include <asm/errno.h> |
37 | #include <asm/segment.h> | |
38 | #include <asm/smp.h> | |
be44d2aa | 39 | #include <asm/percpu.h> |
ab68ed98 | 40 | #include <asm/processor-flags.h> |
9b7dc567 | 41 | #include <asm/irq_vectors.h> |
cd4d09ec | 42 | #include <asm/cpufeatures.h> |
b4ca46e4 | 43 | #include <asm/alternative-asm.h> |
6837a54d | 44 | #include <asm/asm.h> |
e59d1b0a | 45 | #include <asm/smap.h> |
4d516f41 | 46 | #include <asm/frame.h> |
60400677 | 47 | #include <asm/trapnr.h> |
2641f08b | 48 | #include <asm/nospec-branch.h> |
1da177e4 | 49 | |
afaef01c AP |
50 | #include "calling.h" |
51 | ||
ea714547 JO |
52 | .section .entry.text, "ax" |
53 | ||
139ec7c4 RR |
54 | /* |
55 | * We use macros for low-level operations which need to be overridden | |
56 | * for paravirtualization. The following will never clobber any registers: | |
57 | * INTERRUPT_RETURN (aka. "iret") | |
58 | * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax") | |
d75cd22f | 59 | * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit"). |
139ec7c4 RR |
60 | * |
61 | * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must | |
62 | * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY). | |
63 | * Allowing a register to be clobbered can shrink the paravirt replacement | |
64 | * enough to patch inline, increasing performance. | |
65 | */ | |
66 | ||
48593975 | 67 | #ifdef CONFIG_PREEMPTION |
a49976d1 | 68 | # define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF |
1da177e4 | 69 | #else |
a49976d1 | 70 | # define preempt_stop(clobbers) |
1da177e4 LT |
71 | #endif |
72 | ||
55f327fa IM |
73 | .macro TRACE_IRQS_IRET |
74 | #ifdef CONFIG_TRACE_IRQFLAGS | |
a49976d1 IM |
75 | testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off? |
76 | jz 1f | |
55f327fa IM |
77 | TRACE_IRQS_ON |
78 | 1: | |
79 | #endif | |
80 | .endm | |
81 | ||
e464fb9f JR |
82 | #define PTI_SWITCH_MASK (1 << PAGE_SHIFT) |
83 | ||
ccbeed3a TH |
84 | /* |
85 | * User gs save/restore | |
86 | * | |
87 | * %gs is used for userland TLS and the kernel only uses it for the stack |
88 | * canary, which gcc requires to be at %gs:20. Read the comment |
89 | * at the top of stackprotector.h for more info. | |
90 | * | |
91 | * Local labels 98 and 99 are used. | |
92 | */ | |
93 | #ifdef CONFIG_X86_32_LAZY_GS | |
94 | ||
95 | /* unfortunately push/pop can't be no-op */ | |
96 | .macro PUSH_GS | |
a49976d1 | 97 | pushl $0 |
ccbeed3a TH |
98 | .endm |
99 | .macro POP_GS pop=0 | |
a49976d1 | 100 | addl $(4 + \pop), %esp |
ccbeed3a TH |
101 | .endm |
102 | .macro POP_GS_EX | |
103 | .endm | |
104 | ||
105 | /* all the rest are no-op */ | |
106 | .macro PTGS_TO_GS | |
107 | .endm | |
108 | .macro PTGS_TO_GS_EX | |
109 | .endm | |
110 | .macro GS_TO_REG reg | |
111 | .endm | |
112 | .macro REG_TO_PTGS reg | |
113 | .endm | |
114 | .macro SET_KERNEL_GS reg | |
115 | .endm | |
116 | ||
117 | #else /* CONFIG_X86_32_LAZY_GS */ | |
118 | ||
119 | .macro PUSH_GS | |
a49976d1 | 120 | pushl %gs |
ccbeed3a TH |
121 | .endm |
122 | ||
123 | .macro POP_GS pop=0 | |
a49976d1 | 124 | 98: popl %gs |
ccbeed3a | 125 | .if \pop <> 0 |
9b47feb7 | 126 | add $\pop, %esp |
ccbeed3a TH |
127 | .endif |
128 | .endm | |
129 | .macro POP_GS_EX | |
130 | .pushsection .fixup, "ax" | |
a49976d1 IM |
131 | 99: movl $0, (%esp) |
132 | jmp 98b | |
ccbeed3a | 133 | .popsection |
a49976d1 | 134 | _ASM_EXTABLE(98b, 99b) |
ccbeed3a TH |
135 | .endm |
136 | ||
137 | .macro PTGS_TO_GS | |
a49976d1 | 138 | 98: mov PT_GS(%esp), %gs |
ccbeed3a TH |
139 | .endm |
140 | .macro PTGS_TO_GS_EX | |
141 | .pushsection .fixup, "ax" | |
a49976d1 IM |
142 | 99: movl $0, PT_GS(%esp) |
143 | jmp 98b | |
ccbeed3a | 144 | .popsection |
a49976d1 | 145 | _ASM_EXTABLE(98b, 99b) |
ccbeed3a TH |
146 | .endm |
147 | ||
148 | .macro GS_TO_REG reg | |
a49976d1 | 149 | movl %gs, \reg |
ccbeed3a TH |
150 | .endm |
151 | .macro REG_TO_PTGS reg | |
a49976d1 | 152 | movl \reg, PT_GS(%esp) |
ccbeed3a TH |
153 | .endm |
154 | .macro SET_KERNEL_GS reg | |
a49976d1 IM |
155 | movl $(__KERNEL_STACK_CANARY), \reg |
156 | movl \reg, %gs | |
ccbeed3a TH |
157 | .endm |
158 | ||
a49976d1 | 159 | #endif /* CONFIG_X86_32_LAZY_GS */ |
ccbeed3a | 160 | |
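The only reason the kernel keeps %gs pointing anywhere at all is that code built with the stack protector on 32-bit compares its canary against %gs:20. An i386 glibc process happens to keep its TLS stack guard at the same offset, so the convention can be observed from user space; a small illustration (assumes an i386/glibc environment, build with `gcc -m32 -fstack-protector`):

```c
#include <stdio.h>

int main(void)
{
	unsigned long canary;

	/* Read the guard word exactly the way stack-protector epilogues do. */
	asm("movl %%gs:20, %0" : "=r" (canary));
	printf("stack canary at %%gs:20 = %#lx\n", canary);
	return 0;
}
```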
e464fb9f JR |
161 | /* Unconditionally switch to user cr3 */ |
162 | .macro SWITCH_TO_USER_CR3 scratch_reg:req | |
163 | ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI | |
164 | ||
165 | movl %cr3, \scratch_reg | |
166 | orl $PTI_SWITCH_MASK, \scratch_reg | |
167 | movl \scratch_reg, %cr3 | |
168 | .Lend_\@: | |
169 | .endm | |
170 | ||
97193702 JR |
171 | .macro BUG_IF_WRONG_CR3 no_user_check=0 |
172 | #ifdef CONFIG_DEBUG_ENTRY | |
173 | ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI | |
174 | .if \no_user_check == 0 | |
175 | /* coming from usermode? */ | |
81ff2c37 | 176 | testl $USER_SEGMENT_RPL_MASK, PT_CS(%esp) |
97193702 JR |
177 | jz .Lend_\@ |
178 | .endif | |
179 | /* On user-cr3? */ | |
180 | movl %cr3, %eax | |
181 | testl $PTI_SWITCH_MASK, %eax | |
182 | jnz .Lend_\@ | |
183 | /* From userspace with kernel cr3 - BUG */ | |
184 | ud2 | |
185 | .Lend_\@: | |
186 | #endif | |
187 | .endm | |
188 | ||
e464fb9f JR |
189 | /* |
190 | * Switch to kernel cr3 if not already loaded and return current cr3 in | |
191 | * \scratch_reg | |
192 | */ | |
193 | .macro SWITCH_TO_KERNEL_CR3 scratch_reg:req | |
194 | ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI | |
195 | movl %cr3, \scratch_reg | |
196 | /* Test if we are already on kernel CR3 */ | |
197 | testl $PTI_SWITCH_MASK, \scratch_reg | |
198 | jz .Lend_\@ | |
199 | andl $(~PTI_SWITCH_MASK), \scratch_reg | |
200 | movl \scratch_reg, %cr3 | |
201 | /* Return original CR3 in \scratch_reg */ | |
202 | orl $PTI_SWITCH_MASK, \scratch_reg | |
203 | .Lend_\@: | |
204 | .endm | |
205 | ||
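Both CR3 macros only ever toggle bit PAGE_SHIFT (bit 12) of %cr3: with PTI the user copy of the page-table root is expected to sit one page above the kernel one, so flipping that single bit selects between the two without touching anything else. A hedged C model of the arithmetic (illustrative helpers, not kernel functions):

```c
#define PAGE_SHIFT      12
#define PTI_SWITCH_MASK (1UL << PAGE_SHIFT)   /* bit 12 selects the user PGD copy */

/* Mirrors SWITCH_TO_USER_CR3: orl $PTI_SWITCH_MASK, %cr3 */
static inline unsigned long user_cr3(unsigned long cr3)
{
	return cr3 | PTI_SWITCH_MASK;
}

/* Mirrors SWITCH_TO_KERNEL_CR3: andl $(~PTI_SWITCH_MASK), %cr3 */
static inline unsigned long kernel_cr3(unsigned long cr3)
{
	return cr3 & ~PTI_SWITCH_MASK;
}
```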
3c88c692 PZ |
206 | #define CS_FROM_ENTRY_STACK (1 << 31) |
207 | #define CS_FROM_USER_CR3 (1 << 30) | |
208 | #define CS_FROM_KERNEL (1 << 29) | |
89542907 | 209 | #define CS_FROM_ESPFIX (1 << 28) |
3c88c692 PZ |
210 | |
211 | .macro FIXUP_FRAME | |
212 | /* | |
213 | * The high bits of the CS dword (__csh) are used for CS_FROM_*. | |
214 | * Clear them in case hardware didn't do this for us. | |
215 | */ | |
82cb8a0b | 216 | andl $0x0000ffff, 4*4(%esp) |
3c88c692 PZ |
217 | |
218 | #ifdef CONFIG_VM86 | |
82cb8a0b | 219 | testl $X86_EFLAGS_VM, 5*4(%esp) |
3c88c692 PZ |
220 | jnz .Lfrom_usermode_no_fixup_\@ |
221 | #endif | |
82cb8a0b | 222 | testl $USER_SEGMENT_RPL_MASK, 4*4(%esp) |
3c88c692 PZ |
223 | jnz .Lfrom_usermode_no_fixup_\@ |
224 | ||
82cb8a0b | 225 | orl $CS_FROM_KERNEL, 4*4(%esp) |
3c88c692 PZ |
226 | |
227 | /* | |
228 | * When we're here from kernel mode, the (exception) stack looks like: |
229 | * | |
82cb8a0b AL |
230 | * 6*4(%esp) - <previous context> |
231 | * 5*4(%esp) - flags | |
232 | * 4*4(%esp) - cs | |
233 | * 3*4(%esp) - ip | |
234 | * 2*4(%esp) - orig_eax | |
235 | * 1*4(%esp) - gs / function | |
236 | * 0*4(%esp) - fs | |
3c88c692 PZ |
237 | * |
238 | * Let's build a 5-entry IRET frame after that, such that struct pt_regs |
239 | * is complete and in particular regs->sp is correct. This gives us | |
82cb8a0b | 240 | * the original 6 entries as a gap: |
3c88c692 | 241 | * |
82cb8a0b AL |
242 | * 14*4(%esp) - <previous context> |
243 | * 13*4(%esp) - gap / flags | |
244 | * 12*4(%esp) - gap / cs | |
245 | * 11*4(%esp) - gap / ip | |
246 | * 10*4(%esp) - gap / orig_eax | |
247 | * 9*4(%esp) - gap / gs / function | |
248 | * 8*4(%esp) - gap / fs | |
249 | * 7*4(%esp) - ss | |
250 | * 6*4(%esp) - sp | |
251 | * 5*4(%esp) - flags | |
252 | * 4*4(%esp) - cs | |
253 | * 3*4(%esp) - ip | |
254 | * 2*4(%esp) - orig_eax | |
255 | * 1*4(%esp) - gs / function | |
256 | * 0*4(%esp) - fs | |
3c88c692 PZ |
257 | */ |
258 | ||
259 | pushl %ss # ss | |
260 | pushl %esp # sp (points at ss) | |
82cb8a0b AL |
261 | addl $7*4, (%esp) # point sp back at the previous context |
262 | pushl 7*4(%esp) # flags | |
263 | pushl 7*4(%esp) # cs | |
264 | pushl 7*4(%esp) # ip | |
265 | pushl 7*4(%esp) # orig_eax | |
266 | pushl 7*4(%esp) # gs / function | |
267 | pushl 7*4(%esp) # fs | |
3c88c692 PZ |
268 | .Lfrom_usermode_no_fixup_\@: |
269 | .endm | |
270 | ||
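The trick FIXUP_FRAME relies on: the saved CS occupies a full 32-bit slot but only the low 16 bits are a real selector, so the upper bits (the `__csh` half-word) are free to record how the kernel was entered. A small C model of that bookkeeping (hypothetical helper names, same bit values as above):

```c
#include <stdbool.h>
#include <stdint.h>

#define CS_FROM_ENTRY_STACK (1u << 31)
#define CS_FROM_USER_CR3    (1u << 30)
#define CS_FROM_KERNEL      (1u << 29)
#define CS_FROM_ESPFIX      (1u << 28)

static inline uint16_t cs_selector(uint32_t cs_slot)
{
	return (uint16_t)cs_slot;                /* the real selector bits */
}

static inline void mark_from_kernel(uint32_t *cs_slot)
{
	*cs_slot &= 0x0000ffff;                  /* clear whatever the CPU left in __csh */
	*cs_slot |= CS_FROM_KERNEL;              /* remember we interrupted kernel mode */
}

static inline bool came_from_kernel(uint32_t cs_slot)
{
	return cs_slot & CS_FROM_KERNEL;         /* tested again by IRET_FRAME on exit */
}
```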
271 | .macro IRET_FRAME | |
4c4fd55d AL |
272 | /* |
273 | * We're called with %ds, %es, %fs, and %gs from the interrupted | |
274 | * frame, so we shouldn't use them. Also, we may be in ESPFIX | |
275 | * mode and therefore have a nonzero SS base and an offset ESP, | |
276 | * so any attempt to access the stack needs to use SS. (except for | |
277 | * accesses through %esp, which automatically use SS.) | |
278 | */ | |
3c88c692 PZ |
279 | testl $CS_FROM_KERNEL, 1*4(%esp) |
280 | jz .Lfinished_frame_\@ | |
281 | ||
282 | /* | |
283 | * Reconstruct the 3 entry IRET frame right after the (modified) | |
284 | * regs->sp without lowering %esp in between, such that an NMI in the | |
285 | * middle doesn't scribble our stack. | |
286 | */ | |
287 | pushl %eax | |
288 | pushl %ecx | |
289 | movl 5*4(%esp), %eax # (modified) regs->sp | |
290 | ||
291 | movl 4*4(%esp), %ecx # flags | |
4c4fd55d | 292 | movl %ecx, %ss:-1*4(%eax) |
3c88c692 PZ |
293 | |
294 | movl 3*4(%esp), %ecx # cs | |
295 | andl $0x0000ffff, %ecx | |
4c4fd55d | 296 | movl %ecx, %ss:-2*4(%eax) |
3c88c692 PZ |
297 | |
298 | movl 2*4(%esp), %ecx # ip | |
4c4fd55d | 299 | movl %ecx, %ss:-3*4(%eax) |
3c88c692 PZ |
300 | |
301 | movl 1*4(%esp), %ecx # eax | |
4c4fd55d | 302 | movl %ecx, %ss:-4*4(%eax) |
3c88c692 PZ |
303 | |
304 | popl %ecx | |
4c4fd55d | 305 | lea -4*4(%eax), %esp |
3c88c692 PZ |
306 | popl %eax |
307 | .Lfinished_frame_\@: | |
308 | .endm | |
309 | ||
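IRET_FRAME then undoes the fixup by rebuilding a plain iret frame (plus the scratch %eax) directly below the possibly-modified regs->sp, doing all the writes before %esp is moved so an NMI can never land on a half-built frame. A conceptual C sketch of those stores (illustrative only):

```c
/* regs_sp is the (modified) regs->sp read from the current frame. */
static unsigned long *rebuild_iret_frame(unsigned long *regs_sp,
					 unsigned long flags, unsigned long cs,
					 unsigned long ip, unsigned long saved_ax)
{
	regs_sp[-1] = flags;
	regs_sp[-2] = cs & 0xffff;	/* strip the CS_FROM_* marker bits again */
	regs_sp[-3] = ip;
	regs_sp[-4] = saved_ax;

	/* %esp is finally pointed here; popping the saved %eax leaves
	 * ip, cs and flags on top, exactly what iret expects. */
	return regs_sp - 4;
}
```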
a1a338e5 | 310 | .macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0 unwind_espfix=0 |
f0d96110 | 311 | cld |
e67f1c11 | 312 | .if \skip_gs == 0 |
ccbeed3a | 313 | PUSH_GS |
e67f1c11 | 314 | .endif |
a49976d1 | 315 | pushl %fs |
a1a338e5 AL |
316 | |
317 | pushl %eax | |
318 | movl $(__KERNEL_PERCPU), %eax | |
319 | movl %eax, %fs | |
320 | .if \unwind_espfix > 0 | |
321 | UNWIND_ESPFIX_STACK | |
322 | .endif | |
323 | popl %eax | |
324 | ||
82cb8a0b | 325 | FIXUP_FRAME |
a49976d1 IM |
326 | pushl %es |
327 | pushl %ds | |
150ac78d | 328 | pushl \pt_regs_ax |
a49976d1 IM |
329 | pushl %ebp |
330 | pushl %edi | |
331 | pushl %esi | |
332 | pushl %edx | |
333 | pushl %ecx | |
334 | pushl %ebx | |
335 | movl $(__USER_DS), %edx | |
336 | movl %edx, %ds | |
337 | movl %edx, %es | |
e67f1c11 | 338 | .if \skip_gs == 0 |
ccbeed3a | 339 | SET_KERNEL_GS %edx |
e67f1c11 | 340 | .endif |
45d7b255 JR |
341 | /* Switch to kernel stack if necessary */ |
342 | .if \switch_stacks > 0 | |
343 | SWITCH_TO_KERNEL_STACK | |
344 | .endif | |
f0d96110 | 345 | .endm |
1da177e4 | 346 | |
89542907 PZ |
347 | .macro SAVE_ALL_NMI cr3_reg:req unwind_espfix=0 |
348 | SAVE_ALL unwind_espfix=\unwind_espfix | |
b65bef40 | 349 | |
97193702 JR |
350 | BUG_IF_WRONG_CR3 |
351 | ||
b65bef40 JR |
352 | /* |
353 | * Now switch the CR3 when PTI is enabled. | |
354 | * | |
355 | * We can enter with either user or kernel cr3, the code will | |
356 | * store the old cr3 in \cr3_reg and switch to the kernel cr3 |
357 | * if necessary. | |
358 | */ | |
359 | SWITCH_TO_KERNEL_CR3 scratch_reg=\cr3_reg | |
360 | ||
361 | .Lend_\@: | |
8b376fae | 362 | .endm |
97193702 | 363 | |
f0d96110 | 364 | .macro RESTORE_INT_REGS |
a49976d1 IM |
365 | popl %ebx |
366 | popl %ecx | |
367 | popl %edx | |
368 | popl %esi | |
369 | popl %edi | |
370 | popl %ebp | |
371 | popl %eax | |
f0d96110 | 372 | .endm |
1da177e4 | 373 | |
ccbeed3a | 374 | .macro RESTORE_REGS pop=0 |
f0d96110 | 375 | RESTORE_INT_REGS |
a49976d1 IM |
376 | 1: popl %ds |
377 | 2: popl %es | |
378 | 3: popl %fs | |
ccbeed3a | 379 | POP_GS \pop |
40ad2199 | 380 | IRET_FRAME |
f0d96110 | 381 | .pushsection .fixup, "ax" |
a49976d1 IM |
382 | 4: movl $0, (%esp) |
383 | jmp 1b | |
384 | 5: movl $0, (%esp) | |
385 | jmp 2b | |
386 | 6: movl $0, (%esp) | |
387 | jmp 3b | |
f95d47ca | 388 | .popsection |
a49976d1 IM |
389 | _ASM_EXTABLE(1b, 4b) |
390 | _ASM_EXTABLE(2b, 5b) | |
391 | _ASM_EXTABLE(3b, 6b) | |
ccbeed3a | 392 | POP_GS_EX |
f0d96110 | 393 | .endm |
1da177e4 | 394 | |
b65bef40 JR |
395 | .macro RESTORE_ALL_NMI cr3_reg:req pop=0 |
396 | /* | |
397 | * Now switch the CR3 when PTI is enabled. | |
398 | * | |
399 | * We enter with kernel cr3 and switch the cr3 to the value | |
400 | * stored in \cr3_reg, which is either a user or a kernel cr3. |
401 | */ | |
402 | ALTERNATIVE "jmp .Lswitched_\@", "", X86_FEATURE_PTI | |
403 | ||
404 | testl $PTI_SWITCH_MASK, \cr3_reg | |
405 | jz .Lswitched_\@ | |
406 | ||
407 | /* User cr3 in \cr3_reg - write it to hardware cr3 */ | |
408 | movl \cr3_reg, %cr3 | |
409 | ||
410 | .Lswitched_\@: | |
411 | ||
97193702 JR |
412 | BUG_IF_WRONG_CR3 |
413 | ||
8b376fae JR |
414 | RESTORE_REGS pop=\pop |
415 | .endm | |
416 | ||
46eabca2 JR |
417 | .macro CHECK_AND_APPLY_ESPFIX |
418 | #ifdef CONFIG_X86_ESPFIX32 | |
4a13b0e3 AL |
419 | #define GDT_ESPFIX_OFFSET (GDT_ENTRY_ESPFIX_SS * 8) |
420 | #define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + GDT_ESPFIX_OFFSET | |
46eabca2 JR |
421 | |
422 | ALTERNATIVE "jmp .Lend_\@", "", X86_BUG_ESPFIX | |
423 | ||
424 | movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS | |
425 | /* | |
426 | * Warning: PT_OLDSS(%esp) contains the wrong/random values if we | |
427 | * are returning to the kernel. | |
428 | * See comments in process.c:copy_thread() for details. | |
429 | */ | |
430 | movb PT_OLDSS(%esp), %ah | |
431 | movb PT_CS(%esp), %al | |
432 | andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax | |
433 | cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax | |
434 | jne .Lend_\@ # returning to user-space with LDT SS | |
435 | ||
436 | /* | |
437 | * Setup and switch to ESPFIX stack | |
438 | * | |
439 | * We're returning to userspace with a 16 bit stack. The CPU will not | |
440 | * restore the high word of ESP for us on executing iret... This is an | |
441 | * "official" bug of all the x86-compatible CPUs, which we can work | |
442 | * around to make dosemu and wine happy. We do this by preloading the | |
443 | * high word of ESP with the high word of the userspace ESP while | |
444 | * compensating for the offset by changing to the ESPFIX segment with | |
445 | * a base address that matches for the difference. | |
446 | */ | |
447 | mov %esp, %edx /* load kernel esp */ | |
448 | mov PT_OLDESP(%esp), %eax /* load userspace esp */ | |
449 | mov %dx, %ax /* eax: new kernel esp */ | |
450 | sub %eax, %edx /* offset (low word is 0) */ | |
451 | shr $16, %edx | |
452 | mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */ | |
453 | mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */ | |
454 | pushl $__ESPFIX_SS | |
455 | pushl %eax /* new kernel esp */ | |
456 | /* | |
457 | * Disable interrupts, but do not irqtrace this section: we | |
458 | * will soon execute iret and the tracer was already set to | |
459 | * the irqstate after the IRET: | |
460 | */ | |
461 | DISABLE_INTERRUPTS(CLBR_ANY) | |
462 | lss (%esp), %esp /* switch to espfix segment */ | |
463 | .Lend_\@: | |
464 | #endif /* CONFIG_X86_ESPFIX32 */ | |
465 | .endm | |
45d7b255 | 466 | |
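The ESPFIX dance works because the replacement ESP keeps the user's high word while the per-cpu ESPFIX segment base supplies the difference, so SS:ESP still resolves to the kernel stack even though only the low 16 bits of ESP survive the eventual iret. A hedged C sketch of the arithmetic performed above (plain integers standing in for %edx/%eax):

```c
/* mov %dx, %ax: keep the user's high word and the kernel's low word. */
static unsigned int espfix_new_esp(unsigned int kernel_esp, unsigned int user_esp)
{
	return (user_esp & 0xffff0000u) | (kernel_esp & 0x0000ffffu);
}

/* sub %eax, %edx; shr $16, %edx: the low 16 bits of the difference are zero
 * by construction, so only base bits 16..31 of the GDT entry need patching,
 * and base + new_esp == kernel_esp (the original linear stack address). */
static unsigned int espfix_segment_base(unsigned int kernel_esp, unsigned int new_esp)
{
	return kernel_esp - new_esp;
}
```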
45d7b255 JR |
467 | /* |
468 | * Called with pt_regs fully populated and kernel segments loaded, | |
469 | * so we can access PER_CPU and use the integer registers. | |
470 | * | |
471 | * We need to be very careful here with the %esp switch, because an NMI | |
472 | * can happen anywhere. If the NMI handler finds itself on the |
473 | * entry-stack, it will overwrite the task-stack and everything we | |
474 | * copied there. So allocate the stack-frame on the task-stack and | |
475 | * switch to it before we do any copying. | |
476 | */ | |
b92a165d | 477 | |
45d7b255 JR |
478 | .macro SWITCH_TO_KERNEL_STACK |
479 | ||
480 | ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV | |
481 | ||
97193702 JR |
482 | BUG_IF_WRONG_CR3 |
483 | ||
e464fb9f JR |
484 | SWITCH_TO_KERNEL_CR3 scratch_reg=%eax |
485 | ||
486 | /* | |
487 | * %eax now contains the entry cr3 and we carry it forward in | |
488 | * that register for the time this macro runs | |
489 | */ | |
490 | ||
45d7b255 JR |
491 | /* Are we on the entry stack? Bail out if not! */ |
492 | movl PER_CPU_VAR(cpu_entry_area), %ecx | |
493 | addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx | |
494 | subl %esp, %ecx /* ecx = (end of entry_stack) - esp */ | |
495 | cmpl $SIZEOF_entry_stack, %ecx | |
496 | jae .Lend_\@ | |
497 | ||
498 | /* Load stack pointer into %esi and %edi */ | |
499 | movl %esp, %esi | |
500 | movl %esi, %edi | |
501 | ||
502 | /* Move %edi to the top of the entry stack */ | |
503 | andl $(MASK_entry_stack), %edi | |
504 | addl $(SIZEOF_entry_stack), %edi | |
505 | ||
506 | /* Load top of task-stack into %edi */ | |
507 | movl TSS_entry2task_stack(%edi), %edi | |
508 | ||
b92a165d | 509 | /* Special case - entry from kernel mode via entry stack */ |
d5e84c21 JR |
510 | #ifdef CONFIG_VM86 |
511 | movl PT_EFLAGS(%esp), %ecx # mix EFLAGS and CS | |
512 | movb PT_CS(%esp), %cl | |
513 | andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %ecx | |
514 | #else | |
515 | movl PT_CS(%esp), %ecx | |
516 | andl $SEGMENT_RPL_MASK, %ecx | |
517 | #endif | |
518 | cmpl $USER_RPL, %ecx | |
519 | jb .Lentry_from_kernel_\@ | |
b92a165d | 520 | |
45d7b255 JR |
521 | /* Bytes to copy */ |
522 | movl $PTREGS_SIZE, %ecx | |
523 | ||
524 | #ifdef CONFIG_VM86 | |
525 | testl $X86_EFLAGS_VM, PT_EFLAGS(%esi) | |
526 | jz .Lcopy_pt_regs_\@ | |
527 | ||
528 | /* | |
529 | * Stack-frame contains 4 additional segment registers when | |
530 | * coming from VM86 mode | |
531 | */ | |
532 | addl $(4 * 4), %ecx | |
533 | ||
45d7b255 | 534 | #endif |
b92a165d | 535 | .Lcopy_pt_regs_\@: |
45d7b255 JR |
536 | |
537 | /* Allocate frame on task-stack */ | |
538 | subl %ecx, %edi | |
539 | ||
540 | /* Switch to task-stack */ | |
541 | movl %edi, %esp | |
542 | ||
543 | /* | |
544 | * We are now on the task-stack and can safely copy over the | |
545 | * stack-frame | |
546 | */ | |
547 | shrl $2, %ecx | |
548 | cld | |
549 | rep movsl | |
550 | ||
b92a165d JR |
551 | jmp .Lend_\@ |
552 | ||
553 | .Lentry_from_kernel_\@: | |
554 | ||
555 | /* | |
556 | * This handles the case when we enter the kernel from | |
557 | * kernel-mode and %esp points to the entry-stack. When this | |
558 | * happens we need to switch to the task-stack to run C code, | |
559 | * but switch back to the entry-stack again when we approach | |
560 | * iret and return to the interrupted code-path. This usually | |
561 | * happens when we hit an exception while restoring user-space | |
e464fb9f JR |
562 | * segment registers on the way back to user-space or when the |
563 | * sysenter handler runs with eflags.tf set. | |
b92a165d JR |
564 | * |
565 | * When we switch to the task-stack here, we can't trust the | |
566 | * contents of the entry-stack anymore, as the exception handler | |
567 | * might be scheduled out or moved to another CPU. Therefore we | |
568 | * copy the complete entry-stack to the task-stack and set a | |
569 | * marker in the iret-frame (bit 31 of the CS dword) to detect | |
570 | * what we've done on the iret path. | |
571 | * | |
572 | * On the iret path we copy everything back and switch to the | |
573 | * entry-stack, so that the interrupted kernel code-path | |
574 | * continues on the same stack it was interrupted with. | |
575 | * | |
576 | * Be aware that an NMI can happen anytime in this code. | |
577 | * | |
578 | * %esi: Entry-Stack pointer (same as %esp) | |
579 | * %edi: Top of the task stack | |
e464fb9f | 580 | * %eax: CR3 on kernel entry |
b92a165d JR |
581 | */ |
582 | ||
583 | /* Calculate number of bytes on the entry stack in %ecx */ | |
584 | movl %esi, %ecx | |
585 | ||
586 | /* %ecx to the top of entry-stack */ | |
587 | andl $(MASK_entry_stack), %ecx | |
588 | addl $(SIZEOF_entry_stack), %ecx | |
589 | ||
590 | /* Number of bytes on the entry stack to %ecx */ | |
591 | sub %esi, %ecx | |
592 | ||
593 | /* Mark stackframe as coming from entry stack */ | |
594 | orl $CS_FROM_ENTRY_STACK, PT_CS(%esp) | |
595 | ||
e464fb9f JR |
596 | /* |
597 | * Test the cr3 used to enter the kernel and add a marker | |
598 | * so that we can switch back to it before iret. | |
599 | */ | |
600 | testl $PTI_SWITCH_MASK, %eax | |
601 | jz .Lcopy_pt_regs_\@ | |
602 | orl $CS_FROM_USER_CR3, PT_CS(%esp) | |
603 | ||
b92a165d JR |
604 | /* |
605 | * %esi and %edi are unchanged, %ecx contains the number of | |
606 | * bytes to copy. The code at .Lcopy_pt_regs_\@ will allocate | |
607 | * the stack-frame on task-stack and copy everything over | |
608 | */ | |
609 | jmp .Lcopy_pt_regs_\@ | |
610 | ||
45d7b255 JR |
611 | .Lend_\@: |
612 | .endm | |
613 | ||
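Stripped of the details, the macro above allocates room on the task stack, switches %esp there first, and only then copies the saved frame across, so an NMI arriving mid-way always finds a stack it is allowed to use. A conceptual C model (illustrative only; `bytes` is PTREGS_SIZE plus four extra slots for the VM86 segment registers):

```c
#include <stddef.h>
#include <string.h>

/* Returns the new stack pointer on the task stack. */
static void *copy_frame_to_task_stack(const void *entry_stack_sp,
				      void *task_stack_top, size_t bytes)
{
	unsigned char *new_sp = (unsigned char *)task_stack_top - bytes;

	/* In the assembly, "movl %edi, %esp" happens here ... */
	memcpy(new_sp, entry_stack_sp, bytes);	/* ... then cld; rep movsl */
	return new_sp;
}
```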
e5862d05 JR |
614 | /* |
615 | * Switch back from the kernel stack to the entry stack. | |
616 | * | |
617 | * The %esp register must point to pt_regs on the task stack. It will | |
618 | * first calculate the size of the stack-frame to copy, depending on | |
619 | * whether we return to VM86 mode or not. With that it uses 'rep movsl' | |
620 | * to copy the contents of the stack over to the entry stack. | |
621 | * | |
622 | * We must be very careful here, as we can't trust the contents of the | |
623 | * task-stack once we switched to the entry-stack. When an NMI happens | |
624 | * while on the entry-stack, the NMI handler will switch back to the top | |
625 | * of the task stack, overwriting the stack-frame we are about to copy. |
626 | * Therefore we switch the stack only after everything is copied over. | |
627 | */ | |
628 | .macro SWITCH_TO_ENTRY_STACK | |
629 | ||
630 | ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV | |
631 | ||
632 | /* Bytes to copy */ | |
633 | movl $PTREGS_SIZE, %ecx | |
634 | ||
635 | #ifdef CONFIG_VM86 | |
636 | testl $(X86_EFLAGS_VM), PT_EFLAGS(%esp) | |
637 | jz .Lcopy_pt_regs_\@ | |
638 | ||
639 | /* Additional 4 registers to copy when returning to VM86 mode */ | |
640 | addl $(4 * 4), %ecx | |
641 | ||
642 | .Lcopy_pt_regs_\@: | |
643 | #endif | |
644 | ||
645 | /* Initialize source and destination for movsl */ | |
646 | movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi | |
647 | subl %ecx, %edi | |
648 | movl %esp, %esi | |
649 | ||
650 | /* Save future stack pointer in %ebx */ | |
651 | movl %edi, %ebx | |
652 | ||
653 | /* Copy over the stack-frame */ | |
654 | shrl $2, %ecx | |
655 | cld | |
656 | rep movsl | |
657 | ||
658 | /* | |
659 | * Switch to entry-stack - needs to happen after everything is | |
660 | * copied because the NMI handler will overwrite the task-stack | |
661 | * when on entry-stack | |
662 | */ | |
663 | movl %ebx, %esp | |
664 | ||
665 | .Lend_\@: | |
666 | .endm | |
667 | ||
b92a165d JR |
668 | /* |
669 | * This macro handles the case when we return to kernel-mode on the iret | |
e464fb9f | 670 | * path and have to switch back to the entry stack and/or user-cr3 |
b92a165d JR |
671 | * |
672 | * See the comments below the .Lentry_from_kernel_\@ label in the | |
673 | * SWITCH_TO_KERNEL_STACK macro for more details. | |
674 | */ | |
675 | .macro PARANOID_EXIT_TO_KERNEL_MODE | |
676 | ||
677 | /* | |
678 | * Test if we entered the kernel with the entry-stack. Most | |
679 | * likely we did not, because this code only runs on the | |
680 | * return-to-kernel path. | |
681 | */ | |
682 | testl $CS_FROM_ENTRY_STACK, PT_CS(%esp) | |
683 | jz .Lend_\@ | |
684 | ||
685 | /* Unlikely slow-path */ | |
686 | ||
687 | /* Clear marker from stack-frame */ | |
688 | andl $(~CS_FROM_ENTRY_STACK), PT_CS(%esp) | |
689 | ||
690 | /* Copy the remaining task-stack contents to entry-stack */ | |
691 | movl %esp, %esi | |
692 | movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi | |
693 | ||
694 | /* Bytes on the task-stack to ecx */ | |
695 | movl PER_CPU_VAR(cpu_tss_rw + TSS_sp1), %ecx | |
696 | subl %esi, %ecx | |
697 | ||
698 | /* Allocate stack-frame on entry-stack */ | |
699 | subl %ecx, %edi | |
700 | ||
701 | /* | |
702 | * Save the future stack-pointer; we must not switch until the |
703 | * copy is done, otherwise the NMI handler could destroy the | |
704 | * contents of the task-stack we are about to copy. | |
705 | */ | |
706 | movl %edi, %ebx | |
707 | ||
708 | /* Do the copy */ | |
709 | shrl $2, %ecx | |
710 | cld | |
711 | rep movsl | |
712 | ||
713 | /* Safe to switch to entry-stack now */ | |
714 | movl %ebx, %esp | |
715 | ||
e464fb9f JR |
716 | /* |
717 | * We came from entry-stack and need to check if we also need to | |
718 | * switch back to user cr3. | |
719 | */ | |
720 | testl $CS_FROM_USER_CR3, PT_CS(%esp) | |
721 | jz .Lend_\@ | |
722 | ||
723 | /* Clear marker from stack-frame */ | |
724 | andl $(~CS_FROM_USER_CR3), PT_CS(%esp) | |
725 | ||
726 | SWITCH_TO_USER_CR3 scratch_reg=%eax | |
727 | ||
b92a165d JR |
728 | .Lend_\@: |
729 | .endm | |
60400677 TG |
730 | |
731 | /** | |
732 | * idtentry - Macro to generate entry stubs for simple IDT entries | |
733 | * @vector: Vector number | |
734 | * @asmsym: ASM symbol for the entry point | |
735 | * @cfunc: C function to be called | |
736 | * @has_error_code: Hardware pushed error code on stack | |
737 | * @sane: Compatibility flag with 64bit | |
738 | */ | |
739 | .macro idtentry vector asmsym cfunc has_error_code:req sane=0 | |
740 | SYM_CODE_START(\asmsym) | |
741 | ASM_CLAC | |
742 | cld | |
743 | ||
744 | .if \has_error_code == 0 | |
745 | pushl $0 /* Clear the error code */ | |
746 | .endif | |
747 | ||
748 | /* Push the C-function address into the GS slot */ | |
749 | pushl $\cfunc | |
750 | /* Invoke the common exception entry */ | |
751 | jmp handle_exception | |
752 | SYM_CODE_END(\asmsym) | |
753 | .endm | |
754 | ||
53aaf262 TG |
755 | /* |
756 | * Include the defines which emit the idt entries which are |
757 | * shared between 32 and 64 bit. | |
758 | */ | |
759 | #include <asm/idtentry.h> | |
760 | ||
0100301b BG |
761 | /* |
762 | * %eax: prev task | |
763 | * %edx: next task | |
764 | */ | |
8c0fa8a0 | 765 | .pushsection .text, "ax" |
5e63306f | 766 | SYM_CODE_START(__switch_to_asm) |
0100301b BG |
767 | /* |
768 | * Save callee-saved registers | |
769 | * This must match the order in struct inactive_task_frame | |
770 | */ | |
771 | pushl %ebp | |
772 | pushl %ebx | |
773 | pushl %edi | |
774 | pushl %esi | |
a3ba9660 TG |
775 | /* |
776 | * Flags are saved to prevent AC leakage. This could go | |
777 | * away if objtool had 32-bit support to verify |
778 | * the STAC/CLAC correctness. | |
779 | */ | |
6690e86b | 780 | pushfl |
0100301b BG |
781 | |
782 | /* switch stack */ | |
783 | movl %esp, TASK_threadsp(%eax) | |
784 | movl TASK_threadsp(%edx), %esp | |
785 | ||
050e9baa | 786 | #ifdef CONFIG_STACKPROTECTOR |
0100301b BG |
787 | movl TASK_stack_canary(%edx), %ebx |
788 | movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset | |
789 | #endif | |
790 | ||
c995efd5 DW |
791 | #ifdef CONFIG_RETPOLINE |
792 | /* | |
793 | * When switching from a shallower to a deeper call stack | |
794 | * the RSB may either underflow or use entries populated | |
795 | * with userspace addresses. On CPUs where those concerns | |
796 | * exist, overwrite the RSB with entries which capture | |
797 | * speculative execution to prevent attack. | |
798 | */ | |
d1c99108 | 799 | FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW |
c995efd5 DW |
800 | #endif |
801 | ||
a3ba9660 | 802 | /* Restore flags of the incoming task to restore AC state. */ |
6690e86b | 803 | popfl |
a3ba9660 | 804 | /* restore callee-saved registers */ |
0100301b BG |
805 | popl %esi |
806 | popl %edi | |
807 | popl %ebx | |
808 | popl %ebp | |
809 | ||
810 | jmp __switch_to | |
5e63306f | 811 | SYM_CODE_END(__switch_to_asm) |
8c0fa8a0 | 812 | .popsection |
0100301b | 813 | |
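The push order above is what defines the frame that the saved stack pointer (TASK_threadsp) points at; __switch_to() and the unwinder both rely on it. Roughly, mirroring the 32-bit half of `asm/switch_to.h` (the last push ends up at the lowest address):

```c
struct inactive_task_frame_sketch {
	unsigned long flags;     /* pushfl                                  */
	unsigned long si;        /* pushl %esi                              */
	unsigned long di;        /* pushl %edi                              */
	unsigned long bx;        /* pushl %ebx                              */
	unsigned long bp;        /* pushl %ebp                              */
	unsigned long ret_addr;  /* pushed by the call into __switch_to_asm */
};
```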
ebd57499 JP |
814 | /* |
815 | * The unwinder expects the last frame on the stack to always be at the same | |
816 | * offset from the end of the page, which allows it to validate the stack. | |
817 | * Calling schedule_tail() directly would break that convention because it's an |
818 | * asmlinkage function so its argument has to be pushed on the stack. This | |
819 | * wrapper creates a proper "end of stack" frame header before the call. | |
820 | */ | |
8c0fa8a0 | 821 | .pushsection .text, "ax" |
6d685e53 | 822 | SYM_FUNC_START(schedule_tail_wrapper) |
ebd57499 JP |
823 | FRAME_BEGIN |
824 | ||
825 | pushl %eax | |
826 | call schedule_tail | |
827 | popl %eax | |
828 | ||
829 | FRAME_END | |
830 | ret | |
6d685e53 | 831 | SYM_FUNC_END(schedule_tail_wrapper) |
8c0fa8a0 TG |
832 | .popsection |
833 | ||
0100301b BG |
834 | /* |
835 | * A newly forked process directly context switches into this address. | |
836 | * | |
837 | * eax: prev task we switched from | |
616d2483 BG |
838 | * ebx: kernel thread func (NULL for user thread) |
839 | * edi: kernel thread arg | |
0100301b | 840 | */ |
8c0fa8a0 | 841 | .pushsection .text, "ax" |
5e63306f | 842 | SYM_CODE_START(ret_from_fork) |
ebd57499 | 843 | call schedule_tail_wrapper |
39e8701f | 844 | |
616d2483 BG |
845 | testl %ebx, %ebx |
846 | jnz 1f /* kernel threads are uncommon */ | |
847 | ||
848 | 2: | |
39e8701f | 849 | /* When we fork, we trace the syscall return in the child, too. */ |
ebd57499 | 850 | movl %esp, %eax |
39e8701f | 851 | call syscall_return_slowpath |
4983e5d7 | 852 | jmp .Lsyscall_32_done |
39e8701f | 853 | |
616d2483 BG |
854 | /* kernel thread */ |
855 | 1: movl %edi, %eax | |
34fdce69 | 856 | CALL_NOSPEC ebx |
39e8701f | 857 | /* |
616d2483 BG |
858 | * A kernel thread is allowed to return here after successfully |
859 | * calling do_execve(). Exit to userspace to complete the execve() | |
860 | * syscall. | |
39e8701f | 861 | */ |
616d2483 BG |
862 | movl $0, PT_EAX(%esp) |
863 | jmp 2b | |
5e63306f | 864 | SYM_CODE_END(ret_from_fork) |
8c0fa8a0 | 865 | .popsection |
6783eaa2 | 866 | |
1da177e4 LT |
867 | /* |
868 | * Return to user mode is not as complex as all this looks, | |
869 | * but we want the default path for a system call return to | |
870 | * go as quickly as possible which is why some of this is | |
871 | * less clear than it otherwise should be. | |
872 | */ | |
873 | ||
874 | # userspace resumption stub bypassing syscall exit tracing | |
cc66936e | 875 | SYM_CODE_START_LOCAL(ret_from_exception) |
139ec7c4 | 876 | preempt_stop(CLBR_ANY) |
1da177e4 | 877 | ret_from_intr: |
29a2e283 | 878 | #ifdef CONFIG_VM86 |
a49976d1 IM |
879 | movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS |
880 | movb PT_CS(%esp), %al | |
881 | andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax | |
29a2e283 DA |
882 | #else |
883 | /* | |
6783eaa2 | 884 | * We can be coming here from a child spawned by kernel_thread(). |
29a2e283 | 885 | */ |
a49976d1 IM |
886 | movl PT_CS(%esp), %eax |
887 | andl $SEGMENT_RPL_MASK, %eax | |
29a2e283 | 888 | #endif |
a49976d1 | 889 | cmpl $USER_RPL, %eax |
5e1246ff | 890 | jb restore_all_kernel # not returning to v8086 or userspace |
f95d47ca | 891 | |
5d73fc70 | 892 | DISABLE_INTERRUPTS(CLBR_ANY) |
e32e58a9 | 893 | TRACE_IRQS_OFF |
5d73fc70 AL |
894 | movl %esp, %eax |
895 | call prepare_exit_to_usermode | |
4983e5d7 | 896 | jmp restore_all_switch_stack |
cc66936e | 897 | SYM_CODE_END(ret_from_exception) |
1da177e4 | 898 | |
b4edca15 | 899 | SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE) |
f2b37575 AL |
900 | /* |
901 | * All code from here through __end_SYSENTER_singlestep_region is subject | |
902 | * to being single-stepped if a user program sets TF and executes SYSENTER. | |
903 | * There is absolutely nothing that we can do to prevent this from happening | |
904 | * (thanks Intel!). To keep our handling of this situation as simple as | |
905 | * possible, we handle TF just like AC and NT, except that our #DB handler | |
906 | * will ignore all of the single-step traps generated in this range. | |
907 | */ | |
908 | ||
28c11b0f | 909 | #ifdef CONFIG_XEN_PV |
f2b37575 AL |
910 | /* |
911 | * Xen doesn't set %esp to be precisely what the normal SYSENTER | |
912 | * entry point expects, so fix it up before using the normal path. | |
913 | */ | |
78762b0e | 914 | SYM_CODE_START(xen_sysenter_target) |
f2b37575 | 915 | addl $5*4, %esp /* remove xen-provided frame */ |
1b00255f | 916 | jmp .Lsysenter_past_esp |
78762b0e | 917 | SYM_CODE_END(xen_sysenter_target) |
f2b37575 AL |
918 | #endif |
919 | ||
fda57b22 AL |
920 | /* |
921 | * 32-bit SYSENTER entry. | |
922 | * | |
923 | * 32-bit system calls through the vDSO's __kernel_vsyscall enter here | |
924 | * if X86_FEATURE_SEP is available. This is the preferred system call | |
925 | * entry on 32-bit systems. | |
926 | * | |
927 | * The SYSENTER instruction, in principle, should *only* occur in the | |
928 | * vDSO. In practice, a small number of Android devices were shipped | |
929 | * with a copy of Bionic that inlined a SYSENTER instruction. This | |
930 | * never happened in any of Google's Bionic versions -- it only happened | |
931 | * in a narrow range of Intel-provided versions. | |
932 | * | |
933 | * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs. | |
934 | * IF and VM in RFLAGS are cleared (IOW: interrupts are off). | |
935 | * SYSENTER does not save anything on the stack, | |
936 | * and does not save old EIP (!!!), ESP, or EFLAGS. | |
937 | * | |
938 | * To avoid losing track of EFLAGS.VM (and thus potentially corrupting | |
939 | * user and/or vm86 state), we explicitly disable the SYSENTER | |
940 | * instruction in vm86 mode by reprogramming the MSRs. | |
941 | * | |
942 | * Arguments: | |
943 | * eax system call number | |
944 | * ebx arg1 | |
945 | * ecx arg2 | |
946 | * edx arg3 | |
947 | * esi arg4 | |
948 | * edi arg5 | |
949 | * ebp user stack | |
950 | * 0(%ebp) arg6 | |
951 | */ | |
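Since programs are expected to reach this entry point only through the vDSO, the usual way for a 32-bit process to find __kernel_vsyscall is the AT_SYSINFO auxiliary-vector entry; a tiny user-space check of that (assumes an i386 build and glibc's getauxval):

```c
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	/* Non-zero only on 32-bit x86, where the kernel advertises the
	 * vDSO's __kernel_vsyscall entry point via the aux vector. */
	unsigned long vsyscall = getauxval(AT_SYSINFO);

	printf("__kernel_vsyscall = %#lx\n", vsyscall);
	return vsyscall ? 0 : 1;
}
```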
6d685e53 | 952 | SYM_FUNC_START(entry_SYSENTER_32) |
e464fb9f JR |
953 | /* |
954 | * On entry-stack with all userspace-regs live - save and | |
955 | * restore eflags and %eax to use it as scratch-reg for the cr3 | |
956 | * switch. | |
957 | */ | |
958 | pushfl | |
959 | pushl %eax | |
97193702 | 960 | BUG_IF_WRONG_CR3 no_user_check=1 |
e464fb9f JR |
961 | SWITCH_TO_KERNEL_CR3 scratch_reg=%eax |
962 | popl %eax | |
963 | popfl | |
964 | ||
965 | /* Stack empty again, switch to task stack */ | |
ae2e565b | 966 | movl TSS_entry2task_stack(%esp), %esp |
e464fb9f | 967 | |
1b00255f | 968 | .Lsysenter_past_esp: |
5f310f73 | 969 | pushl $__USER_DS /* pt_regs->ss */ |
30bfa7b3 | 970 | pushl %ebp /* pt_regs->sp (stashed in bp) */ |
5f310f73 AL |
971 | pushfl /* pt_regs->flags (except IF = 0) */ |
972 | orl $X86_EFLAGS_IF, (%esp) /* Fix IF */ | |
973 | pushl $__USER_CS /* pt_regs->cs */ | |
974 | pushl $0 /* pt_regs->ip = 0 (placeholder) */ | |
975 | pushl %eax /* pt_regs->orig_ax */ | |
45d7b255 | 976 | SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest, stack already switched */ |
5f310f73 | 977 | |
67f590e8 | 978 | /* |
f2b37575 AL |
979 | * SYSENTER doesn't filter flags, so we need to clear NT, AC |
980 | * and TF ourselves. To save a few cycles, we can check whether | |
67f590e8 AL |
981 | * either was set instead of doing an unconditional popfl. |
982 | * This needs to happen before enabling interrupts so that | |
983 | * we don't get preempted with NT set. | |
984 | * | |
f2b37575 AL |
985 | * If TF is set, we will single-step all the way to here -- do_debug |
986 | * will ignore all the traps. (Yes, this is slow, but so is | |
987 | * single-stepping in general. This allows us to avoid having | |
988 | * a more complicated code to handle the case where a user program | |
989 | * forces us to single-step through the SYSENTER entry code.) | |
990 | * | |
67f590e8 AL |
991 | * NB.: .Lsysenter_fix_flags is a label with the code under it moved |
992 | * out-of-line as an optimization: NT is unlikely to be set in the | |
993 | * majority of the cases and instead of polluting the I$ unnecessarily, | |
994 | * we're keeping that code behind a branch which will predict as | |
995 | * not-taken and therefore its instructions won't be fetched. | |
996 | */ | |
f2b37575 | 997 | testl $X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp) |
67f590e8 AL |
998 | jnz .Lsysenter_fix_flags |
999 | .Lsysenter_flags_fixed: | |
1000 | ||
5f310f73 AL |
1001 | movl %esp, %eax |
1002 | call do_fast_syscall_32 | |
91e2eea9 BO |
1003 | /* XEN PV guests always use IRET path */ |
1004 | ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \ | |
1005 | "jmp .Lsyscall_32_done", X86_FEATURE_XENPV | |
5f310f73 | 1006 | |
afaef01c AP |
1007 | STACKLEAK_ERASE |
1008 | ||
4983e5d7 | 1009 | /* Opportunistic SYSEXIT */ |
e5862d05 JR |
1010 | |
1011 | /* | |
1012 | * Setup entry stack - we keep the pointer in %eax and do the | |
1013 | * switch after almost all user-state is restored. | |
1014 | */ | |
1015 | ||
1016 | /* Load entry stack pointer and allocate frame for eflags/eax */ | |
1017 | movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %eax | |
1018 | subl $(2*4), %eax | |
1019 | ||
1020 | /* Copy eflags and eax to entry stack */ | |
1021 | movl PT_EFLAGS(%esp), %edi | |
1022 | movl PT_EAX(%esp), %esi | |
1023 | movl %edi, (%eax) | |
1024 | movl %esi, 4(%eax) | |
1025 | ||
1026 | /* Restore user registers and segments */ | |
5f310f73 AL |
1027 | movl PT_EIP(%esp), %edx /* pt_regs->ip */ |
1028 | movl PT_OLDESP(%esp), %ecx /* pt_regs->sp */ | |
3bd29515 AL |
1029 | 1: mov PT_FS(%esp), %fs |
1030 | PTGS_TO_GS | |
e5862d05 | 1031 | |
5f310f73 AL |
1032 | popl %ebx /* pt_regs->bx */ |
1033 | addl $2*4, %esp /* skip pt_regs->cx and pt_regs->dx */ | |
1034 | popl %esi /* pt_regs->si */ | |
1035 | popl %edi /* pt_regs->di */ | |
1036 | popl %ebp /* pt_regs->bp */ | |
e5862d05 JR |
1037 | |
1038 | /* Switch to entry stack */ | |
1039 | movl %eax, %esp | |
5f310f73 | 1040 | |
e464fb9f JR |
1041 | /* Now ready to switch the cr3 */ |
1042 | SWITCH_TO_USER_CR3 scratch_reg=%eax | |
1043 | ||
c2c9b52f AL |
1044 | /* |
1045 | * Restore all flags except IF. (We restore IF separately because | |
1046 | * STI gives a one-instruction window in which we won't be interrupted, | |
1047 | * whereas POPF does not.) | |
1048 | */ | |
236f0cd2 | 1049 | btrl $X86_EFLAGS_IF_BIT, (%esp) |
97193702 | 1050 | BUG_IF_WRONG_CR3 no_user_check=1 |
c2c9b52f | 1051 | popfl |
e5862d05 | 1052 | popl %eax |
c2c9b52f | 1053 | |
5f310f73 AL |
1054 | /* |
1055 | * Return back to the vDSO, which will pop ecx and edx. | |
1056 | * Don't bother with DS and ES (they already contain __USER_DS). | |
1057 | */ | |
88c15ec9 BO |
1058 | sti |
1059 | sysexit | |
af0575bb | 1060 | |
a49976d1 IM |
1061 | .pushsection .fixup, "ax" |
1062 | 2: movl $0, PT_FS(%esp) | |
1063 | jmp 1b | |
f95d47ca | 1064 | .popsection |
a49976d1 | 1065 | _ASM_EXTABLE(1b, 2b) |
ccbeed3a | 1066 | PTGS_TO_GS_EX |
67f590e8 AL |
1067 | |
1068 | .Lsysenter_fix_flags: | |
1069 | pushl $X86_EFLAGS_FIXED | |
1070 | popfl | |
1071 | jmp .Lsysenter_flags_fixed | |
b4edca15 | 1072 | SYM_ENTRY(__end_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE) |
6d685e53 | 1073 | SYM_FUNC_END(entry_SYSENTER_32) |
1da177e4 | 1074 | |
fda57b22 AL |
1075 | /* |
1076 | * 32-bit legacy system call entry. | |
1077 | * | |
1078 | * 32-bit x86 Linux system calls traditionally used the INT $0x80 | |
1079 | * instruction. INT $0x80 lands here. | |
1080 | * | |
1081 | * This entry point can be used by any 32-bit program to perform system calls. |
1082 | * Instances of INT $0x80 can be found inline in various programs and | |
1083 | * libraries. It is also used by the vDSO's __kernel_vsyscall | |
1084 | * fallback for hardware that doesn't support a faster entry method. | |
1085 | * Restarted 32-bit system calls also fall back to INT $0x80 | |
1086 | * regardless of what instruction was originally used to do the system | |
1087 | * call. (64-bit programs can use INT $0x80 as well, but they can | |
1088 | * only run on 64-bit kernels and therefore land in | |
1089 | * entry_INT80_compat.) | |
1090 | * | |
1091 | * This is considered a slow path. It is not used by most libc | |
1092 | * implementations on modern hardware except during process startup. | |
1093 | * | |
1094 | * Arguments: | |
1095 | * eax system call number | |
1096 | * ebx arg1 | |
1097 | * ecx arg2 | |
1098 | * edx arg3 | |
1099 | * esi arg4 | |
1100 | * edi arg5 | |
1101 | * ebp arg6 | |
1102 | */ | |
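For completeness, the register convention above can be exercised directly from user space; a minimal illustration making a write(2) call through the legacy gate (build with `gcc -m32`; 4 is __NR_write on i386):

```c
#include <string.h>

/* eax = syscall number, ebx/ecx/edx = arg1..arg3; result comes back in eax. */
static long int80_write(int fd, const char *buf, unsigned long len)
{
	long ret;

	asm volatile("int $0x80"
		     : "=a" (ret)
		     : "a" (4L), "b" ((long)fd), "c" (buf), "d" (len)
		     : "memory");
	return ret;
}

int main(void)
{
	const char msg[] = "hello via int $0x80\n";

	return int80_write(1, msg, strlen(msg)) == (long)strlen(msg) ? 0 : 1;
}
```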
6d685e53 | 1103 | SYM_FUNC_START(entry_INT80_32) |
e59d1b0a | 1104 | ASM_CLAC |
150ac78d | 1105 | pushl %eax /* pt_regs->orig_ax */ |
45d7b255 JR |
1106 | |
1107 | SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1 /* save rest */ | |
150ac78d | 1108 | |
150ac78d | 1109 | movl %esp, %eax |
a798f091 | 1110 | call do_int80_syscall_32 |
5f310f73 | 1111 | .Lsyscall_32_done: |
afaef01c AP |
1112 | STACKLEAK_ERASE |
1113 | ||
4983e5d7 | 1114 | restore_all_switch_stack: |
e5862d05 | 1115 | SWITCH_TO_ENTRY_STACK |
46eabca2 | 1116 | CHECK_AND_APPLY_ESPFIX |
74a4882d | 1117 | |
e464fb9f JR |
1118 | /* Switch back to user CR3 */ |
1119 | SWITCH_TO_USER_CR3 scratch_reg=%eax | |
1120 | ||
97193702 JR |
1121 | BUG_IF_WRONG_CR3 |
1122 | ||
e464fb9f JR |
1123 | /* Restore user state */ |
1124 | RESTORE_REGS pop=4 # skip orig_eax/error_code | |
1b00255f | 1125 | .Lirq_return: |
10bcc80e MD |
1126 | /* |
1127 | * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization | |
1128 | * when returning from IPI handler and when returning from | |
1129 | * scheduler to user-space. | |
1130 | */ | |
3701d863 | 1131 | INTERRUPT_RETURN |
1b00255f | 1132 | |
0d2eb73b | 1133 | restore_all_kernel: |
48593975 | 1134 | #ifdef CONFIG_PREEMPTION |
5e1246ff PZ |
1135 | DISABLE_INTERRUPTS(CLBR_ANY) |
1136 | cmpl $0, PER_CPU_VAR(__preempt_count) | |
1137 | jnz .Lno_preempt | |
1138 | testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ? | |
1139 | jz .Lno_preempt | |
1140 | call preempt_schedule_irq | |
1141 | .Lno_preempt: | |
1142 | #endif | |
0d2eb73b | 1143 | TRACE_IRQS_IRET |
b92a165d | 1144 | PARANOID_EXIT_TO_KERNEL_MODE |
97193702 | 1145 | BUG_IF_WRONG_CR3 |
0d2eb73b JR |
1146 | RESTORE_REGS 4 |
1147 | jmp .Lirq_return | |
1148 | ||
a49976d1 | 1149 | .section .fixup, "ax" |
cc66936e | 1150 | SYM_CODE_START(iret_exc) |
a49976d1 IM |
1151 | pushl $0 # no error code |
1152 | pushl $do_iret_error | |
97193702 JR |
1153 | |
1154 | #ifdef CONFIG_DEBUG_ENTRY | |
1155 | /* | |
1156 | * The stack-frame here is the one that iret faulted on, so it's a |
1157 | * return-to-user frame. We are on kernel-cr3 because we come here from | |
1158 | * the fixup code. This confuses the CR3 checker, so switch to user-cr3 | |
1159 | * as the checker expects it. | |
1160 | */ | |
1161 | pushl %eax | |
1162 | SWITCH_TO_USER_CR3 scratch_reg=%eax | |
1163 | popl %eax | |
1164 | #endif | |
1165 | ||
7252c4c3 | 1166 | jmp common_exception |
cc66936e | 1167 | SYM_CODE_END(iret_exc) |
1da177e4 | 1168 | .previous |
1b00255f | 1169 | _ASM_EXTABLE(.Lirq_return, iret_exc) |
6d685e53 | 1170 | SYM_FUNC_END(entry_INT80_32) |
1da177e4 | 1171 | |
f0d96110 | 1172 | .macro FIXUP_ESPFIX_STACK |
dc4c2a0a AH |
1173 | /* |
1174 | * Switch back from the ESPFIX stack to the normal zero-based stack |
1175 | * | |
1176 | * We can't call C functions using the ESPFIX stack. This code reads | |
1177 | * the high word of the segment base from the GDT and switches to the |
1178 | * normal stack and adjusts ESP with the matching offset. | |
4a13b0e3 AL |
1179 | * |
1180 | * We might be on user CR3 here, so percpu data is not mapped and we can't | |
1181 | * access the GDT through the percpu segment. Instead, use SGDT to find | |
1182 | * the cpu_entry_area alias of the GDT. | |
dc4c2a0a | 1183 | */ |
34273f41 | 1184 | #ifdef CONFIG_X86_ESPFIX32 |
dc4c2a0a | 1185 | /* fixup the stack */ |
4a13b0e3 AL |
1186 | pushl %ecx |
1187 | subl $2*4, %esp | |
1188 | sgdt (%esp) | |
1189 | movl 2(%esp), %ecx /* GDT address */ | |
1190 | /* | |
1191 | * Careful: ECX is a linear pointer, so we need to force base | |
1192 | * zero. %cs is the only known-linear segment we have right now. | |
1193 | */ | |
1194 | mov %cs:GDT_ESPFIX_OFFSET + 4(%ecx), %al /* bits 16..23 */ | |
1195 | mov %cs:GDT_ESPFIX_OFFSET + 7(%ecx), %ah /* bits 24..31 */ | |
9b47feb7 | 1196 | shl $16, %eax |
4a13b0e3 AL |
1197 | addl $2*4, %esp |
1198 | popl %ecx | |
a49976d1 IM |
1199 | addl %esp, %eax /* the adjusted stack pointer */ |
1200 | pushl $__KERNEL_DS | |
1201 | pushl %eax | |
1202 | lss (%esp), %esp /* switch to the normal stack segment */ | |
34273f41 | 1203 | #endif |
f0d96110 | 1204 | .endm |
a1a338e5 | 1205 | |
f0d96110 | 1206 | .macro UNWIND_ESPFIX_STACK |
a1a338e5 | 1207 | /* It's safe to clobber %eax, all other regs need to be preserved */ |
34273f41 | 1208 | #ifdef CONFIG_X86_ESPFIX32 |
a49976d1 | 1209 | movl %ss, %eax |
f0d96110 | 1210 | /* see if on espfix stack */ |
a49976d1 | 1211 | cmpw $__ESPFIX_SS, %ax |
a1a338e5 | 1212 | jne .Lno_fixup_\@ |
f0d96110 TH |
1213 | /* switch to normal stack */ |
1214 | FIXUP_ESPFIX_STACK | |
a1a338e5 | 1215 | .Lno_fixup_\@: |
34273f41 | 1216 | #endif |
f0d96110 | 1217 | .endm |
1da177e4 LT |
1218 | |
1219 | /* | |
3304c9c3 DV |
1220 | * Build the entry stubs with some assembler magic. |
1221 | * We pack 1 stub into every 8-byte block. | |
1da177e4 | 1222 | */ |
3304c9c3 | 1223 | .align 8 |
5e63306f | 1224 | SYM_CODE_START(irq_entries_start) |
3304c9c3 DV |
1225 | vector=FIRST_EXTERNAL_VECTOR |
1226 | .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR) | |
a49976d1 | 1227 | pushl $(~vector+0x80) /* Note: always in signed byte range */ |
3304c9c3 DV |
1228 | vector=vector+1 |
1229 | jmp common_interrupt | |
3304c9c3 DV |
1230 | .align 8 |
1231 | .endr | |
5e63306f | 1232 | SYM_CODE_END(irq_entries_start) |
47a55cd7 | 1233 | |
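The `$(~vector + 0x80)` form is a size trick: the value always fits a signed byte, so the assembler emits a 2-byte `pushl $imm8` and every stub fits its 8-byte slot; the `addl $-0x80` in the common handlers below turns it back into ~vector, which the C code recovers as ~regs->orig_ax. A quick self-check of that arithmetic:

```c
#include <assert.h>

int main(void)
{
	for (int vector = 0x20; vector <= 0xff; vector++) {
		int pushed = ~vector + 0x80;		/* == 0x7f - vector */

		/* always a signed-byte immediate -> 2-byte pushl */
		assert(pushed >= -128 && pushed <= 127);

		int orig_ax = pushed - 0x80;		/* addl $-0x80, (%esp) */
		assert(orig_ax >= -256 && orig_ax <= -1);
		assert(~orig_ax == vector);		/* ~regs->orig_ax */
	}
	return 0;
}
```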
f8a8fe61 TG |
1234 | #ifdef CONFIG_X86_LOCAL_APIC |
1235 | .align 8 | |
5e63306f | 1236 | SYM_CODE_START(spurious_entries_start) |
f8a8fe61 TG |
1237 | vector=FIRST_SYSTEM_VECTOR |
1238 | .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR) | |
1239 | pushl $(~vector+0x80) /* Note: always in signed byte range */ | |
1240 | vector=vector+1 | |
1241 | jmp common_spurious | |
1242 | .align 8 | |
1243 | .endr | |
5e63306f | 1244 | SYM_CODE_END(spurious_entries_start) |
f8a8fe61 | 1245 | |
cc66936e | 1246 | SYM_CODE_START_LOCAL(common_spurious) |
f8a8fe61 TG |
1247 | ASM_CLAC |
1248 | addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */ | |
1249 | SAVE_ALL switch_stacks=1 | |
1250 | ENCODE_FRAME_POINTER | |
1251 | TRACE_IRQS_OFF | |
1252 | movl %esp, %eax | |
1253 | call smp_spurious_interrupt | |
1254 | jmp ret_from_intr | |
cc66936e | 1255 | SYM_CODE_END(common_spurious) |
f8a8fe61 TG |
1256 | #endif |
1257 | ||
55f327fa IM |
1258 | /* |
1259 | * the CPU automatically disables interrupts when executing an IRQ vector, | |
1260 | * so IRQ-flags tracing has to follow that: | |
1261 | */ | |
b7c6244f | 1262 | .p2align CONFIG_X86_L1_CACHE_SHIFT |
cc66936e | 1263 | SYM_CODE_START_LOCAL(common_interrupt) |
e59d1b0a | 1264 | ASM_CLAC |
a49976d1 | 1265 | addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */ |
45d7b255 JR |
1266 | |
1267 | SAVE_ALL switch_stacks=1 | |
946c1911 | 1268 | ENCODE_FRAME_POINTER |
55f327fa | 1269 | TRACE_IRQS_OFF |
a49976d1 IM |
1270 | movl %esp, %eax |
1271 | call do_IRQ | |
1272 | jmp ret_from_intr | |
cc66936e | 1273 | SYM_CODE_END(common_interrupt) |
1da177e4 | 1274 | |
45d7b255 | 1275 | #define BUILD_INTERRUPT3(name, nr, fn) \ |
6d685e53 | 1276 | SYM_FUNC_START(name) \ |
45d7b255 JR |
1277 | ASM_CLAC; \ |
1278 | pushl $~(nr); \ | |
1279 | SAVE_ALL switch_stacks=1; \ | |
1280 | ENCODE_FRAME_POINTER; \ | |
1281 | TRACE_IRQS_OFF \ | |
1282 | movl %esp, %eax; \ | |
1283 | call fn; \ | |
1284 | jmp ret_from_intr; \ | |
6d685e53 | 1285 | SYM_FUNC_END(name) |
1da177e4 | 1286 | |
a49976d1 IM |
1287 | #define BUILD_INTERRUPT(name, nr) \ |
1288 | BUILD_INTERRUPT3(name, nr, smp_##name); \ | |
02cf94c3 | 1289 | |
1da177e4 | 1290 | /* The include is where all of the SMP etc. interrupts come from */ |
1164dd00 | 1291 | #include <asm/entry_arch.h> |
1da177e4 | 1292 | |
5e63306f | 1293 | SYM_CODE_START(simd_coprocessor_error) |
e59d1b0a | 1294 | ASM_CLAC |
a49976d1 | 1295 | pushl $0 |
40d2e763 BG |
1296 | #ifdef CONFIG_X86_INVD_BUG |
1297 | /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */ | |
be4c11af | 1298 | ALTERNATIVE "pushl $exc_general_protection", \ |
a49976d1 | 1299 | "pushl $do_simd_coprocessor_error", \ |
8e65f6e0 | 1300 | X86_FEATURE_XMM |
40d2e763 | 1301 | #else |
a49976d1 | 1302 | pushl $do_simd_coprocessor_error |
40d2e763 | 1303 | #endif |
7252c4c3 | 1304 | jmp common_exception |
5e63306f | 1305 | SYM_CODE_END(simd_coprocessor_error) |
1da177e4 | 1306 | |
d3561b7f | 1307 | #ifdef CONFIG_PARAVIRT |
5e63306f | 1308 | SYM_CODE_START(native_iret) |
3701d863 | 1309 | iret |
6837a54d | 1310 | _ASM_EXTABLE(native_iret, iret_exc) |
5e63306f | 1311 | SYM_CODE_END(native_iret) |
d3561b7f RR |
1312 | #endif |
1313 | ||
1da177e4 | 1314 | #ifdef CONFIG_X86_MCE |
5e63306f | 1315 | SYM_CODE_START(machine_check) |
e59d1b0a | 1316 | ASM_CLAC |
a49976d1 | 1317 | pushl $0 |
840371be | 1318 | pushl $do_mce |
7252c4c3 | 1319 | jmp common_exception |
5e63306f | 1320 | SYM_CODE_END(machine_check) |
1da177e4 LT |
1321 | #endif |
1322 | ||
28c11b0f | 1323 | #ifdef CONFIG_XEN_PV |
6d685e53 | 1324 | SYM_FUNC_START(xen_hypervisor_callback) |
a49976d1 IM |
1325 | /* |
1326 | * Check to see if we got the event in the critical | |
1327 | * region in xen_iret_direct, after we've reenabled | |
1328 | * events and checked for pending events. This simulates | |
1329 | * iret instruction's behaviour where it delivers a | |
1330 | * pending interrupt when enabling interrupts: | |
1331 | */ | |
29b810f5 | 1332 | cmpl $xen_iret_start_crit, (%esp) |
a49976d1 | 1333 | jb 1f |
29b810f5 | 1334 | cmpl $xen_iret_end_crit, (%esp) |
a49976d1 | 1335 | jae 1f |
29b810f5 JB |
1336 | call xen_iret_crit_fixup |
1337 | 1: | |
1338 | pushl $-1 /* orig_ax = -1 => not a system call */ | |
1339 | SAVE_ALL | |
1340 | ENCODE_FRAME_POINTER | |
1341 | TRACE_IRQS_OFF | |
1342 | mov %esp, %eax | |
a49976d1 | 1343 | call xen_evtchn_do_upcall |
48593975 | 1344 | #ifndef CONFIG_PREEMPTION |
a49976d1 | 1345 | call xen_maybe_preempt_hcall |
fdfd811d | 1346 | #endif |
a49976d1 | 1347 | jmp ret_from_intr |
6d685e53 | 1348 | SYM_FUNC_END(xen_hypervisor_callback) |
5ead97c8 | 1349 | |
a49976d1 IM |
1350 | /* |
1351 | * Hypervisor uses this for application faults while it executes. | |
1352 | * We get here for two reasons: | |
1353 | * 1. Fault while reloading DS, ES, FS or GS | |
1354 | * 2. Fault while executing IRET | |
1355 | * Category 1 we fix up by reattempting the load, and zeroing the segment | |
1356 | * register if the load fails. | |
1357 | * Category 2 we fix up by jumping to do_iret_error. We cannot use the | |
1358 | * normal Linux return path in this case because if we use the IRET hypercall | |
1359 | * to pop the stack frame we end up in an infinite loop of failsafe callbacks. | |
1360 | * We distinguish between categories by maintaining a status value in EAX. | |
1361 | */ | |
6d685e53 | 1362 | SYM_FUNC_START(xen_failsafe_callback) |
a49976d1 IM |
1363 | pushl %eax |
1364 | movl $1, %eax | |
1365 | 1: mov 4(%esp), %ds | |
1366 | 2: mov 8(%esp), %es | |
1367 | 3: mov 12(%esp), %fs | |
1368 | 4: mov 16(%esp), %gs | |
a349e23d DV |
1369 | /* EAX == 0 => Category 1 (Bad segment) |
1370 | EAX != 0 => Category 2 (Bad IRET) */ | |
a49976d1 IM |
1371 | testl %eax, %eax |
1372 | popl %eax | |
1373 | lea 16(%esp), %esp | |
1374 | jz 5f | |
1375 | jmp iret_exc | |
1376 | 5: pushl $-1 /* orig_ax = -1 => not a system call */ | |
5ead97c8 | 1377 | SAVE_ALL |
946c1911 | 1378 | ENCODE_FRAME_POINTER |
a49976d1 IM |
1379 | jmp ret_from_exception |
1380 | ||
1381 | .section .fixup, "ax" | |
1382 | 6: xorl %eax, %eax | |
1383 | movl %eax, 4(%esp) | |
1384 | jmp 1b | |
1385 | 7: xorl %eax, %eax | |
1386 | movl %eax, 8(%esp) | |
1387 | jmp 2b | |
1388 | 8: xorl %eax, %eax | |
1389 | movl %eax, 12(%esp) | |
1390 | jmp 3b | |
1391 | 9: xorl %eax, %eax | |
1392 | movl %eax, 16(%esp) | |
1393 | jmp 4b | |
5ead97c8 | 1394 | .previous |
a49976d1 IM |
1395 | _ASM_EXTABLE(1b, 6b) |
1396 | _ASM_EXTABLE(2b, 7b) | |
1397 | _ASM_EXTABLE(3b, 8b) | |
1398 | _ASM_EXTABLE(4b, 9b) | |
6d685e53 | 1399 | SYM_FUNC_END(xen_failsafe_callback) |
28c11b0f | 1400 | #endif /* CONFIG_XEN_PV */ |
5ead97c8 | 1401 | |
28c11b0f | 1402 | #ifdef CONFIG_XEN_PVHVM |
bc2b0331 | 1403 | BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR, |
4b9a8dca | 1404 | xen_evtchn_do_upcall) |
28c11b0f | 1405 | #endif |
38e20b07 | 1406 | |
bc2b0331 S |
1407 | |
1408 | #if IS_ENABLED(CONFIG_HYPERV) | |
1409 | ||
1410 | BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR, | |
4b9a8dca | 1411 | hyperv_vector_handler) |
bc2b0331 | 1412 | |
93286261 VK |
1413 | BUILD_INTERRUPT3(hyperv_reenlightenment_vector, HYPERV_REENLIGHTENMENT_VECTOR, |
1414 | hyperv_reenlightenment_intr) | |
1415 | ||
248e742a MK |
1416 | BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR, |
1417 | hv_stimer0_vector_handler) | |
1418 | ||
bc2b0331 | 1419 | #endif /* CONFIG_HYPERV */ |
5ead97c8 | 1420 | |
5e63306f | 1421 | SYM_CODE_START(page_fault) |
e59d1b0a | 1422 | ASM_CLAC |
b8f70953 MM |
1423 | pushl $do_page_fault |
1424 | jmp common_exception_read_cr2 | |
5e63306f | 1425 | SYM_CODE_END(page_fault) |
a0d14b89 | 1426 | |
cc66936e | 1427 | SYM_CODE_START_LOCAL_NOALIGN(common_exception_read_cr2) |
b8f70953 | 1428 | /* the function address is in %gs's slot on the stack */ |
a1a338e5 | 1429 | SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1 |
a0d14b89 PZ |
1430 | |
1431 | ENCODE_FRAME_POINTER | |
a0d14b89 PZ |
1432 | |
1433 | /* fixup %gs */ | |
1434 | GS_TO_REG %ecx | |
b8f70953 | 1435 | movl PT_GS(%esp), %edi |
a0d14b89 PZ |
1436 | REG_TO_PTGS %ecx |
1437 | SET_KERNEL_GS %ecx | |
1438 | ||
1439 | GET_CR2_INTO(%ecx) # might clobber %eax | |
1440 | ||
1441 | /* fixup orig %eax */ | |
1442 | movl PT_ORIG_EAX(%esp), %edx # get the error code | |
1443 | movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart | |
1444 | ||
1445 | TRACE_IRQS_OFF | |
1446 | movl %esp, %eax # pt_regs pointer | |
34fdce69 | 1447 | CALL_NOSPEC edi |
a0d14b89 | 1448 | jmp ret_from_exception |
cc66936e | 1449 | SYM_CODE_END(common_exception_read_cr2) |
7252c4c3 | 1450 | |
cc66936e | 1451 | SYM_CODE_START_LOCAL_NOALIGN(common_exception) |
ccbeed3a | 1452 | /* the function address is in %gs's slot on the stack */ |
a1a338e5 | 1453 | SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1 |
946c1911 | 1454 | ENCODE_FRAME_POINTER |
e67f1c11 PZ |
1455 | |
1456 | /* fixup %gs */ | |
ccbeed3a | 1457 | GS_TO_REG %ecx |
a49976d1 | 1458 | movl PT_GS(%esp), %edi # get the function address |
ccbeed3a TH |
1459 | REG_TO_PTGS %ecx |
1460 | SET_KERNEL_GS %ecx | |
e67f1c11 PZ |
1461 | |
1462 | /* fixup orig %eax */ | |
1463 | movl PT_ORIG_EAX(%esp), %edx # get the error code | |
1464 | movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart | |
1465 | ||
d211af05 | 1466 | TRACE_IRQS_OFF |
a49976d1 | 1467 | movl %esp, %eax # pt_regs pointer |
34fdce69 | 1468 | CALL_NOSPEC edi |
a49976d1 | 1469 | jmp ret_from_exception |
cc66936e | 1470 | SYM_CODE_END(common_exception) |
d211af05 | 1471 | |
60400677 TG |
1472 | SYM_CODE_START_LOCAL_NOALIGN(handle_exception) |
1473 | /* the function address is in %gs's slot on the stack */ | |
1474 | SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1 | |
1475 | ENCODE_FRAME_POINTER | |
1476 | ||
1477 | /* fixup %gs */ | |
1478 | GS_TO_REG %ecx | |
1479 | movl PT_GS(%esp), %edi # get the function address | |
1480 | REG_TO_PTGS %ecx | |
1481 | SET_KERNEL_GS %ecx | |
1482 | ||
1483 | /* fixup orig %eax */ | |
1484 | movl PT_ORIG_EAX(%esp), %edx # get the error code | |
1485 | movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart | |
1486 | ||
1487 | movl %esp, %eax # pt_regs pointer | |
1488 | CALL_NOSPEC edi | |
1489 | ||
1490 | #ifdef CONFIG_VM86 | |
1491 | movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS | |
1492 | movb PT_CS(%esp), %al | |
1493 | andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax | |
1494 | #else | |
1495 | /* | |
1496 | * We can be coming here from a child spawned by kernel_thread(). | |
1497 | */ | |
1498 | movl PT_CS(%esp), %eax | |
1499 | andl $SEGMENT_RPL_MASK, %eax | |
1500 | #endif | |
1501 | cmpl $USER_RPL, %eax # returning to v8086 or userspace ? | |
1502 | jnb ret_to_user | |
1503 | ||
1504 | PARANOID_EXIT_TO_KERNEL_MODE | |
1505 | BUG_IF_WRONG_CR3 | |
1506 | RESTORE_REGS 4 | |
1507 | jmp .Lirq_return | |
1508 | ||
1509 | ret_to_user: | |
1510 | movl %esp, %eax | |
1511 | jmp restore_all_switch_stack | |
1512 | SYM_CODE_END(handle_exception) | |
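handle_exception picks its exit path from the saved CS and, with CONFIG_VM86, from EFLAGS.VM folded into CS's low byte: a frame with RPL 3 or with the VM bit set returns through the user path, anything else takes the kernel path, and the single unsigned compare against USER_RPL covers both cases at once. A userspace model of that test; the EFLAGS and selector values passed in main() are only illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    #define X86_EFLAGS_VM           0x00020000UL    /* virtual-8086 mode */
    #define SEGMENT_RPL_MASK        0x3UL
    #define USER_RPL                0x3UL

    /* True when the saved frame should take the user-return path. */
    static bool returning_to_user(unsigned long eflags, unsigned long cs)
    {
            unsigned long mixed = (eflags & ~0xffUL) | (cs & 0xffUL);  /* movb PT_CS(%esp), %al */

            mixed &= X86_EFLAGS_VM | SEGMENT_RPL_MASK;
            return mixed >= USER_RPL;                                  /* cmpl $USER_RPL; jnb */
    }

    int main(void)
    {
            printf("kernel fault: %d\n", returning_to_user(0x00000046UL, 0x60UL));   /* 0 */
            printf("user fault:   %d\n", returning_to_user(0x00000246UL, 0x73UL));   /* 1 */
            printf("vm86 fault:   %d\n", returning_to_user(0x00020246UL, 0x0000UL)); /* 1 */
            return 0;
    }
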
1513 | ||
5e63306f | 1514 | SYM_CODE_START(debug) |
7536656f | 1515 | /* |
929b44eb | 1516 | * Entry from sysenter is now handled in common_exception |
7536656f | 1517 | */ |
e59d1b0a | 1518 | ASM_CLAC |
e441a2ae | 1519 | pushl $0 |
929b44eb JR |
1520 | pushl $do_debug |
1521 | jmp common_exception | |
5e63306f | 1522 | SYM_CODE_END(debug) |
d211af05 | 1523 | |
7d8d8cfd AL |
1524 | SYM_CODE_START(double_fault) |
1525 | 1: | |
1526 | /* | |
1527 | * This is a task gate handler, not an interrupt gate handler. | |
1528 | * The error code is on the stack, but the stack is otherwise | |
1529 | * empty. Interrupts are off. Our state is sane with the following | |
1530 | * exceptions: | |
1531 | * | |
1532 | * - CR0.TS is set. "TS" literally means "task switched". | |
1533 | * - EFLAGS.NT is set because we're a "nested task". | |
1534 | * - The doublefault TSS has back_link set and has been marked busy. | |
1535 | * - TR points to the doublefault TSS and the normal TSS is busy. | |
1536 | * - CR3 is the normal kernel PGD. This would be delightful, except | |
1537 | * that the CPU didn't bother to save the old CR3 anywhere. This | |
1538 | * would make it very awkward to return to the context we came | |
1539 | * from. | |
1540 | * | |
1541 | * The rest of EFLAGS is sanitized for us, so we don't need to | |
1542 | * worry about AC or DF. | |
1543 | * | |
1544 | * Don't even bother popping the error code. It's always zero, | |
1545 | * and ignoring it makes us a bit more robust against buggy | |
1546 | * hypervisor task gate implementations. | |
1547 | * | |
1548 | * We will manually undo the task switch instead of doing a | |
1549 | * task-switching IRET. | |
1550 | */ | |
1551 | ||
1552 | clts /* clear CR0.TS */ | |
1553 | pushl $X86_EFLAGS_FIXED | |
1554 | popfl /* clear EFLAGS.NT */ | |
1555 | ||
1556 | call doublefault_shim | |
1557 | ||
1558 | /* We don't support returning, so we have no IRET here. */ | |
1559 | 1: | |
1560 | hlt | |
1561 | jmp 1b | |
1562 | SYM_CODE_END(double_fault) | |
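The clts/popfl pair above undoes the two task-gate side effects that would get in the way of calling C code: CR0.TS and EFLAGS.NT. Loading EFLAGS with X86_EFLAGS_FIXED (only the always-set bit 1) clears NT along with every other clearable flag in one go. A one-assert model of that flags arithmetic:

    #include <assert.h>

    #define X86_EFLAGS_FIXED        0x00000002UL    /* bit 1 is architecturally always 1 */
    #define X86_EFLAGS_NT           0x00004000UL    /* nested task */

    int main(void)
    {
            /* State on entry through the task gate: NT is set. */
            unsigned long eflags = X86_EFLAGS_FIXED | X86_EFLAGS_NT;

            /* pushl $X86_EFLAGS_FIXED; popfl */
            eflags = X86_EFLAGS_FIXED;

            assert(!(eflags & X86_EFLAGS_NT));      /* safe to call into C now */
            return 0;
    }
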
7d8d8cfd | 1563 | |
d211af05 | 1564 | /* |
7536656f AL |
1565 | * NMI is doubly nasty. It can happen on the first instruction of |
1566 | * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning | |
1567 | * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32 | |
1568 | * switched stacks. We handle both conditions by simply checking whether we | |
1569 | * interrupted kernel code running on the SYSENTER stack. | |
d211af05 | 1570 | */ |
5e63306f | 1571 | SYM_CODE_START(nmi) |
e59d1b0a | 1572 | ASM_CLAC |
45d7b255 | 1573 | |
34273f41 | 1574 | #ifdef CONFIG_X86_ESPFIX32 |
89542907 PZ |
1575 | /* |
1576 | * ESPFIX_SS is only ever set on the return to user path | |
1577 | * after we've switched to the entry stack. | |
1578 | */ | |
a49976d1 IM |
1579 | pushl %eax |
1580 | movl %ss, %eax | |
1581 | cmpw $__ESPFIX_SS, %ax | |
1582 | popl %eax | |
1b00255f | 1583 | je .Lnmi_espfix_stack |
34273f41 | 1584 | #endif |
7536656f AL |
1585 | |
1586 | pushl %eax # pt_regs->orig_ax | |
b65bef40 | 1587 | SAVE_ALL_NMI cr3_reg=%edi |
946c1911 | 1588 | ENCODE_FRAME_POINTER |
a49976d1 IM |
1589 | xorl %edx, %edx # zero error code |
1590 | movl %esp, %eax # pt_regs pointer | |
7536656f AL |
1591 | |
1592 | /* Are we currently on the SYSENTER stack? */ | |
72f5e08d | 1593 | movl PER_CPU_VAR(cpu_entry_area), %ecx |
4fe2d8b1 DH |
1594 | addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx |
1595 | subl %eax, %ecx /* ecx = (end of entry_stack) - esp */ | |
1596 | cmpl $SIZEOF_entry_stack, %ecx | |
7536656f AL |
1597 | jb .Lnmi_from_sysenter_stack |
1598 | ||
1599 | /* Not on SYSENTER stack. */ | |
a49976d1 | 1600 | call do_nmi |
8e676ced | 1601 | jmp .Lnmi_return |
d211af05 | 1602 | |
7536656f AL |
1603 | .Lnmi_from_sysenter_stack: |
1604 | /* | |
1605 | * We're on the SYSENTER stack. Switch off. No one (not even debug) | |
1606 | * is using the thread stack right now, so it's safe for us to use it. | |
1607 | */ | |
946c1911 | 1608 | movl %esp, %ebx |
7536656f AL |
1609 | movl PER_CPU_VAR(cpu_current_top_of_stack), %esp |
1610 | call do_nmi | |
946c1911 | 1611 | movl %ebx, %esp |
8e676ced JR |
1612 | |
1613 | .Lnmi_return: | |
89542907 PZ |
1614 | #ifdef CONFIG_X86_ESPFIX32 |
1615 | testl $CS_FROM_ESPFIX, PT_CS(%esp) | |
1616 | jnz .Lnmi_from_espfix | |
1617 | #endif | |
1618 | ||
8e676ced | 1619 | CHECK_AND_APPLY_ESPFIX |
b65bef40 | 1620 | RESTORE_ALL_NMI cr3_reg=%edi pop=4 |
8e676ced | 1621 | jmp .Lirq_return |
d211af05 | 1622 | |
34273f41 | 1623 | #ifdef CONFIG_X86_ESPFIX32 |
1b00255f | 1624 | .Lnmi_espfix_stack: |
131484c8 | 1625 | /* |
89542907 | 1626 | * Create the (ss:esp) pointer that we will LSS back through later |
d211af05 | 1627 | */ |
a49976d1 IM |
1628 | pushl %ss |
1629 | pushl %esp | |
1630 | addl $4, (%esp) | |
89542907 PZ |
1631 | |
1632 | /* Copy the (short) IRET frame */ | |
1633 | pushl 4*4(%esp) # flags | |
1634 | pushl 4*4(%esp) # cs | |
1635 | pushl 4*4(%esp) # ip | |
1636 | ||
1637 | pushl %eax # orig_ax | |
1638 | ||
1639 | SAVE_ALL_NMI cr3_reg=%edi unwind_espfix=1 | |
946c1911 | 1640 | ENCODE_FRAME_POINTER |
89542907 PZ |
1641 | |
1642 | /* clear CS_FROM_KERNEL, set CS_FROM_ESPFIX */ | |
1643 | xorl $(CS_FROM_ESPFIX | CS_FROM_KERNEL), PT_CS(%esp) | |
1644 | ||
a49976d1 | 1645 | xorl %edx, %edx # zero error code |
89542907 PZ |
1646 | movl %esp, %eax # pt_regs pointer |
1647 | jmp .Lnmi_from_sysenter_stack | |
1648 | ||
1649 | .Lnmi_from_espfix: | |
b65bef40 | 1650 | RESTORE_ALL_NMI cr3_reg=%edi |
89542907 PZ |
1651 | /* |
1652 | * Because we cleared CS_FROM_KERNEL, IRET_FRAME 'forgot' to | |
1653 | * fix up the gap and long frame: | |
1654 | * | |
1655 | * 3 - original frame (exception) | |
1656 | * 2 - ESPFIX block (above) | |
1657 | * 6 - gap (FIXUP_FRAME) | |
1658 | * 5 - long frame (FIXUP_FRAME) | |
1659 | * 1 - orig_ax | |
1660 | */ | |
1661 | lss (1+5+6)*4(%esp), %esp # back to espfix stack | |
1b00255f | 1662 | jmp .Lirq_return |
34273f41 | 1663 | #endif |
5e63306f | 1664 | SYM_CODE_END(nmi) |
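The "are we on the SYSENTER stack?" test works by unsigned range arithmetic: take the end of the per-CPU entry stack, subtract the interrupted %esp, and if the difference is below SIZEOF_entry_stack then %esp pointed into the entry stack and do_nmi has to be run on the thread stack instead. The espfix exit's lss (1+5+6)*4(%esp) is the matching arithmetic on the way out, skipping orig_ax, the long frame and the FIXUP_FRAME gap (12 dwords, 48 bytes) to reach the ss:esp pair pushed at .Lnmi_espfix_stack. A userspace model of the range check with made-up base and size:

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirrors the asm's unsigned check: delta = (end of entry stack) - esp,
     * and a delta smaller than the stack size means "on the entry stack". */
    static bool on_entry_stack(unsigned long esp, unsigned long stack_base,
                               unsigned long stack_size)
    {
            unsigned long delta = (stack_base + stack_size) - esp;  /* subl %eax, %ecx */

            return delta < stack_size;                              /* cmpl $SIZEOF_entry_stack; jb */
    }

    int main(void)
    {
            unsigned long base = 0xff000000UL, size = 0x1000UL;     /* made-up entry stack */

            printf("inside:  %d\n", on_entry_stack(base + 0x800UL, base, size));  /* 1 */
            printf("outside: %d\n", on_entry_stack(base - 0x800UL, base, size));  /* 0 */
            return 0;
    }
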
d211af05 | 1665 | |
5e63306f | 1666 | SYM_CODE_START(int3) |
e59d1b0a | 1667 | ASM_CLAC |
e441a2ae | 1668 | pushl $0 |
ac3607f9 TG |
1669 | pushl $do_int3 |
1670 | jmp common_exception | |
5e63306f | 1671 | SYM_CODE_END(int3) |
d211af05 | 1672 | |
8c0fa8a0 | 1673 | .pushsection .text, "ax" |
5e63306f | 1674 | SYM_CODE_START(rewind_stack_do_exit) |
2deb4be2 AL |
1675 | /* Prevent any naive code from trying to unwind to our caller. */ |
1676 | xorl %ebp, %ebp | |
1677 | ||
1678 | movl PER_CPU_VAR(cpu_current_top_of_stack), %esi | |
1679 | leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp | |
1680 | ||
1681 | call do_exit | |
1682 | 1: jmp 1b | |
5e63306f | 1683 | SYM_CODE_END(rewind_stack_do_exit) |
8c0fa8a0 | 1684 | .popsection |
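rewind_stack_do_exit zeroes %ebp so unwinders stop at this frame, then rewinds %esp to just below the per-task top of stack, leaving room for the fixed top-of-stack padding plus one pt_regs before calling do_exit() on a known-good stack. A sketch of that leal arithmetic; the padding and pt_regs sizes here are rough 32-bit approximations, not the kernel's generated constants:

    #include <stdio.h>

    int main(void)
    {
            unsigned long top_of_stack = 0xc1802000UL;  /* cpu_current_top_of_stack (made up) */
            unsigned long padding      = 8UL;           /* TOP_OF_KERNEL_STACK_PADDING, approx. */
            unsigned long ptregs_size  = 17UL * 4UL;    /* sizeof(struct pt_regs) on 32-bit, approx. */

            /* leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp */
            unsigned long esp = top_of_stack - padding - ptregs_size;

            printf("rewound esp = %#lx\n", esp);
            return 0;
    }
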