1 /* SPDX-License-Identifier: GPL-2.0 */
3 * Copyright (C) 1991,1992 Linus Torvalds
5 * entry_32.S contains the system-call and low-level fault and trap handling routines.
7 * Stack layout while running C code:
8 * ptrace needs to have all registers on the stack.
9 * If the order here is changed, it needs to be
10 * updated in fork.c:copy_process(), signal.c:do_signal(),
11 * ptrace.c and ptrace.h
23 * 28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS
32 #include <linux/linkage.h>
33 #include <linux/err.h>
34 #include <asm/thread_info.h>
35 #include <asm/irqflags.h>
36 #include <asm/errno.h>
37 #include <asm/segment.h>
39 #include <asm/percpu.h>
40 #include <asm/processor-flags.h>
41 #include <asm/irq_vectors.h>
42 #include <asm/cpufeatures.h>
43 #include <asm/alternative-asm.h>
46 #include <asm/frame.h>
47 #include <asm/trapnr.h>
48 #include <asm/nospec-branch.h>
52 .section .entry.text, "ax"
55 * We use macros for low-level operations which need to be overridden
56 * for paravirtualization. The following will never clobber any registers:
57 * INTERRUPT_RETURN (aka. "iret")
58 * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
59 * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
61 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
62 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
63 * Allowing a register to be clobbered can shrink the paravirt replacement
64 * enough to patch inline, increasing performance.
67 #ifdef CONFIG_PREEMPTION
68 # define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
70 # define preempt_stop(clobbers)
73 .macro TRACE_IRQS_IRET
74 #ifdef CONFIG_TRACE_IRQFLAGS
75 testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off?
82 #define PTI_SWITCH_MASK (1 << PAGE_SHIFT)
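/*
 * With PTI the kernel and user page directories are an adjacent pair,
 * kernel copy first, so the user copy sits exactly one page above the
 * kernel one.  PTI_SWITCH_MASK is therefore just bit PAGE_SHIFT of
 * CR3 (bit 12 with 4k pages): setting it selects the user page
 * tables, clearing it selects the kernel ones.  Illustrative example:
 * a kernel CR3 of 0x01234000 becomes the user CR3 0x01235000.
 */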
85 * User gs save/restore
87 * %gs is used for userland TLS, and the kernel only uses it for the stack
88 * canary, which gcc requires to be at %gs:20. Read the comment
89 * at the top of stackprotector.h for more info.
91 * Local labels 98 and 99 are used.
93 #ifdef CONFIG_X86_32_LAZY_GS
95 /* unfortunately push/pop can't be no-op */
100 addl $(4 + \pop), %esp
105 /* all the rest are no-op */
112 .macro REG_TO_PTGS reg
114 .macro SET_KERNEL_GS reg
117 #else /* CONFIG_X86_32_LAZY_GS */
130 .pushsection .fixup, "ax"
134 _ASM_EXTABLE(98b, 99b)
138 98: mov PT_GS(%esp), %gs
141 .pushsection .fixup, "ax"
142 99: movl $0, PT_GS(%esp)
145 _ASM_EXTABLE(98b, 99b)
151 .macro REG_TO_PTGS reg
152 movl \reg, PT_GS(%esp)
154 .macro SET_KERNEL_GS reg
155 movl $(__KERNEL_STACK_CANARY), \reg
159 #endif /* CONFIG_X86_32_LAZY_GS */
161 /* Unconditionally switch to user cr3 */
162 .macro SWITCH_TO_USER_CR3 scratch_reg:req
163 ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
165 movl %cr3, \scratch_reg
166 orl $PTI_SWITCH_MASK, \scratch_reg
167 movl \scratch_reg, %cr3
171 .macro BUG_IF_WRONG_CR3 no_user_check=0
172 #ifdef CONFIG_DEBUG_ENTRY
173 ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
174 .if \no_user_check == 0
175 /* coming from usermode? */
176 testl $USER_SEGMENT_RPL_MASK, PT_CS(%esp)
181 testl $PTI_SWITCH_MASK, %eax
183 /* From userspace with kernel cr3 - BUG */
190 * Switch to kernel cr3 if not already loaded and return the current cr3 in \scratch_reg.
193 .macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
194 ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
195 movl %cr3, \scratch_reg
196 /* Test if we are already on kernel CR3 */
197 testl $PTI_SWITCH_MASK, \scratch_reg
199 andl $(~PTI_SWITCH_MASK), \scratch_reg
200 movl \scratch_reg, %cr3
201 /* Return original CR3 in \scratch_reg */
202 orl $PTI_SWITCH_MASK, \scratch_reg
206 #define CS_FROM_ENTRY_STACK (1 << 31)
207 #define CS_FROM_USER_CR3 (1 << 30)
208 #define CS_FROM_KERNEL (1 << 29)
209 #define CS_FROM_ESPFIX (1 << 28)
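/*
 * The CPU only defines the low 16 bits of the CS value it pushes on
 * exception entry; bits 16-31 of that 32-bit stack slot are unused by
 * hardware (and cleared below), so the entry code borrows them to
 * record how the kernel was entered.  The CS_FROM_* flags above are
 * consumed by the NMI, ESPFIX and paranoid-exit paths further down.
 */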
213 * The high bits of the CS dword (__csh) are used for CS_FROM_*.
214 * Clear them in case hardware didn't do this for us.
216 andl $0x0000ffff, 4*4(%esp)
219 testl $X86_EFLAGS_VM, 5*4(%esp)
220 jnz .Lfrom_usermode_no_fixup_\@
222 testl $USER_SEGMENT_RPL_MASK, 4*4(%esp)
223 jnz .Lfrom_usermode_no_fixup_\@
225 orl $CS_FROM_KERNEL, 4*4(%esp)
228 * When we're here from kernel mode, the (exception) stack looks like:
230 * 6*4(%esp) - <previous context>
234 * 2*4(%esp) - orig_eax
235 * 1*4(%esp) - gs / function
238 * Let's build a 5-entry IRET frame after that, such that struct pt_regs
239 * is complete and in particular regs->sp is correct. This gives us
240 * the original 6 entries as gap:
242 * 14*4(%esp) - <previous context>
243 * 13*4(%esp) - gap / flags
244 * 12*4(%esp) - gap / cs
245 * 11*4(%esp) - gap / ip
246 * 10*4(%esp) - gap / orig_eax
247 * 9*4(%esp) - gap / gs / function
248 * 8*4(%esp) - gap / fs
254 * 2*4(%esp) - orig_eax
255 * 1*4(%esp) - gs / function
260 pushl %esp # sp (points at ss)
261 addl $7*4, (%esp) # point sp back at the previous context
262 pushl 7*4(%esp) # flags
265 pushl 7*4(%esp) # orig_eax
266 pushl 7*4(%esp) # gs / function
268 .Lfrom_usermode_no_fixup_\@:
273 * We're called with %ds, %es, %fs, and %gs from the interrupted
274 * frame, so we shouldn't use them. Also, we may be in ESPFIX
275 * mode and therefore have a nonzero SS base and an offset ESP,
276 * so any attempt to access the stack needs to use SS (except for
277 * accesses through %esp, which automatically use SS).
279 testl $CS_FROM_KERNEL, 1*4(%esp)
280 jz .Lfinished_frame_\@
283 * Reconstruct the 3-entry IRET frame right after the (modified)
284 * regs->sp without lowering %esp in between, such that an NMI in the
285 * middle doesn't scribble our stack.
289 movl 5*4(%esp), %eax # (modified) regs->sp
291 movl 4*4(%esp), %ecx # flags
292 movl %ecx, %ss:-1*4(%eax)
294 movl 3*4(%esp), %ecx # cs
295 andl $0x0000ffff, %ecx
296 movl %ecx, %ss:-2*4(%eax)
298 movl 2*4(%esp), %ecx # ip
299 movl %ecx, %ss:-3*4(%eax)
301 movl 1*4(%esp), %ecx # eax
302 movl %ecx, %ss:-4*4(%eax)
310 .macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0 unwind_espfix=0
318 movl $(__KERNEL_PERCPU), %eax
320 .if \unwind_espfix > 0
335 movl $(__USER_DS), %edx
341 /* Switch to kernel stack if necessary */
342 .if \switch_stacks > 0
343 SWITCH_TO_KERNEL_STACK
347 .macro SAVE_ALL_NMI cr3_reg:req unwind_espfix=0
348 SAVE_ALL unwind_espfix=\unwind_espfix
353 * Now switch the CR3 when PTI is enabled.
355 * We can enter with either user or kernel cr3; the code will
356 * store the old cr3 in \cr3_reg and switch to the kernel cr3
359 SWITCH_TO_KERNEL_CR3 scratch_reg=\cr3_reg
364 .macro RESTORE_INT_REGS
374 .macro RESTORE_REGS pop=0
381 .pushsection .fixup, "ax"
395 .macro RESTORE_ALL_NMI cr3_reg:req pop=0
397 * Now switch the CR3 when PTI is enabled.
399 * We enter with kernel cr3 and switch the cr3 to the value
400 * stored in \cr3_reg, which is either a user or a kernel cr3.
402 ALTERNATIVE "jmp .Lswitched_\@", "", X86_FEATURE_PTI
404 testl $PTI_SWITCH_MASK, \cr3_reg
407 /* User cr3 in \cr3_reg - write it to hardware cr3 */
414 RESTORE_REGS pop=\pop
417 .macro CHECK_AND_APPLY_ESPFIX
418 #ifdef CONFIG_X86_ESPFIX32
419 #define GDT_ESPFIX_OFFSET (GDT_ENTRY_ESPFIX_SS * 8)
420 #define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + GDT_ESPFIX_OFFSET
422 ALTERNATIVE "jmp .Lend_\@", "", X86_BUG_ESPFIX
424 movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
426 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
427 * are returning to the kernel.
428 * See comments in process.c:copy_thread() for details.
430 movb PT_OLDSS(%esp), %ah
431 movb PT_CS(%esp), %al
432 andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
433 cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
434 jne .Lend_\@ # returning to user-space with LDT SS
437 * Setup and switch to ESPFIX stack
439 * We're returning to userspace with a 16 bit stack. The CPU will not
440 * restore the high word of ESP for us on executing iret... This is an
441 * "official" bug of all the x86-compatible CPUs, which we can work
442 * around to make dosemu and wine happy. We do this by preloading the
443 * high word of ESP with the high word of the userspace ESP while
444 * compensating for the offset by changing to the ESPFIX segment with
445 * a base address that matches for the difference.
447 mov %esp, %edx /* load kernel esp */
448 mov PT_OLDESP(%esp), %eax /* load userspace esp */
449 mov %dx, %ax /* eax: new kernel esp */
450 sub %eax, %edx /* offset (low word is 0) */
452 mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
453 mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
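/*
 * Illustrative example: with a kernel %esp of 0xc0d1e200 and a user
 * ESP of 0x5a7f3000, %eax becomes 0x5a7fe200 (user high word, kernel
 * low word) and the offset written into the ESPFIX segment base above
 * is 0x66520000.  Base + new %esp still addresses the kernel stack
 * (0xc0d1e200), while the ESP high word that survives the iret
 * matches the user's high word.
 */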
455 pushl %eax /* new kernel esp */
457 * Disable interrupts, but do not irqtrace this section: we
458 * will soon execute iret and the tracer was already set to
459 * the irqstate after the IRET:
461 DISABLE_INTERRUPTS(CLBR_ANY)
462 lss (%esp), %esp /* switch to espfix segment */
464 #endif /* CONFIG_X86_ESPFIX32 */
468 * Called with pt_regs fully populated and kernel segments loaded,
469 * so we can access PER_CPU and use the integer registers.
471 * We need to be very careful here with the %esp switch, because an NMI
472 * can happen anywhere. If the NMI handler finds itself on the
473 * entry-stack, it will overwrite the task-stack and everything we
474 * copied there. So allocate the stack-frame on the task-stack and
475 * switch to it before we do any copying.
478 .macro SWITCH_TO_KERNEL_STACK
480 ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV
484 SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
487 * %eax now contains the entry cr3 and we carry it forward in
488 * that register for the time this macro runs
491 /* Are we on the entry stack? Bail out if not! */
492 movl PER_CPU_VAR(cpu_entry_area), %ecx
493 addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
494 subl %esp, %ecx /* ecx = (end of entry_stack) - esp */
495 cmpl $SIZEOF_entry_stack, %ecx
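/*
 * Unsigned range check: if %esp lies within the entry stack then
 * (end - %esp) is in [0, SIZEOF_entry_stack), so an unsigned compare
 * against SIZEOF_entry_stack succeeds; any other %esp wraps around to
 * a large unsigned value and fails the check.
 */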
498 /* Load stack pointer into %esi and %edi */
502 /* Move %edi to the top of the entry stack */
503 andl $(MASK_entry_stack), %edi
504 addl $(SIZEOF_entry_stack), %edi
506 /* Load top of task-stack into %edi */
507 movl TSS_entry2task_stack(%edi), %edi
509 /* Special case - entry from kernel mode via entry stack */
511 movl PT_EFLAGS(%esp), %ecx # mix EFLAGS and CS
512 movb PT_CS(%esp), %cl
513 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %ecx
515 movl PT_CS(%esp), %ecx
516 andl $SEGMENT_RPL_MASK, %ecx
519 jb .Lentry_from_kernel_\@
522 movl $PTREGS_SIZE, %ecx
525 testl $X86_EFLAGS_VM, PT_EFLAGS(%esi)
529 * Stack-frame contains 4 additional segment registers when
530 * coming from VM86 mode
537 /* Allocate frame on task-stack */
540 /* Switch to task-stack */
544 * We are now on the task-stack and can safely copy over the stack-frame.
553 .Lentry_from_kernel_\@:
556 * This handles the case when we enter the kernel from
557 * kernel-mode and %esp points to the entry-stack. When this
558 * happens we need to switch to the task-stack to run C code,
559 * but switch back to the entry-stack again when we approach
560 * iret and return to the interrupted code-path. This usually
561 * happens when we hit an exception while restoring user-space
562 * segment registers on the way back to user-space or when the
563 * sysenter handler runs with eflags.tf set.
565 * When we switch to the task-stack here, we can't trust the
566 * contents of the entry-stack anymore, as the exception handler
567 * might be scheduled out or moved to another CPU. Therefore we
568 * copy the complete entry-stack to the task-stack and set a
569 * marker in the iret-frame (bit 31 of the CS dword) to detect
570 * what we've done on the iret path.
572 * On the iret path we copy everything back and switch to the
573 * entry-stack, so that the interrupted kernel code-path
574 * continues on the same stack it was interrupted with.
576 * Be aware that an NMI can happen anytime in this code.
578 * %esi: Entry-Stack pointer (same as %esp)
579 * %edi: Top of the task stack
580 * %eax: CR3 on kernel entry
583 /* Calculate number of bytes on the entry stack in %ecx */
586 /* %ecx to the top of entry-stack */
587 andl $(MASK_entry_stack), %ecx
588 addl $(SIZEOF_entry_stack), %ecx
590 /* Number of bytes on the entry stack to %ecx */
593 /* Mark stackframe as coming from entry stack */
594 orl $CS_FROM_ENTRY_STACK, PT_CS(%esp)
597 * Test the cr3 used to enter the kernel and add a marker
598 * so that we can switch back to it before iret.
600 testl $PTI_SWITCH_MASK, %eax
602 orl $CS_FROM_USER_CR3, PT_CS(%esp)
605 * %esi and %edi are unchanged, %ecx contains the number of
606 * bytes to copy. The code at .Lcopy_pt_regs_\@ will allocate
607 * the stack-frame on task-stack and copy everything over
609 jmp .Lcopy_pt_regs_\@
615 * Switch back from the kernel stack to the entry stack.
617 * The %esp register must point to pt_regs on the task stack. It will
618 * first calculate the size of the stack-frame to copy, depending on
619 * whether we return to VM86 mode or not. With that it uses 'rep movsl'
620 * to copy the contents of the stack over to the entry stack.
622 * We must be very careful here, as we can't trust the contents of the
623 * task-stack once we switched to the entry-stack. When an NMI happens
624 * while on the entry-stack, the NMI handler will switch back to the top
625 * of the task stack, overwriting our stack-frame we are about to copy.
626 * Therefore we switch the stack only after everything is copied over.
628 .macro SWITCH_TO_ENTRY_STACK
630 ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV
633 movl $PTREGS_SIZE, %ecx
636 testl $(X86_EFLAGS_VM), PT_EFLAGS(%esp)
639 /* Additional 4 registers to copy when returning to VM86 mode */
645 /* Initialize source and destination for movsl */
646 movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
650 /* Save future stack pointer in %ebx */
653 /* Copy over the stack-frame */
659 * Switch to entry-stack - needs to happen after everything is
660 * copied because the NMI handler will overwrite the task-stack
661 * when on entry-stack
669 * This macro handles the case when we return to kernel-mode on the iret
670 * path and have to switch back to the entry stack and/or user-cr3
672 * See the comments below the .Lentry_from_kernel_\@ label in the
673 * SWITCH_TO_KERNEL_STACK macro for more details.
675 .macro PARANOID_EXIT_TO_KERNEL_MODE
678 * Test if we entered the kernel with the entry-stack. Most
679 * likely we did not, because this code only runs on the
680 * return-to-kernel path.
682 testl $CS_FROM_ENTRY_STACK, PT_CS(%esp)
685 /* Unlikely slow-path */
687 /* Clear marker from stack-frame */
688 andl $(~CS_FROM_ENTRY_STACK), PT_CS(%esp)
690 /* Copy the remaining task-stack contents to entry-stack */
692 movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
694 /* Bytes on the task-stack to ecx */
695 movl PER_CPU_VAR(cpu_tss_rw + TSS_sp1), %ecx
698 /* Allocate stack-frame on entry-stack */
702 * Save the future stack pointer; we must not switch until the
703 * copy is done, otherwise the NMI handler could destroy the
704 * contents of the task-stack we are about to copy.
713 /* Safe to switch to entry-stack now */
717 * We came from entry-stack and need to check if we also need to
718 * switch back to user cr3.
720 testl $CS_FROM_USER_CR3, PT_CS(%esp)
723 /* Clear marker from stack-frame */
724 andl $(~CS_FROM_USER_CR3), PT_CS(%esp)
726 SWITCH_TO_USER_CR3 scratch_reg=%eax
732 * idtentry - Macro to generate entry stubs for simple IDT entries
733 * @vector: Vector number
734 * @asmsym: ASM symbol for the entry point
735 * @cfunc: C function to be called
736 * @has_error_code: Hardware pushed error code on stack
737 * @sane: Compatibility flag with 64bit
739 .macro idtentry vector asmsym cfunc has_error_code:req sane=0
740 SYM_CODE_START(\asmsym)
744 .if \has_error_code == 0
745 pushl $0 /* Clear the error code */
748 /* Push the C-function address into the GS slot */
750 /* Invoke the common exception entry */
752 SYM_CODE_END(\asmsym)
759 .pushsection .text, "ax"
760 SYM_CODE_START(__switch_to_asm)
762 * Save callee-saved registers
763 * This must match the order in struct inactive_task_frame
770 * Flags are saved to prevent AC leakage. This could go
771 * away if objtool had 32-bit support to verify
772 * the STAC/CLAC correctness.
777 movl %esp, TASK_threadsp(%eax)
778 movl TASK_threadsp(%edx), %esp
780 #ifdef CONFIG_STACKPROTECTOR
781 movl TASK_stack_canary(%edx), %ebx
782 movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
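/*
 * gcc's 32-bit stack protector reads the canary from the fixed
 * address %gs:20 (see the %gs comment near the top of this file and
 * stackprotector.h), which resolves to this per-cpu slot, so the
 * incoming task's canary has to be copied here on every switch.
 */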
785 #ifdef CONFIG_RETPOLINE
787 * When switching from a shallower to a deeper call stack
788 * the RSB may either underflow or use entries populated
789 * with userspace addresses. On CPUs where those concerns
790 * exist, overwrite the RSB with entries which capture
791 * speculative execution to prevent attack.
793 FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
796 /* Restore flags of the incoming task to restore AC state. */
798 /* restore callee-saved registers */
805 SYM_CODE_END(__switch_to_asm)
809 * The unwinder expects the last frame on the stack to always be at the same
810 * offset from the end of the page, which allows it to validate the stack.
811 * Calling schedule_tail() directly would break that convention because it's an
812 * asmlinkage function, so its argument has to be pushed on the stack. This
813 * wrapper creates a proper "end of stack" frame header before the call.
815 .pushsection .text, "ax"
816 SYM_FUNC_START(schedule_tail_wrapper)
825 SYM_FUNC_END(schedule_tail_wrapper)
829 * A newly forked process directly context switches into this address.
831 * eax: prev task we switched from
832 * ebx: kernel thread func (NULL for user thread)
833 * edi: kernel thread arg
835 .pushsection .text, "ax"
836 SYM_CODE_START(ret_from_fork)
837 call schedule_tail_wrapper
840 jnz 1f /* kernel threads are uncommon */
843 /* When we fork, we trace the syscall return in the child, too. */
845 call syscall_return_slowpath
846 jmp .Lsyscall_32_done
852 * A kernel thread is allowed to return here after successfully
853 * calling do_execve(). Exit to userspace to complete the execve() syscall.
856 movl $0, PT_EAX(%esp)
858 SYM_CODE_END(ret_from_fork)
862 * Return to user mode is not as complex as all this looks,
863 * but we want the default path for a system call return to
864 * go as quickly as possible which is why some of this is
865 * less clear than it otherwise should be.
868 # userspace resumption stub bypassing syscall exit tracing
869 SYM_CODE_START_LOCAL(ret_from_exception)
870 preempt_stop(CLBR_ANY)
873 movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
874 movb PT_CS(%esp), %al
875 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
878 * We can be coming here from a child spawned by kernel_thread().
880 movl PT_CS(%esp), %eax
881 andl $SEGMENT_RPL_MASK, %eax
884 jb restore_all_kernel # not returning to v8086 or userspace
886 DISABLE_INTERRUPTS(CLBR_ANY)
889 call prepare_exit_to_usermode
890 jmp restore_all_switch_stack
891 SYM_CODE_END(ret_from_exception)
893 SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
895 * All code from here through __end_SYSENTER_singlestep_region is subject
896 * to being single-stepped if a user program sets TF and executes SYSENTER.
897 * There is absolutely nothing that we can do to prevent this from happening
898 * (thanks Intel!). To keep our handling of this situation as simple as
899 * possible, we handle TF just like AC and NT, except that our #DB handler
900 * will ignore all of the single-step traps generated in this range.
905 * Xen doesn't set %esp to be precisely what the normal SYSENTER
906 * entry point expects, so fix it up before using the normal path.
908 SYM_CODE_START(xen_sysenter_target)
909 addl $5*4, %esp /* remove xen-provided frame */
910 jmp .Lsysenter_past_esp
911 SYM_CODE_END(xen_sysenter_target)
915 * 32-bit SYSENTER entry.
917 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
918 * if X86_FEATURE_SEP is available. This is the preferred system call
919 * entry on 32-bit systems.
921 * The SYSENTER instruction, in principle, should *only* occur in the
922 * vDSO. In practice, a small number of Android devices were shipped
923 * with a copy of Bionic that inlined a SYSENTER instruction. This
924 * never happened in any of Google's Bionic versions -- it only happened
925 * in a narrow range of Intel-provided versions.
927 * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs.
928 * IF and VM in RFLAGS are cleared (IOW: interrupts are off).
929 * SYSENTER does not save anything on the stack,
930 * and does not save old EIP (!!!), ESP, or EFLAGS.
932 * To avoid losing track of EFLAGS.VM (and thus potentially corrupting
933 * user and/or vm86 state), we explicitly disable the SYSENTER
934 * instruction in vm86 mode by reprogramming the MSRs.
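/*
 * The MSRs in question are IA32_SYSENTER_CS (0x174), IA32_SYSENTER_ESP
 * (0x175) and IA32_SYSENTER_EIP (0x176), i.e. MSR_IA32_SYSENTER_* in
 * <asm/msr-index.h>.
 */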
937 * eax system call number
946 SYM_FUNC_START(entry_SYSENTER_32)
948 * On entry-stack with all userspace-regs live - save and
949 * restore eflags and %eax to use it as scratch-reg for the cr3 switch.
954 BUG_IF_WRONG_CR3 no_user_check=1
955 SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
959 /* Stack empty again, switch to task stack */
960 movl TSS_entry2task_stack(%esp), %esp
963 pushl $__USER_DS /* pt_regs->ss */
964 pushl %ebp /* pt_regs->sp (stashed in bp) */
965 pushfl /* pt_regs->flags (except IF = 0) */
966 orl $X86_EFLAGS_IF, (%esp) /* Fix IF */
967 pushl $__USER_CS /* pt_regs->cs */
968 pushl $0 /* pt_regs->ip = 0 (placeholder) */
969 pushl %eax /* pt_regs->orig_ax */
970 SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest, stack already switched */
973 * SYSENTER doesn't filter flags, so we need to clear NT, AC
974 * and TF ourselves. To save a few cycles, we can check whether
975 * either was set instead of doing an unconditional popfl.
976 * This needs to happen before enabling interrupts so that
977 * we don't get preempted with NT set.
979 * If TF is set, we will single-step all the way to here -- do_debug
980 * will ignore all the traps. (Yes, this is slow, but so is
981 * single-stepping in general. This allows us to avoid having
982 * more complicated code to handle the case where a user program
983 * forces us to single-step through the SYSENTER entry code.)
985 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
986 * out-of-line as an optimization: NT is unlikely to be set in the
987 * majority of the cases and instead of polluting the I$ unnecessarily,
988 * we're keeping that code behind a branch which will predict as
989 * not-taken and therefore its instructions won't be fetched.
991 testl $X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
992 jnz .Lsysenter_fix_flags
993 .Lsysenter_flags_fixed:
996 call do_fast_syscall_32
997 /* XEN PV guests always use IRET path */
998 ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
999 "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
1003 /* Opportunistic SYSEXIT */
1006 * Setup entry stack - we keep the pointer in %eax and do the
1007 * switch after almost all user-state is restored.
1010 /* Load entry stack pointer and allocate frame for eflags/eax */
1011 movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %eax
1014 /* Copy eflags and eax to entry stack */
1015 movl PT_EFLAGS(%esp), %edi
1016 movl PT_EAX(%esp), %esi
1020 /* Restore user registers and segments */
1021 movl PT_EIP(%esp), %edx /* pt_regs->ip */
1022 movl PT_OLDESP(%esp), %ecx /* pt_regs->sp */
1023 1: mov PT_FS(%esp), %fs
1026 popl %ebx /* pt_regs->bx */
1027 addl $2*4, %esp /* skip pt_regs->cx and pt_regs->dx */
1028 popl %esi /* pt_regs->si */
1029 popl %edi /* pt_regs->di */
1030 popl %ebp /* pt_regs->bp */
1032 /* Switch to entry stack */
1035 /* Now ready to switch the cr3 */
1036 SWITCH_TO_USER_CR3 scratch_reg=%eax
1039 * Restore all flags except IF. (We restore IF separately because
1040 * STI gives a one-instruction window in which we won't be interrupted,
1041 * whereas POPF does not.)
1043 btrl $X86_EFLAGS_IF_BIT, (%esp)
1044 BUG_IF_WRONG_CR3 no_user_check=1
1049 * Return back to the vDSO, which will pop ecx and edx.
1050 * Don't bother with DS and ES (they already contain __USER_DS).
1055 .pushsection .fixup, "ax"
1056 2: movl $0, PT_FS(%esp)
1059 _ASM_EXTABLE(1b, 2b)
1062 .Lsysenter_fix_flags:
1063 pushl $X86_EFLAGS_FIXED
1065 jmp .Lsysenter_flags_fixed
1066 SYM_ENTRY(__end_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
1067 SYM_FUNC_END(entry_SYSENTER_32)
1070 * 32-bit legacy system call entry.
1072 * 32-bit x86 Linux system calls traditionally used the INT $0x80
1073 * instruction. INT $0x80 lands here.
1075 * This entry point can be used by any 32-bit program to perform system calls.
1076 * Instances of INT $0x80 can be found inline in various programs and
1077 * libraries. It is also used by the vDSO's __kernel_vsyscall
1078 * fallback for hardware that doesn't support a faster entry method.
1079 * Restarted 32-bit system calls also fall back to INT $0x80
1080 * regardless of what instruction was originally used to do the system
1081 * call. (64-bit programs can use INT $0x80 as well, but they can
1082 * only run on 64-bit kernels and therefore land in
1083 * entry_INT80_compat.)
1085 * This is considered a slow path. It is not used by most libc
1086 * implementations on modern hardware except during process startup.
1089 * eax system call number
1097 SYM_FUNC_START(entry_INT80_32)
1099 pushl %eax /* pt_regs->orig_ax */
1101 SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1 /* save rest */
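/*
 * %eax was pushed as orig_ax (the syscall number) above, while the
 * pt_regs->ax slot is preset to -ENOSYS: if the number turns out to
 * be invalid, the syscall simply returns the error already stored
 * there.
 */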
1104 call do_int80_syscall_32
1108 restore_all_switch_stack:
1109 SWITCH_TO_ENTRY_STACK
1110 CHECK_AND_APPLY_ESPFIX
1112 /* Switch back to user CR3 */
1113 SWITCH_TO_USER_CR3 scratch_reg=%eax
1117 /* Restore user state */
1118 RESTORE_REGS pop=4 # skip orig_eax/error_code
1121 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
1122 * when returning from IPI handler and when returning from
1123 * scheduler to user-space.
1128 #ifdef CONFIG_PREEMPTION
1129 DISABLE_INTERRUPTS(CLBR_ANY)
1130 cmpl $0, PER_CPU_VAR(__preempt_count)
1132 testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
1134 call preempt_schedule_irq
1138 PARANOID_EXIT_TO_KERNEL_MODE
1143 .section .fixup, "ax"
1144 SYM_CODE_START(iret_exc)
1145 pushl $0 # no error code
1146 pushl $do_iret_error
1148 #ifdef CONFIG_DEBUG_ENTRY
1150 * The stack-frame here is the one that iret faulted on, so it's a
1151 * return-to-user frame. We are on kernel-cr3 because we come here from
1152 * the fixup code. This confuses the CR3 checker, so switch to user-cr3
1153 * as the checker expects it.
1156 SWITCH_TO_USER_CR3 scratch_reg=%eax
1160 jmp common_exception
1161 SYM_CODE_END(iret_exc)
1163 _ASM_EXTABLE(.Lirq_return, iret_exc)
1164 SYM_FUNC_END(entry_INT80_32)
1166 .macro FIXUP_ESPFIX_STACK
1168 * Switch back from the ESPFIX stack to the normal zero-based stack
1170 * We can't call C functions using the ESPFIX stack. This code reads
1171 * the high word of the segment base from the GDT and switches to the
1172 * normal stack and adjusts ESP with the matching offset.
1174 * We might be on user CR3 here, so percpu data is not mapped and we can't
1175 * access the GDT through the percpu segment. Instead, use SGDT to find
1176 * the cpu_entry_area alias of the GDT.
1178 #ifdef CONFIG_X86_ESPFIX32
1179 /* fixup the stack */
1183 movl 2(%esp), %ecx /* GDT address */
1185 * Careful: ECX is a linear pointer, so we need to force base
1186 * zero. %cs is the only known-linear segment we have right now.
1188 mov %cs:GDT_ESPFIX_OFFSET + 4(%ecx), %al /* bits 16..23 */
1189 mov %cs:GDT_ESPFIX_OFFSET + 7(%ecx), %ah /* bits 24..31 */
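/*
 * A GDT descriptor scatters the segment base: bits 0..15 live in
 * bytes 2-3, bits 16..23 in byte 4 and bits 24..31 in byte 7.  Only
 * the two high base bytes are read here because CHECK_AND_APPLY_ESPFIX
 * only ever programs a base whose low 16 bits are zero.
 */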
1193 addl %esp, %eax /* the adjusted stack pointer */
1196 lss (%esp), %esp /* switch to the normal stack segment */
1200 .macro UNWIND_ESPFIX_STACK
1201 /* It's safe to clobber %eax, all other regs need to be preserved */
1202 #ifdef CONFIG_X86_ESPFIX32
1204 /* see if on espfix stack */
1205 cmpw $__ESPFIX_SS, %ax
1207 /* switch to normal stack */
1214 * Build the entry stubs with some assembler magic.
1215 * We pack 1 stub into every 8-byte block.
1218 SYM_CODE_START(irq_entries_start)
1219 vector=FIRST_EXTERNAL_VECTOR
1220 .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
1221 pushl $(~vector+0x80) /* Note: always in signed byte range */
1223 jmp common_interrupt
1226 SYM_CODE_END(irq_entries_start)
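/*
 * The stubs push ~vector + 0x80 so the immediate always fits in a
 * signed byte and each stub stays small.  common_interrupt and
 * common_spurious subtract 0x80 again, leaving ~vector, a value in
 * the [-256, -1] range from which the C handler recovers the vector
 * number.  E.g. for vector 0x31 the stub pushes 0x4e and the
 * adjustment turns it into -0x32 == ~0x31.
 */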
1228 #ifdef CONFIG_X86_LOCAL_APIC
1230 SYM_CODE_START(spurious_entries_start)
1231 vector=FIRST_SYSTEM_VECTOR
1232 .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
1233 pushl $(~vector+0x80) /* Note: always in signed byte range */
1238 SYM_CODE_END(spurious_entries_start)
1240 SYM_CODE_START_LOCAL(common_spurious)
1242 addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */
1243 SAVE_ALL switch_stacks=1
1244 ENCODE_FRAME_POINTER
1247 call smp_spurious_interrupt
1249 SYM_CODE_END(common_spurious)
1253 * the CPU automatically disables interrupts when executing an IRQ vector,
1254 * so IRQ-flags tracing has to follow that:
1256 .p2align CONFIG_X86_L1_CACHE_SHIFT
1257 SYM_CODE_START_LOCAL(common_interrupt)
1259 addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */
1261 SAVE_ALL switch_stacks=1
1262 ENCODE_FRAME_POINTER
1267 SYM_CODE_END(common_interrupt)
1269 #define BUILD_INTERRUPT3(name, nr, fn) \
1270 SYM_FUNC_START(name) \
1273 SAVE_ALL switch_stacks=1; \
1274 ENCODE_FRAME_POINTER; \
1278 jmp ret_from_intr; \
1281 #define BUILD_INTERRUPT(name, nr) \
1282 BUILD_INTERRUPT3(name, nr, smp_##name); \
1284 /* The include is where all of the SMP etc. interrupts come from */
1285 #include <asm/entry_arch.h>
1287 SYM_CODE_START(coprocessor_error)
1290 pushl $do_coprocessor_error
1291 jmp common_exception
1292 SYM_CODE_END(coprocessor_error)
1294 SYM_CODE_START(simd_coprocessor_error)
1297 #ifdef CONFIG_X86_INVD_BUG
1298 /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
1299 ALTERNATIVE "pushl $do_general_protection", \
1300 "pushl $do_simd_coprocessor_error", \
1303 pushl $do_simd_coprocessor_error
1305 jmp common_exception
1306 SYM_CODE_END(simd_coprocessor_error)
1308 SYM_CODE_START(device_not_available)
1311 pushl $do_device_not_available
1312 jmp common_exception
1313 SYM_CODE_END(device_not_available)
1315 #ifdef CONFIG_PARAVIRT
1316 SYM_CODE_START(native_iret)
1318 _ASM_EXTABLE(native_iret, iret_exc)
1319 SYM_CODE_END(native_iret)
1322 SYM_CODE_START(overflow)
1326 jmp common_exception
1327 SYM_CODE_END(overflow)
1329 SYM_CODE_START(bounds)
1333 jmp common_exception
1334 SYM_CODE_END(bounds)
1336 SYM_CODE_START(invalid_op)
1339 pushl $do_invalid_op
1340 jmp common_exception
1341 SYM_CODE_END(invalid_op)
1343 SYM_CODE_START(coprocessor_segment_overrun)
1346 pushl $do_coprocessor_segment_overrun
1347 jmp common_exception
1348 SYM_CODE_END(coprocessor_segment_overrun)
1350 SYM_CODE_START(invalid_TSS)
1352 pushl $do_invalid_TSS
1353 jmp common_exception
1354 SYM_CODE_END(invalid_TSS)
1356 SYM_CODE_START(segment_not_present)
1358 pushl $do_segment_not_present
1359 jmp common_exception
1360 SYM_CODE_END(segment_not_present)
1362 SYM_CODE_START(stack_segment)
1364 pushl $do_stack_segment
1365 jmp common_exception
1366 SYM_CODE_END(stack_segment)
1368 SYM_CODE_START(alignment_check)
1370 pushl $do_alignment_check
1371 jmp common_exception
1372 SYM_CODE_END(alignment_check)
1374 SYM_CODE_START(divide_error)
1376 pushl $0 # no error code
1377 pushl $do_divide_error
1378 jmp common_exception
1379 SYM_CODE_END(divide_error)
1381 #ifdef CONFIG_X86_MCE
1382 SYM_CODE_START(machine_check)
1386 jmp common_exception
1387 SYM_CODE_END(machine_check)
1390 SYM_CODE_START(spurious_interrupt_bug)
1393 pushl $do_spurious_interrupt_bug
1394 jmp common_exception
1395 SYM_CODE_END(spurious_interrupt_bug)
1397 #ifdef CONFIG_XEN_PV
1398 SYM_FUNC_START(xen_hypervisor_callback)
1400 * Check to see if we got the event in the critical
1401 * region in xen_iret_direct, after we've reenabled
1402 * events and checked for pending events. This simulates
1403 * iret instruction's behaviour where it delivers a
1404 * pending interrupt when enabling interrupts:
1406 cmpl $xen_iret_start_crit, (%esp)
1408 cmpl $xen_iret_end_crit, (%esp)
1410 call xen_iret_crit_fixup
1412 pushl $-1 /* orig_ax = -1 => not a system call */
1414 ENCODE_FRAME_POINTER
1417 call xen_evtchn_do_upcall
1418 #ifndef CONFIG_PREEMPTION
1419 call xen_maybe_preempt_hcall
1422 SYM_FUNC_END(xen_hypervisor_callback)
1425 * Hypervisor uses this for application faults while it executes.
1426 * We get here for two reasons:
1427 * 1. Fault while reloading DS, ES, FS or GS
1428 * 2. Fault while executing IRET
1429 * Category 1 we fix up by reattempting the load, and zeroing the segment
1430 * register if the load fails.
1431 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
1432 * normal Linux return path in this case because if we use the IRET hypercall
1433 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
1434 * We distinguish between categories by maintaining a status value in EAX.
1436 SYM_FUNC_START(xen_failsafe_callback)
1441 3: mov 12(%esp), %fs
1442 4: mov 16(%esp), %gs
1443 /* EAX == 0 => Category 1 (Bad segment)
1444 EAX != 0 => Category 2 (Bad IRET) */
1450 5: pushl $-1 /* orig_ax = -1 => not a system call */
1452 ENCODE_FRAME_POINTER
1453 jmp ret_from_exception
1455 .section .fixup, "ax"
1469 _ASM_EXTABLE(1b, 6b)
1470 _ASM_EXTABLE(2b, 7b)
1471 _ASM_EXTABLE(3b, 8b)
1472 _ASM_EXTABLE(4b, 9b)
1473 SYM_FUNC_END(xen_failsafe_callback)
1474 #endif /* CONFIG_XEN_PV */
1476 #ifdef CONFIG_XEN_PVHVM
1477 BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
1478 xen_evtchn_do_upcall)
1482 #if IS_ENABLED(CONFIG_HYPERV)
1484 BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
1485 hyperv_vector_handler)
1487 BUILD_INTERRUPT3(hyperv_reenlightenment_vector, HYPERV_REENLIGHTENMENT_VECTOR,
1488 hyperv_reenlightenment_intr)
1490 BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR,
1491 hv_stimer0_vector_handler)
1493 #endif /* CONFIG_HYPERV */
1495 SYM_CODE_START(page_fault)
1497 pushl $do_page_fault
1498 jmp common_exception_read_cr2
1499 SYM_CODE_END(page_fault)
1501 SYM_CODE_START_LOCAL_NOALIGN(common_exception_read_cr2)
1502 /* the function address is in %gs's slot on the stack */
1503 SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
1505 ENCODE_FRAME_POINTER
1509 movl PT_GS(%esp), %edi
1513 GET_CR2_INTO(%ecx) # might clobber %eax
1515 /* fixup orig %eax */
1516 movl PT_ORIG_EAX(%esp), %edx # get the error code
1517 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
1520 movl %esp, %eax # pt_regs pointer
1522 jmp ret_from_exception
1523 SYM_CODE_END(common_exception_read_cr2)
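/*
 * The C handler reached through %edi is called with the regparm(3)
 * convention the 32-bit kernel is built with: the pt_regs pointer in
 * %eax, the error code in %edx and, for the cr2 variant above, the
 * faulting address in %ecx; nothing is passed on the stack.
 */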
1525 SYM_CODE_START_LOCAL_NOALIGN(common_exception)
1526 /* the function address is in %gs's slot on the stack */
1527 SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
1528 ENCODE_FRAME_POINTER
1532 movl PT_GS(%esp), %edi # get the function address
1536 /* fixup orig %eax */
1537 movl PT_ORIG_EAX(%esp), %edx # get the error code
1538 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
1541 movl %esp, %eax # pt_regs pointer
1543 jmp ret_from_exception
1544 SYM_CODE_END(common_exception)
1546 SYM_CODE_START_LOCAL_NOALIGN(handle_exception)
1547 /* the function address is in %gs's slot on the stack */
1548 SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
1549 ENCODE_FRAME_POINTER
1553 movl PT_GS(%esp), %edi # get the function address
1557 /* fixup orig %eax */
1558 movl PT_ORIG_EAX(%esp), %edx # get the error code
1559 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
1561 movl %esp, %eax # pt_regs pointer
1565 movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
1566 movb PT_CS(%esp), %al
1567 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
1570 * We can be coming here from a child spawned by kernel_thread().
1572 movl PT_CS(%esp), %eax
1573 andl $SEGMENT_RPL_MASK, %eax
1575 cmpl $USER_RPL, %eax # returning to v8086 or userspace ?
1578 PARANOID_EXIT_TO_KERNEL_MODE
1585 jmp restore_all_switch_stack
1586 SYM_CODE_END(handle_exception)
1588 SYM_CODE_START(debug)
1590 * Entry from sysenter is now handled in common_exception
1595 jmp common_exception
1598 SYM_CODE_START(double_fault)
1601 * This is a task gate handler, not an interrupt gate handler.
1602 * The error code is on the stack, but the stack is otherwise
1603 * empty. Interrupts are off. Our state is sane with the following assumptions:
1606 * - CR0.TS is set. "TS" literally means "task switched".
1607 * - EFLAGS.NT is set because we're a "nested task".
1608 * - The doublefault TSS has back_link set and has been marked busy.
1609 * - TR points to the doublefault TSS and the normal TSS is busy.
1610 * - CR3 is the normal kernel PGD. This would be delightful, except
1611 * that the CPU didn't bother to save the old CR3 anywhere. This
1612 * would make it very awkward to return to the context we came from.
1615 * The rest of EFLAGS is sanitized for us, so we don't need to
1616 * worry about AC or DF.
1618 * Don't even bother popping the error code. It's always zero,
1619 * and ignoring it makes us a bit more robust against buggy
1620 * hypervisor task gate implementations.
1622 * We will manually undo the task switch instead of doing a
1623 * task-switching IRET.
1626 clts /* clear CR0.TS */
1627 pushl $X86_EFLAGS_FIXED
1628 popfl /* clear EFLAGS.NT */
1630 call doublefault_shim
1632 /* We don't support returning, so we have no IRET here. */
1636 SYM_CODE_END(double_fault)
1639 * NMI is doubly nasty. It can happen on the first instruction of
1640 * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning
1641 * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32
1642 * switched stacks. We handle both conditions by simply checking whether we
1643 * interrupted kernel code running on the SYSENTER stack.
1648 #ifdef CONFIG_X86_ESPFIX32
1650 * ESPFIX_SS is only ever set on the return to user path
1651 * after we've switched to the entry stack.
1655 cmpw $__ESPFIX_SS, %ax
1657 je .Lnmi_espfix_stack
1660 pushl %eax # pt_regs->orig_ax
1661 SAVE_ALL_NMI cr3_reg=%edi
1662 ENCODE_FRAME_POINTER
1663 xorl %edx, %edx # zero error code
1664 movl %esp, %eax # pt_regs pointer
1666 /* Are we currently on the SYSENTER stack? */
1667 movl PER_CPU_VAR(cpu_entry_area), %ecx
1668 addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
1669 subl %eax, %ecx /* ecx = (end of entry_stack) - esp */
1670 cmpl $SIZEOF_entry_stack, %ecx
1671 jb .Lnmi_from_sysenter_stack
1673 /* Not on SYSENTER stack. */
1677 .Lnmi_from_sysenter_stack:
1679 * We're on the SYSENTER stack. Switch off. No one (not even debug)
1680 * is using the thread stack right now, so it's safe for us to use it.
1683 movl PER_CPU_VAR(cpu_current_top_of_stack), %esp
1688 #ifdef CONFIG_X86_ESPFIX32
1689 testl $CS_FROM_ESPFIX, PT_CS(%esp)
1690 jnz .Lnmi_from_espfix
1693 CHECK_AND_APPLY_ESPFIX
1694 RESTORE_ALL_NMI cr3_reg=%edi pop=4
1697 #ifdef CONFIG_X86_ESPFIX32
1700 * Create the far pointer used to LSS back onto the espfix stack
1706 /* Copy the (short) IRET frame */
1707 pushl 4*4(%esp) # flags
1708 pushl 4*4(%esp) # cs
1709 pushl 4*4(%esp) # ip
1711 pushl %eax # orig_ax
1713 SAVE_ALL_NMI cr3_reg=%edi unwind_espfix=1
1714 ENCODE_FRAME_POINTER
1716 /* clear CS_FROM_KERNEL, set CS_FROM_ESPFIX */
1717 xorl $(CS_FROM_ESPFIX | CS_FROM_KERNEL), PT_CS(%esp)
1719 xorl %edx, %edx # zero error code
1720 movl %esp, %eax # pt_regs pointer
1721 jmp .Lnmi_from_sysenter_stack
1724 RESTORE_ALL_NMI cr3_reg=%edi
1726 * Because we cleared CS_FROM_KERNEL, IRET_FRAME 'forgot' to
1727 * fix up the gap and long frame:
1729 * 3 - original frame (exception)
1730 * 2 - ESPFIX block (above)
1731 * 6 - gap (FIXUP_FRAME)
1732 * 5 - long frame (FIXUP_FRAME)
1735 lss (1+5+6)*4(%esp), %esp # back to espfix stack
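/*
 * (1+5+6)*4 skips orig_ax, the five-word long frame and the six-word
 * gap listed above, so the lss pointer lands on the two-word sp/ss
 * pair built at .Lnmi_espfix_stack, switching back to the espfix
 * stack in one step.
 */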
1740 SYM_CODE_START(int3)
1744 jmp common_exception
1747 SYM_CODE_START(general_protection)
1749 pushl $do_general_protection
1750 jmp common_exception
1751 SYM_CODE_END(general_protection)
1753 .pushsection .text, "ax"
1754 SYM_CODE_START(rewind_stack_do_exit)
1755 /* Prevent any naive code from trying to unwind to our caller. */
1758 movl PER_CPU_VAR(cpu_current_top_of_stack), %esi
1759 leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp
1763 SYM_CODE_END(rewind_stack_do_exit)