/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture-defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers
 *   are not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQS_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
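 *
 * A rough sketch of the frame layout implied by the terms above
 * (highest address first; the symbolic offsets come from asm-offsets):
 *
 *	SS RSP EFLAGS CS RIP			<- top of stack (hardware frame)
 *	ORIG_RAX				<- syscall number or error code
 *	RDI RSI RDX RCX RAX R8 R9 R10 R11	<- partial frame (SAVE_ARGS)
 *	RBX RBP R12 R13 R14 R15			<- added by SAVE_REST (full frame)
 */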
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>
#ifdef CONFIG_FTRACE
#ifdef CONFIG_DYNAMIC_FTRACE

	subq $MCOUNT_INSN_SIZE, %rdi

	/* taken from glibc */
	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi
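	/*
	 * 0x38(%rsp) skips the seven scratch registers saved above
	 * (7 * 8 bytes) to reach mcount's return address; stepping back
	 * by MCOUNT_INSN_SIZE yields the address of the call to mcount
	 * inside the traced function, which is what the tracer records.
	 */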
#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount)
	cmpq $ftrace_stub, ftrace_trace_function
	jnz trace
ftrace_stub:
	retq

trace:
	/* taken from glibc */
	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi
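	/* same call-site arithmetic as in the dynamic case above */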
	call *ftrace_trace_function

	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FTRACE */
#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
#endif /* CONFIG_PARAVIRT */
	.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt   $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc  1f
	TRACE_IRQS_ON
1:
#endif
	.endm
/*
 * C code is not supposed to know about undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL-based
 * fast path FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

	/* %rsp: at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp
	movq	%gs:pda_oldrsp,\tmp
	movq	\tmp,RSP(%rsp)
	movq	$__USER_DS,SS(%rsp)
	movq	$__USER_CS,CS(%rsp)
	movq	R11(%rsp),\tmp	/* get eflags */
	movq	\tmp,EFLAGS(%rsp)
	.endm

	.macro RESTORE_TOP_OF_STACK tmp,offset=0
	movq	RSP-\offset(%rsp),\tmp
	movq	\tmp,%gs:pda_oldrsp
	movq	EFLAGS-\offset(%rsp),\tmp
	movq	\tmp,R11-\offset(%rsp)
	.endm
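	/*
	 * EFLAGS travels through the R11 slot because that is where
	 * SYSCALL saved it and where SYSRET will reload it from; the two
	 * macros above just keep the pt_regs view and the SYSRET view of
	 * the flags in sync.
	 */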
	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl	%eax, %eax
	pushq	$__KERNEL_DS	/* ss */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	ss,0*/
	pushq	%rax		/* rsp */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rsp,0
	pushq	$(1<<9)		/* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	rflags,0*/
	pushq	$__KERNEL_CS	/* cs */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	cs,0*/
	pushq	\child_rip	/* rip */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rip,0
	pushq	%rax		/* orig rax */
	CFI_ADJUST_CFA_OFFSET	8
	.endm

	.macro UNFAKE_STACK_FRAME
	addq	$8*6, %rsp
	CFI_ADJUST_CFA_OFFSET	-(6*8)
	.endm
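	/* The six quadwords popped here are the ss, rsp, eflags, cs, rip
	   and orig rax words pushed by FAKE_STACK_FRAME above. */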
	.macro CFI_DEFAULT_STACK start=1
	.if \start
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,SS+8
	.else
	CFI_DEF_CFA_OFFSET SS+8
	.endif
	CFI_REL_OFFSET	r15,R15
	CFI_REL_OFFSET	r14,R14
	CFI_REL_OFFSET	r13,R13
	CFI_REL_OFFSET	r12,R12
	CFI_REL_OFFSET	rbp,RBP
	CFI_REL_OFFSET	rbx,RBX
	CFI_REL_OFFSET	r11,R11
	CFI_REL_OFFSET	r10,R10
	CFI_REL_OFFSET	r9,R9
	CFI_REL_OFFSET	r8,R8
	CFI_REL_OFFSET	rax,RAX
	CFI_REL_OFFSET	rcx,RCX
	CFI_REL_OFFSET	rdx,RDX
	CFI_REL_OFFSET	rsi,RSI
	CFI_REL_OFFSET	rdi,RDI
	CFI_REL_OFFSET	rip,RIP
	/*CFI_REL_OFFSET	cs,CS*/
	/*CFI_REL_OFFSET	rflags,EFLAGS*/
	CFI_REL_OFFSET	rsp,RSP
	/*CFI_REL_OFFSET	ss,SS*/
	.endm
/*
 * A newly forked process directly context switches into this.
 */
/* rdi:	prev */
ENTRY(ret_from_fork)
	CFI_DEFAULT_STACK
	push kernel_eflags(%rip)
	CFI_ADJUST_CFA_OFFSET 8
	popf				# reset kernel eflags
	CFI_ADJUST_CFA_OFFSET -8
	call schedule_tail
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jnz rff_trace
rff_action:
	RESTORE_REST
	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je   int_ret_from_sys_call
	testl $_TIF_IA32,TI_flags(%rcx)
	jnz  int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
	jmp ret_from_sys_call
rff_trace:
	movq %rsp,%rdi
	call syscall_trace_leave
	GET_THREAD_INFO(%rcx)
	jmp rff_action
END(ret_from_fork)
/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 *
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the stack frame
 *	and report it properly in ps. Unfortunately we don't have one.
 *
 * When the user can change the frames always force IRET. That is because
 * it deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
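 *
 * For illustration only (a user-space sketch, not part of this file):
 * under the convention above, write(2) is entered as
 *
 *	movq	$1,%rax			# __NR_write
 *	movq	$1,%rdi			# arg0: fd
 *	leaq	msg(%rip),%rsi		# arg1: buf  ("msg" is a placeholder)
 *	movq	$len,%rdx		# arg2: count ("len" is a placeholder)
 *	syscall				# clobbers %rcx (rip) and %r11 (rflags)
 */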
	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
	/*CFI_REGISTER	rflags,r11*/
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
ENTRY(system_call_after_swapgs)

	movq	%rsp,%gs:pda_oldrsp
	movq	%gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_ARGS 8,1
	movq	%rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq	%rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl	$(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP), \
		TI_flags(%rcx)
	jnz tracesys
	cmpq $__NR_syscall_max,%rax
	ja badsys
	movq %r10,%rcx
	call *sys_call_table(,%rax,8)	# XXX: rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	flagmask */
sysret_check:
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz  sysret_careful
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq RIP-ARGOFFSET(%rsp),%rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER	rflags,r11*/
	movq	%gs:pda_oldrsp, %rsp
	USERGS_SYSRET64
	/* Handle reschedules */
	/* edx:	work, edi: workmask */
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq  %rdi
	CFI_ADJUST_CFA_OFFSET -8
	jmp sysret_check
	/* Handle a signal */
sysret_signal:
	ENABLE_INTERRUPTS(CLBR_NONE)
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz    1f

	/* Really a signal */
	/* edx:	work flags (arg3) */
	leaq do_notify_resume(%rip),%rax
	leaq -ARGOFFSET(%rsp),%rdi	# &pt_regs -> arg1
	xorl %esi,%esi			# oldset -> arg2
	call ptregscall_common
1:	movl $_TIF_WORK_MASK,%edi
	/* Use IRET because user could have changed frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	DISABLE_INTERRUPTS(CLBR_NONE)
	jmp int_with_check

badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call
	/* Do syscall tracing */
tracesys:
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp)	/* ptrace can change this for a bad syscall */
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	LOAD_ARGS ARGOFFSET	/* reload args from stack in case ptrace changed it */
	RESTORE_REST
	cmpq $__NR_syscall_max,%rax
	ja   int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
	movq %r10,%rcx	/* fixup for C */
	call *sys_call_table(,%rax,8)
	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because user could have changed frame */
/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
	.globl int_ret_from_sys_call
int_ret_from_sys_call:
	DISABLE_INTERRUPTS(CLBR_NONE)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
int_with_check:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz  int_careful
	andl $~TS_COMPAT,TI_status(%rcx)
	jmp  retint_swapgs
	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc  int_very_careful
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	DISABLE_INTERRUPTS(CLBR_NONE)
	jmp int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	/* Check for syscall exit trace */
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
	jz int_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
	call syscall_trace_leave
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
	jmp int_restore_rest

int_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_WORK_MASK,%edi
int_restore_rest:
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	jmp int_with_check
END(system_call)
/*
 * Certain special system calls need to save a complete stack frame.
 */

	.macro PTREGSCALL label,func,arg
	.globl \label
\label:
	leaq	\func(%rip),%rax
	leaq	-ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
	jmp	ptregscall_common
	.endm

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi
ENTRY(ptregscall_common)
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	movq %r11, %r15
	CFI_REGISTER rip, r15
	FIXUP_TOP_OF_STACK %r11
	call *%rax
	RESTORE_TOP_OF_STACK %r11
	movq %r15, %r11
	CFI_REGISTER rip, r11
	RESTORE_REST
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip, 0
	ret
END(ptregscall_common)
ENTRY(stub_execve)
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	call sys_execve
	RESTORE_TOP_OF_STACK %r11
	RESTORE_REST
	jmp int_ret_from_sys_call
/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	addq $8, %rsp
	CFI_ADJUST_CFA_OFFSET	-8
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp)	# fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
END(stub_rt_sigreturn)
/*
 * initial frame state for interrupts and exceptions
 */
	.macro _frame ref
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,SS+8-\ref
	/*CFI_REL_OFFSET ss,SS-\ref*/
	CFI_REL_OFFSET rsp,RSP-\ref
	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
	/*CFI_REL_OFFSET cs,CS-\ref*/
	CFI_REL_OFFSET rip,RIP-\ref
	.endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX
/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */
	.macro interrupt func
	cld
	SAVE_ARGS
	leaq -ARGOFFSET(%rsp),%rdi	# arg1 for handler
	pushq %rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET		rbp, 0
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER	rbp
	testl $3,CS(%rdi)
	je 1f
	SWAPGS
	/* irqcount is used to check if a CPU is already on an interrupt
	   stack or not. While this is essentially redundant with preempt_count
	   it is a little cheaper to use a separate counter in the PDA
	   (short of moving irq_enter into assembly, which would be too
	    much work) */
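	/* pda_irqcount starts at -1, so the incl below sets ZF only on the
	   outermost entry; the cmoveq then switches to the per-CPU
	   interrupt stack exactly once per nesting. */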
1:	incl %gs:pda_irqcount
	cmoveq %gs:pda_irqstackptr,%rsp
	push %rbp			# backlink for old unwinder
	/*
	 * We entered an interrupt context - irqs are off:
	 */
	TRACE_IRQS_OFF
	call \func
	.endm
ENTRY(common_interrupt)
	XCPT_FRAME
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	decl %gs:pda_irqcount
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz  retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	SWAPGS

retint_restore_args:	/* return to kernel space */
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
	RESTORE_ARGS 0,8,0
irq_return:
	INTERRUPT_RETURN

	.section __ex_table, "a"
	.quad irq_return, bad_iret
	.previous
#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iretq

	.section __ex_table,"a"
	.quad native_iret, bad_iret
	.previous
#endif
	/*
	 * The iret traps when the %cs or %ss being restored is bogus.
	 * We've lost the original trap vector and error code.
	 * #GPF is the most likely one to get for an invalid selector.
	 * So pretend we completed the iret and took the #GPF in user mode.
	 *
	 * We are now running with the kernel GS after exception recovery.
	 * But error_entry expects us to have user GS to match the user %cs,
	 * so swapgs back.
	 */
	.section .fixup,"ax"
bad_iret:
	swapgs
	jmp general_protection
	.previous
	/* edi: workmask, edx: work */
retint_careful:
	bt    $TIF_NEED_RESCHED,%edx
	jnc   retint_signal
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET	8
	call  schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET	-8
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	jmp retint_check

retint_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz    retint_swapgs
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	GET_THREAD_INFO(%rcx)
	jmp retint_with_reschedule
#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx:	threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl $0,TI_preempt_count(%rcx)
	jnz  retint_restore_args
	bt   $TIF_NEED_RESCHED,TI_flags(%rcx)
	jnc  retint_restore_args
	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc  retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif

END(common_interrupt)
/*
 * APIC interrupts.
 */
	.macro apicinterrupt num,func
	INTR_FRAME
	pushq $~(\num)
	CFI_ADJUST_CFA_OFFSET 8
	interrupt \func
	jmp ret_from_intr
	.endm

ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
	.endm

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
ENTRY(call_function_single_interrupt)
	apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
END(call_function_single_interrupt)
ENTRY(irq_move_cleanup_interrupt)
	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)

ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(uv_bau_message_intr1)
	apicinterrupt 220,uv_bau_message_interrupt
END(uv_bau_message_intr1)

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)
/*
 * Exception entry points.
 */
	.macro zeroentry sym
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0	/* push error code/oldrax */
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax	/* push real oldrax to the rdi slot */
	CFI_ADJUST_CFA_OFFSET 8
	leaq  \sym(%rip),%rax
	jmp error_entry
	.endm
	.macro errorentry sym
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq %rax
	CFI_ADJUST_CFA_OFFSET 8
	leaq  \sym(%rip),%rax
	jmp error_entry
	.endm
	/* error code is on the stack already */
	/* handle NMI-like exceptions that can happen everywhere */
	.macro paranoidentry sym, ist=0, irqtrace=1
	SAVE_ALL
	cld
	movl $1,%ebx
	movl $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx
	js    1f
	SWAPGS
	xorl  %ebx,%ebx
1:
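	/*
	 * The rdmsr/js test above works because the kernel GS base is a
	 * negative (kernel-half) address: if MSR_GS_BASE is already
	 * negative we entered with kernel GS (leave %ebx = 1, no swapgs
	 * needed on exit); otherwise do SWAPGS now and clear %ebx so the
	 * exit path swaps back.
	 */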
	.if \ist
	movq	%gs:pda_data_offset, %rbp
	.endif
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi
	movq $-1,ORIG_RAX(%rsp)
	.if \ist
	subq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	call \sym
	.if \ist
	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	DISABLE_INTERRUPTS(CLBR_NONE)
	.endm
903 * "Paranoid" exit path from exception stack.
904 * Paranoid because this is used by NMIs and cannot take
905 * any kernel state for granted.
906 * We don't do kernel preemption checks here, because only
907 * NMI should be common and it does not enable IRQs and
908 * cannot get reschedule ticks.
910 * "trace" is 0 for the NMI handler only, because irq-tracing
911 * is fundamentally NMI-unsafe. (we cannot change the soft and
912 * hard flags at once, atomically)
	.macro paranoidexit trace=1
	/* ebx:	no swapgs flag */
paranoid_exit\trace:
	testl %ebx,%ebx			/* swapgs needed? */
	jnz paranoid_restore\trace
	testl $3,CS(%rsp)
	jnz paranoid_userspace\trace
paranoid_swapgs\trace:
	.if \trace
	TRACE_IRQS_IRETQ 0
	.endif
	SWAPGS_UNSAFE_STACK
paranoid_restore\trace:
	RESTORE_ALL 8
	jmp irq_return
paranoid_userspace\trace:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs\trace
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule\trace
	movl %ebx,%edx			/* arg3: thread flags */
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi			/* arg2: oldset */
	movq %rsp,%rdi			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	jmp paranoid_userspace\trace
paranoid_schedule\trace:
	ENABLE_INTERRUPTS(CLBR_ANY)
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	jmp paranoid_userspace\trace
	.endm
/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
	_frame RDI
	/* rdi slot contains rax, oldrax contains error code */
	cld
	subq  $14*8,%rsp
	CFI_ADJUST_CFA_OFFSET	(14*8)
	movq %rsi,13*8(%rsp)
	CFI_REL_OFFSET	rsi,RSI
	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
	movq %rdx,12*8(%rsp)
	CFI_REL_OFFSET	rdx,RDX
	movq %rcx,11*8(%rsp)
	CFI_REL_OFFSET	rcx,RCX
	movq %rsi,10*8(%rsp)	/* store rax */
	CFI_REL_OFFSET	rax,RAX
	movq %r8, 9*8(%rsp)
	CFI_REL_OFFSET	r8,R8
	movq %r9, 8*8(%rsp)
	CFI_REL_OFFSET	r9,R9
	movq %r10,7*8(%rsp)
	CFI_REL_OFFSET	r10,R10
	movq %r11,6*8(%rsp)
	CFI_REL_OFFSET	r11,R11
	movq %rbx,5*8(%rsp)
	CFI_REL_OFFSET	rbx,RBX
	movq %rbp,4*8(%rsp)
	CFI_REL_OFFSET	rbp,RBP
	movq %r12,3*8(%rsp)
	CFI_REL_OFFSET	r12,R12
	movq %r13,2*8(%rsp)
	CFI_REL_OFFSET	r13,R13
	movq %r14,1*8(%rsp)
	CFI_REL_OFFSET	r14,R14
	movq %r15,(%rsp)
	CFI_REL_OFFSET	r15,R15
	xorl %ebx,%ebx
	testl $3,CS(%rsp)
	je error_kernelspace
error_swapgs:
	SWAPGS
error_sti:
	movq %rdi,RDI(%rsp)
	CFI_REL_OFFSET	rdi,RDI
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)
	call *%rax
	/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
	movl %ebx,%eax
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne  retint_kernel
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	movl $_TIF_WORK_MASK,%edi
	andl %edi,%edx
	jnz  retint_careful
	jmp retint_swapgs
error_kernelspace:
	incl %ebx
	/* There are two places in the kernel that can potentially fault with
	   usergs. Handle them here. The exception handlers after
	   iret run with kernel gs again, so don't set the user space flag.
	   B stepping K8s sometimes report a truncated RIP for IRET
	   exceptions returning to compat mode. Check for these here too. */
	leaq irq_return(%rip),%rcx
	cmpq %rcx,RIP(%rsp)
	je   error_swapgs
	movl %ecx,%ecx	/* zero extend */
	cmpq %rcx,RIP(%rsp)
	je   error_swapgs
	cmpq $gs_change,RIP(%rsp)
	je   error_swapgs
	jmp  error_sti
KPROBE_END(error_entry)
	/* Reload gs selector with exception handling */
	/* edi:	new selector */
ENTRY(native_load_gs_index)
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
	SWAPGS
gs_change:
	movl %edi,%gs
2:	mfence		/* workaround */
	SWAPGS
	popf
	CFI_ADJUST_CFA_OFFSET -8
	ret
ENDPROC(native_load_gs_index)

	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS			/* switch back to user gs */
	movl %eax,%gs
	jmp  2b
	.previous
/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
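/*
 * Illustrative sketch only (not code from this file): an asm caller
 * would load the C argument registers per the interface above, e.g.
 * ("my_fn" and "my_arg" are hypothetical placeholder symbols):
 *
 *	leaq	my_fn(%rip),%rdi	# fn
 *	leaq	my_arg(%rip),%rsi	# arg
 *	movq	$CLONE_FS,%rdx		# flags
 *	call	kernel_thread
 */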
ENTRY(kernel_thread)
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq  kernel_thread_flags(%rip),%rdi
	movq $-1, %rsi
	movq %rsp, %rdx

	# clone now
	call do_fork
	movq %rax,RAX(%rsp)

	/*
	 * It isn't worth checking for a reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not to reschedule the child before returning; this avoids the need
	 * for hacks, for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
ENDPROC(kernel_thread)
child_rip:
	pushq $0		# fake return address
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
ENTRY(kernel_execve)
	FAKE_STACK_FRAME $0
	SAVE_ALL
	movq %rsp,%rcx
	call sys_execve
	movq %rax, RAX(%rsp)
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
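	/* On success sys_execve has installed the new program's register
	   state in the fake frame, so leaving through the IRET path starts
	   the new image; on failure we fall through and return the error. */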
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
ENDPROC(kernel_execve)
KPROBE_ENTRY(page_fault)
	errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	zeroentry math_state_restore
END(device_not_available)
	/* runs on exception stack */
KPROBE_ENTRY(debug)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_debug, DEBUG_STACK
	paranoidexit
KPROBE_END(debug)

	/* runs on exception stack */
KPROBE_ENTRY(nmi)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $-1
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
	paranoidexit 0
#else
	jmp paranoid_exit1
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_int3, DEBUG_STACK
	jmp paranoid_exit1
KPROBE_END(int3)
ENTRY(overflow)
	zeroentry do_overflow
END(overflow)

ENTRY(invalid_op)
	zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)
	/* runs on exception stack */
ENTRY(double_fault)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	paranoidentry do_double_fault
	jmp paranoid_exit1
END(double_fault)
ENTRY(invalid_TSS)
	errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
	errorentry do_segment_not_present
END(segment_not_present)
	/* runs on exception stack */
ENTRY(stack_segment)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	paranoidentry do_stack_segment
	jmp paranoid_exit1
END(stack_segment)
KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
	errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
	zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)
#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_machine_check
	jmp paranoid_exit1
END(machine_check)
#endif
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	push %rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET rbp,0
	mov  %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl %gs:pda_irqcount
	cmove %gs:pda_irqstackptr,%rsp
	push %rbp			# backlink for old unwinder
	call __do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
	decl %gs:pda_irqcount
	ret
ENDPROC(call_softirq)
KPROBE_ENTRY(ignore_sysret)
	mov $-ENOSYS,%eax
	sysret
ENDPROC(ignore_sysret)
#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
	zeroentry xen_do_hypervisor_callback
END(xen_hypervisor_callback)
/*
# A note on the "critical region" in our callback handler.
# We want to avoid stacking callback handlers due to events occurring
# during handling of the last event. To do this, we keep events disabled
# until we've done all processing. HOWEVER, we must enable events before
# popping the stack frame (can't be done atomically) and so it would still
# be possible to get enough handler activations to overflow the stack.
# Although unlikely, bugs of that kind are hard to track down, so we'd
# like to avoid the possibility.
# So, on entry to the handler we detect whether we interrupted an
# existing activation in its critical region -- if so, we pop the current
# activation and restart the handler using the previous one.
*/
ENTRY(xen_do_hypervisor_callback)	# do_hypervisor_callback(struct pt_regs *)
/* Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will
   see the correct pointer to the pt_regs */
	movq %rdi, %rsp			# we don't return, adjust the stack frame
11:	incl %gs:pda_irqcount
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	cmovzq %gs:pda_irqstackptr,%rsp
	pushq %rbp			# backlink for old unwinder
	call xen_evtchn_do_upcall
	popq %rsp
	CFI_DEF_CFA_REGISTER rsp
	decl %gs:pda_irqcount
	jmp  error_exit
END(xen_do_hypervisor_callback)
/*
# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we do not need to fix up as Xen has already reloaded all segment
# registers that could be reloaded and zeroed the others.
# Category 2 we fix up by killing the current process. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by comparing each saved segment register
# with its current contents: any discrepancy means we are in category 1.
*/
ENTRY(xen_failsafe_callback)
	framesz = (RIP-0x30)	/* workaround buggy gas */
	_frame framesz
	CFI_REL_OFFSET rcx, 0
	CFI_REL_OFFSET r11, 8
	movw %ds,%cx
	cmpw %cx,0x10(%rsp)
	jne 1f
	movw %es,%cx
	cmpw %cx,0x18(%rsp)
	jne 1f
	movw %fs,%cx
	cmpw %cx,0x20(%rsp)
	jne 1f
	movw %gs,%cx
	cmpw %cx,0x28(%rsp)
	jne 1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq (%rsp),%rcx
	movq 8(%rsp),%r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rcx
	CFI_ADJUST_CFA_OFFSET 8
	jmp general_protection
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq (%rsp),%rcx
	movq 8(%rsp),%r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	SAVE_ALL
	jmp error_exit
END(xen_failsafe_callback)
#endif /* CONFIG_XEN */