2 * linux/arch/x86_64/entry.S
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
6 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
10 * entry.S contains the system-call and fault low-level handling routines.
12 * NOTE: This code handles signal-recognition, which happens every time
13 * after an interrupt and after each system call.
15 * Normal syscalls and interrupts don't save a full stack frame, this is
16 * only done for syscall tracing, signals or fork/exec et.al.
18 * A note on terminology:
19 * - top of stack: Architecture defined interrupt frame from SS to RIP
20 * at the top of the kernel process stack.
21 * - partial stack frame: partially saved registers up to R11.
22 * - full stack frame: Like partial stack frame, but all registers saved.
25 * - CFI macros are used to generate dwarf2 unwind information for better
26 * backtraces. They don't change any code.
27 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
28 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
29 * There are unfortunately lots of special cases where some registers
30 * are not touched. The macro is a big mess that should be cleaned up.
31 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
32 * Gives a full stack frame.
33 * - ENTRY/END Define functions in the symbol table.
34 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
35 * frame that is otherwise undefined after a SYSCALL
36 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
37 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
40 #include <linux/linkage.h>
41 #include <asm/segment.h>
42 #include <asm/cache.h>
43 #include <asm/errno.h>
44 #include <asm/dwarf2.h>
45 #include <asm/calling.h>
46 #include <asm/asm-offsets.h>
48 #include <asm/unistd.h>
49 #include <asm/thread_info.h>
50 #include <asm/hw_irq.h>
52 #include <asm/irqflags.h>
53 #include <asm/paravirt.h>
54 #include <asm/ftrace.h>
56 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
57 #include <linux/elf-em.h>
/*
 * AUDIT_ARCH_X86_64 is defined before the two helper macros it references;
 * this is valid because the C preprocessor expands macros at the point of
 * use, not at the point of definition.
 */
58 #define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
59 #define __AUDIT_ARCH_64BIT 0x80000000
60 #define __AUDIT_ARCH_LE 0x40000000
/*
 * NOTE(review): this region is an incomplete excerpt — the embedded original
 * line numbers jump (65 -> 71, 102 -> 112, ...), so the mcount/ftrace_caller
 * bodies, labels and several branch targets are defined off-view. Comments
 * below describe only what is visible here.
 */
64 #ifdef CONFIG_FUNCTION_TRACER
65 #ifdef CONFIG_DYNAMIC_FTRACE
/* function_trace_stop != 0 presumably short-circuits tracing — target of the branch not visible here. */
71 cmpl $0, function_trace_stop
74 /* taken from glibc */
/* Adjust return address back to the start of the mcount call site. */
86 subq $MCOUNT_INSN_SIZE, %rdi
101 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
102 .globl ftrace_graph_call
112 #else /* ! CONFIG_DYNAMIC_FTRACE */
114 cmpl $0, function_trace_stop
/* Skip the call when no tracer is registered (still set to the stub). */
117 cmpq $ftrace_stub, ftrace_trace_function
120 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
121 cmpq $ftrace_stub, ftrace_graph_return
122 jnz ftrace_graph_caller
124 cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
125 jnz ftrace_graph_caller
133 /* taken from glibc */
/* 0x38(%rsp): caller's return address (save area laid out off-view). */
143 movq 0x38(%rsp), %rdi
145 subq $MCOUNT_INSN_SIZE, %rdi
147 call *ftrace_trace_function
160 #endif /* CONFIG_DYNAMIC_FTRACE */
161 #endif /* CONFIG_FUNCTION_TRACER */
163 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
164 ENTRY(ftrace_graph_caller)
165 cmpl $0, function_trace_stop
178 movq 0x38(%rsp), %rsi
179 subq $MCOUNT_INSN_SIZE, %rsi
181 call prepare_ftrace_return
192 END(ftrace_graph_caller)
195 .globl return_to_handler
209 call ftrace_return_to_handler
/* Without CONFIG_PREEMPT the kernel-return path needs no preemption check. */
226 #ifndef CONFIG_PREEMPT
227 #define retint_kernel retint_restore_args
230 #ifdef CONFIG_PARAVIRT
231 ENTRY(native_usergs_sysret64)
234 #endif /* CONFIG_PARAVIRT */
/*
 * Emit irq-flags tracing before an iretq, based on the IF bit saved in the
 * frame. NOTE(review): macro body and .endm are not visible in this excerpt.
 */
237 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
238 #ifdef CONFIG_TRACE_IRQFLAGS
/* Bit 9 of RFLAGS is IF: test whether the saved frame had interrupts enabled. */
239 bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
247 * C code is not supposed to know about undefined top of stack. Every time
248 * a C function with a pt_regs argument is called from the SYSCALL based
249 * fast path FIXUP_TOP_OF_STACK is needed.
250 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
254 /* %rsp:at FRAMEEND */
/*
 * Reconstruct the hardware-defined top of stack (SS/CS/EFLAGS/RSP/RIP) that
 * SYSCALL never saved, so C code sees a complete pt_regs.
 * NOTE(review): several interior lines and the .endm are missing from this
 * excerpt (embedded line numbers jump 256 -> 258, 262 -> 265).
 */
255 .macro FIXUP_TOP_OF_STACK tmp
256 movq %gs:pda_oldrsp,\tmp
258 movq $__USER_DS,SS(%rsp)
259 movq $__USER_CS,CS(%rsp)
261 movq R11(%rsp),\tmp /* get eflags */
/* SYSCALL stashed user RFLAGS in R11; copy it into the EFLAGS slot. */
262 movq \tmp,EFLAGS(%rsp)
/*
 * Inverse of FIXUP_TOP_OF_STACK: push the (possibly ptrace-modified) frame
 * values back where SYSRET expects them (oldrsp in the PDA, eflags in R11).
 */
265 .macro RESTORE_TOP_OF_STACK tmp,offset=0
266 movq RSP-\offset(%rsp),\tmp
267 movq \tmp,%gs:pda_oldrsp
268 movq EFLAGS-\offset(%rsp),\tmp
269 movq \tmp,R11-\offset(%rsp)
/*
 * Build a fake interrupt-style frame so a kernel thread can later "return"
 * into \child_rip via the normal exit path.
 */
272 .macro FAKE_STACK_FRAME child_rip
273 /* push in order ss, rsp, eflags, cs, rip */
275 pushq $__KERNEL_DS /* ss */
276 CFI_ADJUST_CFA_OFFSET 8
277 /*CFI_REL_OFFSET ss,0*/
279 CFI_ADJUST_CFA_OFFSET 8
/* 1<<9 sets only IF in the fabricated RFLAGS image: child starts with irqs on. */
281 pushq $(1<<9) /* eflags - interrupts on */
282 CFI_ADJUST_CFA_OFFSET 8
283 /*CFI_REL_OFFSET rflags,0*/
284 pushq $__KERNEL_CS /* cs */
285 CFI_ADJUST_CFA_OFFSET 8
286 /*CFI_REL_OFFSET cs,0*/
287 pushq \child_rip /* rip */
288 CFI_ADJUST_CFA_OFFSET 8
290 pushq %rax /* orig rax */
291 CFI_ADJUST_CFA_OFFSET 8
/* Drop the 6 quadwords pushed by FAKE_STACK_FRAME. */
294 .macro UNFAKE_STACK_FRAME
296 CFI_ADJUST_CFA_OFFSET -(6*8)
/*
 * Declare the default DWARF CFI state for a full pt_regs frame, so unwinders
 * can find every saved register by its struct offset.
 */
299 .macro CFI_DEFAULT_STACK start=1
305 CFI_DEF_CFA_OFFSET SS+8
307 CFI_REL_OFFSET r15,R15
308 CFI_REL_OFFSET r14,R14
309 CFI_REL_OFFSET r13,R13
310 CFI_REL_OFFSET r12,R12
311 CFI_REL_OFFSET rbp,RBP
312 CFI_REL_OFFSET rbx,RBX
313 CFI_REL_OFFSET r11,R11
314 CFI_REL_OFFSET r10,R10
317 CFI_REL_OFFSET rax,RAX
318 CFI_REL_OFFSET rcx,RCX
319 CFI_REL_OFFSET rdx,RDX
320 CFI_REL_OFFSET rsi,RSI
321 CFI_REL_OFFSET rdi,RDI
322 CFI_REL_OFFSET rip,RIP
323 /*CFI_REL_OFFSET cs,CS*/
324 /*CFI_REL_OFFSET rflags,EFLAGS*/
325 CFI_REL_OFFSET rsp,RSP
326 /*CFI_REL_OFFSET ss,SS*/
329 * A newly forked process directly context switches into this.
/* Reload the canonical kernel RFLAGS (the child inherited the parent's). */
334 push kernel_eflags(%rip)
335 CFI_ADJUST_CFA_OFFSET 8
336 popf # reset kernel eflags
337 CFI_ADJUST_CFA_OFFSET -8
339 GET_THREAD_INFO(%rcx)
/* Take the slow (tracing) exit if the child is being ptraced/audited. */
340 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
/* CPL bits of saved CS == 0 => parent was a kernel thread, must IRET out. */
344 testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread?
345 je int_ret_from_sys_call
/* 32-bit (ia32 compat) children also take the IRET path. */
346 testl $_TIF_IA32,TI_flags(%rcx)
347 jnz int_ret_from_sys_call
348 RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
349 jmp ret_from_sys_call
352 call syscall_trace_leave
353 GET_THREAD_INFO(%rcx)
359 * System call entry. Up to 6 arguments in registers are supported.
361 * SYSCALL does not save anything on the stack and does not change the
367 * rax system call number
369 * rcx return address for syscall/sysret, C arg3
372 * r10 arg3 (--> moved to rcx for C)
375 * r11 eflags for syscall/sysret, temporary for C
376 * r12-r15,rbp,rbx saved by C code, not touched.
378 * Interrupts are off on entry.
379 * Only called from user space.
381 * XXX if we had a free scratch register we could save the RSP into the stack frame
382 * and report it properly in ps. Unfortunately we haven't.
384 * When user can change the frames always force IRET. That is because
385 * it deals with uncanonical addresses better. SYSRET has trouble
386 * with them due to bugs in both AMD and Intel CPUs.
/*
 * NOTE(review): incomplete excerpt — labels such as ENTRY(system_call),
 * badsys, sysret_check and several swapgs/branch lines fall in the gaps of
 * the embedded numbering and are not visible here.
 */
392 CFI_DEF_CFA rsp,PDA_STACKOFFSET
394 /*CFI_REGISTER rflags,r11*/
397 * A hypervisor implementation might want to use a label
398 * after the swapgs, so that it can do the swapgs
399 * for the guest and jump here on syscall.
401 ENTRY(system_call_after_swapgs)
/* Save the user stack pointer in the PDA and switch to the kernel stack. */
403 movq %rsp,%gs:pda_oldrsp
404 movq %gs:pda_kernelstack,%rsp
406 * No need to follow this irqs off/on section - it's straight
409 ENABLE_INTERRUPTS(CLBR_NONE)
/* SYSCALL left the syscall nr in rax and the return RIP in rcx. */
411 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
412 movq %rcx,RIP-ARGOFFSET(%rsp)
413 CFI_REL_OFFSET rip,RIP-ARGOFFSET
414 GET_THREAD_INFO(%rcx)
/* Any entry work (trace/audit/seccomp) pending? Slow path if so. */
415 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
417 system_call_fastpath:
/* Range check before the indirect table dispatch (r10->rcx move is off-view). */
418 cmpq $__NR_syscall_max,%rax
421 call *sys_call_table(,%rax,8) # XXX: rip relative
422 movq %rax,RAX-ARGOFFSET(%rsp)
424 * Syscall return path ending with SYSRET (fast path)
425 * Has incomplete stack frame and undefined top of stack.
428 movl $_TIF_ALLWORK_MASK,%edi
432 GET_THREAD_INFO(%rcx)
433 DISABLE_INTERRUPTS(CLBR_NONE)
435 movl TI_flags(%rcx),%edx
440 * sysretq will re-enable interrupts:
/* SYSRET wants the return RIP back in rcx (and user RFLAGS in r11). */
443 movq RIP-ARGOFFSET(%rsp),%rcx
445 RESTORE_ARGS 0,-ARG_SKIP,1
446 /*CFI_REGISTER rflags,r11*/
/* Restore the user stack pointer saved at entry. */
447 movq %gs:pda_oldrsp, %rsp
451 /* Handle reschedules */
452 /* edx: work, edi: workmask */
454 bt $TIF_NEED_RESCHED,%edx
457 ENABLE_INTERRUPTS(CLBR_NONE)
459 CFI_ADJUST_CFA_OFFSET 8
462 CFI_ADJUST_CFA_OFFSET -8
465 /* Handle a signal */
468 ENABLE_INTERRUPTS(CLBR_NONE)
469 #ifdef CONFIG_AUDITSYSCALL
470 bt $TIF_SYSCALL_AUDIT,%edx
473 /* edx: work flags (arg3) */
/* Deliver the signal via do_notify_resume with a full frame built by ptregscall_common. */
474 leaq do_notify_resume(%rip),%rax
475 leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
476 xorl %esi,%esi # oldset -> arg2
477 call ptregscall_common
478 movl $_TIF_WORK_MASK,%edi
479 /* Use IRET because user could have changed frame. This
480 works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
481 DISABLE_INTERRUPTS(CLBR_NONE)
/* Bad syscall number: report -ENOSYS through the normal return path. */
486 movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
487 jmp ret_from_sys_call
489 #ifdef CONFIG_AUDITSYSCALL
491 * Fast path for syscall audit without full syscall trace.
492 * We just call audit_syscall_entry() directly, and then
493 * jump back to the normal fast path.
/* Shuffle syscall args into the SysV C argument registers for audit_syscall_entry. */
496 movq %r10,%r9 /* 6th arg: 4th syscall arg */
497 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
498 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
499 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
500 movq %rax,%rsi /* 2nd arg: syscall number */
501 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
502 call audit_syscall_entry
503 LOAD_ARGS 0 /* reload call-clobbered registers */
504 jmp system_call_fastpath
507 * Return fast path for syscall audit. Call audit_syscall_exit()
508 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
512 movq %rax,%rsi /* second arg, syscall return value */
513 cmpq $0,%rax /* is it < 0? */
514 setl %al /* 1 if so, 0 if not */
515 movzbl %al,%edi /* zero-extend that into %edi */
516 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
517 call audit_syscall_exit
/* Re-check all exit work except audit (just handled). */
518 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
520 #endif /* CONFIG_AUDITSYSCALL */
522 /* Do syscall tracing */
524 #ifdef CONFIG_AUDITSYSCALL
/* If audit is the only pending entry work, use the lighter audit-only path. */
525 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
529 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
530 FIXUP_TOP_OF_STACK %rdi
532 call syscall_trace_enter
534 * Reload arg registers from stack in case ptrace changed them.
535 * We don't reload %rax because syscall_trace_enter() returned
536 * the value it wants us to use in the table lookup.
538 LOAD_ARGS ARGOFFSET, 1
540 cmpq $__NR_syscall_max,%rax
541 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
/* SYSCALL passes arg4 in r10; the C ABI wants it in rcx. */
542 movq %r10,%rcx /* fixup for C */
543 call *sys_call_table(,%rax,8)
544 movq %rax,RAX-ARGOFFSET(%rsp)
545 /* Use IRET because user could have changed frame */
548 * Syscall return path ending with IRET.
549 * Has correct top of stack, but partial stack frame.
551 .globl int_ret_from_sys_call
552 .globl int_with_check
553 int_ret_from_sys_call:
554 DISABLE_INTERRUPTS(CLBR_NONE)
/* Returning to kernel (CPL 0)? Skip all user-mode exit work. */
556 testl $3,CS-ARGOFFSET(%rsp)
557 je retint_restore_args
558 movl $_TIF_ALLWORK_MASK,%edi
559 /* edi: mask to check */
562 GET_THREAD_INFO(%rcx)
563 movl TI_flags(%rcx),%edx
/* Clear the compat-syscall marker now that the syscall is done. */
566 andl $~TS_COMPAT,TI_status(%rcx)
569 /* Either reschedule or signal or syscall exit tracking needed. */
570 /* First do a reschedule test. */
571 /* edx: work, edi: workmask */
573 bt $TIF_NEED_RESCHED,%edx
574 ENABLE_INTERRUPTS(CLBR_NONE)
619 * Certain special system calls that need to save a complete full stack frame.
622 .macro PTREGSCALL label,func,arg
625 leaq \func(%rip),%rax
626 leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
627 jmp ptregscall_common
633 PTREGSCALL stub_clone, sys_clone, %r8
634 PTREGSCALL stub_fork, sys_fork, %rdi
635 PTREGSCALL stub_vfork, sys_vfork, %rdi
636 PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
637 PTREGSCALL stub_iopl, sys_iopl, %rsi
639 ENTRY(ptregscall_common)
641 CFI_ADJUST_CFA_OFFSET -8
642 CFI_REGISTER rip, r11
645 CFI_REGISTER rip, r15
646 FIXUP_TOP_OF_STACK %r11
648 RESTORE_TOP_OF_STACK %r11
650 CFI_REGISTER rip, r11
653 CFI_ADJUST_CFA_OFFSET 8
654 CFI_REL_OFFSET rip, 0
657 END(ptregscall_common)
662 CFI_ADJUST_CFA_OFFSET -8
663 CFI_REGISTER rip, r11
665 FIXUP_TOP_OF_STACK %r11
668 RESTORE_TOP_OF_STACK %r11
671 jmp int_ret_from_sys_call
676 * sigreturn is special because it needs to restore all registers on return.
677 * This cannot be done with SYSRET, so use the IRET return path instead.
679 ENTRY(stub_rt_sigreturn)
682 CFI_ADJUST_CFA_OFFSET -8
685 FIXUP_TOP_OF_STACK %r11
686 call sys_rt_sigreturn
687 movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
689 jmp int_ret_from_sys_call
691 END(stub_rt_sigreturn)
694 * initial frame state for interrupts and exceptions
699 CFI_DEF_CFA rsp,SS+8-\ref
700 /*CFI_REL_OFFSET ss,SS-\ref*/
701 CFI_REL_OFFSET rsp,RSP-\ref
702 /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
703 /*CFI_REL_OFFSET cs,CS-\ref*/
704 CFI_REL_OFFSET rip,RIP-\ref
707 /* initial frame state for interrupts (and exceptions without error code) */
708 #define INTR_FRAME _frame RIP
709 /* initial frame state for exceptions with error code (and interrupts with
710 vector already pushed) */
711 #define XCPT_FRAME _frame ORIG_RAX
714 * Interrupt entry/exit.
716 * Interrupt entry points save only callee clobbered registers in fast path.
718 * Entry runs with interrupts off.
721 /* 0(%rsp): interrupt number */
722 .macro interrupt func
725 leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler
728 * Save rbp twice: One is for marking the stack frame, as usual, and the
729 * other, to fill pt_regs properly. This is because bx comes right
730 * before the last saved register in that structure, and not bp. If the
731 * base pointer were in the place bx is today, this would not be needed.
734 CFI_ADJUST_CFA_OFFSET 8
735 CFI_REL_OFFSET rbp, 0
737 CFI_DEF_CFA_REGISTER rbp
741 /* irqcount is used to check if a CPU is already on an interrupt
742 stack or not. While this is essentially redundant with preempt_count
743 it is a little cheaper to use a separate counter in the PDA
744 (short of moving irq_enter into assembly, which would be too
746 1: incl %gs:pda_irqcount
747 cmoveq %gs:pda_irqstackptr,%rsp
748 push %rbp # backlink for old unwinder
750 * We entered an interrupt context - irqs are off:
756 ENTRY(common_interrupt)
759 /* 0(%rsp): oldrsp-ARGOFFSET */
761 DISABLE_INTERRUPTS(CLBR_NONE)
763 decl %gs:pda_irqcount
765 CFI_DEF_CFA_REGISTER rsp
766 CFI_ADJUST_CFA_OFFSET -8
768 GET_THREAD_INFO(%rcx)
769 testl $3,CS-ARGOFFSET(%rsp)
772 /* Interrupt came from user space */
774 * Has a correct top of stack, but a partial stack frame
775 * %rcx: thread info. Interrupts off.
777 retint_with_reschedule:
778 movl $_TIF_WORK_MASK,%edi
781 movl TI_flags(%rcx),%edx
786 retint_swapgs: /* return to user-space */
788 * The iretq could re-enable interrupts:
790 DISABLE_INTERRUPTS(CLBR_ANY)
795 retint_restore_args: /* return to kernel space */
796 DISABLE_INTERRUPTS(CLBR_ANY)
798 * The iretq could re-enable interrupts:
807 .section __ex_table, "a"
808 .quad irq_return, bad_iret
811 #ifdef CONFIG_PARAVIRT
815 .section __ex_table,"a"
816 .quad native_iret, bad_iret
823 * The iret traps when the %cs or %ss being restored is bogus.
824 * We've lost the original trap vector and error code.
825 * #GPF is the most likely one to get for an invalid selector.
826 * So pretend we completed the iret and took the #GPF in user mode.
828 * We are now running with the kernel GS after exception recovery.
829 * But error_entry expects us to have user GS to match the user %cs,
835 jmp general_protection
839 /* edi: workmask, edx: work */
842 bt $TIF_NEED_RESCHED,%edx
845 ENABLE_INTERRUPTS(CLBR_NONE)
847 CFI_ADJUST_CFA_OFFSET 8
850 CFI_ADJUST_CFA_OFFSET -8
851 GET_THREAD_INFO(%rcx)
852 DISABLE_INTERRUPTS(CLBR_NONE)
857 testl $_TIF_DO_NOTIFY_MASK,%edx
860 ENABLE_INTERRUPTS(CLBR_NONE)
862 movq $-1,ORIG_RAX(%rsp)
863 xorl %esi,%esi # oldset
864 movq %rsp,%rdi # &pt_regs
865 call do_notify_resume
867 DISABLE_INTERRUPTS(CLBR_NONE)
869 GET_THREAD_INFO(%rcx)
870 jmp retint_with_reschedule
872 #ifdef CONFIG_PREEMPT
873 /* Returning to kernel space. Check if we need preemption */
874 /* rcx: threadinfo. interrupts off. */
876 cmpl $0,TI_preempt_count(%rcx)
877 jnz retint_restore_args
878 bt $TIF_NEED_RESCHED,TI_flags(%rcx)
879 jnc retint_restore_args
880 bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
881 jnc retint_restore_args
882 call preempt_schedule_irq
887 END(common_interrupt)
892 .macro apicinterrupt num,func
895 CFI_ADJUST_CFA_OFFSET 8
901 ENTRY(thermal_interrupt)
902 apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
903 END(thermal_interrupt)
905 ENTRY(threshold_interrupt)
906 apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
907 END(threshold_interrupt)
910 ENTRY(reschedule_interrupt)
911 apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
912 END(reschedule_interrupt)
914 .macro INVALIDATE_ENTRY num
915 ENTRY(invalidate_interrupt\num)
916 apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
917 END(invalidate_interrupt\num)
929 ENTRY(call_function_interrupt)
930 apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
931 END(call_function_interrupt)
932 ENTRY(call_function_single_interrupt)
933 apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
934 END(call_function_single_interrupt)
935 ENTRY(irq_move_cleanup_interrupt)
936 apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
937 END(irq_move_cleanup_interrupt)
940 ENTRY(apic_timer_interrupt)
941 apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
942 END(apic_timer_interrupt)
944 ENTRY(uv_bau_message_intr1)
945 apicinterrupt 220,uv_bau_message_interrupt
946 END(uv_bau_message_intr1)
948 ENTRY(error_interrupt)
949 apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
952 ENTRY(spurious_interrupt)
953 apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
954 END(spurious_interrupt)
957 * Exception entry points.
961 PARAVIRT_ADJUST_EXCEPTION_FRAME
962 pushq $0 /* push error code/oldrax */
963 CFI_ADJUST_CFA_OFFSET 8
964 pushq %rax /* push real oldrax to the rdi slot */
965 CFI_ADJUST_CFA_OFFSET 8
972 .macro errorentry sym
974 PARAVIRT_ADJUST_EXCEPTION_FRAME
976 CFI_ADJUST_CFA_OFFSET 8
983 /* error code is on the stack already */
984 /* handle NMI like exceptions that can happen everywhere */
985 .macro paranoidentry sym, ist=0, irqtrace=1
989 movl $MSR_GS_BASE,%ecx
997 movq %gs:pda_data_offset, %rbp
1003 movq ORIG_RAX(%rsp),%rsi
1004 movq $-1,ORIG_RAX(%rsp)
1006 subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
1010 addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
1012 DISABLE_INTERRUPTS(CLBR_NONE)
1019 * "Paranoid" exit path from exception stack.
1020 * Paranoid because this is used by NMIs and cannot take
1021 * any kernel state for granted.
1022 * We don't do kernel preemption checks here, because only
1023 * NMI should be common and it does not enable IRQs and
1024 * cannot get reschedule ticks.
1026 * "trace" is 0 for the NMI handler only, because irq-tracing
1027 * is fundamentally NMI-unsafe. (we cannot change the soft and
1028 * hard flags at once, atomically)
1030 .macro paranoidexit trace=1
1031 /* ebx: no swapgs flag */
1032 paranoid_exit\trace:
1033 testl %ebx,%ebx /* swapgs needed? */
1034 jnz paranoid_restore\trace
1036 jnz paranoid_userspace\trace
1037 paranoid_swapgs\trace:
1042 paranoid_restore\trace:
1045 paranoid_userspace\trace:
1046 GET_THREAD_INFO(%rcx)
1047 movl TI_flags(%rcx),%ebx
1048 andl $_TIF_WORK_MASK,%ebx
1049 jz paranoid_swapgs\trace
1050 movq %rsp,%rdi /* &pt_regs */
1052 movq %rax,%rsp /* switch stack for scheduling */
1053 testl $_TIF_NEED_RESCHED,%ebx
1054 jnz paranoid_schedule\trace
1055 movl %ebx,%edx /* arg3: thread flags */
1059 ENABLE_INTERRUPTS(CLBR_NONE)
1060 xorl %esi,%esi /* arg2: oldset */
1061 movq %rsp,%rdi /* arg1: &pt_regs */
1062 call do_notify_resume
1063 DISABLE_INTERRUPTS(CLBR_NONE)
1067 jmp paranoid_userspace\trace
1068 paranoid_schedule\trace:
1072 ENABLE_INTERRUPTS(CLBR_ANY)
1074 DISABLE_INTERRUPTS(CLBR_ANY)
1078 jmp paranoid_userspace\trace
1083 * Exception entry point. This expects an error code/orig_rax on the stack
1084 * and the exception handler in %rax.
1086 KPROBE_ENTRY(error_entry)
1088 CFI_REL_OFFSET rax,0
1089 /* rdi slot contains rax, oldrax contains error code */
1092 CFI_ADJUST_CFA_OFFSET (14*8)
1093 movq %rsi,13*8(%rsp)
1094 CFI_REL_OFFSET rsi,RSI
1095 movq 14*8(%rsp),%rsi /* load rax from rdi slot */
1096 CFI_REGISTER rax,rsi
1097 movq %rdx,12*8(%rsp)
1098 CFI_REL_OFFSET rdx,RDX
1099 movq %rcx,11*8(%rsp)
1100 CFI_REL_OFFSET rcx,RCX
1101 movq %rsi,10*8(%rsp) /* store rax */
1102 CFI_REL_OFFSET rax,RAX
1104 CFI_REL_OFFSET r8,R8
1106 CFI_REL_OFFSET r9,R9
1108 CFI_REL_OFFSET r10,R10
1110 CFI_REL_OFFSET r11,R11
1112 CFI_REL_OFFSET rbx,RBX
1114 CFI_REL_OFFSET rbp,RBP
1116 CFI_REL_OFFSET r12,R12
1118 CFI_REL_OFFSET r13,R13
1120 CFI_REL_OFFSET r14,R14
1122 CFI_REL_OFFSET r15,R15
1125 je error_kernelspace
1131 CFI_REL_OFFSET rdi,RDI
1133 movq ORIG_RAX(%rsp),%rsi /* get error code */
1134 movq $-1,ORIG_RAX(%rsp)
1136 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
1140 DISABLE_INTERRUPTS(CLBR_NONE)
1142 GET_THREAD_INFO(%rcx)
1145 LOCKDEP_SYS_EXIT_IRQ
1146 movl TI_flags(%rcx),%edx
1147 movl $_TIF_WORK_MASK,%edi
1155 /* There are two places in the kernel that can potentially fault with
1156 usergs. Handle them here. The exception handlers after
1157 iret run with kernel gs again, so don't set the user space flag.
1158 B stepping K8s sometimes report an truncated RIP for IRET
1159 exceptions returning to compat mode. Check for these here too. */
1160 leaq irq_return(%rip),%rcx
1163 movl %ecx,%ecx /* zero extend */
1166 cmpq $gs_change,RIP(%rsp)
1169 KPROBE_END(error_entry)
1171 /* Reload gs selector with exception handling */
1172 /* edi: new selector */
1173 ENTRY(native_load_gs_index)
1176 CFI_ADJUST_CFA_OFFSET 8
1177 DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
1181 2: mfence /* workaround */
1184 CFI_ADJUST_CFA_OFFSET -8
1187 ENDPROC(native_load_gs_index)
1189 .section __ex_table,"a"
1191 .quad gs_change,bad_gs
1193 .section .fixup,"ax"
1194 /* running with kernelgs */
1196 SWAPGS /* switch back to user gs */
1203 * Create a kernel thread.
1205 * C extern interface:
1206 * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
1208 * asm input arguments:
1209 * rdi: fn, rsi: arg, rdx: flags
1211 ENTRY(kernel_thread)
1213 FAKE_STACK_FRAME $child_rip
1216 # rdi: flags, rsi: usp, rdx: will be &pt_regs
1218 orq kernel_thread_flags(%rip),%rdi
1231 * It isn't worth to check for reschedule here,
1232 * so internally to the x86_64 port you can rely on kernel_thread()
1233 * not to reschedule the child before returning, this avoids the need
1234 * of hacks for example to fork off the per-CPU idle tasks.
1235 * [Hopefully no generic code relies on the reschedule -AK]
1241 ENDPROC(kernel_thread)
1244 pushq $0 # fake return address
1247 * Here we are in the child and the registers are set as they were
1248 * at kernel_thread() invocation in the parent.
1260 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
1262 * C extern interface:
1263 * extern long execve(char *name, char **argv, char **envp)
1265 * asm input arguments:
1266 * rdi: name, rsi: argv, rdx: envp
1268 * We want to fallback into:
1269 * extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs *regs)
1271 * do_sys_execve asm fallback arguments:
1272 * rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
1274 ENTRY(kernel_execve)
1280 movq %rax, RAX(%rsp)
1283 je int_ret_from_sys_call
1288 ENDPROC(kernel_execve)
1290 KPROBE_ENTRY(page_fault)
1291 errorentry do_page_fault
1292 KPROBE_END(page_fault)
1294 ENTRY(coprocessor_error)
1295 zeroentry do_coprocessor_error
1296 END(coprocessor_error)
1298 ENTRY(simd_coprocessor_error)
1299 zeroentry do_simd_coprocessor_error
1300 END(simd_coprocessor_error)
1302 ENTRY(device_not_available)
1303 zeroentry do_device_not_available
1304 END(device_not_available)
1306 /* runs on exception stack */
1309 PARAVIRT_ADJUST_EXCEPTION_FRAME
1311 CFI_ADJUST_CFA_OFFSET 8
1312 paranoidentry do_debug, DEBUG_STACK
1316 /* runs on exception stack */
1319 PARAVIRT_ADJUST_EXCEPTION_FRAME
1321 CFI_ADJUST_CFA_OFFSET 8
1322 paranoidentry do_nmi, 0, 0
1323 #ifdef CONFIG_TRACE_IRQFLAGS
1333 PARAVIRT_ADJUST_EXCEPTION_FRAME
1335 CFI_ADJUST_CFA_OFFSET 8
1336 paranoidentry do_int3, DEBUG_STACK
1342 zeroentry do_overflow
1350 zeroentry do_invalid_op
1353 ENTRY(coprocessor_segment_overrun)
1354 zeroentry do_coprocessor_segment_overrun
1355 END(coprocessor_segment_overrun)
1357 /* runs on exception stack */
1360 PARAVIRT_ADJUST_EXCEPTION_FRAME
1361 paranoidentry do_double_fault
1367 errorentry do_invalid_TSS
1370 ENTRY(segment_not_present)
1371 errorentry do_segment_not_present
1372 END(segment_not_present)
1374 /* runs on exception stack */
1375 ENTRY(stack_segment)
1377 PARAVIRT_ADJUST_EXCEPTION_FRAME
1378 paranoidentry do_stack_segment
1383 KPROBE_ENTRY(general_protection)
1384 errorentry do_general_protection
1385 KPROBE_END(general_protection)
1387 ENTRY(alignment_check)
1388 errorentry do_alignment_check
1389 END(alignment_check)
1392 zeroentry do_divide_error
1395 ENTRY(spurious_interrupt_bug)
1396 zeroentry do_spurious_interrupt_bug
1397 END(spurious_interrupt_bug)
1399 #ifdef CONFIG_X86_MCE
1400 /* runs on exception stack */
1401 ENTRY(machine_check)
1403 PARAVIRT_ADJUST_EXCEPTION_FRAME
1405 CFI_ADJUST_CFA_OFFSET 8
1406 paranoidentry do_machine_check
1412 /* Call softirq on interrupt stack. Interrupts are off. */
1416 CFI_ADJUST_CFA_OFFSET 8
1417 CFI_REL_OFFSET rbp,0
1419 CFI_DEF_CFA_REGISTER rbp
1420 incl %gs:pda_irqcount
1421 cmove %gs:pda_irqstackptr,%rsp
1422 push %rbp # backlink for old unwinder
1425 CFI_DEF_CFA_REGISTER rsp
1426 CFI_ADJUST_CFA_OFFSET -8
1427 decl %gs:pda_irqcount
1430 ENDPROC(call_softirq)
1432 KPROBE_ENTRY(ignore_sysret)
1437 ENDPROC(ignore_sysret)
1440 ENTRY(xen_hypervisor_callback)
1441 zeroentry xen_do_hypervisor_callback
1442 END(xen_hypervisor_callback)
1445 # A note on the "critical region" in our callback handler.
1446 # We want to avoid stacking callback handlers due to events occurring
1447 # during handling of the last event. To do this, we keep events disabled
1448 # until we've done all processing. HOWEVER, we must enable events before
1449 # popping the stack frame (can't be done atomically) and so it would still
1450 # be possible to get enough handler activations to overflow the stack.
1451 # Although unlikely, bugs of that kind are hard to track down, so we'd
1452 # like to avoid the possibility.
1453 # So, on entry to the handler we detect whether we interrupted an
1454 # existing activation in its critical region -- if so, we pop the current
1455 # activation and restart the handler using the previous one.
1457 ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
1459 /* Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
1460 see the correct pointer to the pt_regs */
1461 movq %rdi, %rsp # we don't return, adjust the stack frame
1464 11: incl %gs:pda_irqcount
1466 CFI_DEF_CFA_REGISTER rbp
1467 cmovzq %gs:pda_irqstackptr,%rsp
1468 pushq %rbp # backlink for old unwinder
1469 call xen_evtchn_do_upcall
1471 CFI_DEF_CFA_REGISTER rsp
1472 decl %gs:pda_irqcount
1475 END(do_hypervisor_callback)
1478 # Hypervisor uses this for application faults while it executes.
1479 # We get here for two reasons:
1480 # 1. Fault while reloading DS, ES, FS or GS
1481 # 2. Fault while executing IRET
1482 # Category 1 we do not need to fix up as Xen has already reloaded all segment
1483 # registers that could be reloaded and zeroed the others.
1484 # Category 2 we fix up by killing the current process. We cannot use the
1485 # normal Linux return path in this case because if we use the IRET hypercall
1486 # to pop the stack frame we end up in an infinite loop of failsafe callbacks.
1487 # We distinguish between categories by comparing each saved segment register
1488 # with its current contents: any discrepancy means we in category 1.
1490 ENTRY(xen_failsafe_callback)
1491 framesz = (RIP-0x30) /* workaround buggy gas */
1493 CFI_REL_OFFSET rcx, 0
1494 CFI_REL_OFFSET r11, 8
1508 /* All segments match their saved values => Category 2 (Bad IRET). */
1514 CFI_ADJUST_CFA_OFFSET -0x30
1516 CFI_ADJUST_CFA_OFFSET 8
1518 CFI_ADJUST_CFA_OFFSET 8
1520 CFI_ADJUST_CFA_OFFSET 8
1521 jmp general_protection
1523 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
1529 CFI_ADJUST_CFA_OFFSET -0x30
1531 CFI_ADJUST_CFA_OFFSET 8
1535 END(xen_failsafe_callback)
1537 #endif /* CONFIG_XEN */