1 /*
2 * linux/arch/x86_64/entry.S
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
6 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
7 */
8
9 /*
10 * entry.S contains the system-call and fault low-level handling routines.
11 *
12 * NOTE: This code handles signal recognition, which happens every time
13 * after an interrupt and after each system call.
14 *
15 * Normal syscalls and interrupts don't save a full stack frame; this is
16 * only done for syscall tracing, signals or fork/exec et al.
17 *
18 * A note on terminology:
19 * - top of stack: Architecture defined interrupt frame from SS to RIP
20 * at the top of the kernel process stack.
21 * - partial stack frame: partially saved registers, up to R11.
22 * - full stack frame: Like the partial stack frame, but with all registers saved.
23 *
24 * Some macro usage:
25 * - CFI macros are used to generate dwarf2 unwind information for better
26 * backtraces. They don't change any code.
27 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
28 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
29 * There are unfortunately lots of special cases where some registers
30 * are not touched. The macro is a big mess that should be cleaned up.
31 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
32 * Gives a full stack frame.
33 * - ENTRY/END - Define functions in the symbol table.
34 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
35 * frame that is otherwise undefined after a SYSCALL
36 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
37 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
38 */
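/*
 * Editor's illustration (not part of the original file): a sketch of the
 * kernel-stack layout behind the terminology above, assuming the
 * struct pt_regs / asm/calling.h layout of this kernel generation.
 * Highest address first:
 *
 *	SS, RSP, EFLAGS, CS, RIP		hardware frame ("top of stack")
 *	ORIG_RAX				error code / syscall nr (or -1)
 *	RDI RSI RDX RCX RAX R8 R9 R10 R11	partial frame (SAVE_ARGS)
 *	RBX RBP R12 R13 R14 R15			added by SAVE_REST (full frame)
 */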
39
40 #include <linux/linkage.h>
41 #include <asm/segment.h>
42 #include <asm/cache.h>
43 #include <asm/errno.h>
44 #include <asm/dwarf2.h>
45 #include <asm/calling.h>
46 #include <asm/asm-offsets.h>
47 #include <asm/msr.h>
48 #include <asm/unistd.h>
49 #include <asm/thread_info.h>
50 #include <asm/hw_irq.h>
51 #include <asm/page.h>
52 #include <asm/irqflags.h>
53 #include <asm/paravirt.h>
54 #include <asm/ftrace.h>
55
56 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
57 #include <linux/elf-em.h>
58 #define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
59 #define __AUDIT_ARCH_64BIT 0x80000000
60 #define __AUDIT_ARCH_LE 0x40000000
61
62 .code64
63 /*
64 * Some macros to hide the most frequently occurring CFI annotations.
65 */
66 .macro CFI_PUSHQ reg
67 pushq \reg
68 CFI_ADJUST_CFA_OFFSET 8
69 .endm
70
71 .macro CFI_POPQ reg
72 popq \reg
73 CFI_ADJUST_CFA_OFFSET -8
74 .endm
75
76 .macro CFI_MOVQ reg offset=0
77 movq %\reg, \offset(%rsp)
78 CFI_REL_OFFSET \reg, \offset
79 .endm
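/*
 * Editor's note: for example, "CFI_PUSHQ %rbx" simply expands to
 *
 *	pushq %rbx
 *	CFI_ADJUST_CFA_OFFSET 8
 *
 * i.e. the instruction plus the matching dwarf2 CFA adjustment, and
 * CFI_MOVQ pairs the store into the frame with a CFI_REL_OFFSET note.
 */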
80
81 #ifdef CONFIG_FUNCTION_TRACER
82 #ifdef CONFIG_DYNAMIC_FTRACE
83 ENTRY(mcount)
84 retq
85 END(mcount)
86
87 ENTRY(ftrace_caller)
88
89 /* taken from glibc */
90 subq $0x38, %rsp
91 movq %rax, (%rsp)
92 movq %rcx, 8(%rsp)
93 movq %rdx, 16(%rsp)
94 movq %rsi, 24(%rsp)
95 movq %rdi, 32(%rsp)
96 movq %r8, 40(%rsp)
97 movq %r9, 48(%rsp)
98
99 movq 0x38(%rsp), %rdi
100 movq 8(%rbp), %rsi
101 subq $MCOUNT_INSN_SIZE, %rdi
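	/*
	 * Editor's note: at this point %rdi holds the address of the mcount
	 * call site inside the traced function (its return address from
	 * 0x38(%rsp), just above the seven saved registers, minus
	 * MCOUNT_INSN_SIZE), and %rsi holds the traced function's own return
	 * address from 8(%rbp) -- the (ip, parent_ip) pair the tracer
	 * callback expects.
	 */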
102
103 .globl ftrace_call
104 ftrace_call:
105 call ftrace_stub
106
107 movq 48(%rsp), %r9
108 movq 40(%rsp), %r8
109 movq 32(%rsp), %rdi
110 movq 24(%rsp), %rsi
111 movq 16(%rsp), %rdx
112 movq 8(%rsp), %rcx
113 movq (%rsp), %rax
114 addq $0x38, %rsp
115
116 .globl ftrace_stub
117 ftrace_stub:
118 retq
119 END(ftrace_caller)
120
121 #else /* ! CONFIG_DYNAMIC_FTRACE */
122 ENTRY(mcount)
123 cmpq $ftrace_stub, ftrace_trace_function
124 jnz trace
125 .globl ftrace_stub
126 ftrace_stub:
127 retq
128
129 trace:
130 /* taken from glibc */
131 subq $0x38, %rsp
132 movq %rax, (%rsp)
133 movq %rcx, 8(%rsp)
134 movq %rdx, 16(%rsp)
135 movq %rsi, 24(%rsp)
136 movq %rdi, 32(%rsp)
137 movq %r8, 40(%rsp)
138 movq %r9, 48(%rsp)
139
140 movq 0x38(%rsp), %rdi
141 movq 8(%rbp), %rsi
142 subq $MCOUNT_INSN_SIZE, %rdi
143
144 call *ftrace_trace_function
145
146 movq 48(%rsp), %r9
147 movq 40(%rsp), %r8
148 movq 32(%rsp), %rdi
149 movq 24(%rsp), %rsi
150 movq 16(%rsp), %rdx
151 movq 8(%rsp), %rcx
152 movq (%rsp), %rax
153 addq $0x38, %rsp
154
155 jmp ftrace_stub
156 END(mcount)
157 #endif /* CONFIG_DYNAMIC_FTRACE */
158 #endif /* CONFIG_FUNCTION_TRACER */
159
160 #ifndef CONFIG_PREEMPT
161 #define retint_kernel retint_restore_args
162 #endif
163
164 #ifdef CONFIG_PARAVIRT
165 ENTRY(native_usergs_sysret64)
166 swapgs
167 sysretq
168 #endif /* CONFIG_PARAVIRT */
169
170
171 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
172 #ifdef CONFIG_TRACE_IRQFLAGS
173 bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
174 jnc 1f
175 TRACE_IRQS_ON
176 1:
177 #endif
178 .endm
179
180 /*
181 * C code is not supposed to know about the undefined top of stack. Every time
182 * a C function with a pt_regs argument is called from the SYSCALL-based
183 * fast path, FIXUP_TOP_OF_STACK is needed.
184 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
185 * manipulation.
186 */
187
188 /* %rsp:at FRAMEEND */
189 .macro FIXUP_TOP_OF_STACK tmp
190 movq %gs:pda_oldrsp,\tmp
191 movq \tmp,RSP(%rsp)
192 movq $__USER_DS,SS(%rsp)
193 movq $__USER_CS,CS(%rsp)
194 movq $-1,RCX(%rsp)
195 movq R11(%rsp),\tmp /* get eflags */
196 movq \tmp,EFLAGS(%rsp)
197 .endm
198
199 .macro RESTORE_TOP_OF_STACK tmp,offset=0
200 movq RSP-\offset(%rsp),\tmp
201 movq \tmp,%gs:pda_oldrsp
202 movq EFLAGS-\offset(%rsp),\tmp
203 movq \tmp,R11-\offset(%rsp)
204 .endm
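/*
 * Editor's note: on the SYSCALL fast path the CPU has put the user RIP in
 * %rcx and the user RFLAGS in %r11, and SAVE_ARGS stores them in the
 * RCX/R11 slots; the SS, CS, RSP and EFLAGS slots of the hardware part of
 * the frame are simply never written.  FIXUP_TOP_OF_STACK above fills
 * those slots in (RSP from pda_oldrsp, fixed __USER_CS/__USER_DS, EFLAGS
 * from the R11 slot) so that C code sees a well-formed pt_regs, and
 * RESTORE_TOP_OF_STACK propagates any changes back before SYSRET.
 */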
205
206 .macro FAKE_STACK_FRAME child_rip
207 /* push in order ss, rsp, eflags, cs, rip */
208 xorl %eax, %eax
209 pushq $__KERNEL_DS /* ss */
210 CFI_ADJUST_CFA_OFFSET 8
211 /*CFI_REL_OFFSET ss,0*/
212 pushq %rax /* rsp */
213 CFI_ADJUST_CFA_OFFSET 8
214 CFI_REL_OFFSET rsp,0
215 pushq $(1<<9) /* eflags - interrupts on */
216 CFI_ADJUST_CFA_OFFSET 8
217 /*CFI_REL_OFFSET rflags,0*/
218 pushq $__KERNEL_CS /* cs */
219 CFI_ADJUST_CFA_OFFSET 8
220 /*CFI_REL_OFFSET cs,0*/
221 pushq \child_rip /* rip */
222 CFI_ADJUST_CFA_OFFSET 8
223 CFI_REL_OFFSET rip,0
224 pushq %rax /* orig rax */
225 CFI_ADJUST_CFA_OFFSET 8
226 .endm
227
228 .macro UNFAKE_STACK_FRAME
229 addq $8*6, %rsp
230 CFI_ADJUST_CFA_OFFSET -(6*8)
231 .endm
232
233 /*
234 * initial frame state for an empty stack frame (basis for the frame macros below)
235 */
236 .macro EMPTY_FRAME start=1 offset=0
237 .if \start
238 CFI_STARTPROC simple
239 CFI_SIGNAL_FRAME
240 CFI_DEF_CFA rsp,8+\offset
241 .else
242 CFI_DEF_CFA_OFFSET 8+\offset
243 .endif
244 .endm
245
246 /*
247 * initial frame state for interrupts (and exceptions without error code)
248 */
249 .macro INTR_FRAME start=1 offset=0
250 EMPTY_FRAME \start, SS+8+\offset-RIP
251 /*CFI_REL_OFFSET ss, SS+\offset-RIP*/
252 CFI_REL_OFFSET rsp, RSP+\offset-RIP
253 /*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/
254 /*CFI_REL_OFFSET cs, CS+\offset-RIP*/
255 CFI_REL_OFFSET rip, RIP+\offset-RIP
256 .endm
257
258 /*
259 * initial frame state for exceptions with error code (and interrupts
260 * with vector already pushed)
261 */
262 .macro XCPT_FRAME start=1 offset=0
263 INTR_FRAME \start, RIP+\offset-ORIG_RAX
264 /*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/
265 .endm
266
267 /*
268 * frame that enables calling into C.
269 */
270 .macro PARTIAL_FRAME start=1 offset=0
271 XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET
272 CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET
273 CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET
274 CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET
275 CFI_REL_OFFSET rcx, RCX+\offset-ARGOFFSET
276 CFI_REL_OFFSET rax, RAX+\offset-ARGOFFSET
277 CFI_REL_OFFSET r8, R8+\offset-ARGOFFSET
278 CFI_REL_OFFSET r9, R9+\offset-ARGOFFSET
279 CFI_REL_OFFSET r10, R10+\offset-ARGOFFSET
280 CFI_REL_OFFSET r11, R11+\offset-ARGOFFSET
281 .endm
282
283 /*
284 * frame that enables passing a complete pt_regs to a C function.
285 */
286 .macro DEFAULT_FRAME start=1 offset=0
287 PARTIAL_FRAME \start, R11+\offset-R15
288 CFI_REL_OFFSET rbx, RBX+\offset
289 CFI_REL_OFFSET rbp, RBP+\offset
290 CFI_REL_OFFSET r12, R12+\offset
291 CFI_REL_OFFSET r13, R13+\offset
292 CFI_REL_OFFSET r14, R14+\offset
293 CFI_REL_OFFSET r15, R15+\offset
294 .endm
295
296 /* save partial stack frame */
297 ENTRY(save_args)
298 XCPT_FRAME
299 cld
300 CFI_MOVQ rdi, RDI+16-ARGOFFSET
301 CFI_MOVQ rsi, RSI+16-ARGOFFSET
302 CFI_MOVQ rdx, RDX+16-ARGOFFSET
303 CFI_MOVQ rcx, RCX+16-ARGOFFSET
304 CFI_MOVQ rax, RAX+16-ARGOFFSET
305 CFI_MOVQ r8, R8+16-ARGOFFSET
306 CFI_MOVQ r9, R9+16-ARGOFFSET
307 CFI_MOVQ r10, R10+16-ARGOFFSET
308 CFI_MOVQ r11, R11+16-ARGOFFSET
309 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
310 CFI_MOVQ rbp, 8 /* push %rbp */
311 leaq 8(%rsp), %rbp /* mov %rsp, %rbp */
312 testl $3, CS(%rdi)
313 je 1f
314 SWAPGS
315 /*
316 * irqcount is used to check if a CPU is already on an interrupt stack
317 * or not. While this is essentially redundant with preempt_count it is
318 * a little cheaper to use a separate counter in the PDA (short of
319 * moving irq_enter into assembly, which would be too much work)
320 */
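	/*
	 * Editor's note (assuming pda_irqcount is initialised to -1, as in
	 * this kernel generation): the incl below leaves ZF set only on the
	 * outermost entry, so nested entries take the "jne 2f" and keep the
	 * stack they are already on.
	 */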
321 1: incl %gs:pda_irqcount
322 jne 2f
323 CFI_POPQ %rax /* move return address... */
324 mov %gs:pda_irqstackptr,%rsp
325 EMPTY_FRAME 0
326 CFI_PUSHQ %rax /* ... to the new stack */
327 /*
328 * We entered an interrupt context - irqs are off:
329 */
330 2: TRACE_IRQS_OFF
331 ret
332 CFI_ENDPROC
333 END(save_args)
334
335 /*
336 * A newly forked process directly context switches into this.
337 */
338 /* rdi: prev */
339 ENTRY(ret_from_fork)
340 DEFAULT_FRAME
341 push kernel_eflags(%rip)
342 CFI_ADJUST_CFA_OFFSET 8
343 popf # reset kernel eflags
344 CFI_ADJUST_CFA_OFFSET -8
345 call schedule_tail
346 GET_THREAD_INFO(%rcx)
347 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
348 jnz rff_trace
349 rff_action:
350 RESTORE_REST
351 testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread?
352 je int_ret_from_sys_call
353 testl $_TIF_IA32,TI_flags(%rcx)
354 jnz int_ret_from_sys_call
355 RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
356 jmp ret_from_sys_call
357 rff_trace:
358 movq %rsp,%rdi
359 call syscall_trace_leave
360 GET_THREAD_INFO(%rcx)
361 jmp rff_action
362 CFI_ENDPROC
363 END(ret_from_fork)
364
365 /*
366 * System call entry. Up to 6 arguments in registers are supported.
367 *
368 * SYSCALL does not save anything on the stack and does not change the
369 * stack pointer.
370 */
371
372 /*
373 * Register setup:
374 * rax system call number
375 * rdi arg0
376 * rcx return address for syscall/sysret, C arg3
377 * rsi arg1
378 * rdx arg2
379 * r10 arg3 (--> moved to rcx for C)
380 * r8 arg4
381 * r9 arg5
382 * r11 eflags for syscall/sysret, temporary for C
383 * r12-r15,rbp,rbx saved by C code, not touched.
384 *
385 * Interrupts are off on entry.
386 * Only called from user space.
387 *
388 * XXX if we had a free scratch register we could save the RSP into the stack frame
389 * and report it properly in ps. Unfortunately we don't have one.
390 *
391 * When the user can change the frames, always force IRET. That is because
392 * IRET deals with non-canonical addresses better. SYSRET has trouble
393 * with them due to bugs in both AMD and Intel CPUs.
394 */
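/*
 * Editor's illustration (not in the original): with the register setup
 * above, a user-space  write(fd, buf, count)  arrives here as
 *
 *	rax = __NR_write (1),  rdi = fd,  rsi = buf,  rdx = count
 *	rcx = user RIP, r11 = user RFLAGS	(set by the SYSCALL insn)
 *
 * and the "movq %r10,%rcx" on the fast path below is what turns the
 * kernel's r10-based convention back into the regular C ABI before the
 * sys_call_table dispatch.
 */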
395
396 ENTRY(system_call)
397 CFI_STARTPROC simple
398 CFI_SIGNAL_FRAME
399 CFI_DEF_CFA rsp,PDA_STACKOFFSET
400 CFI_REGISTER rip,rcx
401 /*CFI_REGISTER rflags,r11*/
402 SWAPGS_UNSAFE_STACK
403 /*
404 * A hypervisor implementation might want to use a label
405 * after the swapgs, so that it can do the swapgs
406 * for the guest and jump here on syscall.
407 */
408 ENTRY(system_call_after_swapgs)
409
410 movq %rsp,%gs:pda_oldrsp
411 movq %gs:pda_kernelstack,%rsp
412 /*
413 * No need to follow this irqs off/on section - it's straight
414 * and short:
415 */
416 ENABLE_INTERRUPTS(CLBR_NONE)
417 SAVE_ARGS 8,1
418 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
419 movq %rcx,RIP-ARGOFFSET(%rsp)
420 CFI_REL_OFFSET rip,RIP-ARGOFFSET
421 GET_THREAD_INFO(%rcx)
422 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
423 jnz tracesys
424 system_call_fastpath:
425 cmpq $__NR_syscall_max,%rax
426 ja badsys
427 movq %r10,%rcx
428 call *sys_call_table(,%rax,8) # XXX: rip relative
429 movq %rax,RAX-ARGOFFSET(%rsp)
430 /*
431 * Syscall return path ending with SYSRET (fast path)
432 * Has incomplete stack frame and undefined top of stack.
433 */
434 ret_from_sys_call:
435 movl $_TIF_ALLWORK_MASK,%edi
436 /* edi: flagmask */
437 sysret_check:
438 LOCKDEP_SYS_EXIT
439 GET_THREAD_INFO(%rcx)
440 DISABLE_INTERRUPTS(CLBR_NONE)
441 TRACE_IRQS_OFF
442 movl TI_flags(%rcx),%edx
443 andl %edi,%edx
444 jnz sysret_careful
445 CFI_REMEMBER_STATE
446 /*
447 * sysretq will re-enable interrupts:
448 */
449 TRACE_IRQS_ON
450 movq RIP-ARGOFFSET(%rsp),%rcx
451 CFI_REGISTER rip,rcx
452 RESTORE_ARGS 0,-ARG_SKIP,1
453 /*CFI_REGISTER rflags,r11*/
454 movq %gs:pda_oldrsp, %rsp
455 USERGS_SYSRET64
456
457 CFI_RESTORE_STATE
458 /* Handle reschedules */
459 /* edx: work, edi: workmask */
460 sysret_careful:
461 bt $TIF_NEED_RESCHED,%edx
462 jnc sysret_signal
463 TRACE_IRQS_ON
464 ENABLE_INTERRUPTS(CLBR_NONE)
465 pushq %rdi
466 CFI_ADJUST_CFA_OFFSET 8
467 call schedule
468 popq %rdi
469 CFI_ADJUST_CFA_OFFSET -8
470 jmp sysret_check
471
472 /* Handle a signal */
473 sysret_signal:
474 TRACE_IRQS_ON
475 ENABLE_INTERRUPTS(CLBR_NONE)
476 #ifdef CONFIG_AUDITSYSCALL
477 bt $TIF_SYSCALL_AUDIT,%edx
478 jc sysret_audit
479 #endif
480 /* edx: work flags (arg3) */
481 leaq do_notify_resume(%rip),%rax
482 leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
483 xorl %esi,%esi # oldset -> arg2
484 call ptregscall_common
485 movl $_TIF_WORK_MASK,%edi
486 /* Use IRET because user could have changed frame. This
487 works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
488 DISABLE_INTERRUPTS(CLBR_NONE)
489 TRACE_IRQS_OFF
490 jmp int_with_check
491
492 badsys:
493 movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
494 jmp ret_from_sys_call
495
496 #ifdef CONFIG_AUDITSYSCALL
497 /*
498 * Fast path for syscall audit without full syscall trace.
499 * We just call audit_syscall_entry() directly, and then
500 * jump back to the normal fast path.
501 */
502 auditsys:
503 movq %r10,%r9 /* 6th arg: 4th syscall arg */
504 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
505 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
506 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
507 movq %rax,%rsi /* 2nd arg: syscall number */
508 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
509 call audit_syscall_entry
510 LOAD_ARGS 0 /* reload call-clobbered registers */
511 jmp system_call_fastpath
512
513 /*
514 * Return fast path for syscall audit. Call audit_syscall_exit()
515 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
516 * masked off.
517 */
518 sysret_audit:
519 movq %rax,%rsi /* second arg, syscall return value */
520 cmpq $0,%rax /* is it < 0? */
521 setl %al /* 1 if so, 0 if not */
522 movzbl %al,%edi /* zero-extend that into %edi */
523 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
524 call audit_syscall_exit
525 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
526 jmp sysret_check
527 #endif /* CONFIG_AUDITSYSCALL */
528
529 /* Do syscall tracing */
530 tracesys:
531 #ifdef CONFIG_AUDITSYSCALL
532 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
533 jz auditsys
534 #endif
535 SAVE_REST
536 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
537 FIXUP_TOP_OF_STACK %rdi
538 movq %rsp,%rdi
539 call syscall_trace_enter
540 /*
541 * Reload arg registers from stack in case ptrace changed them.
542 * We don't reload %rax because syscall_trace_enter() returned
543 * the value it wants us to use in the table lookup.
544 */
545 LOAD_ARGS ARGOFFSET, 1
546 RESTORE_REST
547 cmpq $__NR_syscall_max,%rax
548 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
549 movq %r10,%rcx /* fixup for C */
550 call *sys_call_table(,%rax,8)
551 movq %rax,RAX-ARGOFFSET(%rsp)
552 /* Use IRET because user could have changed frame */
553
554 /*
555 * Syscall return path ending with IRET.
556 * Has correct top of stack, but partial stack frame.
557 */
558 .globl int_ret_from_sys_call
559 .globl int_with_check
560 int_ret_from_sys_call:
561 DISABLE_INTERRUPTS(CLBR_NONE)
562 TRACE_IRQS_OFF
563 testl $3,CS-ARGOFFSET(%rsp)
564 je retint_restore_args
565 movl $_TIF_ALLWORK_MASK,%edi
566 /* edi: mask to check */
567 int_with_check:
568 LOCKDEP_SYS_EXIT_IRQ
569 GET_THREAD_INFO(%rcx)
570 movl TI_flags(%rcx),%edx
571 andl %edi,%edx
572 jnz int_careful
573 andl $~TS_COMPAT,TI_status(%rcx)
574 jmp retint_swapgs
575
576 /* Either reschedule or signal or syscall exit tracking needed. */
577 /* First do a reschedule test. */
578 /* edx: work, edi: workmask */
579 int_careful:
580 bt $TIF_NEED_RESCHED,%edx
581 jnc int_very_careful
582 TRACE_IRQS_ON
583 ENABLE_INTERRUPTS(CLBR_NONE)
584 pushq %rdi
585 CFI_ADJUST_CFA_OFFSET 8
586 call schedule
587 popq %rdi
588 CFI_ADJUST_CFA_OFFSET -8
589 DISABLE_INTERRUPTS(CLBR_NONE)
590 TRACE_IRQS_OFF
591 jmp int_with_check
592
593 /* handle signals and tracing -- both require a full stack frame */
594 int_very_careful:
595 TRACE_IRQS_ON
596 ENABLE_INTERRUPTS(CLBR_NONE)
597 SAVE_REST
598 /* Check for syscall exit trace */
599 testl $_TIF_WORK_SYSCALL_EXIT,%edx
600 jz int_signal
601 pushq %rdi
602 CFI_ADJUST_CFA_OFFSET 8
603 leaq 8(%rsp),%rdi # &ptregs -> arg1
604 call syscall_trace_leave
605 popq %rdi
606 CFI_ADJUST_CFA_OFFSET -8
607 andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
608 jmp int_restore_rest
609
610 int_signal:
611 testl $_TIF_DO_NOTIFY_MASK,%edx
612 jz 1f
613 movq %rsp,%rdi # &ptregs -> arg1
614 xorl %esi,%esi # oldset -> arg2
615 call do_notify_resume
616 1: movl $_TIF_WORK_MASK,%edi
617 int_restore_rest:
618 RESTORE_REST
619 DISABLE_INTERRUPTS(CLBR_NONE)
620 TRACE_IRQS_OFF
621 jmp int_with_check
622 CFI_ENDPROC
623 END(system_call)
624
625 /*
626 * Certain special system calls need to save a full stack frame.
627 */
628
629 .macro PTREGSCALL label,func,arg
630 .globl \label
631 \label:
632 leaq \func(%rip),%rax
633 leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
634 jmp ptregscall_common
635 END(\label)
636 .endm
637
638 CFI_STARTPROC
639
640 PTREGSCALL stub_clone, sys_clone, %r8
641 PTREGSCALL stub_fork, sys_fork, %rdi
642 PTREGSCALL stub_vfork, sys_vfork, %rdi
643 PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
644 PTREGSCALL stub_iopl, sys_iopl, %rsi
645
646 ENTRY(ptregscall_common)
647 popq %r11
648 CFI_ADJUST_CFA_OFFSET -8
649 CFI_REGISTER rip, r11
650 SAVE_REST
651 movq %r11, %r15
652 CFI_REGISTER rip, r15
653 FIXUP_TOP_OF_STACK %r11
654 call *%rax
655 RESTORE_TOP_OF_STACK %r11
656 movq %r15, %r11
657 CFI_REGISTER rip, r11
658 RESTORE_REST
659 pushq %r11
660 CFI_ADJUST_CFA_OFFSET 8
661 CFI_REL_OFFSET rip, 0
662 ret
663 CFI_ENDPROC
664 END(ptregscall_common)
665
666 ENTRY(stub_execve)
667 CFI_STARTPROC
668 popq %r11
669 CFI_ADJUST_CFA_OFFSET -8
670 CFI_REGISTER rip, r11
671 SAVE_REST
672 FIXUP_TOP_OF_STACK %r11
673 movq %rsp, %rcx
674 call sys_execve
675 RESTORE_TOP_OF_STACK %r11
676 movq %rax,RAX(%rsp)
677 RESTORE_REST
678 jmp int_ret_from_sys_call
679 CFI_ENDPROC
680 END(stub_execve)
681
682 /*
683 * sigreturn is special because it needs to restore all registers on return.
684 * This cannot be done with SYSRET, so use the IRET return path instead.
685 */
686 ENTRY(stub_rt_sigreturn)
687 CFI_STARTPROC
688 addq $8, %rsp
689 CFI_ADJUST_CFA_OFFSET -8
690 SAVE_REST
691 movq %rsp,%rdi
692 FIXUP_TOP_OF_STACK %r11
693 call sys_rt_sigreturn
694 movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
695 RESTORE_REST
696 jmp int_ret_from_sys_call
697 CFI_ENDPROC
698 END(stub_rt_sigreturn)
699
700 /*
701 * Build the entry stubs and pointer table with some assembler magic.
702 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
703 * single cache line on all modern x86 implementations.
704 */
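/*
 * Editor's note on the arithmetic (a sketch, not from the original):
 * each stub is a 2-byte "pushq $imm8" plus a 2-byte short "jmp 2f";
 * the 7th stub omits the jmp and falls through to the shared
 * "jmp common_interrupt" (about 5 bytes), so a chunk needs roughly
 * 6*4 + 2 + 5 = 31 bytes and fits the 32-byte alignment used below.
 */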
705 .section .init.rodata,"a"
706 ENTRY(interrupt)
707 .text
708 .p2align 5
709 .p2align CONFIG_X86_L1_CACHE_SHIFT
710 ENTRY(irq_entries_start)
711 INTR_FRAME
712 vector=FIRST_EXTERNAL_VECTOR
713 .rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
714 .balign 32
715 .rept 7
716 .if vector < NR_VECTORS
717 .if vector <> FIRST_EXTERNAL_VECTOR
718 CFI_ADJUST_CFA_OFFSET -8
719 .endif
720 1: pushq $(~vector+0x80) /* Note: always in signed byte range */
721 CFI_ADJUST_CFA_OFFSET 8
722 .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
723 jmp 2f
724 .endif
725 .previous
726 .quad 1b
727 .text
728 vector=vector+1
729 .endif
730 .endr
731 2: jmp common_interrupt
732 .endr
733 CFI_ENDPROC
734 END(irq_entries_start)
735
736 .previous
737 END(interrupt)
738 .previous
739
740 /*
741 * Interrupt entry/exit.
742 *
743 * Interrupt entry points save only the callee-clobbered registers in the fast path.
744 *
745 * Entry runs with interrupts off.
746 */
747
748 /* 0(%rsp): ~(interrupt number) */
749 .macro interrupt func
750 subq $10*8, %rsp
751 CFI_ADJUST_CFA_OFFSET 10*8
752 call save_args
753 PARTIAL_FRAME 0
754 call \func
755 .endm
756
757 /*
758 * The interrupt stubs push (~vector+0x80) onto the stack and
759 * then jump to common_interrupt.
760 */
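/*
 * Editor's note: for vectors 0x20..0xff the stubs push ~vector + 0x80 =
 * 0x7f - vector, which stays within -0x80..0x5f and therefore fits the
 * sign-extended byte immediate of pushq.  The addq $-0x80 below turns
 * that into -(vector + 1), i.e. ~vector in the range [-256,-1], which
 * do_IRQ undoes to recover the original vector number.
 */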
761 .p2align CONFIG_X86_L1_CACHE_SHIFT
762 common_interrupt:
763 XCPT_FRAME
764 addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */
765 interrupt do_IRQ
766 /* 0(%rsp): oldrsp-ARGOFFSET */
767 ret_from_intr:
768 DISABLE_INTERRUPTS(CLBR_NONE)
769 TRACE_IRQS_OFF
770 decl %gs:pda_irqcount
771 leaveq
772 CFI_DEF_CFA_REGISTER rsp
773 CFI_ADJUST_CFA_OFFSET -8
774 exit_intr:
775 GET_THREAD_INFO(%rcx)
776 testl $3,CS-ARGOFFSET(%rsp)
777 je retint_kernel
778
779 /* Interrupt came from user space */
780 /*
781 * Has a correct top of stack, but a partial stack frame
782 * %rcx: thread info. Interrupts off.
783 */
784 retint_with_reschedule:
785 movl $_TIF_WORK_MASK,%edi
786 retint_check:
787 LOCKDEP_SYS_EXIT_IRQ
788 movl TI_flags(%rcx),%edx
789 andl %edi,%edx
790 CFI_REMEMBER_STATE
791 jnz retint_careful
792
793 retint_swapgs: /* return to user-space */
794 /*
795 * The iretq could re-enable interrupts:
796 */
797 DISABLE_INTERRUPTS(CLBR_ANY)
798 TRACE_IRQS_IRETQ
799 SWAPGS
800 jmp restore_args
801
802 retint_restore_args: /* return to kernel space */
803 DISABLE_INTERRUPTS(CLBR_ANY)
804 /*
805 * The iretq could re-enable interrupts:
806 */
807 TRACE_IRQS_IRETQ
808 restore_args:
809 RESTORE_ARGS 0,8,0
810
811 irq_return:
812 INTERRUPT_RETURN
813
814 .section __ex_table, "a"
815 .quad irq_return, bad_iret
816 .previous
817
818 #ifdef CONFIG_PARAVIRT
819 ENTRY(native_iret)
820 iretq
821
822 .section __ex_table,"a"
823 .quad native_iret, bad_iret
824 .previous
825 #endif
826
827 .section .fixup,"ax"
828 bad_iret:
829 /*
830 * The iret traps when the %cs or %ss being restored is bogus.
831 * We've lost the original trap vector and error code.
832 * #GPF is the most likely one to get for an invalid selector.
833 * So pretend we completed the iret and took the #GPF in user mode.
834 *
835 * We are now running with the kernel GS after exception recovery.
836 * But error_entry expects us to have user GS to match the user %cs,
837 * so swap back.
838 */
839 pushq $0
840
841 SWAPGS
842 jmp general_protection
843
844 .previous
845
846 /* edi: workmask, edx: work */
847 retint_careful:
848 CFI_RESTORE_STATE
849 bt $TIF_NEED_RESCHED,%edx
850 jnc retint_signal
851 TRACE_IRQS_ON
852 ENABLE_INTERRUPTS(CLBR_NONE)
853 pushq %rdi
854 CFI_ADJUST_CFA_OFFSET 8
855 call schedule
856 popq %rdi
857 CFI_ADJUST_CFA_OFFSET -8
858 GET_THREAD_INFO(%rcx)
859 DISABLE_INTERRUPTS(CLBR_NONE)
860 TRACE_IRQS_OFF
861 jmp retint_check
862
863 retint_signal:
864 testl $_TIF_DO_NOTIFY_MASK,%edx
865 jz retint_swapgs
866 TRACE_IRQS_ON
867 ENABLE_INTERRUPTS(CLBR_NONE)
868 SAVE_REST
869 movq $-1,ORIG_RAX(%rsp)
870 xorl %esi,%esi # oldset
871 movq %rsp,%rdi # &pt_regs
872 call do_notify_resume
873 RESTORE_REST
874 DISABLE_INTERRUPTS(CLBR_NONE)
875 TRACE_IRQS_OFF
876 GET_THREAD_INFO(%rcx)
877 jmp retint_with_reschedule
878
879 #ifdef CONFIG_PREEMPT
880 /* Returning to kernel space. Check if we need preemption */
881 /* rcx: threadinfo. interrupts off. */
882 ENTRY(retint_kernel)
883 cmpl $0,TI_preempt_count(%rcx)
884 jnz retint_restore_args
885 bt $TIF_NEED_RESCHED,TI_flags(%rcx)
886 jnc retint_restore_args
887 bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
888 jnc retint_restore_args
889 call preempt_schedule_irq
890 jmp exit_intr
891 #endif
892
893 CFI_ENDPROC
894 END(common_interrupt)
895
896 /*
897 * APIC interrupts.
898 */
899 .p2align 5
900
901 .macro apicinterrupt num,func
902 INTR_FRAME
903 pushq $~(\num)
904 CFI_ADJUST_CFA_OFFSET 8
905 interrupt \func
906 jmp ret_from_intr
907 CFI_ENDPROC
908 .endm
909
910 ENTRY(thermal_interrupt)
911 apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
912 END(thermal_interrupt)
913
914 ENTRY(threshold_interrupt)
915 apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
916 END(threshold_interrupt)
917
918 #ifdef CONFIG_SMP
919 ENTRY(reschedule_interrupt)
920 apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
921 END(reschedule_interrupt)
922
923 .macro INVALIDATE_ENTRY num
924 ENTRY(invalidate_interrupt\num)
925 apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
926 END(invalidate_interrupt\num)
927 .endm
928
929 INVALIDATE_ENTRY 0
930 INVALIDATE_ENTRY 1
931 INVALIDATE_ENTRY 2
932 INVALIDATE_ENTRY 3
933 INVALIDATE_ENTRY 4
934 INVALIDATE_ENTRY 5
935 INVALIDATE_ENTRY 6
936 INVALIDATE_ENTRY 7
937
938 ENTRY(call_function_interrupt)
939 apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
940 END(call_function_interrupt)
941 ENTRY(call_function_single_interrupt)
942 apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
943 END(call_function_single_interrupt)
944 ENTRY(irq_move_cleanup_interrupt)
945 apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
946 END(irq_move_cleanup_interrupt)
947 #endif
948
949 ENTRY(apic_timer_interrupt)
950 apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
951 END(apic_timer_interrupt)
952
953 ENTRY(uv_bau_message_intr1)
954 apicinterrupt 220,uv_bau_message_interrupt
955 END(uv_bau_message_intr1)
956
957 ENTRY(error_interrupt)
958 apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
959 END(error_interrupt)
960
961 ENTRY(spurious_interrupt)
962 apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
963 END(spurious_interrupt)
964
965 /*
966 * Exception entry points.
967 */
968 .macro zeroentry sym
969 INTR_FRAME
970 PARAVIRT_ADJUST_EXCEPTION_FRAME
971 CFI_PUSHQ $-1 /* ORIG_RAX: no syscall to restart */
972 subq $15*8,%rsp
973 CFI_ADJUST_CFA_OFFSET 15*8
974 call error_entry
975 DEFAULT_FRAME 0
976 movq %rsp,%rdi /* pt_regs pointer */
977 xorl %esi,%esi /* no error code */
978 call \sym
979 jmp error_exit /* %ebx: no swapgs flag */
980 CFI_ENDPROC
981 .endm
982
983 .macro errorentry sym
984 XCPT_FRAME
985 PARAVIRT_ADJUST_EXCEPTION_FRAME
986 subq $15*8,%rsp
987 CFI_ADJUST_CFA_OFFSET 15*8
988 call error_entry
989 DEFAULT_FRAME 0
990 movq %rsp,%rdi /* pt_regs pointer */
991 movq ORIG_RAX(%rsp),%rsi /* get error code */
992 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
993 call \sym
994 jmp error_exit /* %ebx: no swapgs flag */
995 CFI_ENDPROC
996 .endm
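/*
 * Editor's note: the only difference between the two macros above is the
 * error code.  zeroentry is for exceptions where the CPU pushes none, so
 * it pushes a dummy -1 into the ORIG_RAX slot itself and passes 0 as the
 * error-code argument; errorentry reuses the CPU-pushed error code from
 * ORIG_RAX as the second argument and then overwrites the slot with -1
 * ("no syscall to restart") before calling the handler.
 */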
997
998 /* error code is on the stack already */
999 /* handle NMI-like exceptions that can happen anywhere */
1000 .macro paranoidentry sym, ist=0, irqtrace=1
1001 SAVE_ALL
1002 cld
1003 movl $1,%ebx
1004 movl $MSR_GS_BASE,%ecx
1005 rdmsr
1006 testl %edx,%edx
1007 js 1f
1008 SWAPGS
1009 xorl %ebx,%ebx
1010 1:
1011 .if \ist
1012 movq %gs:pda_data_offset, %rbp
1013 .endif
1014 .if \irqtrace
1015 TRACE_IRQS_OFF
1016 .endif
1017 movq %rsp,%rdi
1018 movq ORIG_RAX(%rsp),%rsi
1019 movq $-1,ORIG_RAX(%rsp)
1020 .if \ist
1021 subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
1022 .endif
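	/*
	 * Editor's note: while the handler runs, the IST slot in the TSS is
	 * moved down by EXCEPTION_STKSZ (and restored below) so that a
	 * nested exception using the same IST entry gets a fresh stack
	 * area instead of clobbering the frame we are standing on.
	 */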
1023 call \sym
1024 .if \ist
1025 addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
1026 .endif
1027 DISABLE_INTERRUPTS(CLBR_NONE)
1028 .if \irqtrace
1029 TRACE_IRQS_OFF
1030 .endif
1031 .endm
1032
1033 /*
1034 * "Paranoid" exit path from exception stack.
1035 * Paranoid because this is used by NMIs and cannot take
1036 * any kernel state for granted.
1037 * We don't do kernel preemption checks here, because only
1038 * NMI should be common and it does not enable IRQs and
1039 * cannot get reschedule ticks.
1040 *
1041 * "trace" is 0 for the NMI handler only, because irq-tracing
1042 * is fundamentally NMI-unsafe. (we cannot change the soft and
1043 * hard flags at once, atomically)
1044 */
1045 .macro paranoidexit trace=1
1046 /* ebx: no swapgs flag */
1047 paranoid_exit\trace:
1048 testl %ebx,%ebx /* swapgs needed? */
1049 jnz paranoid_restore\trace
1050 testl $3,CS(%rsp)
1051 jnz paranoid_userspace\trace
1052 paranoid_swapgs\trace:
1053 .if \trace
1054 TRACE_IRQS_IRETQ 0
1055 .endif
1056 SWAPGS_UNSAFE_STACK
1057 paranoid_restore\trace:
1058 RESTORE_ALL 8
1059 jmp irq_return
1060 paranoid_userspace\trace:
1061 GET_THREAD_INFO(%rcx)
1062 movl TI_flags(%rcx),%ebx
1063 andl $_TIF_WORK_MASK,%ebx
1064 jz paranoid_swapgs\trace
1065 movq %rsp,%rdi /* &pt_regs */
1066 call sync_regs
1067 movq %rax,%rsp /* switch stack for scheduling */
1068 testl $_TIF_NEED_RESCHED,%ebx
1069 jnz paranoid_schedule\trace
1070 movl %ebx,%edx /* arg3: thread flags */
1071 .if \trace
1072 TRACE_IRQS_ON
1073 .endif
1074 ENABLE_INTERRUPTS(CLBR_NONE)
1075 xorl %esi,%esi /* arg2: oldset */
1076 movq %rsp,%rdi /* arg1: &pt_regs */
1077 call do_notify_resume
1078 DISABLE_INTERRUPTS(CLBR_NONE)
1079 .if \trace
1080 TRACE_IRQS_OFF
1081 .endif
1082 jmp paranoid_userspace\trace
1083 paranoid_schedule\trace:
1084 .if \trace
1085 TRACE_IRQS_ON
1086 .endif
1087 ENABLE_INTERRUPTS(CLBR_ANY)
1088 call schedule
1089 DISABLE_INTERRUPTS(CLBR_ANY)
1090 .if \trace
1091 TRACE_IRQS_OFF
1092 .endif
1093 jmp paranoid_userspace\trace
1094 CFI_ENDPROC
1095 .endm
1096
1097 /*
1098 * Exception entry point. This expects an error code/orig_rax on the stack.
1099 * Returns the "no swapgs" flag in %ebx.
1100 */
1101 KPROBE_ENTRY(error_entry)
1102 XCPT_FRAME
1103 CFI_ADJUST_CFA_OFFSET 15*8
1104 /* oldrax contains error code */
1105 cld
1106 CFI_MOVQ rdi, RDI+8
1107 CFI_MOVQ rsi, RSI+8
1108 CFI_MOVQ rdx, RDX+8
1109 CFI_MOVQ rcx, RCX+8
1110 CFI_MOVQ rax, RAX+8
1111 CFI_MOVQ r8, R8+8
1112 CFI_MOVQ r9, R9+8
1113 CFI_MOVQ r10, R10+8
1114 CFI_MOVQ r11, R11+8
1115 CFI_MOVQ rbx, RBX+8
1116 CFI_MOVQ rbp, RBP+8
1117 CFI_MOVQ r12, R12+8
1118 CFI_MOVQ r13, R13+8
1119 CFI_MOVQ r14, R14+8
1120 CFI_MOVQ r15, R15+8
1121 xorl %ebx,%ebx
1122 testl $3,CS+8(%rsp)
1123 je error_kernelspace
1124 error_swapgs:
1125 SWAPGS
1126 error_sti:
1127 TRACE_IRQS_OFF
1128 ret
1129 CFI_ENDPROC
1130
1131 /*
1132 * There are two places in the kernel that can potentially fault with
1133 * usergs. Handle them here. The exception handlers after iret run with
1134 * kernel gs again, so don't set the user space flag. B stepping K8s
1135 * sometimes report a truncated RIP for IRET exceptions returning to
1136 * compat mode. Check for these here too.
1137 */
1138 error_kernelspace:
1139 incl %ebx
1140 leaq irq_return(%rip),%rcx
1141 cmpq %rcx,RIP+8(%rsp)
1142 je error_swapgs
1143 movl %ecx,%ecx /* zero extend */
1144 cmpq %rcx,RIP+8(%rsp)
1145 je error_swapgs
1146 cmpq $gs_change,RIP+8(%rsp)
1147 je error_swapgs
1148 jmp error_sti
1149 KPROBE_END(error_entry)
1150
1151
1152 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
1153 KPROBE_ENTRY(error_exit)
1154 DEFAULT_FRAME
1155 movl %ebx,%eax
1156 RESTORE_REST
1157 DISABLE_INTERRUPTS(CLBR_NONE)
1158 TRACE_IRQS_OFF
1159 GET_THREAD_INFO(%rcx)
1160 testl %eax,%eax
1161 jne retint_kernel
1162 LOCKDEP_SYS_EXIT_IRQ
1163 movl TI_flags(%rcx),%edx
1164 movl $_TIF_WORK_MASK,%edi
1165 andl %edi,%edx
1166 jnz retint_careful
1167 jmp retint_swapgs
1168 CFI_ENDPROC
1169 KPROBE_END(error_exit)
1170
1171 /* Reload gs selector with exception handling */
1172 /* edi: new selector */
1173 ENTRY(native_load_gs_index)
1174 CFI_STARTPROC
1175 pushf
1176 CFI_ADJUST_CFA_OFFSET 8
1177 DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
1178 SWAPGS
1179 gs_change:
1180 movl %edi,%gs
1181 2: mfence /* workaround */
1182 SWAPGS
1183 popf
1184 CFI_ADJUST_CFA_OFFSET -8
1185 ret
1186 CFI_ENDPROC
1187 ENDPROC(native_load_gs_index)
1188
1189 .section __ex_table,"a"
1190 .align 8
1191 .quad gs_change,bad_gs
1192 .previous
1193 .section .fixup,"ax"
1194 /* running with kernelgs */
1195 bad_gs:
1196 SWAPGS /* switch back to user gs */
1197 xorl %eax,%eax
1198 movl %eax,%gs
1199 jmp 2b
1200 .previous
1201
1202 /*
1203 * Create a kernel thread.
1204 *
1205 * C extern interface:
1206 * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
1207 *
1208 * asm input arguments:
1209 * rdi: fn, rsi: arg, rdx: flags
1210 */
1211 ENTRY(kernel_thread)
1212 CFI_STARTPROC
1213 FAKE_STACK_FRAME $child_rip
1214 SAVE_ALL
1215
1216 # rdi: flags, rsi: usp, rdx: will be &pt_regs
1217 movq %rdx,%rdi
1218 orq kernel_thread_flags(%rip),%rdi
1219 movq $-1, %rsi
1220 movq %rsp, %rdx
1221
1222 xorl %r8d,%r8d
1223 xorl %r9d,%r9d
1224
1225 # clone now
1226 call do_fork
1227 movq %rax,RAX(%rsp)
1228 xorl %edi,%edi
1229
1230 /*
1231 * It isn't worth checking for a reschedule here,
1232 * so internally to the x86_64 port you can rely on kernel_thread()
1233 * not to reschedule the child before returning; this avoids the need
1234 * for hacks, for example to fork off the per-CPU idle tasks.
1235 * [Hopefully no generic code relies on the reschedule -AK]
1236 */
1237 RESTORE_ALL
1238 UNFAKE_STACK_FRAME
1239 ret
1240 CFI_ENDPROC
1241 ENDPROC(kernel_thread)
1242
1243 child_rip:
1244 pushq $0 # fake return address
1245 CFI_STARTPROC
1246 /*
1247 * Here we are in the child and the registers are set as they were
1248 * at kernel_thread() invocation in the parent.
1249 */
1250 movq %rdi, %rax
1251 movq %rsi, %rdi
1252 call *%rax
1253 # exit
1254 mov %eax, %edi
1255 call do_exit
1256 CFI_ENDPROC
1257 ENDPROC(child_rip)
1258
1259 /*
1260 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
1261 *
1262 * C extern interface:
1263 * extern long execve(char *name, char **argv, char **envp)
1264 *
1265 * asm input arguments:
1266 * rdi: name, rsi: argv, rdx: envp
1267 *
1268 * We want to fall back into:
1269 * extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs *regs)
1270 *
1271 * do_sys_execve asm fallback arguments:
1272 * rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
1273 */
1274 ENTRY(kernel_execve)
1275 CFI_STARTPROC
1276 FAKE_STACK_FRAME $0
1277 SAVE_ALL
1278 movq %rsp,%rcx
1279 call sys_execve
1280 movq %rax, RAX(%rsp)
1281 RESTORE_REST
1282 testq %rax,%rax
1283 je int_ret_from_sys_call
1284 RESTORE_ARGS
1285 UNFAKE_STACK_FRAME
1286 ret
1287 CFI_ENDPROC
1288 ENDPROC(kernel_execve)
1289
1290 KPROBE_ENTRY(page_fault)
1291 errorentry do_page_fault
1292 KPROBE_END(page_fault)
1293
1294 ENTRY(coprocessor_error)
1295 zeroentry do_coprocessor_error
1296 END(coprocessor_error)
1297
1298 ENTRY(simd_coprocessor_error)
1299 zeroentry do_simd_coprocessor_error
1300 END(simd_coprocessor_error)
1301
1302 ENTRY(device_not_available)
1303 zeroentry do_device_not_available
1304 END(device_not_available)
1305
1306 /* runs on exception stack */
1307 KPROBE_ENTRY(debug)
1308 INTR_FRAME
1309 PARAVIRT_ADJUST_EXCEPTION_FRAME
1310 pushq $0
1311 CFI_ADJUST_CFA_OFFSET 8
1312 paranoidentry do_debug, DEBUG_STACK
1313 paranoidexit
1314 KPROBE_END(debug)
1315
1316 /* runs on exception stack */
1317 KPROBE_ENTRY(nmi)
1318 INTR_FRAME
1319 PARAVIRT_ADJUST_EXCEPTION_FRAME
1320 pushq $-1
1321 CFI_ADJUST_CFA_OFFSET 8
1322 paranoidentry do_nmi, 0, 0
1323 #ifdef CONFIG_TRACE_IRQFLAGS
1324 paranoidexit 0
1325 #else
1326 jmp paranoid_exit1
1327 CFI_ENDPROC
1328 #endif
1329 KPROBE_END(nmi)
1330
1331 KPROBE_ENTRY(int3)
1332 INTR_FRAME
1333 PARAVIRT_ADJUST_EXCEPTION_FRAME
1334 pushq $0
1335 CFI_ADJUST_CFA_OFFSET 8
1336 paranoidentry do_int3, DEBUG_STACK
1337 jmp paranoid_exit1
1338 CFI_ENDPROC
1339 KPROBE_END(int3)
1340
1341 ENTRY(overflow)
1342 zeroentry do_overflow
1343 END(overflow)
1344
1345 ENTRY(bounds)
1346 zeroentry do_bounds
1347 END(bounds)
1348
1349 ENTRY(invalid_op)
1350 zeroentry do_invalid_op
1351 END(invalid_op)
1352
1353 ENTRY(coprocessor_segment_overrun)
1354 zeroentry do_coprocessor_segment_overrun
1355 END(coprocessor_segment_overrun)
1356
1357 /* runs on exception stack */
1358 ENTRY(double_fault)
1359 XCPT_FRAME
1360 PARAVIRT_ADJUST_EXCEPTION_FRAME
1361 paranoidentry do_double_fault
1362 jmp paranoid_exit1
1363 CFI_ENDPROC
1364 END(double_fault)
1365
1366 ENTRY(invalid_TSS)
1367 errorentry do_invalid_TSS
1368 END(invalid_TSS)
1369
1370 ENTRY(segment_not_present)
1371 errorentry do_segment_not_present
1372 END(segment_not_present)
1373
1374 /* runs on exception stack */
1375 ENTRY(stack_segment)
1376 XCPT_FRAME
1377 PARAVIRT_ADJUST_EXCEPTION_FRAME
1378 paranoidentry do_stack_segment
1379 jmp paranoid_exit1
1380 CFI_ENDPROC
1381 END(stack_segment)
1382
1383 KPROBE_ENTRY(general_protection)
1384 errorentry do_general_protection
1385 KPROBE_END(general_protection)
1386
1387 ENTRY(alignment_check)
1388 errorentry do_alignment_check
1389 END(alignment_check)
1390
1391 ENTRY(divide_error)
1392 zeroentry do_divide_error
1393 END(divide_error)
1394
1395 ENTRY(spurious_interrupt_bug)
1396 zeroentry do_spurious_interrupt_bug
1397 END(spurious_interrupt_bug)
1398
1399 #ifdef CONFIG_X86_MCE
1400 /* runs on exception stack */
1401 ENTRY(machine_check)
1402 INTR_FRAME
1403 PARAVIRT_ADJUST_EXCEPTION_FRAME
1404 pushq $0
1405 CFI_ADJUST_CFA_OFFSET 8
1406 paranoidentry do_machine_check
1407 jmp paranoid_exit1
1408 CFI_ENDPROC
1409 END(machine_check)
1410 #endif
1411
1412 /* Call softirq on interrupt stack. Interrupts are off. */
1413 ENTRY(call_softirq)
1414 CFI_STARTPROC
1415 push %rbp
1416 CFI_ADJUST_CFA_OFFSET 8
1417 CFI_REL_OFFSET rbp,0
1418 mov %rsp,%rbp
1419 CFI_DEF_CFA_REGISTER rbp
1420 incl %gs:pda_irqcount
1421 cmove %gs:pda_irqstackptr,%rsp
1422 push %rbp # backlink for old unwinder
1423 call __do_softirq
1424 leaveq
1425 CFI_DEF_CFA_REGISTER rsp
1426 CFI_ADJUST_CFA_OFFSET -8
1427 decl %gs:pda_irqcount
1428 ret
1429 CFI_ENDPROC
1430 ENDPROC(call_softirq)
1431
1432 KPROBE_ENTRY(ignore_sysret)
1433 CFI_STARTPROC
1434 mov $-ENOSYS,%eax
1435 sysret
1436 CFI_ENDPROC
1437 ENDPROC(ignore_sysret)
1438
1439 #ifdef CONFIG_XEN
1440 ENTRY(xen_hypervisor_callback)
1441 zeroentry xen_do_hypervisor_callback
1442 END(xen_hypervisor_callback)
1443
1444 /*
1445 * A note on the "critical region" in our callback handler.
1446 * We want to avoid stacking callback handlers due to events occurring
1447 * during handling of the last event. To do this, we keep events disabled
1448 * until we've done all processing. HOWEVER, we must enable events before
1449 * popping the stack frame (can't be done atomically) and so it would still
1450 * be possible to get enough handler activations to overflow the stack.
1451 * Although unlikely, bugs of that kind are hard to track down, so we'd
1452 * like to avoid the possibility.
1453 * So, on entry to the handler we detect whether we interrupted an
1454 * existing activation in its critical region -- if so, we pop the current
1455 * activation and restart the handler using the previous one.
1456 */
1457 ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct pt_regs *)
1458 CFI_STARTPROC
1459 /* Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *)
1460 * will see the correct pointer to the pt_regs */
1461 movq %rdi, %rsp # we don't return, adjust the stack frame
1462 CFI_ENDPROC
1463 DEFAULT_FRAME
1464 11: incl %gs:pda_irqcount
1465 movq %rsp,%rbp
1466 CFI_DEF_CFA_REGISTER rbp
1467 cmovzq %gs:pda_irqstackptr,%rsp
1468 pushq %rbp # backlink for old unwinder
1469 call xen_evtchn_do_upcall
1470 popq %rsp
1471 CFI_DEF_CFA_REGISTER rsp
1472 decl %gs:pda_irqcount
1473 jmp error_exit
1474 CFI_ENDPROC
1475 END(xen_do_hypervisor_callback)
1476
1477 /*
1478 * Hypervisor uses this for application faults while it executes.
1479 * We get here for two reasons:
1480 * 1. Fault while reloading DS, ES, FS or GS
1481 * 2. Fault while executing IRET
1482 * Category 1 we do not need to fix up as Xen has already reloaded all segment
1483 * registers that could be reloaded and zeroed the others.
1484 * Category 2 we fix up by killing the current process. We cannot use the
1485 * normal Linux return path in this case because if we use the IRET hypercall
1486 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
1487 * We distinguish between categories by comparing each saved segment register
1488 * with its current contents: any discrepancy means we are in category 1.
1489 */
1490 ENTRY(xen_failsafe_callback)
1491 INTR_FRAME 1 (6*8)
1492 /*CFI_REL_OFFSET gs,GS*/
1493 /*CFI_REL_OFFSET fs,FS*/
1494 /*CFI_REL_OFFSET es,ES*/
1495 /*CFI_REL_OFFSET ds,DS*/
1496 CFI_REL_OFFSET r11,8
1497 CFI_REL_OFFSET rcx,0
1498 movw %ds,%cx
1499 cmpw %cx,0x10(%rsp)
1500 CFI_REMEMBER_STATE
1501 jne 1f
1502 movw %es,%cx
1503 cmpw %cx,0x18(%rsp)
1504 jne 1f
1505 movw %fs,%cx
1506 cmpw %cx,0x20(%rsp)
1507 jne 1f
1508 movw %gs,%cx
1509 cmpw %cx,0x28(%rsp)
1510 jne 1f
1511 /* All segments match their saved values => Category 2 (Bad IRET). */
1512 movq (%rsp),%rcx
1513 CFI_RESTORE rcx
1514 movq 8(%rsp),%r11
1515 CFI_RESTORE r11
1516 addq $0x30,%rsp
1517 CFI_ADJUST_CFA_OFFSET -0x30
1518 CFI_PUSHQ $0 /* RIP */
1519 CFI_PUSHQ %r11
1520 CFI_PUSHQ %rcx
1521 jmp general_protection
1522 CFI_RESTORE_STATE
1523 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
1524 movq (%rsp),%rcx
1525 CFI_RESTORE rcx
1526 movq 8(%rsp),%r11
1527 CFI_RESTORE r11
1528 addq $0x30,%rsp
1529 CFI_ADJUST_CFA_OFFSET -0x30
1530 CFI_PUSHQ $0
1531 SAVE_ALL
1532 jmp error_exit
1533 CFI_ENDPROC
1534 END(xen_failsafe_callback)
1535
1536 #endif /* CONFIG_XEN */