1 /*
2 * linux/arch/x86_64/entry.S
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
6 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
7 */
8
9 /*
10 * entry.S contains the system-call and fault low-level handling routines.
11 *
12 * NOTE: This code handles signal-recognition, which happens every time
13 * after an interrupt and after each system call.
14 *
15 * Normal syscalls and interrupts don't save a full stack frame; this is
16 * only done for syscall tracing, signals or fork/exec et al.
17 *
18 * A note on terminology:
19 * - top of stack: Architecture defined interrupt frame from SS to RIP
20 * at the top of the kernel process stack.
21 * - partial stack frame: partially saved registers up to R11.
22 * - full stack frame: Like partial stack frame, but all registers saved.
23 *
24 * Some macro usage:
25 * - CFI macros are used to generate dwarf2 unwind information for better
26 * backtraces. They don't change any code.
27 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
28 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
29 * There are unfortunately lots of special cases where some registers are
30 * not touched. The macro is a big mess that should be cleaned up.
31 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
32 * Gives a full stack frame.
33 * - ENTRY/END - Define functions in the symbol table.
34 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
35 * frame that is otherwise undefined after a SYSCALL
36 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
37 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
38 */
39
40 #include <linux/linkage.h>
41 #include <asm/segment.h>
42 #include <asm/cache.h>
43 #include <asm/errno.h>
44 #include <asm/dwarf2.h>
45 #include <asm/calling.h>
46 #include <asm/asm-offsets.h>
47 #include <asm/msr.h>
48 #include <asm/unistd.h>
49 #include <asm/thread_info.h>
50 #include <asm/hw_irq.h>
51 #include <asm/page.h>
52 #include <asm/irqflags.h>
53 #include <asm/paravirt.h>
54 #include <asm/ftrace.h>
55
56 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
57 #include <linux/elf-em.h>
58 #define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
59 #define __AUDIT_ARCH_64BIT 0x80000000
60 #define __AUDIT_ARCH_LE 0x40000000
61
62 .code64
63
64 #ifdef CONFIG_FUNCTION_TRACER
65 #ifdef CONFIG_DYNAMIC_FTRACE
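/*
 * With CONFIG_DYNAMIC_FTRACE the compiler-generated mcount calls initially
 * reach this empty stub; the call sites are patched at runtime (to nops, or
 * to ftrace_caller below while tracing is active).
 */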
66 ENTRY(mcount)
67 retq
68 END(mcount)
69
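/*
 * ftrace_caller is what patched call sites jump to while tracing is on.
 * It saves the registers a C tracer may clobber, passes the traced
 * function's address (the mcount return address minus the call insn size)
 * and the parent return address, and calls the tracer through the
 * ftrace_call site, which is itself patched to point at the active tracer.
 */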
70 ENTRY(ftrace_caller)
71 cmpl $0, function_trace_stop
72 jne ftrace_stub
73
74 /* taken from glibc */
75 subq $0x38, %rsp
76 movq %rax, (%rsp)
77 movq %rcx, 8(%rsp)
78 movq %rdx, 16(%rsp)
79 movq %rsi, 24(%rsp)
80 movq %rdi, 32(%rsp)
81 movq %r8, 40(%rsp)
82 movq %r9, 48(%rsp)
83
84 movq 0x38(%rsp), %rdi
85 movq 8(%rbp), %rsi
86 subq $MCOUNT_INSN_SIZE, %rdi
87
88 .globl ftrace_call
89 ftrace_call:
90 call ftrace_stub
91
92 movq 48(%rsp), %r9
93 movq 40(%rsp), %r8
94 movq 32(%rsp), %rdi
95 movq 24(%rsp), %rsi
96 movq 16(%rsp), %rdx
97 movq 8(%rsp), %rcx
98 movq (%rsp), %rax
99 addq $0x38, %rsp
100
101 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
102 .globl ftrace_graph_call
103 ftrace_graph_call:
104 jmp ftrace_stub
105 #endif
106
107 .globl ftrace_stub
108 ftrace_stub:
109 retq
110 END(ftrace_caller)
111
112 #else /* ! CONFIG_DYNAMIC_FTRACE */
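/*
 * Without dynamic patching every mcount call lands here. Bail out via
 * ftrace_stub when no tracer is registered, otherwise fall through to the
 * trace path (or to ftrace_graph_caller for the function graph tracer).
 */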
113 ENTRY(mcount)
114 cmpl $0, function_trace_stop
115 jne ftrace_stub
116
117 cmpq $ftrace_stub, ftrace_trace_function
118 jnz trace
119
120 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
121 cmpq $ftrace_stub, ftrace_graph_return
122 jnz ftrace_graph_caller
123
124 cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
125 jnz ftrace_graph_caller
126 #endif
127
128 .globl ftrace_stub
129 ftrace_stub:
130 retq
131
132 trace:
133 /* taken from glibc */
134 subq $0x38, %rsp
135 movq %rax, (%rsp)
136 movq %rcx, 8(%rsp)
137 movq %rdx, 16(%rsp)
138 movq %rsi, 24(%rsp)
139 movq %rdi, 32(%rsp)
140 movq %r8, 40(%rsp)
141 movq %r9, 48(%rsp)
142
143 movq 0x38(%rsp), %rdi
144 movq 8(%rbp), %rsi
145 subq $MCOUNT_INSN_SIZE, %rdi
146
147 call *ftrace_trace_function
148
149 movq 48(%rsp), %r9
150 movq 40(%rsp), %r8
151 movq 32(%rsp), %rdi
152 movq 24(%rsp), %rsi
153 movq 16(%rsp), %rdx
154 movq 8(%rsp), %rcx
155 movq (%rsp), %rax
156 addq $0x38, %rsp
157
158 jmp ftrace_stub
159 END(mcount)
160 #endif /* CONFIG_DYNAMIC_FTRACE */
161 #endif /* CONFIG_FUNCTION_TRACER */
162
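/*
 * Entered from mcount when the function graph tracer is active: hand
 * prepare_ftrace_return() the location of the parent return address
 * (8(%rbp)) and the traced function's address so the return address can
 * be redirected to return_to_handler below.
 */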
163 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
164 ENTRY(ftrace_graph_caller)
165 cmpl $0, function_trace_stop
166 jne ftrace_stub
167
168 subq $0x38, %rsp
169 movq %rax, (%rsp)
170 movq %rcx, 8(%rsp)
171 movq %rdx, 16(%rsp)
172 movq %rsi, 24(%rsp)
173 movq %rdi, 32(%rsp)
174 movq %r8, 40(%rsp)
175 movq %r9, 48(%rsp)
176
177 leaq 8(%rbp), %rdi
178 movq 0x38(%rsp), %rsi
179 subq $MCOUNT_INSN_SIZE, %rsi
180
181 call prepare_ftrace_return
182
183 movq 48(%rsp), %r9
184 movq 40(%rsp), %r8
185 movq 32(%rsp), %rdi
186 movq 24(%rsp), %rsi
187 movq 16(%rsp), %rdx
188 movq 8(%rsp), %rcx
189 movq (%rsp), %rax
190 addq $0x38, %rsp
191 retq
192 END(ftrace_graph_caller)
193
194
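/*
 * Traced functions "return" here instead of to their caller. Save the
 * registers that may still hold return values or live arguments, let
 * ftrace_return_to_handler() hand back the original return address,
 * put it on the stack and ret to it.
 */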
195 .globl return_to_handler
196 return_to_handler:
197 subq $80, %rsp
198
199 movq %rax, (%rsp)
200 movq %rcx, 8(%rsp)
201 movq %rdx, 16(%rsp)
202 movq %rsi, 24(%rsp)
203 movq %rdi, 32(%rsp)
204 movq %r8, 40(%rsp)
205 movq %r9, 48(%rsp)
206 movq %r10, 56(%rsp)
207 movq %r11, 64(%rsp)
208
209 call ftrace_return_to_handler
210
211 movq %rax, 72(%rsp)
212 movq 64(%rsp), %r11
213 movq 56(%rsp), %r10
214 movq 48(%rsp), %r9
215 movq 40(%rsp), %r8
216 movq 32(%rsp), %rdi
217 movq 24(%rsp), %rsi
218 movq 16(%rsp), %rdx
219 movq 8(%rsp), %rcx
220 movq (%rsp), %rax
221 addq $72, %rsp
222 retq
223 #endif
224
225
226 #ifndef CONFIG_PREEMPT
227 #define retint_kernel retint_restore_args
228 #endif
229
230 #ifdef CONFIG_PARAVIRT
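/*
 * Native (non-paravirt) implementation of USERGS_SYSRET64: switch back to
 * the user GS base and return to user mode with sysretq.
 */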
231 ENTRY(native_usergs_sysret64)
232 swapgs
233 sysretq
234 #endif /* CONFIG_PARAVIRT */
235
236
237 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
238 #ifdef CONFIG_TRACE_IRQFLAGS
239 bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
240 jnc 1f
241 TRACE_IRQS_ON
242 1:
243 #endif
244 .endm
245
246 /*
247 * C code is not supposed to know about undefined top of stack. Every time
248 * a C function with a pt_regs argument is called from the SYSCALL-based
249 * fast path, FIXUP_TOP_OF_STACK is needed.
250 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
251 * manipulation.
252 */
253
254 /* %rsp:at FRAMEEND */
255 .macro FIXUP_TOP_OF_STACK tmp
256 movq %gs:pda_oldrsp,\tmp
257 movq \tmp,RSP(%rsp)
258 movq $__USER_DS,SS(%rsp)
259 movq $__USER_CS,CS(%rsp)
260 movq $-1,RCX(%rsp)
261 movq R11(%rsp),\tmp /* get eflags */
262 movq \tmp,EFLAGS(%rsp)
263 .endm
264
265 .macro RESTORE_TOP_OF_STACK tmp,offset=0
266 movq RSP-\offset(%rsp),\tmp
267 movq \tmp,%gs:pda_oldrsp
268 movq EFLAGS-\offset(%rsp),\tmp
269 movq \tmp,R11-\offset(%rsp)
270 .endm
271
272 .macro FAKE_STACK_FRAME child_rip
273 /* push in order ss, rsp, eflags, cs, rip */
274 xorl %eax, %eax
275 pushq $__KERNEL_DS /* ss */
276 CFI_ADJUST_CFA_OFFSET 8
277 /*CFI_REL_OFFSET ss,0*/
278 pushq %rax /* rsp */
279 CFI_ADJUST_CFA_OFFSET 8
280 CFI_REL_OFFSET rsp,0
281 pushq $(1<<9) /* eflags - interrupts on */
282 CFI_ADJUST_CFA_OFFSET 8
283 /*CFI_REL_OFFSET rflags,0*/
284 pushq $__KERNEL_CS /* cs */
285 CFI_ADJUST_CFA_OFFSET 8
286 /*CFI_REL_OFFSET cs,0*/
287 pushq \child_rip /* rip */
288 CFI_ADJUST_CFA_OFFSET 8
289 CFI_REL_OFFSET rip,0
290 pushq %rax /* orig rax */
291 CFI_ADJUST_CFA_OFFSET 8
292 .endm
293
294 .macro UNFAKE_STACK_FRAME
295 addq $8*6, %rsp
296 CFI_ADJUST_CFA_OFFSET -(6*8)
297 .endm
298
299 .macro CFI_DEFAULT_STACK start=1
300 .if \start
301 CFI_STARTPROC simple
302 CFI_SIGNAL_FRAME
303 CFI_DEF_CFA rsp,SS+8
304 .else
305 CFI_DEF_CFA_OFFSET SS+8
306 .endif
307 CFI_REL_OFFSET r15,R15
308 CFI_REL_OFFSET r14,R14
309 CFI_REL_OFFSET r13,R13
310 CFI_REL_OFFSET r12,R12
311 CFI_REL_OFFSET rbp,RBP
312 CFI_REL_OFFSET rbx,RBX
313 CFI_REL_OFFSET r11,R11
314 CFI_REL_OFFSET r10,R10
315 CFI_REL_OFFSET r9,R9
316 CFI_REL_OFFSET r8,R8
317 CFI_REL_OFFSET rax,RAX
318 CFI_REL_OFFSET rcx,RCX
319 CFI_REL_OFFSET rdx,RDX
320 CFI_REL_OFFSET rsi,RSI
321 CFI_REL_OFFSET rdi,RDI
322 CFI_REL_OFFSET rip,RIP
323 /*CFI_REL_OFFSET cs,CS*/
324 /*CFI_REL_OFFSET rflags,EFLAGS*/
325 CFI_REL_OFFSET rsp,RSP
326 /*CFI_REL_OFFSET ss,SS*/
327 .endm
328 /*
329 * A newly forked process directly context switches into this.
330 */
331 /* rdi: prev */
332 ENTRY(ret_from_fork)
333 CFI_DEFAULT_STACK
334 push kernel_eflags(%rip)
335 CFI_ADJUST_CFA_OFFSET 8
336 popf # reset kernel eflags
337 CFI_ADJUST_CFA_OFFSET -8
338 call schedule_tail
339 GET_THREAD_INFO(%rcx)
340 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
341 jnz rff_trace
342 rff_action:
343 RESTORE_REST
344 testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread?
345 je int_ret_from_sys_call
346 testl $_TIF_IA32,TI_flags(%rcx)
347 jnz int_ret_from_sys_call
348 RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
349 jmp ret_from_sys_call
350 rff_trace:
351 movq %rsp,%rdi
352 call syscall_trace_leave
353 GET_THREAD_INFO(%rcx)
354 jmp rff_action
355 CFI_ENDPROC
356 END(ret_from_fork)
357
358 /*
359 * System call entry. Up to 6 arguments in registers are supported.
360 *
361 * SYSCALL does not save anything on the stack and does not change the
362 * stack pointer.
363 */
364
365 /*
366 * Register setup:
367 * rax system call number
368 * rdi arg0
369 * rcx return address for syscall/sysret, C arg3
370 * rsi arg1
371 * rdx arg2
372 * r10 arg3 (--> moved to rcx for C)
373 * r8 arg4
374 * r9 arg5
375 * r11 eflags for syscall/sysret, temporary for C
376 * r12-r15,rbp,rbx saved by C code, not touched.
377 *
378 * Interrupts are off on entry.
379 * Only called from user space.
380 *
381 * XXX if we had a free scratch register we could save the RSP into the stack frame
382 * and report it properly in ps. Unfortunately we don't have one.
383 *
384 * When the user can change the frames, always force IRET. That is because
385 * it deals with non-canonical addresses better. SYSRET has trouble
386 * with them due to bugs in both AMD and Intel CPUs.
387 */
388
389 ENTRY(system_call)
390 CFI_STARTPROC simple
391 CFI_SIGNAL_FRAME
392 CFI_DEF_CFA rsp,PDA_STACKOFFSET
393 CFI_REGISTER rip,rcx
394 /*CFI_REGISTER rflags,r11*/
395 SWAPGS_UNSAFE_STACK
396 /*
397 * A hypervisor implementation might want to use a label
398 * after the swapgs, so that it can do the swapgs
399 * for the guest and jump here on syscall.
400 */
401 ENTRY(system_call_after_swapgs)
402
403 movq %rsp,%gs:pda_oldrsp
404 movq %gs:pda_kernelstack,%rsp
405 /*
406 * No need to follow this irqs off/on section - it's straight
407 * and short:
408 */
409 ENABLE_INTERRUPTS(CLBR_NONE)
410 SAVE_ARGS 8,1
411 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
412 movq %rcx,RIP-ARGOFFSET(%rsp)
413 CFI_REL_OFFSET rip,RIP-ARGOFFSET
414 GET_THREAD_INFO(%rcx)
415 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
416 jnz tracesys
417 system_call_fastpath:
418 cmpq $__NR_syscall_max,%rax
419 ja badsys
420 movq %r10,%rcx
421 call *sys_call_table(,%rax,8) # XXX: rip relative
422 movq %rax,RAX-ARGOFFSET(%rsp)
423 /*
424 * Syscall return path ending with SYSRET (fast path)
425 * Has incomplete stack frame and undefined top of stack.
426 */
427 ret_from_sys_call:
428 movl $_TIF_ALLWORK_MASK,%edi
429 /* edi: flagmask */
430 sysret_check:
431 LOCKDEP_SYS_EXIT
432 GET_THREAD_INFO(%rcx)
433 DISABLE_INTERRUPTS(CLBR_NONE)
434 TRACE_IRQS_OFF
435 movl TI_flags(%rcx),%edx
436 andl %edi,%edx
437 jnz sysret_careful
438 CFI_REMEMBER_STATE
439 /*
440 * sysretq will re-enable interrupts:
441 */
442 TRACE_IRQS_ON
443 movq RIP-ARGOFFSET(%rsp),%rcx
444 CFI_REGISTER rip,rcx
445 RESTORE_ARGS 0,-ARG_SKIP,1
446 /*CFI_REGISTER rflags,r11*/
447 movq %gs:pda_oldrsp, %rsp
448 USERGS_SYSRET64
449
450 CFI_RESTORE_STATE
451 /* Handle reschedules */
452 /* edx: work, edi: workmask */
453 sysret_careful:
454 bt $TIF_NEED_RESCHED,%edx
455 jnc sysret_signal
456 TRACE_IRQS_ON
457 ENABLE_INTERRUPTS(CLBR_NONE)
458 pushq %rdi
459 CFI_ADJUST_CFA_OFFSET 8
460 call schedule
461 popq %rdi
462 CFI_ADJUST_CFA_OFFSET -8
463 jmp sysret_check
464
465 /* Handle a signal */
466 sysret_signal:
467 TRACE_IRQS_ON
468 ENABLE_INTERRUPTS(CLBR_NONE)
469 #ifdef CONFIG_AUDITSYSCALL
470 bt $TIF_SYSCALL_AUDIT,%edx
471 jc sysret_audit
472 #endif
473 /* edx: work flags (arg3) */
474 leaq do_notify_resume(%rip),%rax
475 leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
476 xorl %esi,%esi # oldset -> arg2
477 call ptregscall_common
478 movl $_TIF_WORK_MASK,%edi
479 /* Use IRET because user could have changed frame. This
480 works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
481 DISABLE_INTERRUPTS(CLBR_NONE)
482 TRACE_IRQS_OFF
483 jmp int_with_check
484
485 badsys:
486 movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
487 jmp ret_from_sys_call
488
489 #ifdef CONFIG_AUDITSYSCALL
490 /*
491 * Fast path for syscall audit without full syscall trace.
492 * We just call audit_syscall_entry() directly, and then
493 * jump back to the normal fast path.
494 */
495 auditsys:
496 movq %r10,%r9 /* 6th arg: 4th syscall arg */
497 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
498 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
499 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
500 movq %rax,%rsi /* 2nd arg: syscall number */
501 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
502 call audit_syscall_entry
503 LOAD_ARGS 0 /* reload call-clobbered registers */
504 jmp system_call_fastpath
505
506 /*
507 * Return fast path for syscall audit. Call audit_syscall_exit()
508 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
509 * masked off.
510 */
511 sysret_audit:
512 movq %rax,%rsi /* second arg, syscall return value */
513 cmpq $0,%rax /* is it < 0? */
514 setl %al /* 1 if so, 0 if not */
515 movzbl %al,%edi /* zero-extend that into %edi */
516 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
517 call audit_syscall_exit
518 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
519 jmp sysret_check
520 #endif /* CONFIG_AUDITSYSCALL */
521
522 /* Do syscall tracing */
523 tracesys:
524 #ifdef CONFIG_AUDITSYSCALL
525 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
526 jz auditsys
527 #endif
528 SAVE_REST
529 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
530 FIXUP_TOP_OF_STACK %rdi
531 movq %rsp,%rdi
532 call syscall_trace_enter
533 /*
534 * Reload arg registers from stack in case ptrace changed them.
535 * We don't reload %rax because syscall_trace_enter() returned
536 * the value it wants us to use in the table lookup.
537 */
538 LOAD_ARGS ARGOFFSET, 1
539 RESTORE_REST
540 cmpq $__NR_syscall_max,%rax
541 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
542 movq %r10,%rcx /* fixup for C */
543 call *sys_call_table(,%rax,8)
544 movq %rax,RAX-ARGOFFSET(%rsp)
545 /* Use IRET because user could have changed frame */
546
547 /*
548 * Syscall return path ending with IRET.
549 * Has correct top of stack, but partial stack frame.
550 */
551 .globl int_ret_from_sys_call
552 .globl int_with_check
553 int_ret_from_sys_call:
554 DISABLE_INTERRUPTS(CLBR_NONE)
555 TRACE_IRQS_OFF
556 testl $3,CS-ARGOFFSET(%rsp)
557 je retint_restore_args
558 movl $_TIF_ALLWORK_MASK,%edi
559 /* edi: mask to check */
560 int_with_check:
561 LOCKDEP_SYS_EXIT_IRQ
562 GET_THREAD_INFO(%rcx)
563 movl TI_flags(%rcx),%edx
564 andl %edi,%edx
565 jnz int_careful
566 andl $~TS_COMPAT,TI_status(%rcx)
567 jmp retint_swapgs
568
569 /* Either reschedule or signal or syscall exit tracking needed. */
570 /* First do a reschedule test. */
571 /* edx: work, edi: workmask */
572 int_careful:
573 bt $TIF_NEED_RESCHED,%edx
574 jnc int_very_careful
575 TRACE_IRQS_ON
576 ENABLE_INTERRUPTS(CLBR_NONE)
577 pushq %rdi
578 CFI_ADJUST_CFA_OFFSET 8
579 call schedule
580 popq %rdi
581 CFI_ADJUST_CFA_OFFSET -8
582 DISABLE_INTERRUPTS(CLBR_NONE)
583 TRACE_IRQS_OFF
584 jmp int_with_check
585
586 /* handle signals and tracing -- both require a full stack frame */
587 int_very_careful:
588 TRACE_IRQS_ON
589 ENABLE_INTERRUPTS(CLBR_NONE)
590 SAVE_REST
591 /* Check for syscall exit trace */
592 testl $_TIF_WORK_SYSCALL_EXIT,%edx
593 jz int_signal
594 pushq %rdi
595 CFI_ADJUST_CFA_OFFSET 8
596 leaq 8(%rsp),%rdi # &ptregs -> arg1
597 call syscall_trace_leave
598 popq %rdi
599 CFI_ADJUST_CFA_OFFSET -8
600 andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
601 jmp int_restore_rest
602
603 int_signal:
604 testl $_TIF_DO_NOTIFY_MASK,%edx
605 jz 1f
606 movq %rsp,%rdi # &ptregs -> arg1
607 xorl %esi,%esi # oldset -> arg2
608 call do_notify_resume
609 1: movl $_TIF_WORK_MASK,%edi
610 int_restore_rest:
611 RESTORE_REST
612 DISABLE_INTERRUPTS(CLBR_NONE)
613 TRACE_IRQS_OFF
614 jmp int_with_check
615 CFI_ENDPROC
616 END(system_call)
617
618 /*
619 * Certain special system calls need to save a full stack frame.
620 */
621
622 .macro PTREGSCALL label,func,arg
623 .globl \label
624 \label:
625 leaq \func(%rip),%rax
626 leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
627 jmp ptregscall_common
628 END(\label)
629 .endm
630
631 CFI_STARTPROC
632
633 PTREGSCALL stub_clone, sys_clone, %r8
634 PTREGSCALL stub_fork, sys_fork, %rdi
635 PTREGSCALL stub_vfork, sys_vfork, %rdi
636 PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
637 PTREGSCALL stub_iopl, sys_iopl, %rsi
638
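/*
 * Common tail of the PTREGSCALL stubs above: pop the return address into a
 * scratch register, save the remaining registers so the handler sees a full
 * pt_regs, fix up the top of stack, call the handler in %rax, then undo the
 * fixup and return.
 */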
639 ENTRY(ptregscall_common)
640 popq %r11
641 CFI_ADJUST_CFA_OFFSET -8
642 CFI_REGISTER rip, r11
643 SAVE_REST
644 movq %r11, %r15
645 CFI_REGISTER rip, r15
646 FIXUP_TOP_OF_STACK %r11
647 call *%rax
648 RESTORE_TOP_OF_STACK %r11
649 movq %r15, %r11
650 CFI_REGISTER rip, r11
651 RESTORE_REST
652 pushq %r11
653 CFI_ADJUST_CFA_OFFSET 8
654 CFI_REL_OFFSET rip, 0
655 ret
656 CFI_ENDPROC
657 END(ptregscall_common)
658
659 ENTRY(stub_execve)
660 CFI_STARTPROC
661 popq %r11
662 CFI_ADJUST_CFA_OFFSET -8
663 CFI_REGISTER rip, r11
664 SAVE_REST
665 FIXUP_TOP_OF_STACK %r11
666 movq %rsp, %rcx
667 call sys_execve
668 RESTORE_TOP_OF_STACK %r11
669 movq %rax,RAX(%rsp)
670 RESTORE_REST
671 jmp int_ret_from_sys_call
672 CFI_ENDPROC
673 END(stub_execve)
674
675 /*
676 * sigreturn is special because it needs to restore all registers on return.
677 * This cannot be done with SYSRET, so use the IRET return path instead.
678 */
679 ENTRY(stub_rt_sigreturn)
680 CFI_STARTPROC
681 addq $8, %rsp
682 CFI_ADJUST_CFA_OFFSET -8
683 SAVE_REST
684 movq %rsp,%rdi
685 FIXUP_TOP_OF_STACK %r11
686 call sys_rt_sigreturn
687 movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
688 RESTORE_REST
689 jmp int_ret_from_sys_call
690 CFI_ENDPROC
691 END(stub_rt_sigreturn)
692
693 /*
694 * initial frame state for interrupts and exceptions
695 */
696 .macro _frame ref
697 CFI_STARTPROC simple
698 CFI_SIGNAL_FRAME
699 CFI_DEF_CFA rsp,SS+8-\ref
700 /*CFI_REL_OFFSET ss,SS-\ref*/
701 CFI_REL_OFFSET rsp,RSP-\ref
702 /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
703 /*CFI_REL_OFFSET cs,CS-\ref*/
704 CFI_REL_OFFSET rip,RIP-\ref
705 .endm
706
707 /* initial frame state for interrupts (and exceptions without error code) */
708 #define INTR_FRAME _frame RIP
709 /* initial frame state for exceptions with error code (and interrupts with
710 vector already pushed) */
711 #define XCPT_FRAME _frame ORIG_RAX
712
713 /*
714 * Interrupt entry/exit.
715 *
716 * Interrupt entry points save only callee clobbered registers in fast path.
717 *
718 * Entry runs with interrupts off.
719 */
720
721 /* 0(%rsp): interrupt number */
722 .macro interrupt func
723 cld
724 SAVE_ARGS
725 leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler
726 pushq %rbp
727 /*
728 * Save rbp twice: One is for marking the stack frame, as usual, and the
729 * other, to fill pt_regs properly. This is because bx comes right
730 * before the last saved register in that structure, and not bp. If the
731 * base pointer were in the place bx is today, this would not be needed.
732 */
733 movq %rbp, -8(%rsp)
734 CFI_ADJUST_CFA_OFFSET 8
735 CFI_REL_OFFSET rbp, 0
736 movq %rsp,%rbp
737 CFI_DEF_CFA_REGISTER rbp
738 testl $3,CS(%rdi)
739 je 1f
740 SWAPGS
741 /* irqcount is used to check if a CPU is already on an interrupt
742 stack or not. While this is essentially redundant with preempt_count
743 it is a little cheaper to use a separate counter in the PDA
744 (short of moving irq_enter into assembly, which would be too
745 much work) */
746 1: incl %gs:pda_irqcount
747 cmoveq %gs:pda_irqstackptr,%rsp
748 push %rbp # backlink for old unwinder
749 /*
750 * We entered an interrupt context - irqs are off:
751 */
752 TRACE_IRQS_OFF
753 call \func
754 .endm
755
756 ENTRY(common_interrupt)
757 XCPT_FRAME
758 interrupt do_IRQ
759 /* 0(%rsp): oldrsp-ARGOFFSET */
760 ret_from_intr:
761 DISABLE_INTERRUPTS(CLBR_NONE)
762 TRACE_IRQS_OFF
763 decl %gs:pda_irqcount
764 leaveq
765 CFI_DEF_CFA_REGISTER rsp
766 CFI_ADJUST_CFA_OFFSET -8
767 exit_intr:
768 GET_THREAD_INFO(%rcx)
769 testl $3,CS-ARGOFFSET(%rsp)
770 je retint_kernel
771
772 /* Interrupt came from user space */
773 /*
774 * Has a correct top of stack, but a partial stack frame
775 * %rcx: thread info. Interrupts off.
776 */
777 retint_with_reschedule:
778 movl $_TIF_WORK_MASK,%edi
779 retint_check:
780 LOCKDEP_SYS_EXIT_IRQ
781 movl TI_flags(%rcx),%edx
782 andl %edi,%edx
783 CFI_REMEMBER_STATE
784 jnz retint_careful
785
786 retint_swapgs: /* return to user-space */
787 /*
788 * The iretq could re-enable interrupts:
789 */
790 DISABLE_INTERRUPTS(CLBR_ANY)
791 TRACE_IRQS_IRETQ
792 SWAPGS
793 jmp restore_args
794
795 retint_restore_args: /* return to kernel space */
796 DISABLE_INTERRUPTS(CLBR_ANY)
797 /*
798 * The iretq could re-enable interrupts:
799 */
800 TRACE_IRQS_IRETQ
801 restore_args:
802 RESTORE_ARGS 0,8,0
803
804 irq_return:
805 INTERRUPT_RETURN
806
807 .section __ex_table, "a"
808 .quad irq_return, bad_iret
809 .previous
810
811 #ifdef CONFIG_PARAVIRT
812 ENTRY(native_iret)
813 iretq
814
815 .section __ex_table,"a"
816 .quad native_iret, bad_iret
817 .previous
818 #endif
819
820 .section .fixup,"ax"
821 bad_iret:
822 /*
823 * The iret traps when the %cs or %ss being restored is bogus.
824 * We've lost the original trap vector and error code.
825 * #GPF is the most likely one to get for an invalid selector.
826 * So pretend we completed the iret and took the #GPF in user mode.
827 *
828 * We are now running with the kernel GS after exception recovery.
829 * But error_entry expects us to have user GS to match the user %cs,
830 * so swap back.
831 */
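/* Fake a zero error code so that the general_protection entry (errorentry)
   sees the frame layout it expects. */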
832 pushq $0
833
834 SWAPGS
835 jmp general_protection
836
837 .previous
838
839 /* edi: workmask, edx: work */
840 retint_careful:
841 CFI_RESTORE_STATE
842 bt $TIF_NEED_RESCHED,%edx
843 jnc retint_signal
844 TRACE_IRQS_ON
845 ENABLE_INTERRUPTS(CLBR_NONE)
846 pushq %rdi
847 CFI_ADJUST_CFA_OFFSET 8
848 call schedule
849 popq %rdi
850 CFI_ADJUST_CFA_OFFSET -8
851 GET_THREAD_INFO(%rcx)
852 DISABLE_INTERRUPTS(CLBR_NONE)
853 TRACE_IRQS_OFF
854 jmp retint_check
855
856 retint_signal:
857 testl $_TIF_DO_NOTIFY_MASK,%edx
858 jz retint_swapgs
859 TRACE_IRQS_ON
860 ENABLE_INTERRUPTS(CLBR_NONE)
861 SAVE_REST
862 movq $-1,ORIG_RAX(%rsp)
863 xorl %esi,%esi # oldset
864 movq %rsp,%rdi # &pt_regs
865 call do_notify_resume
866 RESTORE_REST
867 DISABLE_INTERRUPTS(CLBR_NONE)
868 TRACE_IRQS_OFF
869 GET_THREAD_INFO(%rcx)
870 jmp retint_with_reschedule
871
872 #ifdef CONFIG_PREEMPT
873 /* Returning to kernel space. Check if we need preemption */
874 /* rcx: threadinfo. interrupts off. */
875 ENTRY(retint_kernel)
876 cmpl $0,TI_preempt_count(%rcx)
877 jnz retint_restore_args
878 bt $TIF_NEED_RESCHED,TI_flags(%rcx)
879 jnc retint_restore_args
880 bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
881 jnc retint_restore_args
882 call preempt_schedule_irq
883 jmp exit_intr
884 #endif
885
886 CFI_ENDPROC
887 END(common_interrupt)
888
889 /*
890 * APIC interrupts.
891 */
892 .macro apicinterrupt num,func
893 INTR_FRAME
894 pushq $~(\num)
895 CFI_ADJUST_CFA_OFFSET 8
896 interrupt \func
897 jmp ret_from_intr
898 CFI_ENDPROC
899 .endm
900
901 ENTRY(thermal_interrupt)
902 apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
903 END(thermal_interrupt)
904
905 ENTRY(threshold_interrupt)
906 apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
907 END(threshold_interrupt)
908
909 #ifdef CONFIG_SMP
910 ENTRY(reschedule_interrupt)
911 apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
912 END(reschedule_interrupt)
913
914 .macro INVALIDATE_ENTRY num
915 ENTRY(invalidate_interrupt\num)
916 apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
917 END(invalidate_interrupt\num)
918 .endm
919
920 INVALIDATE_ENTRY 0
921 INVALIDATE_ENTRY 1
922 INVALIDATE_ENTRY 2
923 INVALIDATE_ENTRY 3
924 INVALIDATE_ENTRY 4
925 INVALIDATE_ENTRY 5
926 INVALIDATE_ENTRY 6
927 INVALIDATE_ENTRY 7
928
929 ENTRY(call_function_interrupt)
930 apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
931 END(call_function_interrupt)
932 ENTRY(call_function_single_interrupt)
933 apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
934 END(call_function_single_interrupt)
935 ENTRY(irq_move_cleanup_interrupt)
936 apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
937 END(irq_move_cleanup_interrupt)
938 #endif
939
940 ENTRY(apic_timer_interrupt)
941 apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
942 END(apic_timer_interrupt)
943
944 ENTRY(uv_bau_message_intr1)
945 apicinterrupt 220,uv_bau_message_interrupt
946 END(uv_bau_message_intr1)
947
948 ENTRY(error_interrupt)
949 apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
950 END(error_interrupt)
951
952 ENTRY(spurious_interrupt)
953 apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
954 END(spurious_interrupt)
955
956 /*
957 * Exception entry points.
958 */
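/*
 * zeroentry is for exceptions that do not push an error code: it pushes a
 * zero into the error-code slot itself. errorentry is for exceptions where
 * the CPU has already pushed an error code. Both stash the handler address
 * in %rax and branch to the common error_entry path.
 */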
959 .macro zeroentry sym
960 INTR_FRAME
961 PARAVIRT_ADJUST_EXCEPTION_FRAME
962 pushq $0 /* push error code/oldrax */
963 CFI_ADJUST_CFA_OFFSET 8
964 pushq %rax /* push real oldrax to the rdi slot */
965 CFI_ADJUST_CFA_OFFSET 8
966 CFI_REL_OFFSET rax,0
967 leaq \sym(%rip),%rax
968 jmp error_entry
969 CFI_ENDPROC
970 .endm
971
972 .macro errorentry sym
973 XCPT_FRAME
974 PARAVIRT_ADJUST_EXCEPTION_FRAME
975 pushq %rax
976 CFI_ADJUST_CFA_OFFSET 8
977 CFI_REL_OFFSET rax,0
978 leaq \sym(%rip),%rax
979 jmp error_entry
980 CFI_ENDPROC
981 .endm
982
983 /* error code is on the stack already */
984 /* handle NMI like exceptions that can happen everywhere */
985 .macro paranoidentry sym, ist=0, irqtrace=1
986 SAVE_ALL
987 cld
988 movl $1,%ebx
989 movl $MSR_GS_BASE,%ecx
990 rdmsr
991 testl %edx,%edx
992 js 1f
993 SWAPGS
994 xorl %ebx,%ebx
995 1:
996 .if \ist
997 movq %gs:pda_data_offset, %rbp
998 .endif
999 .if \irqtrace
1000 TRACE_IRQS_OFF
1001 .endif
1002 movq %rsp,%rdi
1003 movq ORIG_RAX(%rsp),%rsi
1004 movq $-1,ORIG_RAX(%rsp)
1005 .if \ist
1006 subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
1007 .endif
1008 call \sym
1009 .if \ist
1010 addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
1011 .endif
1012 DISABLE_INTERRUPTS(CLBR_NONE)
1013 .if \irqtrace
1014 TRACE_IRQS_OFF
1015 .endif
1016 .endm
1017
1018 /*
1019 * "Paranoid" exit path from exception stack.
1020 * Paranoid because this is used by NMIs and cannot take
1021 * any kernel state for granted.
1022 * We don't do kernel preemption checks here, because only
1023 * NMI should be common and it does not enable IRQs and
1024 * cannot get reschedule ticks.
1025 *
1026 * "trace" is 0 for the NMI handler only, because irq-tracing
1027 * is fundamentally NMI-unsafe. (we cannot change the soft and
1028 * hard flags at once, atomically)
1029 */
1030 .macro paranoidexit trace=1
1031 /* ebx: no swapgs flag */
1032 paranoid_exit\trace:
1033 testl %ebx,%ebx /* swapgs needed? */
1034 jnz paranoid_restore\trace
1035 testl $3,CS(%rsp)
1036 jnz paranoid_userspace\trace
1037 paranoid_swapgs\trace:
1038 .if \trace
1039 TRACE_IRQS_IRETQ 0
1040 .endif
1041 SWAPGS_UNSAFE_STACK
1042 paranoid_restore\trace:
1043 RESTORE_ALL 8
1044 jmp irq_return
1045 paranoid_userspace\trace:
1046 GET_THREAD_INFO(%rcx)
1047 movl TI_flags(%rcx),%ebx
1048 andl $_TIF_WORK_MASK,%ebx
1049 jz paranoid_swapgs\trace
1050 movq %rsp,%rdi /* &pt_regs */
1051 call sync_regs
1052 movq %rax,%rsp /* switch stack for scheduling */
1053 testl $_TIF_NEED_RESCHED,%ebx
1054 jnz paranoid_schedule\trace
1055 movl %ebx,%edx /* arg3: thread flags */
1056 .if \trace
1057 TRACE_IRQS_ON
1058 .endif
1059 ENABLE_INTERRUPTS(CLBR_NONE)
1060 xorl %esi,%esi /* arg2: oldset */
1061 movq %rsp,%rdi /* arg1: &pt_regs */
1062 call do_notify_resume
1063 DISABLE_INTERRUPTS(CLBR_NONE)
1064 .if \trace
1065 TRACE_IRQS_OFF
1066 .endif
1067 jmp paranoid_userspace\trace
1068 paranoid_schedule\trace:
1069 .if \trace
1070 TRACE_IRQS_ON
1071 .endif
1072 ENABLE_INTERRUPTS(CLBR_ANY)
1073 call schedule
1074 DISABLE_INTERRUPTS(CLBR_ANY)
1075 .if \trace
1076 TRACE_IRQS_OFF
1077 .endif
1078 jmp paranoid_userspace\trace
1079 CFI_ENDPROC
1080 .endm
1081
1082 /*
1083 * Exception entry point. This expects an error code/orig_rax on the stack
1084 * and the exception handler in %rax.
1085 */
1086 KPROBE_ENTRY(error_entry)
1087 _frame RDI
1088 CFI_REL_OFFSET rax,0
1089 /* rdi slot contains rax, oldrax contains error code */
1090 cld
1091 subq $14*8,%rsp
1092 CFI_ADJUST_CFA_OFFSET (14*8)
1093 movq %rsi,13*8(%rsp)
1094 CFI_REL_OFFSET rsi,RSI
1095 movq 14*8(%rsp),%rsi /* load rax from rdi slot */
1096 CFI_REGISTER rax,rsi
1097 movq %rdx,12*8(%rsp)
1098 CFI_REL_OFFSET rdx,RDX
1099 movq %rcx,11*8(%rsp)
1100 CFI_REL_OFFSET rcx,RCX
1101 movq %rsi,10*8(%rsp) /* store rax */
1102 CFI_REL_OFFSET rax,RAX
1103 movq %r8, 9*8(%rsp)
1104 CFI_REL_OFFSET r8,R8
1105 movq %r9, 8*8(%rsp)
1106 CFI_REL_OFFSET r9,R9
1107 movq %r10,7*8(%rsp)
1108 CFI_REL_OFFSET r10,R10
1109 movq %r11,6*8(%rsp)
1110 CFI_REL_OFFSET r11,R11
1111 movq %rbx,5*8(%rsp)
1112 CFI_REL_OFFSET rbx,RBX
1113 movq %rbp,4*8(%rsp)
1114 CFI_REL_OFFSET rbp,RBP
1115 movq %r12,3*8(%rsp)
1116 CFI_REL_OFFSET r12,R12
1117 movq %r13,2*8(%rsp)
1118 CFI_REL_OFFSET r13,R13
1119 movq %r14,1*8(%rsp)
1120 CFI_REL_OFFSET r14,R14
1121 movq %r15,(%rsp)
1122 CFI_REL_OFFSET r15,R15
1123 xorl %ebx,%ebx
1124 testl $3,CS(%rsp)
1125 je error_kernelspace
1126 error_swapgs:
1127 SWAPGS
1128 error_sti:
1129 TRACE_IRQS_OFF
1130 movq %rdi,RDI(%rsp)
1131 CFI_REL_OFFSET rdi,RDI
1132 movq %rsp,%rdi
1133 movq ORIG_RAX(%rsp),%rsi /* get error code */
1134 movq $-1,ORIG_RAX(%rsp)
1135 call *%rax
1136 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
1137 error_exit:
1138 movl %ebx,%eax
1139 RESTORE_REST
1140 DISABLE_INTERRUPTS(CLBR_NONE)
1141 TRACE_IRQS_OFF
1142 GET_THREAD_INFO(%rcx)
1143 testl %eax,%eax
1144 jne retint_kernel
1145 LOCKDEP_SYS_EXIT_IRQ
1146 movl TI_flags(%rcx),%edx
1147 movl $_TIF_WORK_MASK,%edi
1148 andl %edi,%edx
1149 jnz retint_careful
1150 jmp retint_swapgs
1151 CFI_ENDPROC
1152
1153 error_kernelspace:
1154 incl %ebx
1155 /* There are two places in the kernel that can potentially fault with
1156 usergs. Handle them here. The exception handlers after
1157 iret run with kernel gs again, so don't set the user space flag.
1158 B stepping K8s sometimes report a truncated RIP for IRET
1159 exceptions returning to compat mode. Check for these here too. */
1160 leaq irq_return(%rip),%rcx
1161 cmpq %rcx,RIP(%rsp)
1162 je error_swapgs
1163 movl %ecx,%ecx /* zero extend */
1164 cmpq %rcx,RIP(%rsp)
1165 je error_swapgs
1166 cmpq $gs_change,RIP(%rsp)
1167 je error_swapgs
1168 jmp error_sti
1169 KPROBE_END(error_entry)
1170
1171 /* Reload gs selector with exception handling */
1172 /* edi: new selector */
1173 ENTRY(native_load_gs_index)
1174 CFI_STARTPROC
1175 pushf
1176 CFI_ADJUST_CFA_OFFSET 8
1177 DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
1178 SWAPGS
1179 gs_change:
1180 movl %edi,%gs
1181 2: mfence /* workaround */
1182 SWAPGS
1183 popf
1184 CFI_ADJUST_CFA_OFFSET -8
1185 ret
1186 CFI_ENDPROC
1187 ENDPROC(native_load_gs_index)
1188
1189 .section __ex_table,"a"
1190 .align 8
1191 .quad gs_change,bad_gs
1192 .previous
1193 .section .fixup,"ax"
1194 /* running with kernelgs */
1195 bad_gs:
1196 SWAPGS /* switch back to user gs */
1197 xorl %eax,%eax
1198 movl %eax,%gs
1199 jmp 2b
1200 .previous
1201
1202 /*
1203 * Create a kernel thread.
1204 *
1205 * C extern interface:
1206 * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
1207 *
1208 * asm input arguments:
1209 * rdi: fn, rsi: arg, rdx: flags
1210 */
1211 ENTRY(kernel_thread)
1212 CFI_STARTPROC
1213 FAKE_STACK_FRAME $child_rip
1214 SAVE_ALL
1215
1216 # rdi: flags, rsi: usp, rdx: will be &pt_regs
1217 movq %rdx,%rdi
1218 orq kernel_thread_flags(%rip),%rdi
1219 movq $-1, %rsi
1220 movq %rsp, %rdx
1221
1222 xorl %r8d,%r8d
1223 xorl %r9d,%r9d
1224
1225 # clone now
1226 call do_fork
1227 movq %rax,RAX(%rsp)
1228 xorl %edi,%edi
1229
1230 /*
1231 * It isn't worth checking for a reschedule here,
1232 * so internally to the x86_64 port you can rely on kernel_thread()
1233 * not rescheduling the child before returning; this avoids the need
1234 * for hacks, for example to fork off the per-CPU idle tasks.
1235 * [Hopefully no generic code relies on the reschedule -AK]
1236 */
1237 RESTORE_ALL
1238 UNFAKE_STACK_FRAME
1239 ret
1240 CFI_ENDPROC
1241 ENDPROC(kernel_thread)
1242
1243 child_rip:
1244 pushq $0 # fake return address
1245 CFI_STARTPROC
1246 /*
1247 * Here we are in the child and the registers are set as they were
1248 * at kernel_thread() invocation in the parent.
1249 */
1250 movq %rdi, %rax
1251 movq %rsi, %rdi
1252 call *%rax
1253 # exit
1254 mov %eax, %edi
1255 call do_exit
1256 CFI_ENDPROC
1257 ENDPROC(child_rip)
1258
1259 /*
1260 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
1261 *
1262 * C extern interface:
1263 * extern long execve(char *name, char **argv, char **envp)
1264 *
1265 * asm input arguments:
1266 * rdi: name, rsi: argv, rdx: envp
1267 *
1268 * We want to fall back into:
1269 * extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs *regs)
1270 *
1271 * do_sys_execve asm fallback arguments:
1272 * rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
1273 */
1274 ENTRY(kernel_execve)
1275 CFI_STARTPROC
1276 FAKE_STACK_FRAME $0
1277 SAVE_ALL
1278 movq %rsp,%rcx
1279 call sys_execve
1280 movq %rax, RAX(%rsp)
1281 RESTORE_REST
1282 testq %rax,%rax
1283 je int_ret_from_sys_call
1284 RESTORE_ARGS
1285 UNFAKE_STACK_FRAME
1286 ret
1287 CFI_ENDPROC
1288 ENDPROC(kernel_execve)
1289
1290 KPROBE_ENTRY(page_fault)
1291 errorentry do_page_fault
1292 KPROBE_END(page_fault)
1293
1294 ENTRY(coprocessor_error)
1295 zeroentry do_coprocessor_error
1296 END(coprocessor_error)
1297
1298 ENTRY(simd_coprocessor_error)
1299 zeroentry do_simd_coprocessor_error
1300 END(simd_coprocessor_error)
1301
1302 ENTRY(device_not_available)
1303 zeroentry do_device_not_available
1304 END(device_not_available)
1305
1306 /* runs on exception stack */
1307 KPROBE_ENTRY(debug)
1308 INTR_FRAME
1309 PARAVIRT_ADJUST_EXCEPTION_FRAME
1310 pushq $0
1311 CFI_ADJUST_CFA_OFFSET 8
1312 paranoidentry do_debug, DEBUG_STACK
1313 paranoidexit
1314 KPROBE_END(debug)
1315
1316 /* runs on exception stack */
1317 KPROBE_ENTRY(nmi)
1318 INTR_FRAME
1319 PARAVIRT_ADJUST_EXCEPTION_FRAME
1320 pushq $-1
1321 CFI_ADJUST_CFA_OFFSET 8
1322 paranoidentry do_nmi, 0, 0
1323 #ifdef CONFIG_TRACE_IRQFLAGS
1324 paranoidexit 0
1325 #else
1326 jmp paranoid_exit1
1327 CFI_ENDPROC
1328 #endif
1329 KPROBE_END(nmi)
1330
1331 KPROBE_ENTRY(int3)
1332 INTR_FRAME
1333 PARAVIRT_ADJUST_EXCEPTION_FRAME
1334 pushq $0
1335 CFI_ADJUST_CFA_OFFSET 8
1336 paranoidentry do_int3, DEBUG_STACK
1337 jmp paranoid_exit1
1338 CFI_ENDPROC
1339 KPROBE_END(int3)
1340
1341 ENTRY(overflow)
1342 zeroentry do_overflow
1343 END(overflow)
1344
1345 ENTRY(bounds)
1346 zeroentry do_bounds
1347 END(bounds)
1348
1349 ENTRY(invalid_op)
1350 zeroentry do_invalid_op
1351 END(invalid_op)
1352
1353 ENTRY(coprocessor_segment_overrun)
1354 zeroentry do_coprocessor_segment_overrun
1355 END(coprocessor_segment_overrun)
1356
1357 /* runs on exception stack */
1358 ENTRY(double_fault)
1359 XCPT_FRAME
1360 PARAVIRT_ADJUST_EXCEPTION_FRAME
1361 paranoidentry do_double_fault
1362 jmp paranoid_exit1
1363 CFI_ENDPROC
1364 END(double_fault)
1365
1366 ENTRY(invalid_TSS)
1367 errorentry do_invalid_TSS
1368 END(invalid_TSS)
1369
1370 ENTRY(segment_not_present)
1371 errorentry do_segment_not_present
1372 END(segment_not_present)
1373
1374 /* runs on exception stack */
1375 ENTRY(stack_segment)
1376 XCPT_FRAME
1377 PARAVIRT_ADJUST_EXCEPTION_FRAME
1378 paranoidentry do_stack_segment
1379 jmp paranoid_exit1
1380 CFI_ENDPROC
1381 END(stack_segment)
1382
1383 KPROBE_ENTRY(general_protection)
1384 errorentry do_general_protection
1385 KPROBE_END(general_protection)
1386
1387 ENTRY(alignment_check)
1388 errorentry do_alignment_check
1389 END(alignment_check)
1390
1391 ENTRY(divide_error)
1392 zeroentry do_divide_error
1393 END(divide_error)
1394
1395 ENTRY(spurious_interrupt_bug)
1396 zeroentry do_spurious_interrupt_bug
1397 END(spurious_interrupt_bug)
1398
1399 #ifdef CONFIG_X86_MCE
1400 /* runs on exception stack */
1401 ENTRY(machine_check)
1402 INTR_FRAME
1403 PARAVIRT_ADJUST_EXCEPTION_FRAME
1404 pushq $0
1405 CFI_ADJUST_CFA_OFFSET 8
1406 paranoidentry do_machine_check
1407 jmp paranoid_exit1
1408 CFI_ENDPROC
1409 END(machine_check)
1410 #endif
1411
1412 /* Call softirq on interrupt stack. Interrupts are off. */
1413 ENTRY(call_softirq)
1414 CFI_STARTPROC
1415 push %rbp
1416 CFI_ADJUST_CFA_OFFSET 8
1417 CFI_REL_OFFSET rbp,0
1418 mov %rsp,%rbp
1419 CFI_DEF_CFA_REGISTER rbp
1420 incl %gs:pda_irqcount
1421 cmove %gs:pda_irqstackptr,%rsp
1422 push %rbp # backlink for old unwinder
1423 call __do_softirq
1424 leaveq
1425 CFI_DEF_CFA_REGISTER rsp
1426 CFI_ADJUST_CFA_OFFSET -8
1427 decl %gs:pda_irqcount
1428 ret
1429 CFI_ENDPROC
1430 ENDPROC(call_softirq)
1431
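/*
 * Presumably installed (by the syscall setup code) as the SYSCALL target
 * for 32-bit callers when compat syscall support is not configured: fail
 * the call with -ENOSYS and return straight to user space.
 */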
1432 KPROBE_ENTRY(ignore_sysret)
1433 CFI_STARTPROC
1434 mov $-ENOSYS,%eax
1435 sysret
1436 CFI_ENDPROC
1437 ENDPROC(ignore_sysret)
1438
1439 #ifdef CONFIG_XEN
1440 ENTRY(xen_hypervisor_callback)
1441 zeroentry xen_do_hypervisor_callback
1442 END(xen_hypervisor_callback)
1443
1444 /*
1445 # A note on the "critical region" in our callback handler.
1446 # We want to avoid stacking callback handlers due to events occurring
1447 # during handling of the last event. To do this, we keep events disabled
1448 # until we've done all processing. HOWEVER, we must enable events before
1449 # popping the stack frame (can't be done atomically) and so it would still
1450 # be possible to get enough handler activations to overflow the stack.
1451 # Although unlikely, bugs of that kind are hard to track down, so we'd
1452 # like to avoid the possibility.
1453 # So, on entry to the handler we detect whether we interrupted an
1454 # existing activation in its critical region -- if so, we pop the current
1455 # activation and restart the handler using the previous one.
1456 */
1457 ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct pt_regs *)
1458 CFI_STARTPROC
1459 /* Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will
1460 see the correct pointer to the pt_regs */
1461 movq %rdi, %rsp # we don't return, adjust the stack frame
1462 CFI_ENDPROC
1463 CFI_DEFAULT_STACK
1464 11: incl %gs:pda_irqcount
1465 movq %rsp,%rbp
1466 CFI_DEF_CFA_REGISTER rbp
1467 cmovzq %gs:pda_irqstackptr,%rsp
1468 pushq %rbp # backlink for old unwinder
1469 call xen_evtchn_do_upcall
1470 popq %rsp
1471 CFI_DEF_CFA_REGISTER rsp
1472 decl %gs:pda_irqcount
1473 jmp error_exit
1474 CFI_ENDPROC
1475 END(xen_do_hypervisor_callback)
1476
1477 /*
1478 # Hypervisor uses this for application faults while it executes.
1479 # We get here for two reasons:
1480 # 1. Fault while reloading DS, ES, FS or GS
1481 # 2. Fault while executing IRET
1482 # Category 1 we do not need to fix up as Xen has already reloaded all segment
1483 # registers that could be reloaded and zeroed the others.
1484 # Category 2 we fix up by killing the current process. We cannot use the
1485 # normal Linux return path in this case because if we use the IRET hypercall
1486 # to pop the stack frame we end up in an infinite loop of failsafe callbacks.
1487 # We distinguish between categories by comparing each saved segment register
1488 # with its current contents: any discrepancy means we are in category 1.
1489 */
1490 ENTRY(xen_failsafe_callback)
1491 framesz = (RIP-0x30) /* workaround buggy gas */
1492 _frame framesz
1493 CFI_REL_OFFSET rcx, 0
1494 CFI_REL_OFFSET r11, 8
1495 movw %ds,%cx
1496 cmpw %cx,0x10(%rsp)
1497 CFI_REMEMBER_STATE
1498 jne 1f
1499 movw %es,%cx
1500 cmpw %cx,0x18(%rsp)
1501 jne 1f
1502 movw %fs,%cx
1503 cmpw %cx,0x20(%rsp)
1504 jne 1f
1505 movw %gs,%cx
1506 cmpw %cx,0x28(%rsp)
1507 jne 1f
1508 /* All segments match their saved values => Category 2 (Bad IRET). */
1509 movq (%rsp),%rcx
1510 CFI_RESTORE rcx
1511 movq 8(%rsp),%r11
1512 CFI_RESTORE r11
1513 addq $0x30,%rsp
1514 CFI_ADJUST_CFA_OFFSET -0x30
1515 pushq $0
1516 CFI_ADJUST_CFA_OFFSET 8
1517 pushq %r11
1518 CFI_ADJUST_CFA_OFFSET 8
1519 pushq %rcx
1520 CFI_ADJUST_CFA_OFFSET 8
1521 jmp general_protection
1522 CFI_RESTORE_STATE
1523 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
1524 movq (%rsp),%rcx
1525 CFI_RESTORE rcx
1526 movq 8(%rsp),%r11
1527 CFI_RESTORE r11
1528 addq $0x30,%rsp
1529 CFI_ADJUST_CFA_OFFSET -0x30
1530 pushq $0
1531 CFI_ADJUST_CFA_OFFSET 8
1532 SAVE_ALL
1533 jmp error_exit
1534 CFI_ENDPROC
1535 END(xen_failsafe_callback)
1536
1537 #endif /* CONFIG_XEN */