1 /*
2 * linux/arch/x86_64/entry.S
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
6 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
7 */
8
9 /*
10 * entry.S contains the system-call and fault low-level handling routines.
11 *
12 * NOTE: This code handles signal-recognition, which happens every time
13 * after an interrupt and after each system call.
14 *
15 * Normal syscalls and interrupts don't save a full stack frame; this is
16 * only done for syscall tracing, signals or fork/exec et al.
17 *
18 * A note on terminology:
19 * - top of stack: Architecture defined interrupt frame from SS to RIP
20 * at the top of the kernel process stack.
21 * - partial stack frame: partially saved registers up to R11.
22 * - full stack frame: Like partial stack frame, but all registers saved.
23 *
24 * Some macro usage:
25 * - CFI macros are used to generate dwarf2 unwind information for better
26 * backtraces. They don't change any code.
27 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
28 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
29 * There are unfortunately lots of special cases where some registers
30 * are not touched. The macro is a big mess that should be cleaned up.
31 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
32 * Gives a full stack frame.
33 * - ENTRY/END Define functions in the symbol table.
34 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
35 * frame that is otherwise undefined after a SYSCALL
36 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
37 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
38 */
39
40 #include <linux/linkage.h>
41 #include <asm/segment.h>
42 #include <asm/cache.h>
43 #include <asm/errno.h>
44 #include <asm/dwarf2.h>
45 #include <asm/calling.h>
46 #include <asm/asm-offsets.h>
47 #include <asm/msr.h>
48 #include <asm/unistd.h>
49 #include <asm/thread_info.h>
50 #include <asm/hw_irq.h>
51 #include <asm/page.h>
52 #include <asm/irqflags.h>
53 #include <asm/paravirt.h>
54 #include <asm/ftrace.h>
55
56 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
57 #include <linux/elf-em.h>
58 #define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
59 #define __AUDIT_ARCH_64BIT 0x80000000
60 #define __AUDIT_ARCH_LE 0x40000000
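
/*
 * For reference, a rough userspace sketch of what these constants work out
 * to (illustrative only, not part of the kernel build; assumes EM_X86_64 is
 * 62 as defined in <linux/elf-em.h>):
 *
 *	#include <stdio.h>
 *
 *	#define EM_X86_64          62
 *	#define __AUDIT_ARCH_64BIT 0x80000000
 *	#define __AUDIT_ARCH_LE    0x40000000
 *	#define AUDIT_ARCH_X86_64  (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
 *
 *	int main(void)
 *	{
 *		// prints 0xc000003e: 64-bit, little-endian, ELF machine 62
 *		printf("AUDIT_ARCH_X86_64 = %#x\n", AUDIT_ARCH_X86_64);
 *		return 0;
 *	}
 */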
61
62 .code64
63
64 #ifdef CONFIG_FUNCTION_TRACER
65 #ifdef CONFIG_DYNAMIC_FTRACE
66 ENTRY(mcount)
67 retq
68 END(mcount)
69
70 ENTRY(ftrace_caller)
71
72 /* taken from glibc */
73 subq $0x38, %rsp
74 movq %rax, (%rsp)
75 movq %rcx, 8(%rsp)
76 movq %rdx, 16(%rsp)
77 movq %rsi, 24(%rsp)
78 movq %rdi, 32(%rsp)
79 movq %r8, 40(%rsp)
80 movq %r9, 48(%rsp)
81
82 movq 0x38(%rsp), %rdi
83 movq 8(%rbp), %rsi
84 subq $MCOUNT_INSN_SIZE, %rdi
85
86 .globl ftrace_call
87 ftrace_call:
88 call ftrace_stub
89
90 movq 48(%rsp), %r9
91 movq 40(%rsp), %r8
92 movq 32(%rsp), %rdi
93 movq 24(%rsp), %rsi
94 movq 16(%rsp), %rdx
95 movq 8(%rsp), %rcx
96 movq (%rsp), %rax
97 addq $0x38, %rsp
98
99 .globl ftrace_stub
100 ftrace_stub:
101 retq
102 END(ftrace_caller)
103
104 #else /* ! CONFIG_DYNAMIC_FTRACE */
105 ENTRY(mcount)
106 cmpq $ftrace_stub, ftrace_trace_function
107 jnz trace
108 .globl ftrace_stub
109 ftrace_stub:
110 retq
111
112 trace:
113 /* taken from glibc */
114 subq $0x38, %rsp
115 movq %rax, (%rsp)
116 movq %rcx, 8(%rsp)
117 movq %rdx, 16(%rsp)
118 movq %rsi, 24(%rsp)
119 movq %rdi, 32(%rsp)
120 movq %r8, 40(%rsp)
121 movq %r9, 48(%rsp)
122
123 movq 0x38(%rsp), %rdi
124 movq 8(%rbp), %rsi
125 subq $MCOUNT_INSN_SIZE, %rdi
126
127 call *ftrace_trace_function
128
129 movq 48(%rsp), %r9
130 movq 40(%rsp), %r8
131 movq 32(%rsp), %rdi
132 movq 24(%rsp), %rsi
133 movq 16(%rsp), %rdx
134 movq 8(%rsp), %rcx
135 movq (%rsp), %rax
136 addq $0x38, %rsp
137
138 jmp ftrace_stub
139 END(mcount)
140 #endif /* CONFIG_DYNAMIC_FTRACE */
141 #endif /* CONFIG_FUNCTION_TRACER */
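
/*
 * In the stubs above, %rdi ends up holding the mcount call site inside the
 * traced function (its return address minus MCOUNT_INSN_SIZE) and %rsi the
 * traced function's own return address taken from 8(%rbp). That matches the
 * C tracer callback; a hedged sketch, assuming the ftrace_func_t typedef of
 * this era and a made-up example callback:
 *
 *	typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
 *
 *	static void example_tracer(unsigned long ip, unsigned long parent_ip)
 *	{
 *		// ip:        address of the function being traced (%rdi above)
 *		// parent_ip: return address into its caller       (%rsi above)
 *	}
 */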
142
143 #ifndef CONFIG_PREEMPT
144 #define retint_kernel retint_restore_args
145 #endif
146
147 #ifdef CONFIG_PARAVIRT
148 ENTRY(native_usergs_sysret64)
149 swapgs
150 sysretq
151 #endif /* CONFIG_PARAVIRT */
152
153
154 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
155 #ifdef CONFIG_TRACE_IRQFLAGS
156 bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
157 jnc 1f
158 TRACE_IRQS_ON
159 1:
160 #endif
161 .endm
162
163 /*
164 * C code is not supposed to know about undefined top of stack. Every time
165 * a C function with a pt_regs argument is called from the SYSCALL based
166 * fast path, FIXUP_TOP_OF_STACK is needed.
167 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
168 * manipulation.
169 */
170
171 /* %rsp:at FRAMEEND */
172 .macro FIXUP_TOP_OF_STACK tmp
173 movq %gs:pda_oldrsp,\tmp
174 movq \tmp,RSP(%rsp)
175 movq $__USER_DS,SS(%rsp)
176 movq $__USER_CS,CS(%rsp)
177 movq $-1,RCX(%rsp)
178 movq R11(%rsp),\tmp /* get eflags */
179 movq \tmp,EFLAGS(%rsp)
180 .endm
181
182 .macro RESTORE_TOP_OF_STACK tmp,offset=0
183 movq RSP-\offset(%rsp),\tmp
184 movq \tmp,%gs:pda_oldrsp
185 movq EFLAGS-\offset(%rsp),\tmp
186 movq \tmp,R11-\offset(%rsp)
187 .endm
188
189 .macro FAKE_STACK_FRAME child_rip
190 /* push in order ss, rsp, eflags, cs, rip */
191 xorl %eax, %eax
192 pushq $__KERNEL_DS /* ss */
193 CFI_ADJUST_CFA_OFFSET 8
194 /*CFI_REL_OFFSET ss,0*/
195 pushq %rax /* rsp */
196 CFI_ADJUST_CFA_OFFSET 8
197 CFI_REL_OFFSET rsp,0
198 pushq $(1<<9) /* eflags - interrupts on */
199 CFI_ADJUST_CFA_OFFSET 8
200 /*CFI_REL_OFFSET rflags,0*/
201 pushq $__KERNEL_CS /* cs */
202 CFI_ADJUST_CFA_OFFSET 8
203 /*CFI_REL_OFFSET cs,0*/
204 pushq \child_rip /* rip */
205 CFI_ADJUST_CFA_OFFSET 8
206 CFI_REL_OFFSET rip,0
207 pushq %rax /* orig rax */
208 CFI_ADJUST_CFA_OFFSET 8
209 .endm
210
211 .macro UNFAKE_STACK_FRAME
212 addq $8*6, %rsp
213 CFI_ADJUST_CFA_OFFSET -(6*8)
214 .endm
215
216 .macro CFI_DEFAULT_STACK start=1
217 .if \start
218 CFI_STARTPROC simple
219 CFI_SIGNAL_FRAME
220 CFI_DEF_CFA rsp,SS+8
221 .else
222 CFI_DEF_CFA_OFFSET SS+8
223 .endif
224 CFI_REL_OFFSET r15,R15
225 CFI_REL_OFFSET r14,R14
226 CFI_REL_OFFSET r13,R13
227 CFI_REL_OFFSET r12,R12
228 CFI_REL_OFFSET rbp,RBP
229 CFI_REL_OFFSET rbx,RBX
230 CFI_REL_OFFSET r11,R11
231 CFI_REL_OFFSET r10,R10
232 CFI_REL_OFFSET r9,R9
233 CFI_REL_OFFSET r8,R8
234 CFI_REL_OFFSET rax,RAX
235 CFI_REL_OFFSET rcx,RCX
236 CFI_REL_OFFSET rdx,RDX
237 CFI_REL_OFFSET rsi,RSI
238 CFI_REL_OFFSET rdi,RDI
239 CFI_REL_OFFSET rip,RIP
240 /*CFI_REL_OFFSET cs,CS*/
241 /*CFI_REL_OFFSET rflags,EFLAGS*/
242 CFI_REL_OFFSET rsp,RSP
243 /*CFI_REL_OFFSET ss,SS*/
244 .endm
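
/*
 * The R15..SS offsets used above come from asm-offsets and mirror struct
 * pt_regs. Roughly, for this generation of the x86-64 kernel (a sketch for
 * orientation only; the authoritative layout is in <asm/ptrace.h>):
 *
 *	struct pt_regs {
 *		unsigned long r15, r14, r13, r12;
 *		unsigned long bp, bx;
 *		unsigned long r11, r10, r9, r8;
 *		unsigned long ax, cx, dx, si, di;
 *		unsigned long orig_ax;		// syscall number or error code
 *		// the hardware interrupt frame follows:
 *		unsigned long ip, cs, flags, sp, ss;
 *	};
 *
 * A "partial stack frame" covers only r11 through di (what SAVE_ARGS stores);
 * SAVE_REST fills in r15 through bx to make a "full stack frame".
 */
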
245 /*
246 * A newly forked process directly context switches into this.
247 */
248 /* rdi: prev */
249 ENTRY(ret_from_fork)
250 CFI_DEFAULT_STACK
251 push kernel_eflags(%rip)
252 CFI_ADJUST_CFA_OFFSET 8
253 popf # reset kernel eflags
254 CFI_ADJUST_CFA_OFFSET -8
255 call schedule_tail
256 GET_THREAD_INFO(%rcx)
257 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
258 CFI_REMEMBER_STATE
259 jnz rff_trace
260 rff_action:
261 RESTORE_REST
262 testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread?
263 je int_ret_from_sys_call
264 testl $_TIF_IA32,TI_flags(%rcx)
265 jnz int_ret_from_sys_call
266 RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
267 jmp ret_from_sys_call
268 CFI_RESTORE_STATE
269 rff_trace:
270 movq %rsp,%rdi
271 call syscall_trace_leave
272 GET_THREAD_INFO(%rcx)
273 jmp rff_action
274 CFI_ENDPROC
275 END(ret_from_fork)
276
277 /*
278 * System call entry. Up to 6 arguments in registers are supported.
279 *
280 * SYSCALL does not save anything on the stack and does not change the
281 * stack pointer.
282 */
283
284 /*
285 * Register setup:
286 * rax system call number
287 * rdi arg0
288 * rcx return address for syscall/sysret, C arg3
289 * rsi arg1
290 * rdx arg2
291 * r10 arg3 (--> moved to rcx for C)
292 * r8 arg4
293 * r9 arg5
294 * r11 eflags for syscall/sysret, temporary for C
295 * r12-r15,rbp,rbx saved by C code, not touched.
296 *
297 * Interrupts are off on entry.
298 * Only called from user space.
299 *
300 * XXX if we had a free scratch register we could save the RSP into the stack frame
301 * and report it properly in ps. Unfortunately we don't have one.
302 *
303 * When the user can change the frames, always force IRET. That is because
304 * IRET deals with non-canonical addresses better; SYSRET has trouble
305 * with them due to bugs in both AMD and Intel CPUs.
306 */
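
/*
 * For illustration only (not part of this file): the same convention as seen
 * from userspace, issuing a raw SYSCALL via inline asm. Assumes glibc's
 * <sys/syscall.h> for SYS_write; note that rcx and r11 are clobbered by the
 * kernel, exactly as described above.
 *
 *	#include <sys/syscall.h>
 *
 *	static long raw_write(unsigned int fd, const char *buf, unsigned long len)
 *	{
 *		long ret;
 *
 *		asm volatile("syscall"
 *			     : "=a" (ret)
 *			     : "a" (SYS_write), "D" (fd), "S" (buf), "d" (len)
 *			     : "rcx", "r11", "memory");
 *		return ret;
 *	}
 *
 *	int main(void)
 *	{
 *		raw_write(1, "hello\n", 6);
 *		return 0;
 *	}
 */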
307
308 ENTRY(system_call)
309 CFI_STARTPROC simple
310 CFI_SIGNAL_FRAME
311 CFI_DEF_CFA rsp,PDA_STACKOFFSET
312 CFI_REGISTER rip,rcx
313 /*CFI_REGISTER rflags,r11*/
314 SWAPGS_UNSAFE_STACK
315 /*
316 * A hypervisor implementation might want to use a label
317 * after the swapgs, so that it can do the swapgs
318 * for the guest and jump here on syscall.
319 */
320 ENTRY(system_call_after_swapgs)
321
322 movq %rsp,%gs:pda_oldrsp
323 movq %gs:pda_kernelstack,%rsp
324 /*
325 * No need to follow this irqs off/on section - it's straight
326 * and short:
327 */
328 ENABLE_INTERRUPTS(CLBR_NONE)
329 SAVE_ARGS 8,1
330 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
331 movq %rcx,RIP-ARGOFFSET(%rsp)
332 CFI_REL_OFFSET rip,RIP-ARGOFFSET
333 GET_THREAD_INFO(%rcx)
334 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
335 jnz tracesys
336 system_call_fastpath:
337 cmpq $__NR_syscall_max,%rax
338 ja badsys
339 movq %r10,%rcx
340 call *sys_call_table(,%rax,8) # XXX: rip relative
341 movq %rax,RAX-ARGOFFSET(%rsp)
342 /*
343 * Syscall return path ending with SYSRET (fast path)
344 * Has incomplete stack frame and undefined top of stack.
345 */
346 ret_from_sys_call:
347 movl $_TIF_ALLWORK_MASK,%edi
348 /* edi: flagmask */
349 sysret_check:
350 LOCKDEP_SYS_EXIT
351 GET_THREAD_INFO(%rcx)
352 DISABLE_INTERRUPTS(CLBR_NONE)
353 TRACE_IRQS_OFF
354 movl TI_flags(%rcx),%edx
355 andl %edi,%edx
356 jnz sysret_careful
357 CFI_REMEMBER_STATE
358 /*
359 * sysretq will re-enable interrupts:
360 */
361 TRACE_IRQS_ON
362 movq RIP-ARGOFFSET(%rsp),%rcx
363 CFI_REGISTER rip,rcx
364 RESTORE_ARGS 0,-ARG_SKIP,1
365 /*CFI_REGISTER rflags,r11*/
366 movq %gs:pda_oldrsp, %rsp
367 USERGS_SYSRET64
368
369 CFI_RESTORE_STATE
370 /* Handle reschedules */
371 /* edx: work, edi: workmask */
372 sysret_careful:
373 bt $TIF_NEED_RESCHED,%edx
374 jnc sysret_signal
375 TRACE_IRQS_ON
376 ENABLE_INTERRUPTS(CLBR_NONE)
377 pushq %rdi
378 CFI_ADJUST_CFA_OFFSET 8
379 call schedule
380 popq %rdi
381 CFI_ADJUST_CFA_OFFSET -8
382 jmp sysret_check
383
384 /* Handle a signal */
385 sysret_signal:
386 TRACE_IRQS_ON
387 ENABLE_INTERRUPTS(CLBR_NONE)
388 #ifdef CONFIG_AUDITSYSCALL
389 bt $TIF_SYSCALL_AUDIT,%edx
390 jc sysret_audit
391 #endif
392 /* edx: work flags (arg3) */
393 leaq do_notify_resume(%rip),%rax
394 leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
395 xorl %esi,%esi # oldset -> arg2
396 call ptregscall_common
397 movl $_TIF_WORK_MASK,%edi
398 /* Use IRET because user could have changed frame. This
399 works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
400 DISABLE_INTERRUPTS(CLBR_NONE)
401 TRACE_IRQS_OFF
402 jmp int_with_check
403
404 badsys:
405 movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
406 jmp ret_from_sys_call
407
408 #ifdef CONFIG_AUDITSYSCALL
409 /*
410 * Fast path for syscall audit without full syscall trace.
411 * We just call audit_syscall_entry() directly, and then
412 * jump back to the normal fast path.
413 */
414 auditsys:
415 movq %r10,%r9 /* 6th arg: 4th syscall arg */
416 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
417 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
418 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
419 movq %rax,%rsi /* 2nd arg: syscall number */
420 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
421 call audit_syscall_entry
422 LOAD_ARGS 0 /* reload call-clobbered registers */
423 jmp system_call_fastpath
424
425 /*
426 * Return fast path for syscall audit. Call audit_syscall_exit()
427 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
428 * masked off.
429 */
430 sysret_audit:
431 movq %rax,%rsi /* second arg, syscall return value */
432 cmpq $0,%rax /* is it < 0? */
433 setl %al /* 1 if so, 0 if not */
434 movzbl %al,%edi /* zero-extend that into %edi */
435 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
436 call audit_syscall_exit
437 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
438 jmp sysret_check
439 #endif /* CONFIG_AUDITSYSCALL */
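
/*
 * The register shuffling above lines the syscall arguments up with the C
 * prototypes of this era (a sketch, assuming the <linux/audit.h> of the same
 * generation; see that header for the authoritative declarations):
 *
 *	void audit_syscall_entry(int arch, int major,
 *				 unsigned long a0, unsigned long a1,
 *				 unsigned long a2, unsigned long a3);
 *	void audit_syscall_exit(int valid, long return_code);
 *
 * auditsys maps rdi/rsi/rdx/r10 into a0..a3, and sysret_audit turns the sign
 * of the return value in rax into AUDITSC_SUCCESS (1) or AUDITSC_FAILURE (2)
 * for the first argument of audit_syscall_exit().
 */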
440
441 /* Do syscall tracing */
442 tracesys:
443 #ifdef CONFIG_AUDITSYSCALL
444 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
445 jz auditsys
446 #endif
447 SAVE_REST
448 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
449 FIXUP_TOP_OF_STACK %rdi
450 movq %rsp,%rdi
451 call syscall_trace_enter
452 /*
453 * Reload arg registers from stack in case ptrace changed them.
454 * We don't reload %rax because syscall_trace_enter() returned
455 * the value it wants us to use in the table lookup.
456 */
457 LOAD_ARGS ARGOFFSET, 1
458 RESTORE_REST
459 cmpq $__NR_syscall_max,%rax
460 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
461 movq %r10,%rcx /* fixup for C */
462 call *sys_call_table(,%rax,8)
463 movq %rax,RAX-ARGOFFSET(%rsp)
464 /* Use IRET because user could have changed frame */
465
466 /*
467 * Syscall return path ending with IRET.
468 * Has correct top of stack, but partial stack frame.
469 */
470 .globl int_ret_from_sys_call
471 .globl int_with_check
472 int_ret_from_sys_call:
473 DISABLE_INTERRUPTS(CLBR_NONE)
474 TRACE_IRQS_OFF
475 testl $3,CS-ARGOFFSET(%rsp)
476 je retint_restore_args
477 movl $_TIF_ALLWORK_MASK,%edi
478 /* edi: mask to check */
479 int_with_check:
480 LOCKDEP_SYS_EXIT_IRQ
481 GET_THREAD_INFO(%rcx)
482 movl TI_flags(%rcx),%edx
483 andl %edi,%edx
484 jnz int_careful
485 andl $~TS_COMPAT,TI_status(%rcx)
486 jmp retint_swapgs
487
488 /* Either reschedule or signal or syscall exit tracking needed. */
489 /* First do a reschedule test. */
490 /* edx: work, edi: workmask */
491 int_careful:
492 bt $TIF_NEED_RESCHED,%edx
493 jnc int_very_careful
494 TRACE_IRQS_ON
495 ENABLE_INTERRUPTS(CLBR_NONE)
496 pushq %rdi
497 CFI_ADJUST_CFA_OFFSET 8
498 call schedule
499 popq %rdi
500 CFI_ADJUST_CFA_OFFSET -8
501 DISABLE_INTERRUPTS(CLBR_NONE)
502 TRACE_IRQS_OFF
503 jmp int_with_check
504
505 /* handle signals and tracing -- both require a full stack frame */
506 int_very_careful:
507 TRACE_IRQS_ON
508 ENABLE_INTERRUPTS(CLBR_NONE)
509 SAVE_REST
510 /* Check for syscall exit trace */
511 testl $_TIF_WORK_SYSCALL_EXIT,%edx
512 jz int_signal
513 pushq %rdi
514 CFI_ADJUST_CFA_OFFSET 8
515 leaq 8(%rsp),%rdi # &ptregs -> arg1
516 call syscall_trace_leave
517 popq %rdi
518 CFI_ADJUST_CFA_OFFSET -8
519 andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
520 jmp int_restore_rest
521
522 int_signal:
523 testl $_TIF_DO_NOTIFY_MASK,%edx
524 jz 1f
525 movq %rsp,%rdi # &ptregs -> arg1
526 xorl %esi,%esi # oldset -> arg2
527 call do_notify_resume
528 1: movl $_TIF_WORK_MASK,%edi
529 int_restore_rest:
530 RESTORE_REST
531 DISABLE_INTERRUPTS(CLBR_NONE)
532 TRACE_IRQS_OFF
533 jmp int_with_check
534 CFI_ENDPROC
535 END(system_call)
536
537 /*
538 * Certain special system calls need to save a complete full stack frame.
539 */
540
541 .macro PTREGSCALL label,func,arg
542 .globl \label
543 \label:
544 leaq \func(%rip),%rax
545 leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
546 jmp ptregscall_common
547 END(\label)
548 .endm
549
550 CFI_STARTPROC
551
552 PTREGSCALL stub_clone, sys_clone, %r8
553 PTREGSCALL stub_fork, sys_fork, %rdi
554 PTREGSCALL stub_vfork, sys_vfork, %rdi
555 PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
556 PTREGSCALL stub_iopl, sys_iopl, %rsi
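
/*
 * The third macro argument is the register that receives &pt_regs, i.e. the
 * slot of the extra struct pt_regs * parameter in the C handler. Roughly,
 * for the stub_clone case above (a sketch; see the process code for the real
 * declaration):
 *
 *	struct pt_regs;
 *	asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp,
 *				  void __user *parent_tid, void __user *child_tid,
 *				  struct pt_regs *regs);
 *
 * regs is the fifth argument, hence %r8 for stub_clone; sys_fork and
 * sys_vfork take regs as their only argument, hence %rdi.
 */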
557
558 ENTRY(ptregscall_common)
559 popq %r11
560 CFI_ADJUST_CFA_OFFSET -8
561 CFI_REGISTER rip, r11
562 SAVE_REST
563 movq %r11, %r15
564 CFI_REGISTER rip, r15
565 FIXUP_TOP_OF_STACK %r11
566 call *%rax
567 RESTORE_TOP_OF_STACK %r11
568 movq %r15, %r11
569 CFI_REGISTER rip, r11
570 RESTORE_REST
571 pushq %r11
572 CFI_ADJUST_CFA_OFFSET 8
573 CFI_REL_OFFSET rip, 0
574 ret
575 CFI_ENDPROC
576 END(ptregscall_common)
577
578 ENTRY(stub_execve)
579 CFI_STARTPROC
580 popq %r11
581 CFI_ADJUST_CFA_OFFSET -8
582 CFI_REGISTER rip, r11
583 SAVE_REST
584 FIXUP_TOP_OF_STACK %r11
585 movq %rsp, %rcx
586 call sys_execve
587 RESTORE_TOP_OF_STACK %r11
588 movq %rax,RAX(%rsp)
589 RESTORE_REST
590 jmp int_ret_from_sys_call
591 CFI_ENDPROC
592 END(stub_execve)
593
594 /*
595 * sigreturn is special because it needs to restore all registers on return.
596 * This cannot be done with SYSRET, so use the IRET return path instead.
597 */
598 ENTRY(stub_rt_sigreturn)
599 CFI_STARTPROC
600 addq $8, %rsp
601 CFI_ADJUST_CFA_OFFSET -8
602 SAVE_REST
603 movq %rsp,%rdi
604 FIXUP_TOP_OF_STACK %r11
605 call sys_rt_sigreturn
606 movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
607 RESTORE_REST
608 jmp int_ret_from_sys_call
609 CFI_ENDPROC
610 END(stub_rt_sigreturn)
611
612 /*
613 * initial frame state for interrupts and exceptions
614 */
615 .macro _frame ref
616 CFI_STARTPROC simple
617 CFI_SIGNAL_FRAME
618 CFI_DEF_CFA rsp,SS+8-\ref
619 /*CFI_REL_OFFSET ss,SS-\ref*/
620 CFI_REL_OFFSET rsp,RSP-\ref
621 /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
622 /*CFI_REL_OFFSET cs,CS-\ref*/
623 CFI_REL_OFFSET rip,RIP-\ref
624 .endm
625
626 /* initial frame state for interrupts (and exceptions without error code) */
627 #define INTR_FRAME _frame RIP
628 /* initial frame state for exceptions with error code (and interrupts with
629 vector already pushed) */
630 #define XCPT_FRAME _frame ORIG_RAX
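
/*
 * For orientation, what the CPU itself pushes before we get here (a sketch;
 * 64-bit mode always pushes SS:RSP, and an error code only for some vectors):
 *
 *	struct hw_exception_frame {		// hypothetical name, top of kernel stack
 *		unsigned long error_code;	// only for exceptions that supply one
 *		unsigned long rip;
 *		unsigned long cs;
 *		unsigned long rflags;
 *		unsigned long rsp;
 *		unsigned long ss;
 *	};
 *
 * INTR_FRAME describes the state with nothing below RIP; XCPT_FRAME describes
 * the state once an error code (or the ~vector pushed by the interrupt stubs)
 * sits in the ORIG_RAX slot.
 */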
631
632 /*
633 * Interrupt entry/exit.
634 *
635 * Interrupt entry points save only callee-clobbered registers on the fast path.
636 *
637 * Entry runs with interrupts off.
638 */
639
640 /* 0(%rsp): interrupt number */
641 .macro interrupt func
642 cld
643 SAVE_ARGS
644 leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler
645 pushq %rbp
646 /*
647 * Save rbp twice: one is for marking the stack frame, as usual, and the
648 * other is to fill pt_regs properly. This is because bx comes right
649 * before the last saved register in that structure, and not bp. If the
650 * base pointer were in the place bx is today, this would not be needed.
651 */
652 movq %rbp, -8(%rsp)
653 CFI_ADJUST_CFA_OFFSET 8
654 CFI_REL_OFFSET rbp, 0
655 movq %rsp,%rbp
656 CFI_DEF_CFA_REGISTER rbp
657 testl $3,CS(%rdi)
658 je 1f
659 SWAPGS
660 /* irqcount is used to check if a CPU is already on an interrupt
661 stack or not. While this is essentially redundant with preempt_count
662 it is a little cheaper to use a separate counter in the PDA
663 (short of moving irq_enter into assembly, which would be too
664 much work) */
665 1: incl %gs:pda_irqcount
666 cmoveq %gs:pda_irqstackptr,%rsp
667 push %rbp # backlink for old unwinder
668 /*
669 * We entered an interrupt context - irqs are off:
670 */
671 TRACE_IRQS_OFF
672 call \func
673 .endm
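
/*
 * In C terms, the incl/cmoveq pair above does roughly this (pda_irqcount
 * starts at -1 while the CPU is not on the interrupt stack):
 *
 *	if (++pda->irqcount == 0)		// first interrupt at this depth
 *		sp = pda->irqstackptr;		// switch to the per-CPU IRQ stack
 *
 * Nested interrupts leave irqcount > 0 and keep running on the stack they
 * arrived on.
 */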
674
675 ENTRY(common_interrupt)
676 XCPT_FRAME
677 interrupt do_IRQ
678 /* 0(%rsp): oldrsp-ARGOFFSET */
679 ret_from_intr:
680 DISABLE_INTERRUPTS(CLBR_NONE)
681 TRACE_IRQS_OFF
682 decl %gs:pda_irqcount
683 leaveq
684 CFI_DEF_CFA_REGISTER rsp
685 CFI_ADJUST_CFA_OFFSET -8
686 exit_intr:
687 GET_THREAD_INFO(%rcx)
688 testl $3,CS-ARGOFFSET(%rsp)
689 je retint_kernel
690
691 /* Interrupt came from user space */
692 /*
693 * Has a correct top of stack, but a partial stack frame
694 * %rcx: thread info. Interrupts off.
695 */
696 retint_with_reschedule:
697 movl $_TIF_WORK_MASK,%edi
698 retint_check:
699 LOCKDEP_SYS_EXIT_IRQ
700 movl TI_flags(%rcx),%edx
701 andl %edi,%edx
702 CFI_REMEMBER_STATE
703 jnz retint_careful
704
705 retint_swapgs: /* return to user-space */
706 /*
707 * The iretq could re-enable interrupts:
708 */
709 DISABLE_INTERRUPTS(CLBR_ANY)
710 TRACE_IRQS_IRETQ
711 SWAPGS
712 jmp restore_args
713
714 retint_restore_args: /* return to kernel space */
715 DISABLE_INTERRUPTS(CLBR_ANY)
716 /*
717 * The iretq could re-enable interrupts:
718 */
719 TRACE_IRQS_IRETQ
720 restore_args:
721 RESTORE_ARGS 0,8,0
722
723 irq_return:
724 INTERRUPT_RETURN
725
726 .section __ex_table, "a"
727 .quad irq_return, bad_iret
728 .previous
729
730 #ifdef CONFIG_PARAVIRT
731 ENTRY(native_iret)
732 iretq
733
734 .section __ex_table,"a"
735 .quad native_iret, bad_iret
736 .previous
737 #endif
738
739 .section .fixup,"ax"
740 bad_iret:
741 /*
742 * The iret traps when the %cs or %ss being restored is bogus.
743 * We've lost the original trap vector and error code.
744 * #GPF is the most likely one to get for an invalid selector.
745 * So pretend we completed the iret and took the #GPF in user mode.
746 *
747 * We are now running with the kernel GS after exception recovery.
748 * But error_entry expects us to have user GS to match the user %cs,
749 * so swap back.
750 */
751 pushq $0
752
753 SWAPGS
754 jmp general_protection
755
756 .previous
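
/*
 * The __ex_table/.fixup pairing above is the standard kernel exception-table
 * mechanism: each entry records a potentially faulting instruction and where
 * to resume. Roughly (a sketch; see <asm/uaccess.h> for the real definition):
 *
 *	struct exception_table_entry {
 *		unsigned long insn;	// address of the instruction that may fault
 *		unsigned long fixup;	// address to jump to if it does
 *	};
 *
 * The fault handler searches this table and, on a match, rewrites the saved
 * RIP to the fixup address, which is how a faulting iretq ends up at bad_iret.
 */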
757
758 /* edi: workmask, edx: work */
759 retint_careful:
760 CFI_RESTORE_STATE
761 bt $TIF_NEED_RESCHED,%edx
762 jnc retint_signal
763 TRACE_IRQS_ON
764 ENABLE_INTERRUPTS(CLBR_NONE)
765 pushq %rdi
766 CFI_ADJUST_CFA_OFFSET 8
767 call schedule
768 popq %rdi
769 CFI_ADJUST_CFA_OFFSET -8
770 GET_THREAD_INFO(%rcx)
771 DISABLE_INTERRUPTS(CLBR_NONE)
772 TRACE_IRQS_OFF
773 jmp retint_check
774
775 retint_signal:
776 testl $_TIF_DO_NOTIFY_MASK,%edx
777 jz retint_swapgs
778 TRACE_IRQS_ON
779 ENABLE_INTERRUPTS(CLBR_NONE)
780 SAVE_REST
781 movq $-1,ORIG_RAX(%rsp)
782 xorl %esi,%esi # oldset
783 movq %rsp,%rdi # &pt_regs
784 call do_notify_resume
785 RESTORE_REST
786 DISABLE_INTERRUPTS(CLBR_NONE)
787 TRACE_IRQS_OFF
788 GET_THREAD_INFO(%rcx)
789 jmp retint_with_reschedule
790
791 #ifdef CONFIG_PREEMPT
792 /* Returning to kernel space. Check if we need preemption */
793 /* rcx: threadinfo. interrupts off. */
794 ENTRY(retint_kernel)
795 cmpl $0,TI_preempt_count(%rcx)
796 jnz retint_restore_args
797 bt $TIF_NEED_RESCHED,TI_flags(%rcx)
798 jnc retint_restore_args
799 bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
800 jnc retint_restore_args
801 call preempt_schedule_irq
802 jmp exit_intr
803 #endif
804
805 CFI_ENDPROC
806 END(common_interrupt)
807
808 /*
809 * APIC interrupts.
810 */
811 .macro apicinterrupt num,func
812 INTR_FRAME
813 pushq $~(\num)
814 CFI_ADJUST_CFA_OFFSET 8
815 interrupt \func
816 jmp ret_from_intr
817 CFI_ENDPROC
818 .endm
819
820 ENTRY(thermal_interrupt)
821 apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
822 END(thermal_interrupt)
823
824 ENTRY(threshold_interrupt)
825 apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
826 END(threshold_interrupt)
827
828 #ifdef CONFIG_SMP
829 ENTRY(reschedule_interrupt)
830 apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
831 END(reschedule_interrupt)
832
833 .macro INVALIDATE_ENTRY num
834 ENTRY(invalidate_interrupt\num)
835 apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
836 END(invalidate_interrupt\num)
837 .endm
838
839 INVALIDATE_ENTRY 0
840 INVALIDATE_ENTRY 1
841 INVALIDATE_ENTRY 2
842 INVALIDATE_ENTRY 3
843 INVALIDATE_ENTRY 4
844 INVALIDATE_ENTRY 5
845 INVALIDATE_ENTRY 6
846 INVALIDATE_ENTRY 7
847
848 ENTRY(call_function_interrupt)
849 apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
850 END(call_function_interrupt)
851 ENTRY(call_function_single_interrupt)
852 apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
853 END(call_function_single_interrupt)
854 ENTRY(irq_move_cleanup_interrupt)
855 apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
856 END(irq_move_cleanup_interrupt)
857 #endif
858
859 ENTRY(apic_timer_interrupt)
860 apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
861 END(apic_timer_interrupt)
862
863 ENTRY(uv_bau_message_intr1)
864 apicinterrupt 220,uv_bau_message_interrupt
865 END(uv_bau_message_intr1)
866
867 ENTRY(error_interrupt)
868 apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
869 END(error_interrupt)
870
871 ENTRY(spurious_interrupt)
872 apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
873 END(spurious_interrupt)
874
875 /*
876 * Exception entry points.
877 */
878 .macro zeroentry sym
879 INTR_FRAME
880 PARAVIRT_ADJUST_EXCEPTION_FRAME
881 pushq $0 /* push error code/oldrax */
882 CFI_ADJUST_CFA_OFFSET 8
883 pushq %rax /* push real oldrax to the rdi slot */
884 CFI_ADJUST_CFA_OFFSET 8
885 CFI_REL_OFFSET rax,0
886 leaq \sym(%rip),%rax
887 jmp error_entry
888 CFI_ENDPROC
889 .endm
890
891 .macro errorentry sym
892 XCPT_FRAME
893 PARAVIRT_ADJUST_EXCEPTION_FRAME
894 pushq %rax
895 CFI_ADJUST_CFA_OFFSET 8
896 CFI_REL_OFFSET rax,0
897 leaq \sym(%rip),%rax
898 jmp error_entry
899 CFI_ENDPROC
900 .endm
901
902 /* error code is on the stack already */
903 /* handle NMI-like exceptions that can happen anywhere */
904 .macro paranoidentry sym, ist=0, irqtrace=1
905 SAVE_ALL
906 cld
907 movl $1,%ebx
908 movl $MSR_GS_BASE,%ecx
909 rdmsr
910 testl %edx,%edx
911 js 1f
912 SWAPGS
913 xorl %ebx,%ebx
914 1:
915 .if \ist
916 movq %gs:pda_data_offset, %rbp
917 .endif
918 .if \irqtrace
919 TRACE_IRQS_OFF
920 .endif
921 movq %rsp,%rdi
922 movq ORIG_RAX(%rsp),%rsi
923 movq $-1,ORIG_RAX(%rsp)
924 .if \ist
925 subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
926 .endif
927 call \sym
928 .if \ist
929 addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
930 .endif
931 DISABLE_INTERRUPTS(CLBR_NONE)
932 .if \irqtrace
933 TRACE_IRQS_OFF
934 .endif
935 .endm
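
/*
 * The rdmsr/js check above decides whether we interrupted the kernel with the
 * user's GS base still loaded. In C-ish terms (illustrative only):
 *
 *	unsigned int lo, hi;
 *	int ebx = 1;			// 1: GS already kernel, no swapgs needed on exit
 *
 *	asm volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (MSR_GS_BASE));
 *	if ((int)hi >= 0) {		// high half not a kernel address => user GS base
 *		swapgs();		// hypothetical stand-in for the SWAPGS macro
 *		ebx = 0;		// swap back to user GS on the paranoid exit path
 *	}
 */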
936
937 /*
938 * "Paranoid" exit path from exception stack.
939 * Paranoid because this is used by NMIs and cannot take
940 * any kernel state for granted.
941 * We don't do kernel preemption checks here, because only
942 * NMI should be common and it does not enable IRQs and
943 * cannot get reschedule ticks.
944 *
945 * "trace" is 0 for the NMI handler only, because irq-tracing
946 * is fundamentally NMI-unsafe. (we cannot change the soft and
947 * hard flags at once, atomically)
948 */
949 .macro paranoidexit trace=1
950 /* ebx: no swapgs flag */
951 paranoid_exit\trace:
952 testl %ebx,%ebx /* swapgs needed? */
953 jnz paranoid_restore\trace
954 testl $3,CS(%rsp)
955 jnz paranoid_userspace\trace
956 paranoid_swapgs\trace:
957 .if \trace
958 TRACE_IRQS_IRETQ 0
959 .endif
960 SWAPGS_UNSAFE_STACK
961 paranoid_restore\trace:
962 RESTORE_ALL 8
963 jmp irq_return
964 paranoid_userspace\trace:
965 GET_THREAD_INFO(%rcx)
966 movl TI_flags(%rcx),%ebx
967 andl $_TIF_WORK_MASK,%ebx
968 jz paranoid_swapgs\trace
969 movq %rsp,%rdi /* &pt_regs */
970 call sync_regs
971 movq %rax,%rsp /* switch stack for scheduling */
972 testl $_TIF_NEED_RESCHED,%ebx
973 jnz paranoid_schedule\trace
974 movl %ebx,%edx /* arg3: thread flags */
975 .if \trace
976 TRACE_IRQS_ON
977 .endif
978 ENABLE_INTERRUPTS(CLBR_NONE)
979 xorl %esi,%esi /* arg2: oldset */
980 movq %rsp,%rdi /* arg1: &pt_regs */
981 call do_notify_resume
982 DISABLE_INTERRUPTS(CLBR_NONE)
983 .if \trace
984 TRACE_IRQS_OFF
985 .endif
986 jmp paranoid_userspace\trace
987 paranoid_schedule\trace:
988 .if \trace
989 TRACE_IRQS_ON
990 .endif
991 ENABLE_INTERRUPTS(CLBR_ANY)
992 call schedule
993 DISABLE_INTERRUPTS(CLBR_ANY)
994 .if \trace
995 TRACE_IRQS_OFF
996 .endif
997 jmp paranoid_userspace\trace
998 CFI_ENDPROC
999 .endm
1000
1001 /*
1002 * Exception entry point. This expects an error code/orig_rax on the stack
1003 * and the exception handler in %rax.
1004 */
1005 KPROBE_ENTRY(error_entry)
1006 _frame RDI
1007 CFI_REL_OFFSET rax,0
1008 /* rdi slot contains rax, oldrax contains error code */
1009 cld
1010 subq $14*8,%rsp
1011 CFI_ADJUST_CFA_OFFSET (14*8)
1012 movq %rsi,13*8(%rsp)
1013 CFI_REL_OFFSET rsi,RSI
1014 movq 14*8(%rsp),%rsi /* load rax from rdi slot */
1015 CFI_REGISTER rax,rsi
1016 movq %rdx,12*8(%rsp)
1017 CFI_REL_OFFSET rdx,RDX
1018 movq %rcx,11*8(%rsp)
1019 CFI_REL_OFFSET rcx,RCX
1020 movq %rsi,10*8(%rsp) /* store rax */
1021 CFI_REL_OFFSET rax,RAX
1022 movq %r8, 9*8(%rsp)
1023 CFI_REL_OFFSET r8,R8
1024 movq %r9, 8*8(%rsp)
1025 CFI_REL_OFFSET r9,R9
1026 movq %r10,7*8(%rsp)
1027 CFI_REL_OFFSET r10,R10
1028 movq %r11,6*8(%rsp)
1029 CFI_REL_OFFSET r11,R11
1030 movq %rbx,5*8(%rsp)
1031 CFI_REL_OFFSET rbx,RBX
1032 movq %rbp,4*8(%rsp)
1033 CFI_REL_OFFSET rbp,RBP
1034 movq %r12,3*8(%rsp)
1035 CFI_REL_OFFSET r12,R12
1036 movq %r13,2*8(%rsp)
1037 CFI_REL_OFFSET r13,R13
1038 movq %r14,1*8(%rsp)
1039 CFI_REL_OFFSET r14,R14
1040 movq %r15,(%rsp)
1041 CFI_REL_OFFSET r15,R15
1042 xorl %ebx,%ebx
1043 testl $3,CS(%rsp)
1044 je error_kernelspace
1045 error_swapgs:
1046 SWAPGS
1047 error_sti:
1048 TRACE_IRQS_OFF
1049 movq %rdi,RDI(%rsp)
1050 CFI_REL_OFFSET rdi,RDI
1051 movq %rsp,%rdi
1052 movq ORIG_RAX(%rsp),%rsi /* get error code */
1053 movq $-1,ORIG_RAX(%rsp)
1054 call *%rax
1055 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
1056 error_exit:
1057 movl %ebx,%eax
1058 RESTORE_REST
1059 DISABLE_INTERRUPTS(CLBR_NONE)
1060 TRACE_IRQS_OFF
1061 GET_THREAD_INFO(%rcx)
1062 testl %eax,%eax
1063 jne retint_kernel
1064 LOCKDEP_SYS_EXIT_IRQ
1065 movl TI_flags(%rcx),%edx
1066 movl $_TIF_WORK_MASK,%edi
1067 andl %edi,%edx
1068 jnz retint_careful
1069 jmp retint_swapgs
1070 CFI_ENDPROC
1071
1072 error_kernelspace:
1073 incl %ebx
1074 /* There are two places in the kernel that can potentially fault with
1075 usergs. Handle them here. The exception handlers after
1076 iret run with kernel gs again, so don't set the user space flag.
1077 B stepping K8s sometimes report a truncated RIP for IRET
1078 exceptions returning to compat mode. Check for these here too. */
1079 leaq irq_return(%rip),%rcx
1080 cmpq %rcx,RIP(%rsp)
1081 je error_swapgs
1082 movl %ecx,%ecx /* zero extend */
1083 cmpq %rcx,RIP(%rsp)
1084 je error_swapgs
1085 cmpq $gs_change,RIP(%rsp)
1086 je error_swapgs
1087 jmp error_sti
1088 KPROBE_END(error_entry)
1089
1090 /* Reload gs selector with exception handling */
1091 /* edi: new selector */
1092 ENTRY(native_load_gs_index)
1093 CFI_STARTPROC
1094 pushf
1095 CFI_ADJUST_CFA_OFFSET 8
1096 DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
1097 SWAPGS
1098 gs_change:
1099 movl %edi,%gs
1100 2: mfence /* workaround */
1101 SWAPGS
1102 popf
1103 CFI_ADJUST_CFA_OFFSET -8
1104 ret
1105 CFI_ENDPROC
1106 ENDPROC(native_load_gs_index)
1107
1108 .section __ex_table,"a"
1109 .align 8
1110 .quad gs_change,bad_gs
1111 .previous
1112 .section .fixup,"ax"
1113 /* running with kernelgs */
1114 bad_gs:
1115 SWAPGS /* switch back to user gs */
1116 xorl %eax,%eax
1117 movl %eax,%gs
1118 jmp 2b
1119 .previous
1120
1121 /*
1122 * Create a kernel thread.
1123 *
1124 * C extern interface:
1125 * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
1126 *
1127 * asm input arguments:
1128 * rdi: fn, rsi: arg, rdx: flags
1129 */
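
/*
 * Typical in-kernel usage looks roughly like this (a sketch; my_thread_fn is
 * a made-up example and the flags are a common choice of this era):
 *
 *	static int my_thread_fn(void *arg)
 *	{
 *		// runs in the new thread; the return value becomes its exit code
 *		return 0;
 *	}
 *
 *	// ... then, from some initialization path:
 *	long pid = kernel_thread(my_thread_fn, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
 */
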
1130 ENTRY(kernel_thread)
1131 CFI_STARTPROC
1132 FAKE_STACK_FRAME $child_rip
1133 SAVE_ALL
1134
1135 # rdi: flags, rsi: usp, rdx: will be &pt_regs
1136 movq %rdx,%rdi
1137 orq kernel_thread_flags(%rip),%rdi
1138 movq $-1, %rsi
1139 movq %rsp, %rdx
1140
1141 xorl %r8d,%r8d
1142 xorl %r9d,%r9d
1143
1144 # clone now
1145 call do_fork
1146 movq %rax,RAX(%rsp)
1147 xorl %edi,%edi
1148
1149 /*
1150 * It isn't worth checking for a reschedule here,
1151 * so internally to the x86_64 port you can rely on kernel_thread()
1152 * not rescheduling the child before returning; this avoids the need
1153 * for hacks, for example to fork off the per-CPU idle tasks.
1154 * [Hopefully no generic code relies on the reschedule -AK]
1155 */
1156 RESTORE_ALL
1157 UNFAKE_STACK_FRAME
1158 ret
1159 CFI_ENDPROC
1160 ENDPROC(kernel_thread)
1161
1162 child_rip:
1163 pushq $0 # fake return address
1164 CFI_STARTPROC
1165 /*
1166 * Here we are in the child and the registers are set as they were
1167 * at kernel_thread() invocation in the parent.
1168 */
1169 movq %rdi, %rax
1170 movq %rsi, %rdi
1171 call *%rax
1172 # exit
1173 mov %eax, %edi
1174 call do_exit
1175 ud2 # padding for call trace
1176 CFI_ENDPROC
1177 ENDPROC(child_rip)
1178
1179 /*
1180 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
1181 *
1182 * C extern interface:
1183 * extern long execve(char *name, char **argv, char **envp)
1184 *
1185 * asm input arguments:
1186 * rdi: name, rsi: argv, rdx: envp
1187 *
1188 * We want to fall back into:
1189 * extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs *regs)
1190 *
1191 * sys_execve asm fallback arguments:
1192 * rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
1193 */
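
/*
 * A simplified sketch of the sort of call made from the early init path in
 * init/main.c (argument names made up for illustration):
 *
 *	static char *argv[] = { "/sbin/init", NULL };
 *	static char *envp[] = { "HOME=/", "TERM=linux", NULL };
 *
 *	kernel_execve("/sbin/init", argv, envp);
 */
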
1194 ENTRY(kernel_execve)
1195 CFI_STARTPROC
1196 FAKE_STACK_FRAME $0
1197 SAVE_ALL
1198 movq %rsp,%rcx
1199 call sys_execve
1200 movq %rax, RAX(%rsp)
1201 RESTORE_REST
1202 testq %rax,%rax
1203 je int_ret_from_sys_call
1204 RESTORE_ARGS
1205 UNFAKE_STACK_FRAME
1206 ret
1207 CFI_ENDPROC
1208 ENDPROC(kernel_execve)
1209
1210 KPROBE_ENTRY(page_fault)
1211 errorentry do_page_fault
1212 KPROBE_END(page_fault)
1213
1214 ENTRY(coprocessor_error)
1215 zeroentry do_coprocessor_error
1216 END(coprocessor_error)
1217
1218 ENTRY(simd_coprocessor_error)
1219 zeroentry do_simd_coprocessor_error
1220 END(simd_coprocessor_error)
1221
1222 ENTRY(device_not_available)
1223 zeroentry do_device_not_available
1224 END(device_not_available)
1225
1226 /* runs on exception stack */
1227 KPROBE_ENTRY(debug)
1228 INTR_FRAME
1229 PARAVIRT_ADJUST_EXCEPTION_FRAME
1230 pushq $0
1231 CFI_ADJUST_CFA_OFFSET 8
1232 paranoidentry do_debug, DEBUG_STACK
1233 paranoidexit
1234 KPROBE_END(debug)
1235
1236 /* runs on exception stack */
1237 KPROBE_ENTRY(nmi)
1238 INTR_FRAME
1239 PARAVIRT_ADJUST_EXCEPTION_FRAME
1240 pushq $-1
1241 CFI_ADJUST_CFA_OFFSET 8
1242 paranoidentry do_nmi, 0, 0
1243 #ifdef CONFIG_TRACE_IRQFLAGS
1244 paranoidexit 0
1245 #else
1246 jmp paranoid_exit1
1247 CFI_ENDPROC
1248 #endif
1249 KPROBE_END(nmi)
1250
1251 KPROBE_ENTRY(int3)
1252 INTR_FRAME
1253 PARAVIRT_ADJUST_EXCEPTION_FRAME
1254 pushq $0
1255 CFI_ADJUST_CFA_OFFSET 8
1256 paranoidentry do_int3, DEBUG_STACK
1257 jmp paranoid_exit1
1258 CFI_ENDPROC
1259 KPROBE_END(int3)
1260
1261 ENTRY(overflow)
1262 zeroentry do_overflow
1263 END(overflow)
1264
1265 ENTRY(bounds)
1266 zeroentry do_bounds
1267 END(bounds)
1268
1269 ENTRY(invalid_op)
1270 zeroentry do_invalid_op
1271 END(invalid_op)
1272
1273 ENTRY(coprocessor_segment_overrun)
1274 zeroentry do_coprocessor_segment_overrun
1275 END(coprocessor_segment_overrun)
1276
1277 /* runs on exception stack */
1278 ENTRY(double_fault)
1279 XCPT_FRAME
1280 PARAVIRT_ADJUST_EXCEPTION_FRAME
1281 paranoidentry do_double_fault
1282 jmp paranoid_exit1
1283 CFI_ENDPROC
1284 END(double_fault)
1285
1286 ENTRY(invalid_TSS)
1287 errorentry do_invalid_TSS
1288 END(invalid_TSS)
1289
1290 ENTRY(segment_not_present)
1291 errorentry do_segment_not_present
1292 END(segment_not_present)
1293
1294 /* runs on exception stack */
1295 ENTRY(stack_segment)
1296 XCPT_FRAME
1297 PARAVIRT_ADJUST_EXCEPTION_FRAME
1298 paranoidentry do_stack_segment
1299 jmp paranoid_exit1
1300 CFI_ENDPROC
1301 END(stack_segment)
1302
1303 KPROBE_ENTRY(general_protection)
1304 errorentry do_general_protection
1305 KPROBE_END(general_protection)
1306
1307 ENTRY(alignment_check)
1308 errorentry do_alignment_check
1309 END(alignment_check)
1310
1311 ENTRY(divide_error)
1312 zeroentry do_divide_error
1313 END(divide_error)
1314
1315 ENTRY(spurious_interrupt_bug)
1316 zeroentry do_spurious_interrupt_bug
1317 END(spurious_interrupt_bug)
1318
1319 #ifdef CONFIG_X86_MCE
1320 /* runs on exception stack */
1321 ENTRY(machine_check)
1322 INTR_FRAME
1323 PARAVIRT_ADJUST_EXCEPTION_FRAME
1324 pushq $0
1325 CFI_ADJUST_CFA_OFFSET 8
1326 paranoidentry do_machine_check
1327 jmp paranoid_exit1
1328 CFI_ENDPROC
1329 END(machine_check)
1330 #endif
1331
1332 /* Call softirq on interrupt stack. Interrupts are off. */
1333 ENTRY(call_softirq)
1334 CFI_STARTPROC
1335 push %rbp
1336 CFI_ADJUST_CFA_OFFSET 8
1337 CFI_REL_OFFSET rbp,0
1338 mov %rsp,%rbp
1339 CFI_DEF_CFA_REGISTER rbp
1340 incl %gs:pda_irqcount
1341 cmove %gs:pda_irqstackptr,%rsp
1342 push %rbp # backlink for old unwinder
1343 call __do_softirq
1344 leaveq
1345 CFI_DEF_CFA_REGISTER rsp
1346 CFI_ADJUST_CFA_OFFSET -8
1347 decl %gs:pda_irqcount
1348 ret
1349 CFI_ENDPROC
1350 ENDPROC(call_softirq)
1351
1352 KPROBE_ENTRY(ignore_sysret)
1353 CFI_STARTPROC
1354 mov $-ENOSYS,%eax
1355 sysret
1356 CFI_ENDPROC
1357 ENDPROC(ignore_sysret)
1358
1359 #ifdef CONFIG_XEN
1360 ENTRY(xen_hypervisor_callback)
1361 zeroentry xen_do_hypervisor_callback
1362 END(xen_hypervisor_callback)
1363
1364 /*
1365 # A note on the "critical region" in our callback handler.
1366 # We want to avoid stacking callback handlers due to events occurring
1367 # during handling of the last event. To do this, we keep events disabled
1368 # until we've done all processing. HOWEVER, we must enable events before
1369 # popping the stack frame (can't be done atomically) and so it would still
1370 # be possible to get enough handler activations to overflow the stack.
1371 # Although unlikely, bugs of that kind are hard to track down, so we'd
1372 # like to avoid the possibility.
1373 # So, on entry to the handler we detect whether we interrupted an
1374 # existing activation in its critical region -- if so, we pop the current
1375 # activation and restart the handler using the previous one.
1376 */
1377 ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct pt_regs *)
1378 CFI_STARTPROC
1379 /* Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will
1380 see the correct pointer to the pt_regs */
1381 movq %rdi, %rsp # we don't return, adjust the stack frame
1382 CFI_ENDPROC
1383 CFI_DEFAULT_STACK
1384 11: incl %gs:pda_irqcount
1385 movq %rsp,%rbp
1386 CFI_DEF_CFA_REGISTER rbp
1387 cmovzq %gs:pda_irqstackptr,%rsp
1388 pushq %rbp # backlink for old unwinder
1389 call xen_evtchn_do_upcall
1390 popq %rsp
1391 CFI_DEF_CFA_REGISTER rsp
1392 decl %gs:pda_irqcount
1393 jmp error_exit
1394 CFI_ENDPROC
1395 END(do_hypervisor_callback)
1396
1397 /*
1398 # Hypervisor uses this for application faults while it executes.
1399 # We get here for two reasons:
1400 # 1. Fault while reloading DS, ES, FS or GS
1401 # 2. Fault while executing IRET
1402 # Category 1 we do not need to fix up as Xen has already reloaded all segment
1403 # registers that could be reloaded and zeroed the others.
1404 # Category 2 we fix up by killing the current process. We cannot use the
1405 # normal Linux return path in this case because if we use the IRET hypercall
1406 # to pop the stack frame we end up in an infinite loop of failsafe callbacks.
1407 # We distinguish between categories by comparing each saved segment register
1408 # with its current contents: any discrepancy means we are in category 1.
1409 */
1410 ENTRY(xen_failsafe_callback)
1411 framesz = (RIP-0x30) /* workaround buggy gas */
1412 _frame framesz
1413 CFI_REL_OFFSET rcx, 0
1414 CFI_REL_OFFSET r11, 8
1415 movw %ds,%cx
1416 cmpw %cx,0x10(%rsp)
1417 CFI_REMEMBER_STATE
1418 jne 1f
1419 movw %es,%cx
1420 cmpw %cx,0x18(%rsp)
1421 jne 1f
1422 movw %fs,%cx
1423 cmpw %cx,0x20(%rsp)
1424 jne 1f
1425 movw %gs,%cx
1426 cmpw %cx,0x28(%rsp)
1427 jne 1f
1428 /* All segments match their saved values => Category 2 (Bad IRET). */
1429 movq (%rsp),%rcx
1430 CFI_RESTORE rcx
1431 movq 8(%rsp),%r11
1432 CFI_RESTORE r11
1433 addq $0x30,%rsp
1434 CFI_ADJUST_CFA_OFFSET -0x30
1435 pushq $0
1436 CFI_ADJUST_CFA_OFFSET 8
1437 pushq %r11
1438 CFI_ADJUST_CFA_OFFSET 8
1439 pushq %rcx
1440 CFI_ADJUST_CFA_OFFSET 8
1441 jmp general_protection
1442 CFI_RESTORE_STATE
1443 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
1444 movq (%rsp),%rcx
1445 CFI_RESTORE rcx
1446 movq 8(%rsp),%r11
1447 CFI_RESTORE r11
1448 addq $0x30,%rsp
1449 CFI_ADJUST_CFA_OFFSET -0x30
1450 pushq $0
1451 CFI_ADJUST_CFA_OFFSET 8
1452 SAVE_ALL
1453 jmp error_exit
1454 CFI_ENDPROC
1455 END(xen_failsafe_callback)
1456
1457 #endif /* CONFIG_XEN */