1 /*
2 * linux/arch/x86_64/entry.S
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
6 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
7 */
8
9 /*
10 * entry.S contains the system-call and fault low-level handling routines.
11 *
12 * NOTE: This code handles signal-recognition, which happens every time
13 * after an interrupt and after each system call.
14 *
15 * Normal syscalls and interrupts don't save a full stack frame; this is
16 * only done for syscall tracing, signals, or fork/exec et al.
17 *
18 * A note on terminology:
19 * - top of stack: Architecture defined interrupt frame from SS to RIP
20 * at the top of the kernel process stack.
21 * - partial stack frame: partially saved registers up to R11.
22 * - full stack frame: Like the partial stack frame, but with all registers saved.
23 *
24 * Some macro usage:
25 * - CFI macros are used to generate dwarf2 unwind information for better
26 * backtraces. They don't change any code.
27 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
28 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
29 * There are unfortunately lots of special cases where some registers are
30 * not touched. The macro is a big mess that should be cleaned up.
31 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
32 * Gives a full stack frame.
33 * - ENTRY/END Define functions in the symbol table.
34 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
35 * frame that is otherwise undefined after a SYSCALL
36 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
37 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
38 */
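
/*
 * Illustrative sketch (not used by the build): the architecture-defined
 * interrupt frame referred to above as "top of stack", as the CPU leaves
 * it on the kernel stack (RIP ends up at the lowest address):
 *
 *	SS	saved stack segment
 *	RSP	saved user stack pointer
 *	EFLAGS	saved flags
 *	CS	saved code segment
 *	RIP	saved instruction pointer	<-- top of the kernel stack
 *
 * A "partial stack frame" additionally holds the call-clobbered registers
 * saved by SAVE_ARGS (rdi, rsi, rdx, rcx, rax, r8-r11); a "full stack
 * frame" also holds the registers saved by SAVE_REST (rbx, rbp, r12-r15).
 */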
39
40 #include <linux/linkage.h>
41 #include <asm/segment.h>
42 #include <asm/cache.h>
43 #include <asm/errno.h>
44 #include <asm/dwarf2.h>
45 #include <asm/calling.h>
46 #include <asm/asm-offsets.h>
47 #include <asm/msr.h>
48 #include <asm/unistd.h>
49 #include <asm/thread_info.h>
50 #include <asm/hw_irq.h>
51 #include <asm/page.h>
52 #include <asm/irqflags.h>
53 #include <asm/paravirt.h>
54 #include <asm/ftrace.h>
55
56 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
57 #include <linux/elf-em.h>
58 #define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
59 #define __AUDIT_ARCH_64BIT 0x80000000
60 #define __AUDIT_ARCH_LE 0x40000000
61
62 .code64
63 /*
64 * Some macros to hide the most frequently occurring CFI annotations.
65 */
66 .macro pushq_cfi reg
67 pushq \reg
68 CFI_ADJUST_CFA_OFFSET 8
69 .endm
70
71 .macro popq_cfi reg
72 popq \reg
73 CFI_ADJUST_CFA_OFFSET -8
74 .endm
75
76 .macro movq_cfi reg offset=0
77 movq %\reg, \offset(%rsp)
78 CFI_REL_OFFSET \reg, \offset
79 .endm
80
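/*
 * Hedged usage sketch (guarded out, never assembled): how the helpers
 * above keep the DWARF annotations in step with manual pushes and pops.
 * The symbol and callee names are hypothetical.
 */
#if 0
ENTRY(cfi_example)
	CFI_STARTPROC
	pushq_cfi %rbx			/* pushq + CFI_ADJUST_CFA_OFFSET 8 */
	CFI_REL_OFFSET rbx, 0		/* record where rbx was saved */
	call some_c_helper		/* hypothetical callee */
	popq_cfi %rbx			/* popq + CFI_ADJUST_CFA_OFFSET -8 */
	CFI_RESTORE rbx
	ret
	CFI_ENDPROC
END(cfi_example)
#endif
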
81 #ifdef CONFIG_FUNCTION_TRACER
82 #ifdef CONFIG_DYNAMIC_FTRACE
83 ENTRY(mcount)
84 retq
85 END(mcount)
86
87 ENTRY(ftrace_caller)
88
89 /* taken from glibc */
90 subq $0x38, %rsp
91 movq %rax, (%rsp)
92 movq %rcx, 8(%rsp)
93 movq %rdx, 16(%rsp)
94 movq %rsi, 24(%rsp)
95 movq %rdi, 32(%rsp)
96 movq %r8, 40(%rsp)
97 movq %r9, 48(%rsp)
98
99 movq 0x38(%rsp), %rdi
100 movq 8(%rbp), %rsi
101 subq $MCOUNT_INSN_SIZE, %rdi
102
103 .globl ftrace_call
104 ftrace_call:
105 call ftrace_stub
106
107 movq 48(%rsp), %r9
108 movq 40(%rsp), %r8
109 movq 32(%rsp), %rdi
110 movq 24(%rsp), %rsi
111 movq 16(%rsp), %rdx
112 movq 8(%rsp), %rcx
113 movq (%rsp), %rax
114 addq $0x38, %rsp
115
116 .globl ftrace_stub
117 ftrace_stub:
118 retq
119 END(ftrace_caller)
120
121 #else /* ! CONFIG_DYNAMIC_FTRACE */
122 ENTRY(mcount)
123 cmpq $ftrace_stub, ftrace_trace_function
124 jnz trace
125 .globl ftrace_stub
126 ftrace_stub:
127 retq
128
129 trace:
130 /* taken from glibc */
131 subq $0x38, %rsp
132 movq %rax, (%rsp)
133 movq %rcx, 8(%rsp)
134 movq %rdx, 16(%rsp)
135 movq %rsi, 24(%rsp)
136 movq %rdi, 32(%rsp)
137 movq %r8, 40(%rsp)
138 movq %r9, 48(%rsp)
139
140 movq 0x38(%rsp), %rdi
141 movq 8(%rbp), %rsi
142 subq $MCOUNT_INSN_SIZE, %rdi
143
144 call *ftrace_trace_function
145
146 movq 48(%rsp), %r9
147 movq 40(%rsp), %r8
148 movq 32(%rsp), %rdi
149 movq 24(%rsp), %rsi
150 movq 16(%rsp), %rdx
151 movq 8(%rsp), %rcx
152 movq (%rsp), %rax
153 addq $0x38, %rsp
154
155 jmp ftrace_stub
156 END(mcount)
157 #endif /* CONFIG_DYNAMIC_FTRACE */
158 #endif /* CONFIG_FUNCTION_TRACER */
159
160 #ifndef CONFIG_PREEMPT
161 #define retint_kernel retint_restore_args
162 #endif
163
164 #ifdef CONFIG_PARAVIRT
165 ENTRY(native_usergs_sysret64)
166 swapgs
167 sysretq
168 #endif /* CONFIG_PARAVIRT */
169
170
171 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
172 #ifdef CONFIG_TRACE_IRQFLAGS
173 bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
174 jnc 1f
175 TRACE_IRQS_ON
176 1:
177 #endif
178 .endm
179
180 /*
181 * C code is not supposed to know about the undefined top of stack. Every time
182 * a C function with a pt_regs argument is called from the SYSCALL-based
183 * fast path, FIXUP_TOP_OF_STACK is needed.
184 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
185 * manipulation.
186 */
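
/*
 * Descriptive note: on the SYSCALL fast path the SS, RSP, EFLAGS and CS
 * slots of pt_regs are never written (the user RSP is kept in
 * %gs:pda_oldrsp and the user EFLAGS sit in the saved %r11), and the RCX
 * slot holds the return RIP rather than a meaningful %rcx value.
 * FIXUP_TOP_OF_STACK below fills these in so C code sees a complete
 * frame; RESTORE_TOP_OF_STACK copies a possibly modified RSP and EFLAGS
 * back out before the fast return path uses them.
 */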
187
188 /* %rsp:at FRAMEEND */
189 .macro FIXUP_TOP_OF_STACK tmp
190 movq %gs:pda_oldrsp,\tmp
191 movq \tmp,RSP(%rsp)
192 movq $__USER_DS,SS(%rsp)
193 movq $__USER_CS,CS(%rsp)
194 movq $-1,RCX(%rsp)
195 movq R11(%rsp),\tmp /* get eflags */
196 movq \tmp,EFLAGS(%rsp)
197 .endm
198
199 .macro RESTORE_TOP_OF_STACK tmp,offset=0
200 movq RSP-\offset(%rsp),\tmp
201 movq \tmp,%gs:pda_oldrsp
202 movq EFLAGS-\offset(%rsp),\tmp
203 movq \tmp,R11-\offset(%rsp)
204 .endm
205
206 .macro FAKE_STACK_FRAME child_rip
207 /* push in order ss, rsp, eflags, cs, rip */
208 xorl %eax, %eax
209 pushq $__KERNEL_DS /* ss */
210 CFI_ADJUST_CFA_OFFSET 8
211 /*CFI_REL_OFFSET ss,0*/
212 pushq %rax /* rsp */
213 CFI_ADJUST_CFA_OFFSET 8
214 CFI_REL_OFFSET rsp,0
215 pushq $(1<<9) /* eflags - interrupts on */
216 CFI_ADJUST_CFA_OFFSET 8
217 /*CFI_REL_OFFSET rflags,0*/
218 pushq $__KERNEL_CS /* cs */
219 CFI_ADJUST_CFA_OFFSET 8
220 /*CFI_REL_OFFSET cs,0*/
221 pushq \child_rip /* rip */
222 CFI_ADJUST_CFA_OFFSET 8
223 CFI_REL_OFFSET rip,0
224 pushq %rax /* orig rax */
225 CFI_ADJUST_CFA_OFFSET 8
226 .endm
227
228 .macro UNFAKE_STACK_FRAME
229 addq $8*6, %rsp
230 CFI_ADJUST_CFA_OFFSET -(6*8)
231 .endm
232
233 /*
234 * initial CFI state for a bare frame (also the basis of INTR_FRAME below)
235 */
236 .macro EMPTY_FRAME start=1 offset=0
237 .if \start
238 CFI_STARTPROC simple
239 CFI_SIGNAL_FRAME
240 CFI_DEF_CFA rsp,8+\offset
241 .else
242 CFI_DEF_CFA_OFFSET 8+\offset
243 .endif
244 .endm
245
246 /*
247 * initial frame state for interrupts (and exceptions without error code)
248 */
249 .macro INTR_FRAME start=1 offset=0
250 EMPTY_FRAME \start, SS+8+\offset-RIP
251 /*CFI_REL_OFFSET ss, SS+\offset-RIP*/
252 CFI_REL_OFFSET rsp, RSP+\offset-RIP
253 /*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/
254 /*CFI_REL_OFFSET cs, CS+\offset-RIP*/
255 CFI_REL_OFFSET rip, RIP+\offset-RIP
256 .endm
257
258 /*
259 * initial frame state for exceptions with error code (and interrupts
260 * with vector already pushed)
261 */
262 .macro XCPT_FRAME start=1 offset=0
263 INTR_FRAME \start, RIP+\offset-ORIG_RAX
264 /*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/
265 .endm
266
267 /*
268 * frame that enables calling into C.
269 */
270 .macro PARTIAL_FRAME start=1 offset=0
271 XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET
272 CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET
273 CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET
274 CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET
275 CFI_REL_OFFSET rcx, RCX+\offset-ARGOFFSET
276 CFI_REL_OFFSET rax, RAX+\offset-ARGOFFSET
277 CFI_REL_OFFSET r8, R8+\offset-ARGOFFSET
278 CFI_REL_OFFSET r9, R9+\offset-ARGOFFSET
279 CFI_REL_OFFSET r10, R10+\offset-ARGOFFSET
280 CFI_REL_OFFSET r11, R11+\offset-ARGOFFSET
281 .endm
282
283 /*
284 * frame that enables passing a complete pt_regs to a C function.
285 */
286 .macro DEFAULT_FRAME start=1 offset=0
287 PARTIAL_FRAME \start, R11+\offset-R15
288 CFI_REL_OFFSET rbx, RBX+\offset
289 CFI_REL_OFFSET rbp, RBP+\offset
290 CFI_REL_OFFSET r12, R12+\offset
291 CFI_REL_OFFSET r13, R13+\offset
292 CFI_REL_OFFSET r14, R14+\offset
293 CFI_REL_OFFSET r15, R15+\offset
294 .endm
295
296 /* save partial stack frame */
297 ENTRY(save_args)
298 XCPT_FRAME
299 cld
300 movq_cfi rdi, RDI+16-ARGOFFSET
301 movq_cfi rsi, RSI+16-ARGOFFSET
302 movq_cfi rdx, RDX+16-ARGOFFSET
303 movq_cfi rcx, RCX+16-ARGOFFSET
304 movq_cfi rax, RAX+16-ARGOFFSET
305 movq_cfi r8, R8+16-ARGOFFSET
306 movq_cfi r9, R9+16-ARGOFFSET
307 movq_cfi r10, R10+16-ARGOFFSET
308 movq_cfi r11, R11+16-ARGOFFSET
309
310 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
311 movq_cfi rbp, 8 /* push %rbp */
312 leaq 8(%rsp), %rbp /* mov %rsp, %rbp */
313 testl $3, CS(%rdi)
314 je 1f
315 SWAPGS
316 /*
317 * irqcount is used to check if a CPU is already on an interrupt stack
318 * or not. While this is essentially redundant with preempt_count it is
319 * a little cheaper to use a separate counter in the PDA (short of
320 * moving irq_enter into assembly, which would be too much work)
321 */
322 1: incl %gs:pda_irqcount
323 jne 2f
324 popq_cfi %rax /* move return address... */
325 mov %gs:pda_irqstackptr,%rsp
326 EMPTY_FRAME 0
327 pushq_cfi %rax /* ... to the new stack */
328 /*
329 * We entered an interrupt context - irqs are off:
330 */
331 2: TRACE_IRQS_OFF
332 ret
333 CFI_ENDPROC
334 END(save_args)
335
336 /*
337 * A newly forked process directly context switches into this.
338 */
339 /* rdi: prev */
340 ENTRY(ret_from_fork)
341 DEFAULT_FRAME
342 push kernel_eflags(%rip)
343 CFI_ADJUST_CFA_OFFSET 8
344 popf # reset kernel eflags
345 CFI_ADJUST_CFA_OFFSET -8
346 call schedule_tail
347 GET_THREAD_INFO(%rcx)
348 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
349 jnz rff_trace
350 rff_action:
351 RESTORE_REST
352 testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread?
353 je int_ret_from_sys_call
354 testl $_TIF_IA32,TI_flags(%rcx)
355 jnz int_ret_from_sys_call
356 RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
357 jmp ret_from_sys_call
358 rff_trace:
359 movq %rsp,%rdi
360 call syscall_trace_leave
361 GET_THREAD_INFO(%rcx)
362 jmp rff_action
363 CFI_ENDPROC
364 END(ret_from_fork)
365
366 /*
367 * System call entry. Up to 6 arguments in registers are supported.
368 *
369 * SYSCALL does not save anything on the stack and does not change the
370 * stack pointer.
371 */
372
373 /*
374 * Register setup:
375 * rax system call number
376 * rdi arg0
377 * rcx return address for syscall/sysret, C arg3
378 * rsi arg1
379 * rdx arg2
380 * r10 arg3 (--> moved to rcx for C)
381 * r8 arg4
382 * r9 arg5
383 * r11 eflags for syscall/sysret, temporary for C
384 * r12-r15,rbp,rbx saved by C code, not touched.
385 *
386 * Interrupts are off on entry.
387 * Only called from user space.
388 *
389 * XXX if we had a free scratch register we could save the RSP into the stack frame
390 * and report it properly in ps. Unfortunately we don't have one.
391 *
392 * When the user can change the frame, always force IRET. That is because
393 * IRET deals with non-canonical addresses better. SYSRET has trouble
394 * with them due to bugs in both AMD and Intel CPUs.
395 */
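
/*
 * Hedged, standalone illustration of the user-side register convention
 * documented above; it is guarded out and plays no part in this file.
 * Assembled on its own it writes "hello" and exits; the syscall numbers
 * 1 (write) and 60 (exit) are the x86-64 ABI values.
 */
#if 0
	.globl _start
_start:
	movl	$1, %eax		/* rax: syscall number (write) */
	movl	$1, %edi		/* rdi: arg0 = fd (stdout) */
	leaq	msg(%rip), %rsi		/* rsi: arg1 = buffer */
	movl	$6, %edx		/* rdx: arg2 = count */
	syscall				/* clobbers rcx (RIP) and r11 (EFLAGS) */
	movl	$60, %eax		/* rax: syscall number (exit) */
	xorl	%edi, %edi		/* rdi: arg0 = status 0 */
	syscall
msg:	.ascii	"hello\n"
#endif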
396
397 ENTRY(system_call)
398 CFI_STARTPROC simple
399 CFI_SIGNAL_FRAME
400 CFI_DEF_CFA rsp,PDA_STACKOFFSET
401 CFI_REGISTER rip,rcx
402 /*CFI_REGISTER rflags,r11*/
403 SWAPGS_UNSAFE_STACK
404 /*
405 * A hypervisor implementation might want to use a label
406 * after the swapgs, so that it can do the swapgs
407 * for the guest and jump here on syscall.
408 */
409 ENTRY(system_call_after_swapgs)
410
411 movq %rsp,%gs:pda_oldrsp
412 movq %gs:pda_kernelstack,%rsp
413 /*
414 * No need to follow this irqs off/on section - it's straight
415 * and short:
416 */
417 ENABLE_INTERRUPTS(CLBR_NONE)
418 SAVE_ARGS 8,1
419 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
420 movq %rcx,RIP-ARGOFFSET(%rsp)
421 CFI_REL_OFFSET rip,RIP-ARGOFFSET
422 GET_THREAD_INFO(%rcx)
423 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
424 jnz tracesys
425 system_call_fastpath:
426 cmpq $__NR_syscall_max,%rax
427 ja badsys
428 movq %r10,%rcx
429 call *sys_call_table(,%rax,8) # XXX: rip relative
430 movq %rax,RAX-ARGOFFSET(%rsp)
431 /*
432 * Syscall return path ending with SYSRET (fast path)
433 * Has incomplete stack frame and undefined top of stack.
434 */
435 ret_from_sys_call:
436 movl $_TIF_ALLWORK_MASK,%edi
437 /* edi: flagmask */
438 sysret_check:
439 LOCKDEP_SYS_EXIT
440 GET_THREAD_INFO(%rcx)
441 DISABLE_INTERRUPTS(CLBR_NONE)
442 TRACE_IRQS_OFF
443 movl TI_flags(%rcx),%edx
444 andl %edi,%edx
445 jnz sysret_careful
446 CFI_REMEMBER_STATE
447 /*
448 * sysretq will re-enable interrupts:
449 */
450 TRACE_IRQS_ON
451 movq RIP-ARGOFFSET(%rsp),%rcx
452 CFI_REGISTER rip,rcx
453 RESTORE_ARGS 0,-ARG_SKIP,1
454 /*CFI_REGISTER rflags,r11*/
455 movq %gs:pda_oldrsp, %rsp
456 USERGS_SYSRET64
457
458 CFI_RESTORE_STATE
459 /* Handle reschedules */
460 /* edx: work, edi: workmask */
461 sysret_careful:
462 bt $TIF_NEED_RESCHED,%edx
463 jnc sysret_signal
464 TRACE_IRQS_ON
465 ENABLE_INTERRUPTS(CLBR_NONE)
466 pushq %rdi
467 CFI_ADJUST_CFA_OFFSET 8
468 call schedule
469 popq %rdi
470 CFI_ADJUST_CFA_OFFSET -8
471 jmp sysret_check
472
473 /* Handle a signal */
474 sysret_signal:
475 TRACE_IRQS_ON
476 ENABLE_INTERRUPTS(CLBR_NONE)
477 #ifdef CONFIG_AUDITSYSCALL
478 bt $TIF_SYSCALL_AUDIT,%edx
479 jc sysret_audit
480 #endif
481 /* edx: work flags (arg3) */
482 leaq do_notify_resume(%rip),%rax
483 leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
484 xorl %esi,%esi # oldset -> arg2
485 call ptregscall_common
486 movl $_TIF_WORK_MASK,%edi
487 /* Use IRET because the user could have changed the frame. This
488 works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
489 DISABLE_INTERRUPTS(CLBR_NONE)
490 TRACE_IRQS_OFF
491 jmp int_with_check
492
493 badsys:
494 movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
495 jmp ret_from_sys_call
496
497 #ifdef CONFIG_AUDITSYSCALL
498 /*
499 * Fast path for syscall audit without full syscall trace.
500 * We just call audit_syscall_entry() directly, and then
501 * jump back to the normal fast path.
502 */
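/*
 * For reference (hedged -- see linux/audit.h for the authoritative
 * prototype): the register shuffling below lines the arguments up for
 * roughly
 *	audit_syscall_entry(int arch, int major,
 *			    unsigned long a0, a1, a2, a3)
 * i.e. the audit arch in %rdi, the syscall number in %rsi, and the
 * first four syscall arguments in %rdx, %rcx, %r8 and %r9.
 */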
503 auditsys:
504 movq %r10,%r9 /* 6th arg: 4th syscall arg */
505 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
506 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
507 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
508 movq %rax,%rsi /* 2nd arg: syscall number */
509 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
510 call audit_syscall_entry
511 LOAD_ARGS 0 /* reload call-clobbered registers */
512 jmp system_call_fastpath
513
514 /*
515 * Return fast path for syscall audit. Call audit_syscall_exit()
516 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
517 * masked off.
518 */
519 sysret_audit:
520 movq %rax,%rsi /* second arg, syscall return value */
521 cmpq $0,%rax /* is it < 0? */
522 setl %al /* 1 if so, 0 if not */
523 movzbl %al,%edi /* zero-extend that into %edi */
524 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
525 call audit_syscall_exit
526 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
527 jmp sysret_check
528 #endif /* CONFIG_AUDITSYSCALL */
529
530 /* Do syscall tracing */
531 tracesys:
532 #ifdef CONFIG_AUDITSYSCALL
533 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
534 jz auditsys
535 #endif
536 SAVE_REST
537 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
538 FIXUP_TOP_OF_STACK %rdi
539 movq %rsp,%rdi
540 call syscall_trace_enter
541 /*
542 * Reload arg registers from stack in case ptrace changed them.
543 * We don't reload %rax because syscall_trace_enter() returned
544 * the value it wants us to use in the table lookup.
545 */
546 LOAD_ARGS ARGOFFSET, 1
547 RESTORE_REST
548 cmpq $__NR_syscall_max,%rax
549 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
550 movq %r10,%rcx /* fixup for C */
551 call *sys_call_table(,%rax,8)
552 movq %rax,RAX-ARGOFFSET(%rsp)
553 /* Use IRET because the user could have changed the frame */
554
555 /*
556 * Syscall return path ending with IRET.
557 * Has correct top of stack, but partial stack frame.
558 */
559 .globl int_ret_from_sys_call
560 .globl int_with_check
561 int_ret_from_sys_call:
562 DISABLE_INTERRUPTS(CLBR_NONE)
563 TRACE_IRQS_OFF
564 testl $3,CS-ARGOFFSET(%rsp)
565 je retint_restore_args
566 movl $_TIF_ALLWORK_MASK,%edi
567 /* edi: mask to check */
568 int_with_check:
569 LOCKDEP_SYS_EXIT_IRQ
570 GET_THREAD_INFO(%rcx)
571 movl TI_flags(%rcx),%edx
572 andl %edi,%edx
573 jnz int_careful
574 andl $~TS_COMPAT,TI_status(%rcx)
575 jmp retint_swapgs
576
577 /* Either reschedule or signal or syscall exit tracking needed. */
578 /* First do a reschedule test. */
579 /* edx: work, edi: workmask */
580 int_careful:
581 bt $TIF_NEED_RESCHED,%edx
582 jnc int_very_careful
583 TRACE_IRQS_ON
584 ENABLE_INTERRUPTS(CLBR_NONE)
585 pushq %rdi
586 CFI_ADJUST_CFA_OFFSET 8
587 call schedule
588 popq %rdi
589 CFI_ADJUST_CFA_OFFSET -8
590 DISABLE_INTERRUPTS(CLBR_NONE)
591 TRACE_IRQS_OFF
592 jmp int_with_check
593
594 /* handle signals and tracing -- both require a full stack frame */
595 int_very_careful:
596 TRACE_IRQS_ON
597 ENABLE_INTERRUPTS(CLBR_NONE)
598 SAVE_REST
599 /* Check for syscall exit trace */
600 testl $_TIF_WORK_SYSCALL_EXIT,%edx
601 jz int_signal
602 pushq %rdi
603 CFI_ADJUST_CFA_OFFSET 8
604 leaq 8(%rsp),%rdi # &ptregs -> arg1
605 call syscall_trace_leave
606 popq %rdi
607 CFI_ADJUST_CFA_OFFSET -8
608 andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
609 jmp int_restore_rest
610
611 int_signal:
612 testl $_TIF_DO_NOTIFY_MASK,%edx
613 jz 1f
614 movq %rsp,%rdi # &ptregs -> arg1
615 xorl %esi,%esi # oldset -> arg2
616 call do_notify_resume
617 1: movl $_TIF_WORK_MASK,%edi
618 int_restore_rest:
619 RESTORE_REST
620 DISABLE_INTERRUPTS(CLBR_NONE)
621 TRACE_IRQS_OFF
622 jmp int_with_check
623 CFI_ENDPROC
624 END(system_call)
625
626 /*
627 * Certain special system calls need to save a complete, full stack frame.
628 */
629
630 .macro PTREGSCALL label,func,arg
631 .globl \label
632 \label:
633 leaq \func(%rip),%rax
634 leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
635 jmp ptregscall_common
636 END(\label)
637 .endm
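
/*
 * Illustrative expansion (descriptive note): "PTREGSCALL stub_clone,
 * sys_clone, %r8" below emits a stub_clone that loads %rax with the
 * address of sys_clone, points %r8 (the register sys_clone takes its
 * struct pt_regs * in) at the frame, and jumps to ptregscall_common,
 * which builds the full frame, calls *%rax and returns through the
 * saved return address.
 */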
638
639 CFI_STARTPROC
640
641 PTREGSCALL stub_clone, sys_clone, %r8
642 PTREGSCALL stub_fork, sys_fork, %rdi
643 PTREGSCALL stub_vfork, sys_vfork, %rdi
644 PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
645 PTREGSCALL stub_iopl, sys_iopl, %rsi
646
647 ENTRY(ptregscall_common)
648 popq %r11
649 CFI_ADJUST_CFA_OFFSET -8
650 CFI_REGISTER rip, r11
651 SAVE_REST
652 movq %r11, %r15
653 CFI_REGISTER rip, r15
654 FIXUP_TOP_OF_STACK %r11
655 call *%rax
656 RESTORE_TOP_OF_STACK %r11
657 movq %r15, %r11
658 CFI_REGISTER rip, r11
659 RESTORE_REST
660 pushq %r11
661 CFI_ADJUST_CFA_OFFSET 8
662 CFI_REL_OFFSET rip, 0
663 ret
664 CFI_ENDPROC
665 END(ptregscall_common)
666
667 ENTRY(stub_execve)
668 CFI_STARTPROC
669 popq %r11
670 CFI_ADJUST_CFA_OFFSET -8
671 CFI_REGISTER rip, r11
672 SAVE_REST
673 FIXUP_TOP_OF_STACK %r11
674 movq %rsp, %rcx
675 call sys_execve
676 RESTORE_TOP_OF_STACK %r11
677 movq %rax,RAX(%rsp)
678 RESTORE_REST
679 jmp int_ret_from_sys_call
680 CFI_ENDPROC
681 END(stub_execve)
682
683 /*
684 * sigreturn is special because it needs to restore all registers on return.
685 * This cannot be done with SYSRET, so use the IRET return path instead.
686 */
687 ENTRY(stub_rt_sigreturn)
688 CFI_STARTPROC
689 addq $8, %rsp
690 CFI_ADJUST_CFA_OFFSET -8
691 SAVE_REST
692 movq %rsp,%rdi
693 FIXUP_TOP_OF_STACK %r11
694 call sys_rt_sigreturn
695 movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
696 RESTORE_REST
697 jmp int_ret_from_sys_call
698 CFI_ENDPROC
699 END(stub_rt_sigreturn)
700
701 /*
702 * Build the entry stubs and pointer table with some assembler magic.
703 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
704 * single cache line on all modern x86 implementations.
705 */
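
/*
 * Worked example (assuming FIRST_EXTERNAL_VECTOR is 0x20): the first
 * stub expands to roughly
 *
 *	1:	pushq $(~0x20+0x80)	# i.e. $0x5f, still a signed byte
 *		jmp 2f			# omitted for the 7th stub of a chunk
 *	...
 *	2:	jmp common_interrupt
 *
 * while .init.rodata collects one ".quad 1b" per stub into the
 * interrupt[] table. common_interrupt later adds -0x80, turning the
 * pushed value back into -(vector+1), i.e. into the [-256,-1] range.
 */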
706 .section .init.rodata,"a"
707 ENTRY(interrupt)
708 .text
709 .p2align 5
710 .p2align CONFIG_X86_L1_CACHE_SHIFT
711 ENTRY(irq_entries_start)
712 INTR_FRAME
713 vector=FIRST_EXTERNAL_VECTOR
714 .rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
715 .balign 32
716 .rept 7
717 .if vector < NR_VECTORS
718 .if vector <> FIRST_EXTERNAL_VECTOR
719 CFI_ADJUST_CFA_OFFSET -8
720 .endif
721 1: pushq $(~vector+0x80) /* Note: always in signed byte range */
722 CFI_ADJUST_CFA_OFFSET 8
723 .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
724 jmp 2f
725 .endif
726 .previous
727 .quad 1b
728 .text
729 vector=vector+1
730 .endif
731 .endr
732 2: jmp common_interrupt
733 .endr
734 CFI_ENDPROC
735 END(irq_entries_start)
736
737 .previous
738 END(interrupt)
739 .previous
740
741 /*
742 * Interrupt entry/exit.
743 *
744 * Interrupt entry points save only the callee-clobbered registers in the fast path.
745 *
746 * Entry runs with interrupts off.
747 */
748
749 /* 0(%rsp): ~(interrupt number) */
750 .macro interrupt func
751 subq $10*8, %rsp
752 CFI_ADJUST_CFA_OFFSET 10*8
753 call save_args
754 PARTIAL_FRAME 0
755 call \func
756 .endm
757
758 /*
759 * The interrupt stubs push (~vector+0x80) onto the stack and
760 * then jump to common_interrupt.
761 */
762 .p2align CONFIG_X86_L1_CACHE_SHIFT
763 common_interrupt:
764 XCPT_FRAME
765 addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */
766 interrupt do_IRQ
767 /* 0(%rsp): oldrsp-ARGOFFSET */
768 ret_from_intr:
769 DISABLE_INTERRUPTS(CLBR_NONE)
770 TRACE_IRQS_OFF
771 decl %gs:pda_irqcount
772 leaveq
773 CFI_DEF_CFA_REGISTER rsp
774 CFI_ADJUST_CFA_OFFSET -8
775 exit_intr:
776 GET_THREAD_INFO(%rcx)
777 testl $3,CS-ARGOFFSET(%rsp)
778 je retint_kernel
779
780 /* Interrupt came from user space */
781 /*
782 * Has a correct top of stack, but a partial stack frame
783 * %rcx: thread info. Interrupts off.
784 */
785 retint_with_reschedule:
786 movl $_TIF_WORK_MASK,%edi
787 retint_check:
788 LOCKDEP_SYS_EXIT_IRQ
789 movl TI_flags(%rcx),%edx
790 andl %edi,%edx
791 CFI_REMEMBER_STATE
792 jnz retint_careful
793
794 retint_swapgs: /* return to user-space */
795 /*
796 * The iretq could re-enable interrupts:
797 */
798 DISABLE_INTERRUPTS(CLBR_ANY)
799 TRACE_IRQS_IRETQ
800 SWAPGS
801 jmp restore_args
802
803 retint_restore_args: /* return to kernel space */
804 DISABLE_INTERRUPTS(CLBR_ANY)
805 /*
806 * The iretq could re-enable interrupts:
807 */
808 TRACE_IRQS_IRETQ
809 restore_args:
810 RESTORE_ARGS 0,8,0
811
812 irq_return:
813 INTERRUPT_RETURN
814
815 .section __ex_table, "a"
816 .quad irq_return, bad_iret
817 .previous
818
819 #ifdef CONFIG_PARAVIRT
820 ENTRY(native_iret)
821 iretq
822
823 .section __ex_table,"a"
824 .quad native_iret, bad_iret
825 .previous
826 #endif
827
828 .section .fixup,"ax"
829 bad_iret:
830 /*
831 * The iret traps when the %cs or %ss being restored is bogus.
832 * We've lost the original trap vector and error code.
833 * #GPF is the most likely one to get for an invalid selector.
834 * So pretend we completed the iret and took the #GPF in user mode.
835 *
836 * We are now running with the kernel GS after exception recovery.
837 * But error_entry expects us to have user GS to match the user %cs,
838 * so swap back.
839 */
840 pushq $0
841
842 SWAPGS
843 jmp general_protection
844
845 .previous
846
847 /* edi: workmask, edx: work */
848 retint_careful:
849 CFI_RESTORE_STATE
850 bt $TIF_NEED_RESCHED,%edx
851 jnc retint_signal
852 TRACE_IRQS_ON
853 ENABLE_INTERRUPTS(CLBR_NONE)
854 pushq %rdi
855 CFI_ADJUST_CFA_OFFSET 8
856 call schedule
857 popq %rdi
858 CFI_ADJUST_CFA_OFFSET -8
859 GET_THREAD_INFO(%rcx)
860 DISABLE_INTERRUPTS(CLBR_NONE)
861 TRACE_IRQS_OFF
862 jmp retint_check
863
864 retint_signal:
865 testl $_TIF_DO_NOTIFY_MASK,%edx
866 jz retint_swapgs
867 TRACE_IRQS_ON
868 ENABLE_INTERRUPTS(CLBR_NONE)
869 SAVE_REST
870 movq $-1,ORIG_RAX(%rsp)
871 xorl %esi,%esi # oldset
872 movq %rsp,%rdi # &pt_regs
873 call do_notify_resume
874 RESTORE_REST
875 DISABLE_INTERRUPTS(CLBR_NONE)
876 TRACE_IRQS_OFF
877 GET_THREAD_INFO(%rcx)
878 jmp retint_with_reschedule
879
880 #ifdef CONFIG_PREEMPT
881 /* Returning to kernel space. Check if we need preemption */
882 /* rcx: threadinfo. interrupts off. */
883 ENTRY(retint_kernel)
884 cmpl $0,TI_preempt_count(%rcx)
885 jnz retint_restore_args
886 bt $TIF_NEED_RESCHED,TI_flags(%rcx)
887 jnc retint_restore_args
888 bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
889 jnc retint_restore_args
890 call preempt_schedule_irq
891 jmp exit_intr
892 #endif
893
894 CFI_ENDPROC
895 END(common_interrupt)
896
897 /*
898 * APIC interrupts.
899 */
900 .p2align 5
901
902 .macro apicinterrupt num,func
903 INTR_FRAME
904 pushq $~(\num)
905 CFI_ADJUST_CFA_OFFSET 8
906 interrupt \func
907 jmp ret_from_intr
908 CFI_ENDPROC
909 .endm
910
911 ENTRY(thermal_interrupt)
912 apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
913 END(thermal_interrupt)
914
915 ENTRY(threshold_interrupt)
916 apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
917 END(threshold_interrupt)
918
919 #ifdef CONFIG_SMP
920 ENTRY(reschedule_interrupt)
921 apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
922 END(reschedule_interrupt)
923
924 .macro INVALIDATE_ENTRY num
925 ENTRY(invalidate_interrupt\num)
926 apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
927 END(invalidate_interrupt\num)
928 .endm
929
930 INVALIDATE_ENTRY 0
931 INVALIDATE_ENTRY 1
932 INVALIDATE_ENTRY 2
933 INVALIDATE_ENTRY 3
934 INVALIDATE_ENTRY 4
935 INVALIDATE_ENTRY 5
936 INVALIDATE_ENTRY 6
937 INVALIDATE_ENTRY 7
938
939 ENTRY(call_function_interrupt)
940 apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
941 END(call_function_interrupt)
942 ENTRY(call_function_single_interrupt)
943 apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
944 END(call_function_single_interrupt)
945 ENTRY(irq_move_cleanup_interrupt)
946 apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
947 END(irq_move_cleanup_interrupt)
948 #endif
949
950 ENTRY(apic_timer_interrupt)
951 apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
952 END(apic_timer_interrupt)
953
954 ENTRY(uv_bau_message_intr1)
955 apicinterrupt 220,uv_bau_message_interrupt
956 END(uv_bau_message_intr1)
957
958 ENTRY(error_interrupt)
959 apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
960 END(error_interrupt)
961
962 ENTRY(spurious_interrupt)
963 apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
964 END(spurious_interrupt)
965
966 /*
967 * Exception entry points.
968 */
969 .macro zeroentry sym
970 INTR_FRAME
971 PARAVIRT_ADJUST_EXCEPTION_FRAME
972 pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
973 subq $15*8,%rsp
974 CFI_ADJUST_CFA_OFFSET 15*8
975 call error_entry
976 DEFAULT_FRAME 0
977 movq %rsp,%rdi /* pt_regs pointer */
978 xorl %esi,%esi /* no error code */
979 call \sym
980 jmp error_exit /* %ebx: no swapgs flag */
981 CFI_ENDPROC
982 .endm
983
984 .macro errorentry sym
985 XCPT_FRAME
986 PARAVIRT_ADJUST_EXCEPTION_FRAME
987 subq $15*8,%rsp
988 CFI_ADJUST_CFA_OFFSET 15*8
989 call error_entry
990 DEFAULT_FRAME 0
991 movq %rsp,%rdi /* pt_regs pointer */
992 movq ORIG_RAX(%rsp),%rsi /* get error code */
993 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
994 call \sym
995 jmp error_exit /* %ebx: no swapgs flag */
996 CFI_ENDPROC
997 .endm
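
/*
 * Descriptive note: both macros above end up calling the handler as
 * sym(struct pt_regs *regs, long error_code) -- zeroentry passes 0 as
 * the error code, errorentry passes the hardware-pushed one and then
 * overwrites orig_rax with -1 so signal code won't restart a syscall.
 */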
998
999 /* error code is on the stack already */
1000 /* handle NMI like exceptions that can happen everywhere */
1001 .macro paranoidentry sym, ist=0, irqtrace=1
1002 SAVE_ALL
1003 cld
1004 movl $1,%ebx
1005 movl $MSR_GS_BASE,%ecx
1006 rdmsr
1007 testl %edx,%edx
1008 js 1f
1009 SWAPGS
1010 xorl %ebx,%ebx
1011 1:
1012 .if \ist
1013 movq %gs:pda_data_offset, %rbp
1014 .endif
1015 .if \irqtrace
1016 TRACE_IRQS_OFF
1017 .endif
1018 movq %rsp,%rdi
1019 movq ORIG_RAX(%rsp),%rsi
1020 movq $-1,ORIG_RAX(%rsp)
1021 .if \ist
1022 subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
1023 .endif
1024 call \sym
1025 .if \ist
1026 addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
1027 .endif
1028 DISABLE_INTERRUPTS(CLBR_NONE)
1029 .if \irqtrace
1030 TRACE_IRQS_OFF
1031 .endif
1032 .endm
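
/*
 * Descriptive note: the MSR_GS_BASE probe above tells the two entry
 * cases apart -- a negative base means the kernel GS is already loaded,
 * so %ebx stays 1 ("no swapgs needed on exit"); otherwise SWAPGS is
 * executed and %ebx is cleared so the exit paths swap back to user GS.
 */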
1033
1034 /*
1035 * "Paranoid" exit path from exception stack.
1036 * Paranoid because this is used by NMIs and cannot take
1037 * any kernel state for granted.
1038 * We don't do kernel preemption checks here, because only
1039 * NMI should be common and it does not enable IRQs and
1040 * cannot get reschedule ticks.
1041 *
1042 * "trace" is 0 for the NMI handler only, because irq-tracing
1043 * is fundamentally NMI-unsafe. (we cannot change the soft and
1044 * hard flags at once, atomically)
1045 */
1046 .macro paranoidexit trace=1
1047 /* ebx: no swapgs flag */
1048 paranoid_exit\trace:
1049 testl %ebx,%ebx /* swapgs needed? */
1050 jnz paranoid_restore\trace
1051 testl $3,CS(%rsp)
1052 jnz paranoid_userspace\trace
1053 paranoid_swapgs\trace:
1054 .if \trace
1055 TRACE_IRQS_IRETQ 0
1056 .endif
1057 SWAPGS_UNSAFE_STACK
1058 paranoid_restore\trace:
1059 RESTORE_ALL 8
1060 jmp irq_return
1061 paranoid_userspace\trace:
1062 GET_THREAD_INFO(%rcx)
1063 movl TI_flags(%rcx),%ebx
1064 andl $_TIF_WORK_MASK,%ebx
1065 jz paranoid_swapgs\trace
1066 movq %rsp,%rdi /* &pt_regs */
1067 call sync_regs
1068 movq %rax,%rsp /* switch stack for scheduling */
1069 testl $_TIF_NEED_RESCHED,%ebx
1070 jnz paranoid_schedule\trace
1071 movl %ebx,%edx /* arg3: thread flags */
1072 .if \trace
1073 TRACE_IRQS_ON
1074 .endif
1075 ENABLE_INTERRUPTS(CLBR_NONE)
1076 xorl %esi,%esi /* arg2: oldset */
1077 movq %rsp,%rdi /* arg1: &pt_regs */
1078 call do_notify_resume
1079 DISABLE_INTERRUPTS(CLBR_NONE)
1080 .if \trace
1081 TRACE_IRQS_OFF
1082 .endif
1083 jmp paranoid_userspace\trace
1084 paranoid_schedule\trace:
1085 .if \trace
1086 TRACE_IRQS_ON
1087 .endif
1088 ENABLE_INTERRUPTS(CLBR_ANY)
1089 call schedule
1090 DISABLE_INTERRUPTS(CLBR_ANY)
1091 .if \trace
1092 TRACE_IRQS_OFF
1093 .endif
1094 jmp paranoid_userspace\trace
1095 CFI_ENDPROC
1096 .endm
1097
1098 /*
1099 * Exception entry point. This expects an error code/orig_rax on the stack.
1100 * Returns the "no swapgs flag" in %ebx.
1101 */
1102 KPROBE_ENTRY(error_entry)
1103 XCPT_FRAME
1104 CFI_ADJUST_CFA_OFFSET 15*8
1105 /* oldrax contains error code */
1106 cld
1107 movq_cfi rdi, RDI+8
1108 movq_cfi rsi, RSI+8
1109 movq_cfi rdx, RDX+8
1110 movq_cfi rcx, RCX+8
1111 movq_cfi rax, RAX+8
1112 movq_cfi r8, R8+8
1113 movq_cfi r9, R9+8
1114 movq_cfi r10, R10+8
1115 movq_cfi r11, R11+8
1116 movq_cfi rbx, RBX+8
1117 movq_cfi rbp, RBP+8
1118 movq_cfi r12, R12+8
1119 movq_cfi r13, R13+8
1120 movq_cfi r14, R14+8
1121 movq_cfi r15, R15+8
1122 xorl %ebx,%ebx
1123 testl $3,CS+8(%rsp)
1124 je error_kernelspace
1125 error_swapgs:
1126 SWAPGS
1127 error_sti:
1128 TRACE_IRQS_OFF
1129 ret
1130 CFI_ENDPROC
1131
1132 /*
1133 * There are two places in the kernel that can potentially fault with
1134 * usergs. Handle them here. The exception handlers after iret run with
1135 * kernel gs again, so don't set the user space flag. B-stepping K8s
1136 * sometimes report a truncated RIP for IRET exceptions returning to
1137 * compat mode. Check for these here too.
1138 */
1139 error_kernelspace:
1140 incl %ebx
1141 leaq irq_return(%rip),%rcx
1142 cmpq %rcx,RIP+8(%rsp)
1143 je error_swapgs
1144 movl %ecx,%ecx /* zero extend */
1145 cmpq %rcx,RIP+8(%rsp)
1146 je error_swapgs
1147 cmpq $gs_change,RIP+8(%rsp)
1148 je error_swapgs
1149 jmp error_sti
1150 KPROBE_END(error_entry)
1151
1152
1153 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
1154 KPROBE_ENTRY(error_exit)
1155 DEFAULT_FRAME
1156 movl %ebx,%eax
1157 RESTORE_REST
1158 DISABLE_INTERRUPTS(CLBR_NONE)
1159 TRACE_IRQS_OFF
1160 GET_THREAD_INFO(%rcx)
1161 testl %eax,%eax
1162 jne retint_kernel
1163 LOCKDEP_SYS_EXIT_IRQ
1164 movl TI_flags(%rcx),%edx
1165 movl $_TIF_WORK_MASK,%edi
1166 andl %edi,%edx
1167 jnz retint_careful
1168 jmp retint_swapgs
1169 CFI_ENDPROC
1170 KPROBE_END(error_exit)
1171
1172 /* Reload gs selector with exception handling */
1173 /* edi: new selector */
1174 ENTRY(native_load_gs_index)
1175 CFI_STARTPROC
1176 pushf
1177 CFI_ADJUST_CFA_OFFSET 8
1178 DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
1179 SWAPGS
1180 gs_change:
1181 movl %edi,%gs
1182 2: mfence /* workaround */
1183 SWAPGS
1184 popf
1185 CFI_ADJUST_CFA_OFFSET -8
1186 ret
1187 CFI_ENDPROC
1188 ENDPROC(native_load_gs_index)
1189
1190 .section __ex_table,"a"
1191 .align 8
1192 .quad gs_change,bad_gs
1193 .previous
1194 .section .fixup,"ax"
1195 /* running with kernelgs */
1196 bad_gs:
1197 SWAPGS /* switch back to user gs */
1198 xorl %eax,%eax
1199 movl %eax,%gs
1200 jmp 2b
1201 .previous
1202
1203 /*
1204 * Create a kernel thread.
1205 *
1206 * C extern interface:
1207 * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
1208 *
1209 * asm input arguments:
1210 * rdi: fn, rsi: arg, rdx: flags
1211 */
1212 ENTRY(kernel_thread)
1213 CFI_STARTPROC
1214 FAKE_STACK_FRAME $child_rip
1215 SAVE_ALL
1216
1217 # rdi: flags, rsi: usp, rdx: will be &pt_regs
1218 movq %rdx,%rdi
1219 orq kernel_thread_flags(%rip),%rdi
1220 movq $-1, %rsi
1221 movq %rsp, %rdx
1222
1223 xorl %r8d,%r8d
1224 xorl %r9d,%r9d
1225
1226 # clone now
1227 call do_fork
1228 movq %rax,RAX(%rsp)
1229 xorl %edi,%edi
1230
1231 /*
1232 * It isn't worth checking for a reschedule here,
1233 * so internally to the x86_64 port you can rely on kernel_thread()
1234 * not rescheduling the child before returning; this avoids the need
1235 * for hacks, for example to fork off the per-CPU idle tasks.
1236 * [Hopefully no generic code relies on the reschedule -AK]
1237 */
1238 RESTORE_ALL
1239 UNFAKE_STACK_FRAME
1240 ret
1241 CFI_ENDPROC
1242 ENDPROC(kernel_thread)
1243
1244 child_rip:
1245 pushq $0 # fake return address
1246 CFI_STARTPROC
1247 /*
1248 * Here we are in the child and the registers are set as they were
1249 * at kernel_thread() invocation in the parent.
1250 */
1251 movq %rdi, %rax
1252 movq %rsi, %rdi
1253 call *%rax
1254 # exit
1255 mov %eax, %edi
1256 call do_exit
1257 CFI_ENDPROC
1258 ENDPROC(child_rip)
1259
1260 /*
1261 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
1262 *
1263 * C extern interface:
1264 * extern long execve(char *name, char **argv, char **envp)
1265 *
1266 * asm input arguments:
1267 * rdi: name, rsi: argv, rdx: envp
1268 *
1269 * We want to fall back into:
1270 * extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs *regs)
1271 *
1272 * do_sys_execve asm fallback arguments:
1273 * rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
1274 */
1275 ENTRY(kernel_execve)
1276 CFI_STARTPROC
1277 FAKE_STACK_FRAME $0
1278 SAVE_ALL
1279 movq %rsp,%rcx
1280 call sys_execve
1281 movq %rax, RAX(%rsp)
1282 RESTORE_REST
1283 testq %rax,%rax
1284 je int_ret_from_sys_call
1285 RESTORE_ARGS
1286 UNFAKE_STACK_FRAME
1287 ret
1288 CFI_ENDPROC
1289 ENDPROC(kernel_execve)
1290
1291 KPROBE_ENTRY(page_fault)
1292 errorentry do_page_fault
1293 KPROBE_END(page_fault)
1294
1295 ENTRY(coprocessor_error)
1296 zeroentry do_coprocessor_error
1297 END(coprocessor_error)
1298
1299 ENTRY(simd_coprocessor_error)
1300 zeroentry do_simd_coprocessor_error
1301 END(simd_coprocessor_error)
1302
1303 ENTRY(device_not_available)
1304 zeroentry do_device_not_available
1305 END(device_not_available)
1306
1307 /* runs on exception stack */
1308 KPROBE_ENTRY(debug)
1309 INTR_FRAME
1310 PARAVIRT_ADJUST_EXCEPTION_FRAME
1311 pushq $0
1312 CFI_ADJUST_CFA_OFFSET 8
1313 paranoidentry do_debug, DEBUG_STACK
1314 paranoidexit
1315 KPROBE_END(debug)
1316
1317 /* runs on exception stack */
1318 KPROBE_ENTRY(nmi)
1319 INTR_FRAME
1320 PARAVIRT_ADJUST_EXCEPTION_FRAME
1321 pushq $-1
1322 CFI_ADJUST_CFA_OFFSET 8
1323 paranoidentry do_nmi, 0, 0
1324 #ifdef CONFIG_TRACE_IRQFLAGS
1325 paranoidexit 0
1326 #else
1327 jmp paranoid_exit1
1328 CFI_ENDPROC
1329 #endif
1330 KPROBE_END(nmi)
1331
1332 KPROBE_ENTRY(int3)
1333 INTR_FRAME
1334 PARAVIRT_ADJUST_EXCEPTION_FRAME
1335 pushq $0
1336 CFI_ADJUST_CFA_OFFSET 8
1337 paranoidentry do_int3, DEBUG_STACK
1338 jmp paranoid_exit1
1339 CFI_ENDPROC
1340 KPROBE_END(int3)
1341
1342 ENTRY(overflow)
1343 zeroentry do_overflow
1344 END(overflow)
1345
1346 ENTRY(bounds)
1347 zeroentry do_bounds
1348 END(bounds)
1349
1350 ENTRY(invalid_op)
1351 zeroentry do_invalid_op
1352 END(invalid_op)
1353
1354 ENTRY(coprocessor_segment_overrun)
1355 zeroentry do_coprocessor_segment_overrun
1356 END(coprocessor_segment_overrun)
1357
1358 /* runs on exception stack */
1359 ENTRY(double_fault)
1360 XCPT_FRAME
1361 PARAVIRT_ADJUST_EXCEPTION_FRAME
1362 paranoidentry do_double_fault
1363 jmp paranoid_exit1
1364 CFI_ENDPROC
1365 END(double_fault)
1366
1367 ENTRY(invalid_TSS)
1368 errorentry do_invalid_TSS
1369 END(invalid_TSS)
1370
1371 ENTRY(segment_not_present)
1372 errorentry do_segment_not_present
1373 END(segment_not_present)
1374
1375 /* runs on exception stack */
1376 ENTRY(stack_segment)
1377 XCPT_FRAME
1378 PARAVIRT_ADJUST_EXCEPTION_FRAME
1379 paranoidentry do_stack_segment
1380 jmp paranoid_exit1
1381 CFI_ENDPROC
1382 END(stack_segment)
1383
1384 KPROBE_ENTRY(general_protection)
1385 errorentry do_general_protection
1386 KPROBE_END(general_protection)
1387
1388 ENTRY(alignment_check)
1389 errorentry do_alignment_check
1390 END(alignment_check)
1391
1392 ENTRY(divide_error)
1393 zeroentry do_divide_error
1394 END(divide_error)
1395
1396 ENTRY(spurious_interrupt_bug)
1397 zeroentry do_spurious_interrupt_bug
1398 END(spurious_interrupt_bug)
1399
1400 #ifdef CONFIG_X86_MCE
1401 /* runs on exception stack */
1402 ENTRY(machine_check)
1403 INTR_FRAME
1404 PARAVIRT_ADJUST_EXCEPTION_FRAME
1405 pushq $0
1406 CFI_ADJUST_CFA_OFFSET 8
1407 paranoidentry do_machine_check
1408 jmp paranoid_exit1
1409 CFI_ENDPROC
1410 END(machine_check)
1411 #endif
1412
1413 /* Call softirq on interrupt stack. Interrupts are off. */
1414 ENTRY(call_softirq)
1415 CFI_STARTPROC
1416 push %rbp
1417 CFI_ADJUST_CFA_OFFSET 8
1418 CFI_REL_OFFSET rbp,0
1419 mov %rsp,%rbp
1420 CFI_DEF_CFA_REGISTER rbp
1421 incl %gs:pda_irqcount
1422 cmove %gs:pda_irqstackptr,%rsp
1423 push %rbp # backlink for old unwinder
1424 call __do_softirq
1425 leaveq
1426 CFI_DEF_CFA_REGISTER rsp
1427 CFI_ADJUST_CFA_OFFSET -8
1428 decl %gs:pda_irqcount
1429 ret
1430 CFI_ENDPROC
1431 ENDPROC(call_softirq)
1432
1433 KPROBE_ENTRY(ignore_sysret)
1434 CFI_STARTPROC
1435 mov $-ENOSYS,%eax
1436 sysret
1437 CFI_ENDPROC
1438 ENDPROC(ignore_sysret)
1439
1440 #ifdef CONFIG_XEN
1441 ENTRY(xen_hypervisor_callback)
1442 zeroentry xen_do_hypervisor_callback
1443 END(xen_hypervisor_callback)
1444
1445 /*
1446 # A note on the "critical region" in our callback handler.
1447 # We want to avoid stacking callback handlers due to events occurring
1448 # during handling of the last event. To do this, we keep events disabled
1449 # until we've done all processing. HOWEVER, we must enable events before
1450 # popping the stack frame (can't be done atomically) and so it would still
1451 # be possible to get enough handler activations to overflow the stack.
1452 # Although unlikely, bugs of that kind are hard to track down, so we'd
1453 # like to avoid the possibility.
1454 # So, on entry to the handler we detect whether we interrupted an
1455 # existing activation in its critical region -- if so, we pop the current
1456 # activation and restart the handler using the previous one.
1457 */
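/*
 * Descriptive note: like the interrupt entries above, the handler below
 * bumps pda_irqcount and switches to the per-CPU irq stack; the cmovzq
 * only performs the switch for the outermost activation, so nested
 * upcalls keep running on the stack they are already on.
 */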
1458 ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct pt_regs *)
1459 CFI_STARTPROC
1460 /* Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will
1461 see the correct pointer to the pt_regs */
1462 movq %rdi, %rsp # we don't return, adjust the stack frame
1463 CFI_ENDPROC
1464 DEFAULT_FRAME
1465 11: incl %gs:pda_irqcount
1466 movq %rsp,%rbp
1467 CFI_DEF_CFA_REGISTER rbp
1468 cmovzq %gs:pda_irqstackptr,%rsp
1469 pushq %rbp # backlink for old unwinder
1470 call xen_evtchn_do_upcall
1471 popq %rsp
1472 CFI_DEF_CFA_REGISTER rsp
1473 decl %gs:pda_irqcount
1474 jmp error_exit
1475 CFI_ENDPROC
1476 END(do_hypervisor_callback)
1477
1478 /*
1479 # Hypervisor uses this for application faults while it executes.
1480 # We get here for two reasons:
1481 # 1. Fault while reloading DS, ES, FS or GS
1482 # 2. Fault while executing IRET
1483 # Category 1 we do not need to fix up as Xen has already reloaded all segment
1484 # registers that could be reloaded and zeroed the others.
1485 # Category 2 we fix up by killing the current process. We cannot use the
1486 # normal Linux return path in this case because if we use the IRET hypercall
1487 # to pop the stack frame we end up in an infinite loop of failsafe callbacks.
1488 # We distinguish between categories by comparing each saved segment register
1489 # with its current contents: any discrepancy means we are in category 1.
1490 */
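/*
 * Descriptive note on the frame checked below: Xen hands us %rcx at
 * 0(%rsp), %r11 at 8(%rsp), and the saved ds/es/fs/gs selectors at
 * 0x10/0x18/0x20/0x28(%rsp). That 0x30-byte block is dropped on both
 * paths: category 2 (all selectors match) is routed to
 * general_protection, while category 1 (a selector mismatch) rebuilds a
 * full frame and retries the IRET via error_exit.
 */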
1491 ENTRY(xen_failsafe_callback)
1492 INTR_FRAME 1 (6*8)
1493 /*CFI_REL_OFFSET gs,GS*/
1494 /*CFI_REL_OFFSET fs,FS*/
1495 /*CFI_REL_OFFSET es,ES*/
1496 /*CFI_REL_OFFSET ds,DS*/
1497 CFI_REL_OFFSET r11,8
1498 CFI_REL_OFFSET rcx,0
1499 movw %ds,%cx
1500 cmpw %cx,0x10(%rsp)
1501 CFI_REMEMBER_STATE
1502 jne 1f
1503 movw %es,%cx
1504 cmpw %cx,0x18(%rsp)
1505 jne 1f
1506 movw %fs,%cx
1507 cmpw %cx,0x20(%rsp)
1508 jne 1f
1509 movw %gs,%cx
1510 cmpw %cx,0x28(%rsp)
1511 jne 1f
1512 /* All segments match their saved values => Category 2 (Bad IRET). */
1513 movq (%rsp),%rcx
1514 CFI_RESTORE rcx
1515 movq 8(%rsp),%r11
1516 CFI_RESTORE r11
1517 addq $0x30,%rsp
1518 CFI_ADJUST_CFA_OFFSET -0x30
1519 pushq_cfi $0 /* RIP */
1520 pushq_cfi %r11
1521 pushq_cfi %rcx
1522 jmp general_protection
1523 CFI_RESTORE_STATE
1524 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
1525 movq (%rsp),%rcx
1526 CFI_RESTORE rcx
1527 movq 8(%rsp),%r11
1528 CFI_RESTORE r11
1529 addq $0x30,%rsp
1530 CFI_ADJUST_CFA_OFFSET -0x30
1531 pushq_cfi $0
1532 SAVE_ALL
1533 jmp error_exit
1534 CFI_ENDPROC
1535 END(xen_failsafe_callback)
1536
1537 #endif /* CONFIG_XEN */