/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers are
 *   not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT	0x80000000
#define __AUDIT_ARCH_LE		0x40000000
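/*
 * For reference: with EM_X86_64 == 62 (0x3e), AUDIT_ARCH_X86_64 above
 * works out to 0xc000003e.
 */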

	.code64
/*
 * Some macros to hide the most frequently occurring CFI annotations.
 */
	.macro CFI_PUSHQ reg
	pushq \reg
	CFI_ADJUST_CFA_OFFSET 8
	.endm

	.macro CFI_POPQ reg
	popq \reg
	CFI_ADJUST_CFA_OFFSET -8
	.endm

	.macro CFI_MOVQ reg offset=0
	movq %\reg, \offset(%rsp)
	CFI_REL_OFFSET \reg, \offset
	.endm
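
/*
 * For illustration, a use such as
 *
 *	CFI_PUSHQ %r11
 *
 * expands to
 *
 *	pushq %r11
 *	CFI_ADJUST_CFA_OFFSET 8
 *
 * so each stack operation and its unwind annotation stay together.
 */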

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
	retq
END(mcount)

ENTRY(ftrace_caller)

	/* taken from glibc */
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi

.globl ftrace_call
ftrace_call:
	call ftrace_stub

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

.globl ftrace_stub
ftrace_stub:
	retq
END(ftrace_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount)
	cmpq $ftrace_stub, ftrace_trace_function
	jnz trace
.globl ftrace_stub
ftrace_stub:
	retq

trace:
	/* taken from glibc */
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi

	call *ftrace_trace_function

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
#endif /* CONFIG_PARAVIRT */


.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt $9,EFLAGS-\offset(%rsp)	/* interrupts off? (EFLAGS bit 9 is IF) */
	jnc 1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * C code is not supposed to know about the undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

	/* %rsp:at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp
	movq %gs:pda_oldrsp,\tmp
	movq \tmp,RSP(%rsp)
	movq $__USER_DS,SS(%rsp)
	movq $__USER_CS,CS(%rsp)
	movq $-1,RCX(%rsp)
	movq R11(%rsp),\tmp	/* get eflags */
	movq \tmp,EFLAGS(%rsp)
	.endm

	.macro RESTORE_TOP_OF_STACK tmp,offset=0
	movq RSP-\offset(%rsp),\tmp
	movq \tmp,%gs:pda_oldrsp
	movq EFLAGS-\offset(%rsp),\tmp
	movq \tmp,R11-\offset(%rsp)
	.endm

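/*
 * A sketch of the typical pairing, as used by ptregscall_common below:
 *
 *	FIXUP_TOP_OF_STACK %r11
 *	call *%rax		# C function taking a struct pt_regs
 *	RESTORE_TOP_OF_STACK %r11
 */
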
	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl %eax, %eax
	pushq $__KERNEL_DS	/* ss */
	CFI_ADJUST_CFA_OFFSET 8
	/*CFI_REL_OFFSET ss,0*/
	pushq %rax		/* rsp */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rsp,0
	pushq $(1<<9)		/* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET 8
	/*CFI_REL_OFFSET rflags,0*/
	pushq $__KERNEL_CS	/* cs */
	CFI_ADJUST_CFA_OFFSET 8
	/*CFI_REL_OFFSET cs,0*/
	pushq \child_rip	/* rip */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip,0
	pushq %rax		/* orig rax */
	CFI_ADJUST_CFA_OFFSET 8
	.endm

	.macro UNFAKE_STACK_FRAME
	addq $8*6, %rsp
	CFI_ADJUST_CFA_OFFSET -(6*8)
	.endm

/*
 * initial frame state for an empty stack frame
 */
	.macro EMPTY_FRAME start=1 offset=0
	.if \start
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,8+\offset
	.else
	CFI_DEF_CFA_OFFSET 8+\offset
	.endif
	.endm

/*
 * initial frame state for interrupts (and exceptions without error code)
 */
	.macro INTR_FRAME start=1 offset=0
	EMPTY_FRAME \start, SS+8+\offset-RIP
	/*CFI_REL_OFFSET ss, SS+\offset-RIP*/
	CFI_REL_OFFSET rsp, RSP+\offset-RIP
	/*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/
	/*CFI_REL_OFFSET cs, CS+\offset-RIP*/
	CFI_REL_OFFSET rip, RIP+\offset-RIP
	.endm

/*
 * initial frame state for exceptions with error code (and interrupts
 * with vector already pushed)
 */
	.macro XCPT_FRAME start=1 offset=0
	INTR_FRAME \start, RIP+\offset-ORIG_RAX
	/*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/
	.endm

/*
 * frame that enables calling into C.
 */
	.macro PARTIAL_FRAME start=1 offset=0
	XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET
	CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET
	CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET
	CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET
	CFI_REL_OFFSET rcx, RCX+\offset-ARGOFFSET
	CFI_REL_OFFSET rax, RAX+\offset-ARGOFFSET
	CFI_REL_OFFSET r8, R8+\offset-ARGOFFSET
	CFI_REL_OFFSET r9, R9+\offset-ARGOFFSET
	CFI_REL_OFFSET r10, R10+\offset-ARGOFFSET
	CFI_REL_OFFSET r11, R11+\offset-ARGOFFSET
	.endm

/*
 * frame that enables passing a complete pt_regs to a C function.
 */
	.macro DEFAULT_FRAME start=1 offset=0
	PARTIAL_FRAME \start, R11+\offset-R15
	CFI_REL_OFFSET rbx, RBX+\offset
	CFI_REL_OFFSET rbp, RBP+\offset
	CFI_REL_OFFSET r12, R12+\offset
	CFI_REL_OFFSET r13, R13+\offset
	CFI_REL_OFFSET r14, R14+\offset
	CFI_REL_OFFSET r15, R15+\offset
	.endm

/* save partial stack frame */
ENTRY(save_args)
	XCPT_FRAME
	cld
	CFI_MOVQ rdi, RDI+16-ARGOFFSET
	CFI_MOVQ rsi, RSI+16-ARGOFFSET
	CFI_MOVQ rdx, RDX+16-ARGOFFSET
	CFI_MOVQ rcx, RCX+16-ARGOFFSET
	CFI_MOVQ rax, RAX+16-ARGOFFSET
	CFI_MOVQ r8, R8+16-ARGOFFSET
	CFI_MOVQ r9, R9+16-ARGOFFSET
	CFI_MOVQ r10, R10+16-ARGOFFSET
	CFI_MOVQ r11, R11+16-ARGOFFSET
	leaq -ARGOFFSET+16(%rsp),%rdi	/* arg1 for handler */
	CFI_MOVQ rbp, 8			/* push %rbp */
	leaq 8(%rsp), %rbp		/* mov %rsp, %rbp */
	testl $3, CS(%rdi)
	je 1f
	SWAPGS
	/*
	 * irqcount is used to check if a CPU is already on an interrupt stack
	 * or not. While this is essentially redundant with preempt_count it is
	 * a little cheaper to use a separate counter in the PDA (short of
	 * moving irq_enter into assembly, which would be too much work)
	 */
1:	incl %gs:pda_irqcount
	jne 2f
	CFI_POPQ %rax			/* move return address... */
	mov %gs:pda_irqstackptr,%rsp
	EMPTY_FRAME 0
	CFI_PUSHQ %rax			/* ... to the new stack */
	/*
	 * We entered an interrupt context - irqs are off:
	 */
2:	TRACE_IRQS_OFF
	ret
	CFI_ENDPROC
END(save_args)

/*
 * A newly forked process directly context switches into this.
 */
/* rdi: prev */
ENTRY(ret_from_fork)
	DEFAULT_FRAME
	push kernel_eflags(%rip)
	CFI_ADJUST_CFA_OFFSET 8
	popf				# reset kernel eflags
	CFI_ADJUST_CFA_OFFSET -8
	call schedule_tail
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jnz rff_trace
rff_action:
	RESTORE_REST
	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je int_ret_from_sys_call
	testl $_TIF_IA32,TI_flags(%rcx)
	jnz int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
	jmp ret_from_sys_call
rff_trace:
	movq %rsp,%rdi
	call syscall_trace_leave
	GET_THREAD_INFO(%rcx)
	jmp rff_action
	CFI_ENDPROC
END(ret_from_fork)

/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax	system call number
 * rdi	arg0
 * rcx	return address for syscall/sysret, C arg3
 * rsi	arg1
 * rdx	arg2
 * r10	arg3 (--> moved to rcx for C)
 * r8	arg4
 * r9	arg5
 * r11	eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX if we had a free scratch register we could save the RSP into the stack
 * frame and report it properly in ps. Unfortunately we don't have one.
 *
 * When the user can change the frame, always force IRET. That is because
 * it deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
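
/*
 * For reference, a minimal user-space invocation following this register
 * convention (illustrative sketch only, not part of the kernel):
 *
 *	movl	$__NR_getpid, %eax	# system call number in rax
 *	syscall				# rcx := return rip, r11 := rflags
 *	# return value is now in %rax
 */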

ENTRY(system_call)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,PDA_STACKOFFSET
	CFI_REGISTER rip,rcx
	/*CFI_REGISTER rflags,r11*/
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
ENTRY(system_call_after_swapgs)

	movq %rsp,%gs:pda_oldrsp
	movq %gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_ARGS 8,1
	movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
	jnz tracesys
system_call_fastpath:
	cmpq $__NR_syscall_max,%rax
	ja badsys
	movq %r10,%rcx
	call *sys_call_table(,%rax,8)	# XXX: rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi: flagmask */
sysret_check:
	LOCKDEP_SYS_EXIT
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz sysret_careful
	CFI_REMEMBER_STATE
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq RIP-ARGOFFSET(%rsp),%rcx
	CFI_REGISTER rip,rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER rflags,r11*/
	movq %gs:pda_oldrsp, %rsp
	USERGS_SYSRET64

	CFI_RESTORE_STATE
	/* Handle reschedules */
	/* edx: work, edi: workmask */
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	jmp sysret_check

	/* Handle a signal */
sysret_signal:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
#ifdef CONFIG_AUDITSYSCALL
	bt $TIF_SYSCALL_AUDIT,%edx
	jc sysret_audit
#endif
	/* edx: work flags (arg3) */
	leaq do_notify_resume(%rip),%rax
	leaq -ARGOFFSET(%rsp),%rdi	# &pt_regs -> arg1
	xorl %esi,%esi			# oldset -> arg2
	call ptregscall_common
	movl $_TIF_WORK_MASK,%edi
	/* Use IRET because the user could have changed the frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call

#ifdef CONFIG_AUDITSYSCALL
	/*
	 * Fast path for syscall audit without full syscall trace.
	 * We just call audit_syscall_entry() directly, and then
	 * jump back to the normal fast path.
	 */
auditsys:
	movq %r10,%r9			/* 6th arg: 4th syscall arg */
	movq %rdx,%r8			/* 5th arg: 3rd syscall arg */
	movq %rsi,%rcx			/* 4th arg: 2nd syscall arg */
	movq %rdi,%rdx			/* 3rd arg: 1st syscall arg */
	movq %rax,%rsi			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_X86_64,%edi	/* 1st arg: audit arch */
	call audit_syscall_entry
	LOAD_ARGS 0			/* reload call-clobbered registers */
	jmp system_call_fastpath

	/*
	 * Return fast path for syscall audit. Call audit_syscall_exit()
	 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
	 * masked off.
	 */
sysret_audit:
	movq %rax,%rsi			/* second arg, syscall return value */
	cmpq $0,%rax			/* is it < 0? */
	setl %al			/* 1 if so, 0 if not */
	movzbl %al,%edi			/* zero-extend that into %edi */
	inc %edi	/* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
	call audit_syscall_exit
	movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
	jmp sysret_check
#endif /* CONFIG_AUDITSYSCALL */

	/* Do syscall tracing */
tracesys:
#ifdef CONFIG_AUDITSYSCALL
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jz auditsys
#endif
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp)	/* ptrace can change this for a bad syscall */
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	/*
	 * Reload arg registers from stack in case ptrace changed them.
	 * We don't reload %rax because syscall_trace_enter() returned
	 * the value it wants us to use in the table lookup.
	 */
	LOAD_ARGS ARGOFFSET, 1
	RESTORE_REST
	cmpq $__NR_syscall_max,%rax
	ja int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
	movq %r10,%rcx			/* fixup for C */
	call *sys_call_table(,%rax,8)
	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because user could have changed frame */

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
	.globl int_ret_from_sys_call
	.globl int_with_check
int_ret_from_sys_call:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi: mask to check */
int_with_check:
	LOCKDEP_SYS_EXIT_IRQ
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz int_careful
	andl $~TS_COMPAT,TI_status(%rcx)
	jmp retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx: work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc int_very_careful
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	/* Check for syscall exit trace */
	testl $_TIF_WORK_SYSCALL_EXIT,%edx
	jz int_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi		# &ptregs -> arg1
	call syscall_trace_leave
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
	jmp int_restore_rest

int_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f
	movq %rsp,%rdi			# &ptregs -> arg1
	xorl %esi,%esi			# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_WORK_MASK,%edi
int_restore_rest:
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check
	CFI_ENDPROC
END(system_call)

/*
 * Certain special system calls that need to save a complete stack frame.
 */

	.macro PTREGSCALL label,func,arg
	.globl \label
\label:
	leaq \func(%rip),%rax
	leaq -ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
	jmp ptregscall_common
END(\label)
	.endm

	CFI_STARTPROC

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	movq %r11, %r15
	CFI_REGISTER rip, r15
	FIXUP_TOP_OF_STACK %r11
	call *%rax
	RESTORE_TOP_OF_STACK %r11
	movq %r15, %r11
	CFI_REGISTER rip, r11
	RESTORE_REST
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip, 0
	ret
	CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
	CFI_STARTPROC
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	movq %rsp, %rcx
	call sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
	CFI_ADJUST_CFA_OFFSET -8
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp)	# fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)

/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
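/*
 * A sketch of how one expanded 32-byte chunk lays out (vectors v..v+6);
 * the first six stubs push their vector and jump ahead, the seventh
 * falls through:
 *
 *	1:	pushq $(~(v+0)+0x80)
 *		jmp 2f
 *		...
 *		pushq $(~(v+6)+0x80)
 *	2:	jmp common_interrupt
 */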
	.section .init.rodata,"a"
ENTRY(interrupt)
	.text
	.p2align 5
	.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
	INTR_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
	.balign 32
	.rept 7
	.if vector < NR_VECTORS
	.if vector <> FIRST_EXTERNAL_VECTOR
	CFI_ADJUST_CFA_OFFSET -8
	.endif
1:	pushq $(~vector+0x80)		/* Note: always in signed byte range */
	CFI_ADJUST_CFA_OFFSET 8
	.if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
	jmp 2f
	.endif
	.previous
	.quad 1b
	.text
vector=vector+1
	.endif
	.endr
2:	jmp common_interrupt
.endr
	CFI_ENDPROC
END(irq_entries_start)

.previous
END(interrupt)
.previous

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the
 * fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
	.macro interrupt func
	subq $10*8, %rsp
	CFI_ADJUST_CFA_OFFSET 10*8
	call save_args
	PARTIAL_FRAME 0
	call \func
	.endm

	/*
	 * The interrupt stubs push (~vector+0x80) onto the stack and
	 * then jump to common_interrupt.
	 */
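	/*
	 * Worked example: vector 0x20 is pushed as ~0x20+0x80 = 0x5f (a
	 * one-byte immediate); the addq $-0x80 below turns that back into
	 * 0x5f-0x80 = -0x21 = ~0x20, the ~vector value do_IRQ expects.
	 */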
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	XCPT_FRAME
	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl %gs:pda_irqcount
	leaveq
	CFI_DEF_CFA_REGISTER rsp
	CFI_ADJUST_CFA_OFFSET -8
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	CFI_REMEMBER_STATE
	jnz retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ
	SWAPGS
	jmp restore_args

retint_restore_args:	/* return to kernel space */
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 0,8,0

irq_return:
	INTERRUPT_RETURN

	.section __ex_table, "a"
	.quad irq_return, bad_iret
	.previous

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iretq

	.section __ex_table,"a"
	.quad native_iret, bad_iret
	.previous
#endif

	.section .fixup,"ax"
bad_iret:
	/*
	 * The iret traps when the %cs or %ss being restored is bogus.
	 * We've lost the original trap vector and error code.
	 * #GPF is the most likely one to get for an invalid selector.
	 * So pretend we completed the iret and took the #GPF in user mode.
	 *
	 * We are now running with the kernel GS after exception recovery.
	 * But error_entry expects us to have user GS to match the user %cs,
	 * so swap back.
	 */
	pushq $0

	SWAPGS
	jmp general_protection

	.previous

	/* edi: workmask, edx: work */
retint_careful:
	CFI_RESTORE_STATE
	bt $TIF_NEED_RESCHED,%edx
	jnc retint_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp retint_check

retint_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi			# oldset
	movq %rsp,%rdi			# &pt_regs
	call do_notify_resume
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	jmp retint_with_reschedule

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx: threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl $0,TI_preempt_count(%rcx)
	jnz retint_restore_args
	bt $TIF_NEED_RESCHED,TI_flags(%rcx)
	jnc retint_restore_args
	bt $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif

	CFI_ENDPROC
END(common_interrupt)

1da177e4
LT
896/*
897 * APIC interrupts.
0bd7b798 898 */
d99015b1
AH
899 .p2align 5
900
1da177e4 901 .macro apicinterrupt num,func
7effaa88 902 INTR_FRAME
19eadf98 903 pushq $~(\num)
7effaa88 904 CFI_ADJUST_CFA_OFFSET 8
1da177e4
LT
905 interrupt \func
906 jmp ret_from_intr
907 CFI_ENDPROC
908 .endm
909
910ENTRY(thermal_interrupt)
911 apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
4b787e0b 912END(thermal_interrupt)
1da177e4 913
89b831ef
JS
914ENTRY(threshold_interrupt)
915 apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
4b787e0b 916END(threshold_interrupt)
89b831ef 917
0bd7b798 918#ifdef CONFIG_SMP
1da177e4
LT
919ENTRY(reschedule_interrupt)
920 apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
4b787e0b 921END(reschedule_interrupt)
1da177e4 922
e5bc8b6b
AK
923 .macro INVALIDATE_ENTRY num
924ENTRY(invalidate_interrupt\num)
0bd7b798 925 apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
4b787e0b 926END(invalidate_interrupt\num)
e5bc8b6b
AK
927 .endm
928
929 INVALIDATE_ENTRY 0
930 INVALIDATE_ENTRY 1
931 INVALIDATE_ENTRY 2
932 INVALIDATE_ENTRY 3
933 INVALIDATE_ENTRY 4
934 INVALIDATE_ENTRY 5
935 INVALIDATE_ENTRY 6
936 INVALIDATE_ENTRY 7
1da177e4
LT
937
938ENTRY(call_function_interrupt)
939 apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
4b787e0b 940END(call_function_interrupt)
3b16cf87
JA
941ENTRY(call_function_single_interrupt)
942 apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
943END(call_function_single_interrupt)
61014292
EB
944ENTRY(irq_move_cleanup_interrupt)
945 apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
946END(irq_move_cleanup_interrupt)
1da177e4
LT
947#endif
948
1da177e4
LT
949ENTRY(apic_timer_interrupt)
950 apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
4b787e0b 951END(apic_timer_interrupt)
1da177e4 952
1812924b
CW
953ENTRY(uv_bau_message_intr1)
954 apicinterrupt 220,uv_bau_message_interrupt
955END(uv_bau_message_intr1)
956
1da177e4
LT
957ENTRY(error_interrupt)
958 apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
4b787e0b 959END(error_interrupt)
1da177e4
LT
960
961ENTRY(spurious_interrupt)
962 apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
4b787e0b 963END(spurious_interrupt)
0bd7b798 964
1da177e4
LT
965/*
966 * Exception entry points.
0bd7b798 967 */
1da177e4 968 .macro zeroentry sym
7effaa88 969 INTR_FRAME
fab58420 970 PARAVIRT_ADJUST_EXCEPTION_FRAME
dcd072e2 971 CFI_PUSHQ $-1 /* ORIG_RAX: no syscall to restart */
d99015b1
AH
972 subq $15*8,%rsp
973 CFI_ADJUST_CFA_OFFSET 15*8
974 call error_entry
dcd072e2 975 DEFAULT_FRAME 0
d99015b1
AH
976 movq %rsp,%rdi /* pt_regs pointer */
977 xorl %esi,%esi /* no error code */
978 call \sym
979 jmp error_exit /* %ebx: no swapgs flag */
7effaa88 980 CFI_ENDPROC
0bd7b798 981 .endm
1da177e4
LT
982
983 .macro errorentry sym
7effaa88 984 XCPT_FRAME
fab58420 985 PARAVIRT_ADJUST_EXCEPTION_FRAME
d99015b1
AH
986 subq $15*8,%rsp
987 CFI_ADJUST_CFA_OFFSET 15*8
988 call error_entry
dcd072e2 989 DEFAULT_FRAME 0
d99015b1
AH
990 movq %rsp,%rdi /* pt_regs pointer */
991 movq ORIG_RAX(%rsp),%rsi /* get error code */
992 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
993 call \sym
994 jmp error_exit /* %ebx: no swapgs flag */
7effaa88 995 CFI_ENDPROC
1da177e4
LT
996 .endm
997
	/* error code is on the stack already */
	/* handle NMI like exceptions that can happen everywhere */
	.macro paranoidentry sym, ist=0, irqtrace=1
	SAVE_ALL
	cld
	movl $1,%ebx
	movl $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx		/* negative -> kernel GS base already in use */
	js 1f
	SWAPGS
	xorl %ebx,%ebx
1:
	.if \ist
	movq %gs:pda_data_offset, %rbp
	.endif
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi
	movq $-1,ORIG_RAX(%rsp)
	.if \ist
	subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	call \sym
	.if \ist
	addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
	.endm

	/*
	 * "Paranoid" exit path from exception stack.
	 * Paranoid because this is used by NMIs and cannot take
	 * any kernel state for granted.
	 * We don't do kernel preemption checks here, because only
	 * NMI should be common and it does not enable IRQs and
	 * cannot get reschedule ticks.
	 *
	 * "trace" is 0 for the NMI handler only, because irq-tracing
	 * is fundamentally NMI-unsafe. (we cannot change the soft and
	 * hard flags at once, atomically)
	 */
	.macro paranoidexit trace=1
	/* ebx: no swapgs flag */
paranoid_exit\trace:
	testl %ebx,%ebx			/* swapgs needed? */
	jnz paranoid_restore\trace
	testl $3,CS(%rsp)
	jnz paranoid_userspace\trace
paranoid_swapgs\trace:
	.if \trace
	TRACE_IRQS_IRETQ 0
	.endif
	SWAPGS_UNSAFE_STACK
paranoid_restore\trace:
	RESTORE_ALL 8
	jmp irq_return
paranoid_userspace\trace:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs\trace
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule\trace
	movl %ebx,%edx			/* arg3: thread flags */
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi			/* arg2: oldset */
	movq %rsp,%rdi			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
paranoid_schedule\trace:
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_ANY)
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
	CFI_ENDPROC
	.endm

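/*
 * The two macros pair up in the handlers further down; e.g. the debug
 * exception uses this pattern (sketch of KPROBE_ENTRY(debug) below):
 *
 *	paranoidentry do_debug, DEBUG_STACK
 *	paranoidexit
 */
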
/*
 * Exception entry point. This expects an error code/orig_rax on the stack.
 * Returns the "no swapgs flag" in %ebx.
 */
KPROBE_ENTRY(error_entry)
	XCPT_FRAME
	CFI_ADJUST_CFA_OFFSET 15*8
	/* oldrax contains error code */
	cld
	CFI_MOVQ rdi, RDI+8
	CFI_MOVQ rsi, RSI+8
	CFI_MOVQ rdx, RDX+8
	CFI_MOVQ rcx, RCX+8
	CFI_MOVQ rax, RAX+8
	CFI_MOVQ r8, R8+8
	CFI_MOVQ r9, R9+8
	CFI_MOVQ r10, R10+8
	CFI_MOVQ r11, R11+8
	CFI_MOVQ rbx, RBX+8
	CFI_MOVQ rbp, RBP+8
	CFI_MOVQ r12, R12+8
	CFI_MOVQ r13, R13+8
	CFI_MOVQ r14, R14+8
	CFI_MOVQ r15, R15+8
	xorl %ebx,%ebx
	testl $3,CS+8(%rsp)
	je error_kernelspace
error_swapgs:
	SWAPGS
error_sti:
	TRACE_IRQS_OFF
	ret
	CFI_ENDPROC

/*
 * There are two places in the kernel that can potentially fault with
 * usergs. Handle them here. The exception handlers after iret run with
 * kernel gs again, so don't set the user space flag. B stepping K8s
 * sometimes report a truncated RIP for IRET exceptions returning to
 * compat mode. Check for these here too.
 */
error_kernelspace:
	incl %ebx
	leaq irq_return(%rip),%rcx
	cmpq %rcx,RIP+8(%rsp)
	je error_swapgs
	movl %ecx,%ecx			/* zero extend */
	cmpq %rcx,RIP+8(%rsp)
	je error_swapgs
	cmpq $gs_change,RIP+8(%rsp)
	je error_swapgs
	jmp error_sti
KPROBE_END(error_entry)


/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
KPROBE_ENTRY(error_exit)
	DEFAULT_FRAME
	movl %ebx,%eax
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne retint_kernel
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	movl $_TIF_WORK_MASK,%edi
	andl %edi,%edx
	jnz retint_careful
	jmp retint_swapgs
	CFI_ENDPROC
KPROBE_END(error_exit)

	/* Reload gs selector with exception handling */
	/* edi: new selector */
ENTRY(native_load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
	SWAPGS
gs_change:
	movl %edi,%gs
2:	mfence				/* workaround */
	SWAPGS
	popf
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC
ENDPROC(native_load_gs_index)

	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS				/* switch back to user gs */
	xorl %eax,%eax
	movl %eax,%gs
	jmp 2b
	.previous

/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
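/*
 * A sketch of a C-side caller, with a hypothetical worker function for
 * illustration only (the child runs fn(arg) via child_rip below and
 * do_exit()s with its return value):
 *
 *	static int worker(void *arg) { ... return 0; }
 *	pid = kernel_thread(worker, NULL, CLONE_FS | CLONE_FILES);
 */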
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq kernel_thread_flags(%rip),%rdi
	movq $-1, %rsi
	movq %rsp, %rdx

	xorl %r8d,%r8d
	xorl %r9d,%r9d

	# clone now
	call do_fork
	movq %rax,RAX(%rsp)
	xorl %edi,%edi

	/*
	 * It isn't worth checking for a reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not to reschedule the child before returning; this avoids the need
	 * for hacks, for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_thread)

child_rip:
	pushq $0			# fake return address
	CFI_STARTPROC
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq %rdi, %rax
	movq %rsi, %rdi
	call *%rax
	# exit
	mov %eax, %edi
	call do_exit
	CFI_ENDPROC
ENDPROC(child_rip)

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state
 * properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
ENTRY(kernel_execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	movq %rsp,%rcx
	call sys_execve
	movq %rax, RAX(%rsp)
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_execve)

KPROBE_ENTRY(page_fault)
	errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	zeroentry do_device_not_available
END(device_not_available)

	/* runs on exception stack */
KPROBE_ENTRY(debug)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_debug, DEBUG_STACK
	paranoidexit
KPROBE_END(debug)

	/* runs on exception stack */
KPROBE_ENTRY(nmi)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $-1
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
	paranoidexit 0
#else
	jmp paranoid_exit1
	CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_int3, DEBUG_STACK
	jmp paranoid_exit1
	CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
	zeroentry do_overflow
END(overflow)

ENTRY(bounds)
	zeroentry do_bounds
END(bounds)

ENTRY(invalid_op)
	zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

	/* runs on exception stack */
ENTRY(double_fault)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	paranoidentry do_double_fault
	jmp paranoid_exit1
	CFI_ENDPROC
END(double_fault)

ENTRY(invalid_TSS)
	errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
	errorentry do_segment_not_present
END(segment_not_present)

	/* runs on exception stack */
ENTRY(stack_segment)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	paranoidentry do_stack_segment
	jmp paranoid_exit1
	CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
	errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
	zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

1399#ifdef CONFIG_X86_MCE
1400 /* runs on exception stack */
1401ENTRY(machine_check)
7effaa88 1402 INTR_FRAME
09402947 1403 PARAVIRT_ADJUST_EXCEPTION_FRAME
1da177e4 1404 pushq $0
0bd7b798 1405 CFI_ADJUST_CFA_OFFSET 8
1da177e4 1406 paranoidentry do_machine_check
2601e64d 1407 jmp paranoid_exit1
1da177e4 1408 CFI_ENDPROC
4b787e0b 1409END(machine_check)
1da177e4
LT
1410#endif
1411
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	CFI_STARTPROC
	push %rbp
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rbp,0
	mov %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl %gs:pda_irqcount
	cmove %gs:pda_irqstackptr,%rsp
	push %rbp			# backlink for old unwinder
	call __do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER rsp
	CFI_ADJUST_CFA_OFFSET -8
	decl %gs:pda_irqcount
	ret
	CFI_ENDPROC
ENDPROC(call_softirq)

KPROBE_ENTRY(ignore_sysret)
	CFI_STARTPROC
	mov $-ENOSYS,%eax
	sysret
	CFI_ENDPROC
ENDPROC(ignore_sysret)

#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
	zeroentry xen_do_hypervisor_callback
END(xen_hypervisor_callback)

/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct pt_regs *)
	CFI_STARTPROC
/*
 * Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *)
 * will see the correct pointer to the pt_regs.
 */
	movq %rdi, %rsp		# we don't return, adjust the stack frame
	CFI_ENDPROC
	DEFAULT_FRAME
11:	incl %gs:pda_irqcount
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	cmovzq %gs:pda_irqstackptr,%rsp
	pushq %rbp			# backlink for old unwinder
	call xen_evtchn_do_upcall
	popq %rsp
	CFI_DEF_CFA_REGISTER rsp
	decl %gs:pda_irqcount
	jmp error_exit
	CFI_ENDPROC
END(xen_do_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
ENTRY(xen_failsafe_callback)
	INTR_FRAME 1 (6*8)
	/*CFI_REL_OFFSET gs,GS*/
	/*CFI_REL_OFFSET fs,FS*/
	/*CFI_REL_OFFSET es,ES*/
	/*CFI_REL_OFFSET ds,DS*/
	CFI_REL_OFFSET r11,8
	CFI_REL_OFFSET rcx,0
	movw %ds,%cx
	cmpw %cx,0x10(%rsp)
	CFI_REMEMBER_STATE
	jne 1f
	movw %es,%cx
	cmpw %cx,0x18(%rsp)
	jne 1f
	movw %fs,%cx
	cmpw %cx,0x20(%rsp)
	jne 1f
	movw %gs,%cx
	cmpw %cx,0x28(%rsp)
	jne 1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	CFI_PUSHQ $0	/* RIP */
	CFI_PUSHQ %r11
	CFI_PUSHQ %rcx
	jmp general_protection
	CFI_RESTORE_STATE
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	CFI_PUSHQ $0
	SAVE_ALL
	jmp error_exit
	CFI_ENDPROC
END(xen_failsafe_callback)

#endif /* CONFIG_XEN */