/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 * Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers are
 *   not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT	0x80000000
#define __AUDIT_ARCH_LE		0x40000000

	.code64
/*
 * Some macros to hide the most frequently occurring CFI annotations.
 */
	.macro pushq_cfi reg
	pushq \reg
	CFI_ADJUST_CFA_OFFSET 8
	.endm

	.macro popq_cfi reg
	popq \reg
	CFI_ADJUST_CFA_OFFSET -8
	.endm

	.macro movq_cfi reg offset=0
	movq %\reg, \offset(%rsp)
	CFI_REL_OFFSET \reg, \offset
	.endm

	.macro movq_cfi_restore offset reg
	movq \offset(%rsp), %\reg
	CFI_RESTORE \reg
	.endm

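/*
 * Schematic example of the helpers above: "pushq_cfi %r11" expands to
 *
 *	pushq %r11
 *	CFI_ADJUST_CFA_OFFSET 8
 *
 * and "movq_cfi r11, 8" expands to
 *
 *	movq %r11, 8(%rsp)
 *	CFI_REL_OFFSET r11, 8
 *
 * i.e. each helper pairs the instruction with the dwarf2 annotation
 * that keeps the unwind information in sync with the stack operation.
 */
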
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
	retq
END(mcount)

ENTRY(ftrace_caller)

	/* taken from glibc */
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi

.globl ftrace_call
ftrace_call:
	call ftrace_stub

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

.globl ftrace_stub
ftrace_stub:
	retq
END(ftrace_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount)
	cmpq $ftrace_stub, ftrace_trace_function
	jnz trace
.globl ftrace_stub
ftrace_stub:
	retq

trace:
	/* taken from glibc */
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi

	call *ftrace_trace_function

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

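/*
 * Note on the save/restore pattern above: the seven slots hold %rax plus
 * the six argument registers (%rdi, %rsi, %rdx, %rcx, %r8, %r9), i.e. the
 * call-clobbered registers that the traced function's arguments live in,
 * so they survive the call into the tracer. 0x38(%rsp) is the mcount
 * return address (the call site inside the traced function), and 8(%rbp)
 * is the traced function's own return address, i.e. its caller.
 */
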
#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
#endif /* CONFIG_PARAVIRT */


.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt   $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc  1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * C code is not supposed to know about undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

	/* %rsp:at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp offset=0
	movq %gs:pda_oldrsp,\tmp
	movq \tmp,RSP+\offset(%rsp)
	movq $__USER_DS,SS+\offset(%rsp)
	movq $__USER_CS,CS+\offset(%rsp)
	movq $-1,RCX+\offset(%rsp)
	movq R11+\offset(%rsp),\tmp	/* get eflags */
	movq \tmp,EFLAGS+\offset(%rsp)
	.endm

	.macro RESTORE_TOP_OF_STACK tmp offset=0
	movq RSP+\offset(%rsp),\tmp
	movq \tmp,%gs:pda_oldrsp
	movq EFLAGS+\offset(%rsp),\tmp
	movq \tmp,R11+\offset(%rsp)
	.endm

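/*
 * Background for the two macros above: SYSCALL stashes the user RIP in
 * %rcx and the user RFLAGS in %r11, and saves no SS/RSP/CS at all. The
 * partial frame therefore has R11 standing in for EFLAGS, with RCX, SS,
 * CS and RSP undefined. FIXUP_TOP_OF_STACK materializes those pt_regs
 * fields (RCX is set to -1 as a poison value), and RESTORE_TOP_OF_STACK
 * copies any ptrace-visible changes back before the sysret path uses them.
 */
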
	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl %eax, %eax
	pushq $__KERNEL_DS	/* ss */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	ss,0*/
	pushq %rax		/* rsp */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rsp,0
	pushq $(1<<9)		/* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	rflags,0*/
	pushq $__KERNEL_CS	/* cs */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	cs,0*/
	pushq \child_rip	/* rip */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rip,0
	pushq %rax		/* orig rax */
	CFI_ADJUST_CFA_OFFSET	8
	.endm

	.macro UNFAKE_STACK_FRAME
	addq $8*6, %rsp
	CFI_ADJUST_CFA_OFFSET	-(6*8)
	.endm

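/*
 * Resulting fake frame, six quadwords (matching the $8*6 popped by
 * UNFAKE_STACK_FRAME), from highest address down:
 *
 *	ss	 = __KERNEL_DS
 *	rsp	 = 0
 *	eflags	 = (1<<9), i.e. IF set
 *	cs	 = __KERNEL_CS
 *	rip	 = \child_rip
 *	orig_rax = 0
 */
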
/*
 * empty initial frame state (basis for the frame macros below)
 */
	.macro EMPTY_FRAME start=1 offset=0
	.if \start
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,8+\offset
	.else
	CFI_DEF_CFA_OFFSET 8+\offset
	.endif
	.endm

/*
 * initial frame state for interrupts (and exceptions without error code)
 */
	.macro INTR_FRAME start=1 offset=0
	EMPTY_FRAME \start, SS+8+\offset-RIP
	/*CFI_REL_OFFSET ss, SS+\offset-RIP*/
	CFI_REL_OFFSET rsp, RSP+\offset-RIP
	/*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/
	/*CFI_REL_OFFSET cs, CS+\offset-RIP*/
	CFI_REL_OFFSET rip, RIP+\offset-RIP
	.endm

/*
 * initial frame state for exceptions with error code (and interrupts
 * with vector already pushed)
 */
	.macro XCPT_FRAME start=1 offset=0
	INTR_FRAME \start, RIP+\offset-ORIG_RAX
	/*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/
	.endm

/*
 * frame that enables calling into C.
 */
	.macro PARTIAL_FRAME start=1 offset=0
	XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET
	CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET
	CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET
	CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET
	CFI_REL_OFFSET rcx, RCX+\offset-ARGOFFSET
	CFI_REL_OFFSET rax, RAX+\offset-ARGOFFSET
	CFI_REL_OFFSET r8, R8+\offset-ARGOFFSET
	CFI_REL_OFFSET r9, R9+\offset-ARGOFFSET
	CFI_REL_OFFSET r10, R10+\offset-ARGOFFSET
	CFI_REL_OFFSET r11, R11+\offset-ARGOFFSET
	.endm

/*
 * frame that enables passing a complete pt_regs to a C function.
 */
	.macro DEFAULT_FRAME start=1 offset=0
	PARTIAL_FRAME \start, R11+\offset-R15
	CFI_REL_OFFSET rbx, RBX+\offset
	CFI_REL_OFFSET rbp, RBP+\offset
	CFI_REL_OFFSET r12, R12+\offset
	CFI_REL_OFFSET r13, R13+\offset
	CFI_REL_OFFSET r14, R14+\offset
	CFI_REL_OFFSET r15, R15+\offset
	.endm

/* save partial stack frame */
ENTRY(save_args)
	XCPT_FRAME
	cld
	movq_cfi rdi, RDI+16-ARGOFFSET
	movq_cfi rsi, RSI+16-ARGOFFSET
	movq_cfi rdx, RDX+16-ARGOFFSET
	movq_cfi rcx, RCX+16-ARGOFFSET
	movq_cfi rax, RAX+16-ARGOFFSET
	movq_cfi r8,  R8+16-ARGOFFSET
	movq_cfi r9,  R9+16-ARGOFFSET
	movq_cfi r10, R10+16-ARGOFFSET
	movq_cfi r11, R11+16-ARGOFFSET

	leaq -ARGOFFSET+16(%rsp),%rdi	/* arg1 for handler */
	movq_cfi rbp, 8		/* push %rbp */
	leaq 8(%rsp), %rbp	/* mov %rsp, %rbp */
	testl $3, CS(%rdi)
	je 1f
	SWAPGS
	/*
	 * irqcount is used to check if a CPU is already on an interrupt stack
	 * or not. While this is essentially redundant with preempt_count it is
	 * a little cheaper to use a separate counter in the PDA (short of
	 * moving irq_enter into assembly, which would be too much work)
	 */
1:	incl %gs:pda_irqcount
	jne 2f
	popq_cfi %rax			/* move return address... */
	mov %gs:pda_irqstackptr,%rsp
	EMPTY_FRAME 0
	pushq_cfi %rax			/* ... to the new stack */
	/*
	 * We entered an interrupt context - irqs are off:
	 */
2:	TRACE_IRQS_OFF
	ret
	CFI_ENDPROC
END(save_args)

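/*
 * Note on the incl/jne pair in save_args above: pda_irqcount is set up
 * as -1 in the per-cpu PDA initialization, so the incl yields zero
 * exactly on the first (outermost) interrupt entry; nested entries see
 * a non-zero result and skip the switch to the per-cpu irq stack.
 */
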
ENTRY(save_rest)
	PARTIAL_FRAME 1 REST_SKIP+8
	movq 5*8+16(%rsp), %r11	/* save return address */
	movq_cfi rbx, RBX+16
	movq_cfi rbp, RBP+16
	movq_cfi r12, R12+16
	movq_cfi r13, R13+16
	movq_cfi r14, R14+16
	movq_cfi r15, R15+16
	movq %r11, 8(%rsp)	/* return address */
	FIXUP_TOP_OF_STACK %r11, 16
	ret
	CFI_ENDPROC
END(save_rest)

/*
 * A newly forked process directly context switches into this.
 */
/* rdi:	prev */
ENTRY(ret_from_fork)
	DEFAULT_FRAME
	push kernel_eflags(%rip)
	CFI_ADJUST_CFA_OFFSET 8
	popf				# reset kernel eflags
	CFI_ADJUST_CFA_OFFSET -8
	call schedule_tail
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jnz rff_trace
rff_action:
	RESTORE_REST
	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je int_ret_from_sys_call
	testl $_TIF_IA32,TI_flags(%rcx)
	jnz int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
	jmp ret_from_sys_call
rff_trace:
	movq %rsp,%rdi
	call syscall_trace_leave
	GET_THREAD_INFO(%rcx)
	jmp rff_action
	CFI_ENDPROC
END(ret_from_fork)

/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the stack frame
 *	and report it properly in ps. Unfortunately we haven't.
 *
 * When user can change the frames always force IRET. That is because
 * it deals with uncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */

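/*
 * Worked example: glibc's write(fd, buf, count) arrives here with
 * rax = __NR_write, rdi = fd, rsi = buf, rdx = count. The entry code
 * only has to move r10 into rcx (a don't-care for a 3-argument call)
 * before indexing sys_call_table, because the kernel's C ABI expects
 * arg3 in rcx while SYSCALL has already clobbered rcx with the user
 * return address.
 */
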
ENTRY(system_call)
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
	CFI_REGISTER	rip,rcx
	/*CFI_REGISTER	rflags,r11*/
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
ENTRY(system_call_after_swapgs)

	movq %rsp,%gs:pda_oldrsp
	movq %gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_ARGS 8,1
	movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
	jnz tracesys
system_call_fastpath:
	cmpq $__NR_syscall_max,%rax
	ja badsys
	movq %r10,%rcx
	call *sys_call_table(,%rax,8)	# XXX: rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	flagmask */
sysret_check:
	LOCKDEP_SYS_EXIT
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz sysret_careful
	CFI_REMEMBER_STATE
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq RIP-ARGOFFSET(%rsp),%rcx
	CFI_REGISTER	rip,rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER	rflags,r11*/
	movq %gs:pda_oldrsp, %rsp
	USERGS_SYSRET64

	CFI_RESTORE_STATE
	/* Handle reschedules */
	/* edx:	work, edi: workmask */
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	jmp sysret_check

	/* Handle a signal */
sysret_signal:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
#ifdef CONFIG_AUDITSYSCALL
	bt $TIF_SYSCALL_AUDIT,%edx
	jc sysret_audit
#endif
	/* edx:	work flags (arg3) */
	leaq do_notify_resume(%rip),%rax
	leaq -ARGOFFSET(%rsp),%rdi	# &pt_regs -> arg1
	xorl %esi,%esi			# oldset -> arg2
	call ptregscall_common
	movl $_TIF_WORK_MASK,%edi
	/* Use IRET because user could have changed frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call

#ifdef CONFIG_AUDITSYSCALL
	/*
	 * Fast path for syscall audit without full syscall trace.
	 * We just call audit_syscall_entry() directly, and then
	 * jump back to the normal fast path.
	 */
auditsys:
	movq %r10,%r9			/* 6th arg: 4th syscall arg */
	movq %rdx,%r8			/* 5th arg: 3rd syscall arg */
	movq %rsi,%rcx			/* 4th arg: 2nd syscall arg */
	movq %rdi,%rdx			/* 3rd arg: 1st syscall arg */
	movq %rax,%rsi			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_X86_64,%edi	/* 1st arg: audit arch */
	call audit_syscall_entry
	LOAD_ARGS 0		/* reload call-clobbered registers */
	jmp system_call_fastpath

	/*
	 * Return fast path for syscall audit. Call audit_syscall_exit()
	 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
	 * masked off.
	 */
sysret_audit:
	movq %rax,%rsi		/* second arg, syscall return value */
	cmpq $0,%rax		/* is it < 0? */
	setl %al		/* 1 if so, 0 if not */
	movzbl %al,%edi		/* zero-extend that into %edi */
	inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
	call audit_syscall_exit
	movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
	jmp sysret_check
#endif /* CONFIG_AUDITSYSCALL */

	/* Do syscall tracing */
tracesys:
#ifdef CONFIG_AUDITSYSCALL
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jz auditsys
#endif
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp)	/* ptrace can change this for a bad syscall */
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	/*
	 * Reload arg registers from stack in case ptrace changed them.
	 * We don't reload %rax because syscall_trace_enter() returned
	 * the value it wants us to use in the table lookup.
	 */
	LOAD_ARGS ARGOFFSET, 1
	RESTORE_REST
	cmpq $__NR_syscall_max,%rax
	ja int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
	movq %r10,%rcx	/* fixup for C */
	call *sys_call_table(,%rax,8)
	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because user could have changed frame */

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
	.globl int_ret_from_sys_call
	.globl int_with_check
int_ret_from_sys_call:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
int_with_check:
	LOCKDEP_SYS_EXIT_IRQ
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz int_careful
	andl $~TS_COMPAT,TI_status(%rcx)
	jmp retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc int_very_careful
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	/* Check for syscall exit trace */
	testl $_TIF_WORK_SYSCALL_EXIT,%edx
	jz int_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
	call syscall_trace_leave
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
	jmp int_restore_rest

int_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_WORK_MASK,%edi
int_restore_rest:
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check
	CFI_ENDPROC
END(system_call)

/*
 * Certain special system calls need to save a complete full stack frame.
 */
	.macro PTREGSCALL label,func,arg
ENTRY(\label)
	PARTIAL_FRAME 1 8		/* offset 8: return address */
	subq $REST_SKIP, %rsp
	CFI_ADJUST_CFA_OFFSET REST_SKIP
	call save_rest
	DEFAULT_FRAME 0 8		/* offset 8: return address */
	leaq 8(%rsp), \arg	/* pt_regs pointer */
	call \func
	jmp ptregscall_common
	CFI_ENDPROC
END(\label)
	.endm

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi

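/*
 * Schematic expansion: "PTREGSCALL stub_clone, sys_clone, %r8" emits a
 * stub_clone that extends the partial frame via save_rest, points the
 * chosen register at the saved registers
 *
 *	leaq 8(%rsp), %r8	- pt_regs pointer for sys_clone
 *
 * calls sys_clone, and returns through ptregscall_common, which undoes
 * the extra frame with "ret $REST_SKIP".
 */
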
ENTRY(ptregscall_common)
	DEFAULT_FRAME 1 8	/* offset 8: return address */
	RESTORE_TOP_OF_STACK %r11, 8
	movq_cfi_restore R15+8, r15
	movq_cfi_restore R14+8, r14
	movq_cfi_restore R13+8, r13
	movq_cfi_restore R12+8, r12
	movq_cfi_restore RBP+8, rbp
	movq_cfi_restore RBX+8, rbx
	ret $REST_SKIP		/* pop extended registers */
	CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
	CFI_STARTPROC
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	movq %rsp, %rcx
	call sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
	CFI_ADJUST_CFA_OFFSET -8
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp)	# fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)

/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
	.section .init.rodata,"a"
ENTRY(interrupt)
	.text
	.p2align 5
	.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
	INTR_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
	.balign 32
  .rept	7
    .if vector < NR_VECTORS
      .if vector <> FIRST_EXTERNAL_VECTOR
	CFI_ADJUST_CFA_OFFSET -8
      .endif
1:	pushq $(~vector+0x80)	/* Note: always in signed byte range */
	CFI_ADJUST_CFA_OFFSET 8
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
	jmp 2f
      .endif
      .previous
	.quad 1b
      .text
vector=vector+1
    .endif
  .endr
2:	jmp common_interrupt
.endr
	CFI_ENDPROC
END(irq_entries_start)

.previous
END(interrupt)
.previous

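/*
 * Size check for the packing claim above (approximate): each "pushq
 * $imm8" is 2 bytes and each short "jmp 2f" is 2 bytes, so six push+jmp
 * stubs (24 bytes), the seventh push (2 bytes, it falls through) and
 * the shared "jmp common_interrupt" all fit within the 32-byte chunk
 * started by .balign 32.
 */
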
/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
	.macro interrupt func
	subq $10*8, %rsp
	CFI_ADJUST_CFA_OFFSET 10*8
	call save_args
	PARTIAL_FRAME 0
	call \func
	.endm

	/*
	 * The interrupt stubs push (~vector+0x80) onto the stack and
	 * then jump to common_interrupt.
	 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	XCPT_FRAME
	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl %gs:pda_irqcount
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	CFI_REMEMBER_STATE
	jnz retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ
	SWAPGS
	jmp restore_args

retint_restore_args:	/* return to kernel space */
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 0,8,0

irq_return:
	INTERRUPT_RETURN

	.section __ex_table, "a"
	.quad irq_return, bad_iret
	.previous

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iretq

	.section __ex_table,"a"
	.quad native_iret, bad_iret
	.previous
#endif

	.section .fixup,"ax"
bad_iret:
	/*
	 * The iret traps when the %cs or %ss being restored is bogus.
	 * We've lost the original trap vector and error code.
	 * #GPF is the most likely one to get for an invalid selector.
	 * So pretend we completed the iret and took the #GPF in user mode.
	 *
	 * We are now running with the kernel GS after exception recovery.
	 * But error_entry expects us to have user GS to match the user %cs,
	 * so swap back.
	 */
	pushq $0

	SWAPGS
	jmp general_protection

	.previous

	/* edi: workmask, edx: work */
retint_careful:
	CFI_RESTORE_STATE
	bt $TIF_NEED_RESCHED,%edx
	jnc retint_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp retint_check

retint_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	jmp retint_with_reschedule

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx:	threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl $0,TI_preempt_count(%rcx)
	jnz retint_restore_args
	bt $TIF_NEED_RESCHED,TI_flags(%rcx)
	jnc retint_restore_args
	bt $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif

	CFI_ENDPROC
END(common_interrupt)

/*
 * APIC interrupts.
 */
	.p2align 5

	.macro apicinterrupt num,func
	INTR_FRAME
	pushq $~(\num)
	CFI_ADJUST_CFA_OFFSET 8
	interrupt \func
	jmp ret_from_intr
	CFI_ENDPROC
	.endm

ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
	.endm

	INVALIDATE_ENTRY 0
	INVALIDATE_ENTRY 1
	INVALIDATE_ENTRY 2
	INVALIDATE_ENTRY 3
	INVALIDATE_ENTRY 4
	INVALIDATE_ENTRY 5
	INVALIDATE_ENTRY 6
	INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
ENTRY(call_function_single_interrupt)
	apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
END(call_function_single_interrupt)
ENTRY(irq_move_cleanup_interrupt)
	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)
#endif

ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(uv_bau_message_intr1)
	apicinterrupt 220,uv_bau_message_interrupt
END(uv_bau_message_intr1)

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)

/*
 * Exception entry points.
 */
	.macro zeroentry sym
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
	subq $15*8,%rsp
	CFI_ADJUST_CFA_OFFSET 15*8
	call error_entry
	DEFAULT_FRAME 0
	movq %rsp,%rdi		/* pt_regs pointer */
	xorl %esi,%esi		/* no error code */
	call \sym
	jmp error_exit		/* %ebx: no swapgs flag */
	CFI_ENDPROC
	.endm

	.macro errorentry sym
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	subq $15*8,%rsp
	CFI_ADJUST_CFA_OFFSET 15*8
	call error_entry
	DEFAULT_FRAME 0
	movq %rsp,%rdi			/* pt_regs pointer */
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
	call \sym
	jmp error_exit			/* %ebx: no swapgs flag */
	CFI_ENDPROC
	.endm

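/*
 * The $15*8 above makes room for the 15 general purpose registers of
 * pt_regs (r15 ... rdi); together with the orig_rax slot and the
 * hardware-pushed rip/cs/eflags/rsp/ss this completes the frame that
 * error_entry then fills in, so that DEFAULT_FRAME 0 describes a full
 * pt_regs to the unwinder.
 */
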
	/* error code is on the stack already */
	/* handle NMI like exceptions that can happen everywhere */
	.macro paranoidentry sym, ist=0, irqtrace=1
	SAVE_ALL
	cld
	movl $1,%ebx
	movl $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx
	js 1f
	SWAPGS
	xorl %ebx,%ebx
1:
	.if \ist
	movq %gs:pda_data_offset, %rbp
	.endif
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi
	movq $-1,ORIG_RAX(%rsp)
	.if \ist
	subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	call \sym
	.if \ist
	addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
	.endm
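
/*
 * The rdmsr above is the "paranoid" part: MSR_GS_BASE is read directly
 * because the handler may fire before or after SWAPGS has run. A kernel
 * GS base is a negative (upper-half) address, so its high dword in %edx
 * has the sign bit set and "js 1f" skips the SWAPGS; otherwise we came
 * in with the user GS base, must swap, and clear %ebx to record that
 * for the paranoid exit path.
 */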

	/*
	 * "Paranoid" exit path from exception stack.
	 * Paranoid because this is used by NMIs and cannot take
	 * any kernel state for granted.
	 * We don't do kernel preemption checks here, because only
	 * NMI should be common and it does not enable IRQs and
	 * cannot get reschedule ticks.
	 *
	 * "trace" is 0 for the NMI handler only, because irq-tracing
	 * is fundamentally NMI-unsafe. (we cannot change the soft and
	 * hard flags at once, atomically)
	 */
	.macro paranoidexit trace=1
	/* ebx:	no swapgs flag */
paranoid_exit\trace:
	testl %ebx,%ebx			/* swapgs needed? */
	jnz paranoid_restore\trace
	testl $3,CS(%rsp)
	jnz paranoid_userspace\trace
paranoid_swapgs\trace:
	.if \trace
	TRACE_IRQS_IRETQ 0
	.endif
	SWAPGS_UNSAFE_STACK
paranoid_restore\trace:
	RESTORE_ALL 8
	jmp irq_return
paranoid_userspace\trace:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs\trace
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule\trace
	movl %ebx,%edx			/* arg3: thread flags */
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi			/* arg2: oldset */
	movq %rsp,%rdi			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
paranoid_schedule\trace:
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_ANY)
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
	CFI_ENDPROC
	.endm

/*
 * Exception entry point. This expects an error code/orig_rax on the stack.
 * Returns the "no swapgs flag" in %ebx.
 */
KPROBE_ENTRY(error_entry)
	XCPT_FRAME
	CFI_ADJUST_CFA_OFFSET 15*8
	/* oldrax contains error code */
	cld
	movq_cfi rdi, RDI+8
	movq_cfi rsi, RSI+8
	movq_cfi rdx, RDX+8
	movq_cfi rcx, RCX+8
	movq_cfi rax, RAX+8
	movq_cfi r8,  R8+8
	movq_cfi r9,  R9+8
	movq_cfi r10, R10+8
	movq_cfi r11, R11+8
	movq_cfi rbx, RBX+8
	movq_cfi rbp, RBP+8
	movq_cfi r12, R12+8
	movq_cfi r13, R13+8
	movq_cfi r14, R14+8
	movq_cfi r15, R15+8
	xorl %ebx,%ebx
	testl $3,CS+8(%rsp)
	je error_kernelspace
error_swapgs:
	SWAPGS
error_sti:
	TRACE_IRQS_OFF
	ret
	CFI_ENDPROC

/*
 * There are two places in the kernel that can potentially fault with
 * usergs. Handle them here. The exception handlers after iret run with
 * kernel gs again, so don't set the user space flag. B stepping K8s
 * sometimes report a truncated RIP for IRET exceptions returning to
 * compat mode. Check for these here too.
 */
error_kernelspace:
	incl %ebx
	leaq irq_return(%rip),%rcx
	cmpq %rcx,RIP+8(%rsp)
	je error_swapgs
	movl %ecx,%ecx	/* zero extend */
	cmpq %rcx,RIP+8(%rsp)
	je error_swapgs
	cmpq $gs_change,RIP+8(%rsp)
	je error_swapgs
	jmp error_sti
KPROBE_END(error_entry)

/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
KPROBE_ENTRY(error_exit)
	DEFAULT_FRAME
	movl %ebx,%eax
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne retint_kernel
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	movl $_TIF_WORK_MASK,%edi
	andl %edi,%edx
	jnz retint_careful
	jmp retint_swapgs
	CFI_ENDPROC
KPROBE_END(error_exit)

	/* Reload gs selector with exception handling */
	/* edi:	new selector */
ENTRY(native_load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
	SWAPGS
gs_change:
	movl %edi,%gs
2:	mfence		/* workaround */
	SWAPGS
	popf
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC
ENDPROC(native_load_gs_index)

	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS			/* switch back to user gs */
	xorl %eax,%eax
	movl %eax,%gs
	jmp 2b
	.previous

/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq  kernel_thread_flags(%rip),%rdi
	movq $-1, %rsi
	movq %rsp, %rdx

	xorl %r8d,%r8d
	xorl %r9d,%r9d

	# clone now
	call do_fork
	movq %rax,RAX(%rsp)
	xorl %edi,%edi

	/*
	 * It isn't worth checking for a reschedule here, so internally
	 * to the x86_64 port you can rely on kernel_thread() not
	 * rescheduling the child before returning; this avoids the need
	 * for hacks, for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_thread)

child_rip:
	pushq $0		# fake return address
	CFI_STARTPROC
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq %rdi, %rax
	movq %rsi, %rdi
	call *%rax
	# exit
	mov %eax, %edi
	call do_exit
	CFI_ENDPROC
ENDPROC(child_rip)

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
ENTRY(kernel_execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	movq %rsp,%rcx
	call sys_execve
	movq %rax, RAX(%rsp)
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_execve)

KPROBE_ENTRY(page_fault)
	errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	zeroentry do_device_not_available
END(device_not_available)

	/* runs on exception stack */
KPROBE_ENTRY(debug)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_debug, DEBUG_STACK
	paranoidexit
KPROBE_END(debug)

	/* runs on exception stack */
KPROBE_ENTRY(nmi)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $-1
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
	paranoidexit 0
#else
	jmp paranoid_exit1
	CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_int3, DEBUG_STACK
	jmp paranoid_exit1
	CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
	zeroentry do_overflow
END(overflow)

ENTRY(bounds)
	zeroentry do_bounds
END(bounds)

ENTRY(invalid_op)
	zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

	/* runs on exception stack */
ENTRY(double_fault)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	paranoidentry do_double_fault
	jmp paranoid_exit1
	CFI_ENDPROC
END(double_fault)

ENTRY(invalid_TSS)
	errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
	errorentry do_segment_not_present
END(segment_not_present)

	/* runs on exception stack */
ENTRY(stack_segment)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	paranoidentry do_stack_segment
	jmp paranoid_exit1
	CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
	errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
	zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_machine_check
	jmp paranoid_exit1
	CFI_ENDPROC
END(machine_check)
#endif

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	CFI_STARTPROC
	push %rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET rbp,0
	mov %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl %gs:pda_irqcount
	cmove %gs:pda_irqstackptr,%rsp
	push %rbp			# backlink for old unwinder
	call __do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
	decl %gs:pda_irqcount
	ret
	CFI_ENDPROC
ENDPROC(call_softirq)
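
/*
 * Note the stack-switch trick above: incl sets ZF only when
 * pda_irqcount becomes zero (first entry onto the irq stack), and the
 * cmove then replaces %rsp with pda_irqstackptr only in that case;
 * nested calls keep running on the current stack.
 */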

KPROBE_ENTRY(ignore_sysret)
	CFI_STARTPROC
	mov $-ENOSYS,%eax
	sysret
	CFI_ENDPROC
ENDPROC(ignore_sysret)

#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
	zeroentry xen_do_hypervisor_callback
END(xen_hypervisor_callback)

/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
	CFI_STARTPROC
/*
 * Since we don't modify %rdi, xen_evtchn_do_upcall(struct *pt_regs)
 * will see the correct pointer to the pt_regs
 */
	movq %rdi, %rsp		# we don't return, adjust the stack frame
	CFI_ENDPROC
	DEFAULT_FRAME
11:	incl %gs:pda_irqcount
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	cmovzq %gs:pda_irqstackptr,%rsp
	pushq %rbp			# backlink for old unwinder
	call xen_evtchn_do_upcall
	popq %rsp
	CFI_DEF_CFA_REGISTER rsp
	decl %gs:pda_irqcount
	jmp error_exit
	CFI_ENDPROC
END(xen_do_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
ENTRY(xen_failsafe_callback)
	INTR_FRAME 1 (6*8)
	/*CFI_REL_OFFSET gs,GS*/
	/*CFI_REL_OFFSET fs,FS*/
	/*CFI_REL_OFFSET es,ES*/
	/*CFI_REL_OFFSET ds,DS*/
	CFI_REL_OFFSET r11,8
	CFI_REL_OFFSET rcx,0
	movw %ds,%cx
	cmpw %cx,0x10(%rsp)
	CFI_REMEMBER_STATE
	jne 1f
	movw %es,%cx
	cmpw %cx,0x18(%rsp)
	jne 1f
	movw %fs,%cx
	cmpw %cx,0x20(%rsp)
	jne 1f
	movw %gs,%cx
	cmpw %cx,0x28(%rsp)
	jne 1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq_cfi $0	/* RIP */
	pushq_cfi %r11
	pushq_cfi %rcx
	jmp general_protection
	CFI_RESTORE_STATE
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq_cfi $0
	SAVE_ALL
	jmp error_exit
	CFI_ENDPROC
END(xen_failsafe_callback)

#endif /* CONFIG_XEN */