/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture-defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like a partial stack frame, but with all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers.
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers
 *   are not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>

        .code64

#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

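/*
 * Bit 9 of the saved EFLAGS is IF. If the frame being returned to had
 * interrupts enabled, the coming iretq will re-enable them, so let the
 * irq-flags tracer know before the flags are actually restored.
 */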
.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
        bt $9,EFLAGS-\offset(%rsp)      /* interrupts off? */
        jnc 1f
        TRACE_IRQS_ON
1:
#endif
.endm

/*
 * C code is not supposed to know about the undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL-based
 * fast path, FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */
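
/*
 * Background: SYSCALL stashes the user RIP in %rcx and RFLAGS in %r11
 * and pushes nothing, and SYSRET reads them back from the same
 * registers. Until FIXUP_TOP_OF_STACK runs, the RCX/R11/RSP/SS/CS/
 * EFLAGS slots of pt_regs therefore hold garbage on the fast path.
 * RCX is set to -1 below, presumably so a stale slot can never be
 * mistaken for a valid (canonical) user return address.
 */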

        /* %rsp: at FRAMEEND */
        .macro FIXUP_TOP_OF_STACK tmp
        movq %gs:pda_oldrsp,\tmp
        movq \tmp,RSP(%rsp)
        movq $__USER_DS,SS(%rsp)
        movq $__USER_CS,CS(%rsp)
        movq $-1,RCX(%rsp)
        movq R11(%rsp),\tmp     /* get eflags */
        movq \tmp,EFLAGS(%rsp)
        .endm

        .macro RESTORE_TOP_OF_STACK tmp,offset=0
        movq RSP-\offset(%rsp),\tmp
        movq \tmp,%gs:pda_oldrsp
        movq EFLAGS-\offset(%rsp),\tmp
        movq \tmp,R11-\offset(%rsp)
        .endm

        .macro FAKE_STACK_FRAME child_rip
        /* push in order ss, rsp, eflags, cs, rip */
        xorl %eax, %eax
        pushq %rax              /* ss */
        CFI_ADJUST_CFA_OFFSET 8
        /*CFI_REL_OFFSET ss,0*/
        pushq %rax              /* rsp */
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rsp,0
        pushq $(1<<9)           /* eflags - interrupts on */
        CFI_ADJUST_CFA_OFFSET 8
        /*CFI_REL_OFFSET rflags,0*/
        pushq $__KERNEL_CS      /* cs */
        CFI_ADJUST_CFA_OFFSET 8
        /*CFI_REL_OFFSET cs,0*/
        pushq \child_rip        /* rip */
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rip,0
        pushq %rax              /* orig rax */
        CFI_ADJUST_CFA_OFFSET 8
        .endm

        .macro UNFAKE_STACK_FRAME
        addq $8*6, %rsp
        CFI_ADJUST_CFA_OFFSET -(6*8)
        .endm
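
/*
 * The fake frame is six words - ss, rsp, eflags, cs, rip plus the
 * orig_rax slot of pt_regs - which is why UNFAKE_STACK_FRAME pops
 * exactly 8*6 bytes. eflags is set to (1<<9), i.e. IF, so the child
 * starts with interrupts enabled.
 */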

        .macro CFI_DEFAULT_STACK start=1
        .if \start
        CFI_STARTPROC simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA rsp,SS+8
        .else
        CFI_DEF_CFA_OFFSET SS+8
        .endif
        CFI_REL_OFFSET r15,R15
        CFI_REL_OFFSET r14,R14
        CFI_REL_OFFSET r13,R13
        CFI_REL_OFFSET r12,R12
        CFI_REL_OFFSET rbp,RBP
        CFI_REL_OFFSET rbx,RBX
        CFI_REL_OFFSET r11,R11
        CFI_REL_OFFSET r10,R10
        CFI_REL_OFFSET r9,R9
        CFI_REL_OFFSET r8,R8
        CFI_REL_OFFSET rax,RAX
        CFI_REL_OFFSET rcx,RCX
        CFI_REL_OFFSET rdx,RDX
        CFI_REL_OFFSET rsi,RSI
        CFI_REL_OFFSET rdi,RDI
        CFI_REL_OFFSET rip,RIP
        /*CFI_REL_OFFSET cs,CS*/
        /*CFI_REL_OFFSET rflags,EFLAGS*/
        CFI_REL_OFFSET rsp,RSP
        /*CFI_REL_OFFSET ss,SS*/
        .endm
/*
 * A newly forked process directly context switches into this.
 */
/* rdi: prev */
ENTRY(ret_from_fork)
        CFI_DEFAULT_STACK
        push kernel_eflags(%rip)
        CFI_ADJUST_CFA_OFFSET 8
        popf                            # reset kernel eflags
        CFI_ADJUST_CFA_OFFSET -8
        call schedule_tail
        GET_THREAD_INFO(%rcx)
        testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
        jnz rff_trace
rff_action:
        RESTORE_REST
        testl $3,CS-ARGOFFSET(%rsp)     # from kernel_thread?
        je int_ret_from_sys_call
        testl $_TIF_IA32,threadinfo_flags(%rcx)
        jnz int_ret_from_sys_call
        RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
        jmp ret_from_sys_call
rff_trace:
        movq %rsp,%rdi
        call syscall_trace_leave
        GET_THREAD_INFO(%rcx)
        jmp rff_action
        CFI_ENDPROC
END(ret_from_fork)

/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3    (--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX  if we had a free scratch register we could save the RSP into the stack
 *      frame and report it properly in ps. Unfortunately we haven't.
 *
 * When the user can change the frames, always force IRET. That is because
 * it deals with uncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
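
/*
 * For illustration (user-space side, standard x86-64 Linux ABI): a call
 * such as write(1, buf, 14) reaches this entry point as
 *
 *      movq $1,%rax            # __NR_write: system call number
 *      movq $1,%rdi            # arg0: fd
 *      leaq buf(%rip),%rsi     # arg1: buf (some user buffer)
 *      movq $14,%rdx           # arg2: count
 *      syscall                 # rcx <- return rip, r11 <- rflags
 *
 * and the result comes back to the caller in %rax.
 */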

ENTRY(system_call)
        CFI_STARTPROC simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA rsp,PDA_STACKOFFSET
        CFI_REGISTER rip,rcx
        /*CFI_REGISTER rflags,r11*/
        swapgs
        movq %rsp,%gs:pda_oldrsp
        movq %gs:pda_kernelstack,%rsp
        /*
         * No need to follow this irqs off/on section - it's straight
         * and short:
         */
        sti
        SAVE_ARGS 8,1
        movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
        movq %rcx,RIP-ARGOFFSET(%rsp)
        CFI_REL_OFFSET rip,RIP-ARGOFFSET
        GET_THREAD_INFO(%rcx)
        testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
        CFI_REMEMBER_STATE
        jnz tracesys
        cmpq $__NR_syscall_max,%rax
        ja badsys
        movq %r10,%rcx
        call *sys_call_table(,%rax,8)   # XXX: rip relative
        movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
        .globl ret_from_sys_call
ret_from_sys_call:
        movl $_TIF_ALLWORK_MASK,%edi
        /* edi: flagmask */
sysret_check:
        GET_THREAD_INFO(%rcx)
        cli
        TRACE_IRQS_OFF
        movl threadinfo_flags(%rcx),%edx
        andl %edi,%edx
        CFI_REMEMBER_STATE
        jnz sysret_careful
        /*
         * sysretq will re-enable interrupts:
         */
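        /*
         * sysretq consumes what the entry path preserved: it reloads
         * the user RIP from %rcx and RFLAGS from %r11, which is why
         * both registers must still be intact here on the fast path.
         */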
        TRACE_IRQS_ON
        movq RIP-ARGOFFSET(%rsp),%rcx
        CFI_REGISTER rip,rcx
        RESTORE_ARGS 0,-ARG_SKIP,1
        /*CFI_REGISTER rflags,r11*/
        movq %gs:pda_oldrsp,%rsp
        swapgs
        sysretq

        /* Handle reschedules */
        /* edx: work, edi: workmask */
sysret_careful:
        CFI_RESTORE_STATE
        bt $TIF_NEED_RESCHED,%edx
        jnc sysret_signal
        TRACE_IRQS_ON
        sti
        pushq %rdi
        CFI_ADJUST_CFA_OFFSET 8
        call schedule
        popq %rdi
        CFI_ADJUST_CFA_OFFSET -8
        jmp sysret_check

        /* Handle a signal */
sysret_signal:
        TRACE_IRQS_ON
        sti
        testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
        jz 1f

        /* Really a signal */
        /* edx: work flags (arg3) */
        leaq do_notify_resume(%rip),%rax
        leaq -ARGOFFSET(%rsp),%rdi      # &pt_regs -> arg1
        xorl %esi,%esi                  # oldset -> arg2
        call ptregscall_common
1:      movl $_TIF_NEED_RESCHED,%edi
        /* Use IRET because the user could have changed the frame. This
           works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
        cli
        TRACE_IRQS_OFF
        jmp int_with_check

badsys:
        movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
        jmp ret_from_sys_call

        /* Do syscall tracing */
tracesys:
        CFI_RESTORE_STATE
        SAVE_REST
        movq $-ENOSYS,RAX(%rsp)
        FIXUP_TOP_OF_STACK %rdi
        movq %rsp,%rdi
        call syscall_trace_enter
        LOAD_ARGS ARGOFFSET     /* reload args from stack in case ptrace changed them */
        RESTORE_REST
        cmpq $__NR_syscall_max,%rax
        movq $-ENOSYS,%rcx
        cmova %rcx,%rax
        ja 1f
        movq %r10,%rcx          /* fixup for C */
        call *sys_call_table(,%rax,8)
1:      movq %rax,RAX-ARGOFFSET(%rsp)
        /* Use IRET because the user could have changed the frame */
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(system_call)

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
ENTRY(int_ret_from_sys_call)
        CFI_STARTPROC simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA rsp,SS+8-ARGOFFSET
        /*CFI_REL_OFFSET ss,SS-ARGOFFSET*/
        CFI_REL_OFFSET rsp,RSP-ARGOFFSET
        /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
        /*CFI_REL_OFFSET cs,CS-ARGOFFSET*/
        CFI_REL_OFFSET rip,RIP-ARGOFFSET
        CFI_REL_OFFSET rdx,RDX-ARGOFFSET
        CFI_REL_OFFSET rcx,RCX-ARGOFFSET
        CFI_REL_OFFSET rax,RAX-ARGOFFSET
        CFI_REL_OFFSET rdi,RDI-ARGOFFSET
        CFI_REL_OFFSET rsi,RSI-ARGOFFSET
        CFI_REL_OFFSET r8,R8-ARGOFFSET
        CFI_REL_OFFSET r9,R9-ARGOFFSET
        CFI_REL_OFFSET r10,R10-ARGOFFSET
        CFI_REL_OFFSET r11,R11-ARGOFFSET
        cli
        TRACE_IRQS_OFF
        testl $3,CS-ARGOFFSET(%rsp)
        je retint_restore_args
        movl $_TIF_ALLWORK_MASK,%edi
        /* edi: mask to check */
int_with_check:
        GET_THREAD_INFO(%rcx)
        movl threadinfo_flags(%rcx),%edx
        andl %edi,%edx
        jnz int_careful
        andl $~TS_COMPAT,threadinfo_status(%rcx)
        jmp retint_swapgs

        /* Either reschedule or signal or syscall exit tracking needed. */
        /* First do a reschedule test. */
        /* edx: work, edi: workmask */
int_careful:
        bt $TIF_NEED_RESCHED,%edx
        jnc int_very_careful
        TRACE_IRQS_ON
        sti
        pushq %rdi
        CFI_ADJUST_CFA_OFFSET 8
        call schedule
        popq %rdi
        CFI_ADJUST_CFA_OFFSET -8
        cli
        TRACE_IRQS_OFF
        jmp int_with_check

        /* handle signals and tracing -- both require a full stack frame */
int_very_careful:
        TRACE_IRQS_ON
        sti
        SAVE_REST
        /* Check for syscall exit trace */
        testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
        jz int_signal
        pushq %rdi
        CFI_ADJUST_CFA_OFFSET 8
        leaq 8(%rsp),%rdi       # &ptregs -> arg1
        call syscall_trace_leave
        popq %rdi
        CFI_ADJUST_CFA_OFFSET -8
        andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
        cli
        TRACE_IRQS_OFF
        jmp int_restore_rest

int_signal:
        testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
        jz 1f
        movq %rsp,%rdi          # &ptregs -> arg1
        xorl %esi,%esi          # oldset -> arg2
        call do_notify_resume
1:      movl $_TIF_NEED_RESCHED,%edi
int_restore_rest:
        RESTORE_REST
        cli
        TRACE_IRQS_OFF
        jmp int_with_check
        CFI_ENDPROC
END(int_ret_from_sys_call)

/*
 * Certain special system calls need to save a complete full stack frame.
 */
        .macro PTREGSCALL label,func,arg
        .globl \label
\label:
        leaq \func(%rip),%rax
        leaq -ARGOFFSET+8(%rsp),\arg    /* 8 for return address */
        jmp ptregscall_common
END(\label)
        .endm

        CFI_STARTPROC

        PTREGSCALL stub_clone, sys_clone, %r8
        PTREGSCALL stub_fork, sys_fork, %rdi
        PTREGSCALL stub_vfork, sys_vfork, %rdi
        PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
        PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
        PTREGSCALL stub_iopl, sys_iopl, %rsi

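/*
 * Common tail for the PTREGSCALL stubs: the stub's return address is
 * popped into %r11 and parked in callee-saved %r15 across the C call,
 * since FIXUP_TOP_OF_STACK uses %r11 as scratch, then pushed back so
 * the final ret returns to the stub's caller.
 */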
ENTRY(ptregscall_common)
        popq %r11
        CFI_ADJUST_CFA_OFFSET -8
        CFI_REGISTER rip, r11
        SAVE_REST
        movq %r11, %r15
        CFI_REGISTER rip, r15
        FIXUP_TOP_OF_STACK %r11
        call *%rax
        RESTORE_TOP_OF_STACK %r11
        movq %r15, %r11
        CFI_REGISTER rip, r11
        RESTORE_REST
        pushq %r11
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rip, 0
        ret
        CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
        CFI_STARTPROC
        popq %r11
        CFI_ADJUST_CFA_OFFSET -8
        CFI_REGISTER rip, r11
        SAVE_REST
        FIXUP_TOP_OF_STACK %r11
        call sys_execve
        RESTORE_TOP_OF_STACK %r11
        movq %rax,RAX(%rsp)
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
        CFI_STARTPROC
        addq $8, %rsp
        CFI_ADJUST_CFA_OFFSET -8
        SAVE_REST
        movq %rsp,%rdi
        FIXUP_TOP_OF_STACK %r11
        call sys_rt_sigreturn
        movq %rax,RAX(%rsp)     # fixme, this could be done at the higher layer
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(stub_rt_sigreturn)

/*
 * initial frame state for interrupts and exceptions
 */
        .macro _frame ref
        CFI_STARTPROC simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA rsp,SS+8-\ref
        /*CFI_REL_OFFSET ss,SS-\ref*/
        CFI_REL_OFFSET rsp,RSP-\ref
        /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
        /*CFI_REL_OFFSET cs,CS-\ref*/
        CFI_REL_OFFSET rip,RIP-\ref
        .endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX
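
/*
 * The CPU pushes SS, RSP, RFLAGS, CS and RIP on every interrupt or
 * exception, and some exceptions add an error code word on top. The
 * two frames differ only by that extra word: anchoring at RIP gives
 * the bare five-word frame, anchoring at ORIG_RAX accounts for the
 * error code (or vector) slot that sits just below RIP in pt_regs.
 */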

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */
        .macro interrupt func
        cld
        SAVE_ARGS
        leaq -ARGOFFSET(%rsp),%rdi      # arg1 for handler
        pushq %rbp
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rbp, 0
        movq %rsp,%rbp
        CFI_DEF_CFA_REGISTER rbp
        testl $3,CS(%rdi)
        je 1f
        swapgs
        /* irqcount is used to check if a CPU is already on an interrupt
           stack or not. While this is essentially redundant with preempt_count
           it is a little cheaper to use a separate counter in the PDA
           (short of moving irq_enter into assembly, which would be too
           much work) */
1:      incl %gs:pda_irqcount
        cmoveq %gs:pda_irqstackptr,%rsp
        push %rbp                       # backlink for old unwinder
        /*
         * We entered an interrupt context - irqs are off:
         */
        TRACE_IRQS_OFF
        call \func
        .endm
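
/*
 * About the stack switch above: pda_irqcount is biased to start at -1,
 * so the incl leaves ZF set only for the outermost interrupt, and only
 * then does the cmoveq move %rsp onto the per-CPU interrupt stack.
 * Nested interrupts see a non-zero count and stay where they are.
 */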

ENTRY(common_interrupt)
        XCPT_FRAME
        interrupt do_IRQ
        /* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
        cli
        TRACE_IRQS_OFF
        decl %gs:pda_irqcount
        leaveq
        CFI_DEF_CFA_REGISTER rsp
        CFI_ADJUST_CFA_OFFSET -8
exit_intr:
        GET_THREAD_INFO(%rcx)
        testl $3,CS-ARGOFFSET(%rsp)
        je retint_kernel

        /* Interrupt came from user space */
        /*
         * Has a correct top of stack, but a partial stack frame
         * %rcx: thread info. Interrupts off.
         */
retint_with_reschedule:
        movl $_TIF_WORK_MASK,%edi
retint_check:
        movl threadinfo_flags(%rcx),%edx
        andl %edi,%edx
        CFI_REMEMBER_STATE
        jnz retint_careful
retint_swapgs:
        /*
         * The iretq could re-enable interrupts:
         */
        cli
        TRACE_IRQS_IRETQ
        swapgs
        jmp restore_args

retint_restore_args:
        cli
        /*
         * The iretq could re-enable interrupts:
         */
        TRACE_IRQS_IRETQ
restore_args:
        RESTORE_ARGS 0,8,0
iret_label:
        iretq

        .section __ex_table,"a"
        .quad iret_label,bad_iret
        .previous
        .section .fixup,"ax"
        /* force a signal here? this matches i386 behaviour */
        /* running with kernel gs */
bad_iret:
        movq $11,%rdi   /* SIGSEGV */
        TRACE_IRQS_ON
        sti
        jmp do_exit
        .previous
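
        /*
         * The __ex_table entry pairs the potentially faulting iretq
         * with a fixup address: if iretq faults (e.g. on a corrupted
         * frame), the trap handler finds the faulting RIP in the
         * exception table and resumes at bad_iret instead of treating
         * it as an unhandled kernel fault.
         */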

        /* edi: workmask, edx: work */
retint_careful:
        CFI_RESTORE_STATE
        bt $TIF_NEED_RESCHED,%edx
        jnc retint_signal
        TRACE_IRQS_ON
        sti
        pushq %rdi
        CFI_ADJUST_CFA_OFFSET 8
        call schedule
        popq %rdi
        CFI_ADJUST_CFA_OFFSET -8
        GET_THREAD_INFO(%rcx)
        cli
        TRACE_IRQS_OFF
        jmp retint_check

retint_signal:
        testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
        jz retint_swapgs
        TRACE_IRQS_ON
        sti
        SAVE_REST
        movq $-1,ORIG_RAX(%rsp)
        xorl %esi,%esi          # oldset
        movq %rsp,%rdi          # &pt_regs
        call do_notify_resume
        RESTORE_REST
        cli
        TRACE_IRQS_OFF
        movl $_TIF_NEED_RESCHED,%edi
        GET_THREAD_INFO(%rcx)
        jmp retint_check

#ifdef CONFIG_PREEMPT
        /* Returning to kernel space. Check if we need preemption */
        /* rcx: threadinfo. interrupts off. */
ENTRY(retint_kernel)
        cmpl $0,threadinfo_preempt_count(%rcx)
        jnz retint_restore_args
        bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
        jnc retint_restore_args
        bt $9,EFLAGS-ARGOFFSET(%rsp)    /* interrupts off? */
        jnc retint_restore_args
        call preempt_schedule_irq
        jmp exit_intr
#endif

        CFI_ENDPROC
END(common_interrupt)

/*
 * APIC interrupts.
 */
        .macro apicinterrupt num,func
        INTR_FRAME
        pushq $~(\num)
        CFI_ADJUST_CFA_OFFSET 8
        interrupt \func
        jmp ret_from_intr
        CFI_ENDPROC
        .endm
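
/*
 * The vector is pushed complemented, so orig_rax is always negative
 * for hardware interrupts (do_IRQ undoes this with ~orig_rax);
 * presumably this keeps interrupt frames distinguishable from syscall
 * frames, which store a non-negative syscall number in that slot.
 */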

ENTRY(thermal_interrupt)
        apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
        apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
        apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

        .macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
        apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
        .endm

        INVALIDATE_ENTRY 0
        INVALIDATE_ENTRY 1
        INVALIDATE_ENTRY 2
        INVALIDATE_ENTRY 3
        INVALIDATE_ENTRY 4
        INVALIDATE_ENTRY 5
        INVALIDATE_ENTRY 6
        INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
        apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
#endif

ENTRY(apic_timer_interrupt)
        apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(error_interrupt)
        apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
        apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)

/*
 * Exception entry points.
 */
        .macro zeroentry sym
        INTR_FRAME
        pushq $0        /* push error code/oldrax */
        CFI_ADJUST_CFA_OFFSET 8
        pushq %rax      /* push real oldrax to the rdi slot */
        CFI_ADJUST_CFA_OFFSET 8
        leaq \sym(%rip),%rax
        jmp error_entry
        CFI_ENDPROC
        .endm

        .macro errorentry sym
        XCPT_FRAME
        pushq %rax
        CFI_ADJUST_CFA_OFFSET 8
        leaq \sym(%rip),%rax
        jmp error_entry
        CFI_ENDPROC
        .endm
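
/*
 * Both macros hand error_entry an identical stack layout: zeroentry
 * pushes a fake error code of 0, errorentry relies on the one the CPU
 * pushed; either way %rax is parked in what becomes the RDI slot and
 * the C handler's address travels in %rax.
 */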

        /* error code is on the stack already */
        /* handle NMI-like exceptions that can happen everywhere */
        .macro paranoidentry sym, ist=0, irqtrace=1
        SAVE_ALL
        cld
        movl $1,%ebx
        movl $MSR_GS_BASE,%ecx
        rdmsr
        testl %edx,%edx
        js 1f
        swapgs
        xorl %ebx,%ebx
1:
        .if \ist
        movq %gs:pda_data_offset, %rbp
        .endif
        movq %rsp,%rdi
        movq ORIG_RAX(%rsp),%rsi
        movq $-1,ORIG_RAX(%rsp)
        .if \ist
        subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
        .endif
        call \sym
        .if \ist
        addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
        .endif
        cli
        .if \irqtrace
        TRACE_IRQS_OFF
        .endif
        .endm
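
/*
 * The rdmsr of MSR_GS_BASE above decides whether swapgs is needed: a
 * negative high half in %edx means GS already points at the kernel PDA
 * (the exception nested inside the kernel), so %ebx stays 1 and exit
 * skips swapgs; otherwise we swapgs now and clear %ebx. The IST entry
 * is moved down by EXCEPTION_STKSZ around the call so that a recursive
 * exception of the same kind gets a fresh stack instead of clobbering
 * this one.
 */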

        /*
         * "Paranoid" exit path from exception stack.
         * Paranoid because this is used by NMIs and cannot take
         * any kernel state for granted.
         * We don't do kernel preemption checks here, because only
         * NMI should be common and it does not enable IRQs and
         * cannot get reschedule ticks.
         *
         * "trace" is 0 for the NMI handler only, because irq-tracing
         * is fundamentally NMI-unsafe. (we cannot change the soft and
         * hard flags at once, atomically)
         */
        .macro paranoidexit trace=1
        /* ebx: no swapgs flag */
paranoid_exit\trace:
        testl %ebx,%ebx                 /* swapgs needed? */
        jnz paranoid_restore\trace
        testl $3,CS(%rsp)
        jnz paranoid_userspace\trace
paranoid_swapgs\trace:
        .if \trace
        TRACE_IRQS_IRETQ 0
        .endif
        swapgs
paranoid_restore\trace:
        RESTORE_ALL 8
        iretq
paranoid_userspace\trace:
        GET_THREAD_INFO(%rcx)
        movl threadinfo_flags(%rcx),%ebx
        andl $_TIF_WORK_MASK,%ebx
        jz paranoid_swapgs\trace
        movq %rsp,%rdi                  /* &pt_regs */
        call sync_regs
        movq %rax,%rsp                  /* switch stack for scheduling */
        testl $_TIF_NEED_RESCHED,%ebx
        jnz paranoid_schedule\trace
        movl %ebx,%edx                  /* arg3: thread flags */
        .if \trace
        TRACE_IRQS_ON
        .endif
        sti
        xorl %esi,%esi                  /* arg2: oldset */
        movq %rsp,%rdi                  /* arg1: &pt_regs */
        call do_notify_resume
        cli
        .if \trace
        TRACE_IRQS_OFF
        .endif
        jmp paranoid_userspace\trace
paranoid_schedule\trace:
        .if \trace
        TRACE_IRQS_ON
        .endif
        sti
        call schedule
        cli
        .if \trace
        TRACE_IRQS_OFF
        .endif
        jmp paranoid_userspace\trace
        CFI_ENDPROC
        .endm

/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
        _frame RDI
        /* rdi slot contains rax, oldrax contains error code */
        cld
        subq $14*8,%rsp
        CFI_ADJUST_CFA_OFFSET (14*8)
        movq %rsi,13*8(%rsp)
        CFI_REL_OFFSET rsi,RSI
        movq 14*8(%rsp),%rsi    /* load rax from rdi slot */
        movq %rdx,12*8(%rsp)
        CFI_REL_OFFSET rdx,RDX
        movq %rcx,11*8(%rsp)
        CFI_REL_OFFSET rcx,RCX
        movq %rsi,10*8(%rsp)    /* store rax */
        CFI_REL_OFFSET rax,RAX
        movq %r8, 9*8(%rsp)
        CFI_REL_OFFSET r8,R8
        movq %r9, 8*8(%rsp)
        CFI_REL_OFFSET r9,R9
        movq %r10,7*8(%rsp)
        CFI_REL_OFFSET r10,R10
        movq %r11,6*8(%rsp)
        CFI_REL_OFFSET r11,R11
        movq %rbx,5*8(%rsp)
        CFI_REL_OFFSET rbx,RBX
        movq %rbp,4*8(%rsp)
        CFI_REL_OFFSET rbp,RBP
        movq %r12,3*8(%rsp)
        CFI_REL_OFFSET r12,R12
        movq %r13,2*8(%rsp)
        CFI_REL_OFFSET r13,R13
        movq %r14,1*8(%rsp)
        CFI_REL_OFFSET r14,R14
        movq %r15,(%rsp)
        CFI_REL_OFFSET r15,R15
        xorl %ebx,%ebx
        testl $3,CS(%rsp)
        je error_kernelspace
error_swapgs:
        swapgs
error_sti:
        movq %rdi,RDI(%rsp)
        movq %rsp,%rdi
        movq ORIG_RAX(%rsp),%rsi        /* get error code */
        movq $-1,ORIG_RAX(%rsp)
        call *%rax
        /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
        movl %ebx,%eax
        RESTORE_REST
        cli
        TRACE_IRQS_OFF
        GET_THREAD_INFO(%rcx)
        testl %eax,%eax
        jne retint_kernel
        movl threadinfo_flags(%rcx),%edx
        movl $_TIF_WORK_MASK,%edi
        andl %edi,%edx
        jnz retint_careful
        /*
         * The iret might restore flags:
         */
        TRACE_IRQS_IRETQ
        swapgs
        RESTORE_ARGS 0,8,0
        jmp iret_label
        CFI_ENDPROC

error_kernelspace:
        incl %ebx
        /* There are two places in the kernel that can potentially fault with
           usergs. Handle them here. The exception handlers after
           iret run with kernel gs again, so don't set the user space flag.
           B stepping K8s sometimes report a truncated RIP for IRET
           exceptions returning to compat mode. Check for these here too. */
        leaq iret_label(%rip),%rbp
        cmpq %rbp,RIP(%rsp)
        je error_swapgs
        movl %ebp,%ebp  /* zero extend */
        cmpq %rbp,RIP(%rsp)
        je error_swapgs
        cmpq $gs_change,RIP(%rsp)
        je error_swapgs
        jmp error_sti
KPROBE_END(error_entry)

        /* Reload gs selector with exception handling */
        /* edi: new selector */
ENTRY(load_gs_index)
        CFI_STARTPROC
        pushf
        CFI_ADJUST_CFA_OFFSET 8
        cli
        swapgs
gs_change:
        movl %edi,%gs
2:      mfence          /* workaround */
        swapgs
        popf
        CFI_ADJUST_CFA_OFFSET -8
        ret
        CFI_ENDPROC
ENDPROC(load_gs_index)

        .section __ex_table,"a"
        .align 8
        .quad gs_change,bad_gs
        .previous
        .section .fixup,"ax"
        /* running with kernelgs */
bad_gs:
        swapgs                  /* switch back to user gs */
        xorl %eax,%eax
        movl %eax,%gs
        jmp 2b
        .previous

/*
 * Create a kernel thread.
 *
 * C extern interface:
 *      extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 *      rdi: fn, rsi: arg, rdx: flags
 */
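/*
 * The fake IRET frame built below points rip at child_rip: when the
 * child created by do_fork unwinds through ret_from_fork, the final
 * iretq consumes that frame and lands in child_rip, still in kernel
 * mode, which then calls fn(arg).
 */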
ENTRY(kernel_thread)
        CFI_STARTPROC
        FAKE_STACK_FRAME $child_rip
        SAVE_ALL

        # rdi: flags, rsi: usp, rdx: will be &pt_regs
        movq %rdx,%rdi
        orq kernel_thread_flags(%rip),%rdi
        movq $-1, %rsi
        movq %rsp, %rdx

        xorl %r8d,%r8d
        xorl %r9d,%r9d

        # clone now
        call do_fork
        movq %rax,RAX(%rsp)
        xorl %edi,%edi

        /*
         * It isn't worth checking for a reschedule here,
         * so internally to the x86_64 port you can rely on kernel_thread()
         * not rescheduling the child before returning; this avoids the
         * need for hacks, for example to fork off the per-CPU idle tasks.
         * [Hopefully no generic code relies on the reschedule -AK]
         */
        RESTORE_ALL
        UNFAKE_STACK_FRAME
        ret
        CFI_ENDPROC
ENDPROC(kernel_thread)

child_rip:
        pushq $0                # fake return address
        CFI_STARTPROC
        /*
         * Here we are in the child and the registers are set as they were
         * at kernel_thread() invocation in the parent.
         */
        movq %rdi, %rax
        movq %rsi, %rdi
        call *%rax
        # exit
        xorl %edi, %edi
        call do_exit
        CFI_ENDPROC
ENDPROC(child_rip)

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *      extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *      rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *      extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs regs)
 *
 * do_sys_execve asm fallback arguments:
 *      rdi: name, rsi: argv, rdx: envp, fake frame on the stack
 */
ENTRY(kernel_execve)
        CFI_STARTPROC
        FAKE_STACK_FRAME $0
        SAVE_ALL
        call sys_execve
        movq %rax, RAX(%rsp)
        RESTORE_REST
        testq %rax,%rax
        je int_ret_from_sys_call
        RESTORE_ARGS
        UNFAKE_STACK_FRAME
        ret
        CFI_ENDPROC
ENDPROC(kernel_execve)

KPROBE_ENTRY(page_fault)
        errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
        zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
        zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
        zeroentry math_state_restore
END(device_not_available)

        /* runs on exception stack */
KPROBE_ENTRY(debug)
        INTR_FRAME
        pushq $0
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_debug, DEBUG_STACK
        paranoidexit
KPROBE_END(debug)

        /* runs on exception stack */
KPROBE_ENTRY(nmi)
        INTR_FRAME
        pushq $-1
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
        paranoidexit 0
#else
        jmp paranoid_exit1
        CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
        INTR_FRAME
        pushq $0
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_int3, DEBUG_STACK
        jmp paranoid_exit1
        CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
        zeroentry do_overflow
END(overflow)

ENTRY(bounds)
        zeroentry do_bounds
END(bounds)

ENTRY(invalid_op)
        zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
        zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

ENTRY(reserved)
        zeroentry do_reserved
END(reserved)

        /* runs on exception stack */
ENTRY(double_fault)
        XCPT_FRAME
        paranoidentry do_double_fault
        jmp paranoid_exit1
        CFI_ENDPROC
END(double_fault)

ENTRY(invalid_TSS)
        errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
        errorentry do_segment_not_present
END(segment_not_present)

        /* runs on exception stack */
ENTRY(stack_segment)
        XCPT_FRAME
        paranoidentry do_stack_segment
        jmp paranoid_exit1
        CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
        errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
        errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
        zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
        zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
        /* runs on exception stack */
ENTRY(machine_check)
        INTR_FRAME
        pushq $0
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_machine_check
        jmp paranoid_exit1
        CFI_ENDPROC
END(machine_check)
#endif

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
        CFI_STARTPROC
        push %rbp
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rbp,0
        mov %rsp,%rbp
        CFI_DEF_CFA_REGISTER rbp
        incl %gs:pda_irqcount
        cmove %gs:pda_irqstackptr,%rsp
        push %rbp                       # backlink for old unwinder
        call __do_softirq
        leaveq
        CFI_DEF_CFA_REGISTER rsp
        CFI_ADJUST_CFA_OFFSET -8
        decl %gs:pda_irqcount
        ret
        CFI_ENDPROC
ENDPROC(call_softirq)

#ifdef CONFIG_STACK_UNWIND
ENTRY(arch_unwind_init_running)
        CFI_STARTPROC
        movq %r15, R15(%rdi)
        movq %r14, R14(%rdi)
        xchgq %rsi, %rdx
        movq %r13, R13(%rdi)
        movq %r12, R12(%rdi)
        xorl %eax, %eax
        movq %rbp, RBP(%rdi)
        movq %rbx, RBX(%rdi)
        movq (%rsp), %rcx
        movq %rax, R11(%rdi)
        movq %rax, R10(%rdi)
        movq %rax, R9(%rdi)
        movq %rax, R8(%rdi)
        movq %rax, RAX(%rdi)
        movq %rax, RCX(%rdi)
        movq %rax, RDX(%rdi)
        movq %rax, RSI(%rdi)
        movq %rax, RDI(%rdi)
        movq %rax, ORIG_RAX(%rdi)
        movq %rcx, RIP(%rdi)
        leaq 8(%rsp), %rcx
        movq $__KERNEL_CS, CS(%rdi)
        movq %rax, EFLAGS(%rdi)
        movq %rcx, RSP(%rdi)
        movq $__KERNEL_DS, SS(%rdi)
        jmpq *%rdx
        CFI_ENDPROC
ENDPROC(arch_unwind_init_running)
#endif