/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers are
 *   not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>

        .code64

#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif


.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
        bt      $9,EFLAGS-\offset(%rsp)         /* interrupts off? */
        jnc     1f
        TRACE_IRQS_ON
1:
#endif
.endm

/*
 * C code is not supposed to know about undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

        /* %rsp:at FRAMEEND */
        .macro FIXUP_TOP_OF_STACK tmp
        movq    %gs:pda_oldrsp,\tmp
        movq    \tmp,RSP(%rsp)
        movq    $__USER_DS,SS(%rsp)
        movq    $__USER_CS,CS(%rsp)
        movq    $-1,RCX(%rsp)
        movq    R11(%rsp),\tmp  /* get eflags */
        movq    \tmp,EFLAGS(%rsp)
        .endm

        .macro RESTORE_TOP_OF_STACK tmp,offset=0
        movq    RSP-\offset(%rsp),\tmp
        movq    \tmp,%gs:pda_oldrsp
        movq    EFLAGS-\offset(%rsp),\tmp
        movq    \tmp,R11-\offset(%rsp)
        .endm

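/*
 * Why this is needed (a sketch, derived from the macro bodies above):
 * SYSCALL hands over only the return RIP in %rcx and RFLAGS in %r11;
 * the SS, RSP, CS and EFLAGS slots of the pt_regs frame are never
 * written by hardware. A C handler given this frame would read garbage
 * from those slots unless FIXUP_TOP_OF_STACK first fills them in from
 * %gs:pda_oldrsp, the saved R11 and the __USER_CS/__USER_DS constants.
 */
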
        .macro FAKE_STACK_FRAME child_rip
        /* push in order ss, rsp, eflags, cs, rip */
        xorl    %eax, %eax
        pushq   %rax            /* ss */
        CFI_ADJUST_CFA_OFFSET   8
        /*CFI_REL_OFFSET        ss,0*/
        pushq   %rax            /* rsp */
        CFI_ADJUST_CFA_OFFSET   8
        CFI_REL_OFFSET  rsp,0
        pushq   $(1<<9)         /* eflags - interrupts on */
        CFI_ADJUST_CFA_OFFSET   8
        /*CFI_REL_OFFSET        rflags,0*/
        pushq   $__KERNEL_CS    /* cs */
        CFI_ADJUST_CFA_OFFSET   8
        /*CFI_REL_OFFSET        cs,0*/
        pushq   \child_rip      /* rip */
        CFI_ADJUST_CFA_OFFSET   8
        CFI_REL_OFFSET  rip,0
        pushq   %rax            /* orig rax */
        CFI_ADJUST_CFA_OFFSET   8
        .endm

        .macro UNFAKE_STACK_FRAME
        addq    $8*6, %rsp
        CFI_ADJUST_CFA_OFFSET   -(6*8)
        .endm
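
/*
 * Layout sketch (derived from the pushes above): after FAKE_STACK_FRAME
 * the six new quadwords mimic a hardware interrupt frame plus orig_rax,
 * from high to low address:
 *
 *      40(%rsp)  ss       = 0
 *      32(%rsp)  rsp      = 0
 *      24(%rsp)  eflags   = 1<<9 (IF set)
 *      16(%rsp)  cs       = __KERNEL_CS
 *       8(%rsp)  rip      = \child_rip
 *       0(%rsp)  orig_rax = 0
 *
 * UNFAKE_STACK_FRAME just pops the same 6*8 bytes again.
 */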

        .macro  CFI_DEFAULT_STACK start=1
        .if \start
        CFI_STARTPROC   simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA     rsp,SS+8
        .else
        CFI_DEF_CFA_OFFSET SS+8
        .endif
        CFI_REL_OFFSET  r15,R15
        CFI_REL_OFFSET  r14,R14
        CFI_REL_OFFSET  r13,R13
        CFI_REL_OFFSET  r12,R12
        CFI_REL_OFFSET  rbp,RBP
        CFI_REL_OFFSET  rbx,RBX
        CFI_REL_OFFSET  r11,R11
        CFI_REL_OFFSET  r10,R10
        CFI_REL_OFFSET  r9,R9
        CFI_REL_OFFSET  r8,R8
        CFI_REL_OFFSET  rax,RAX
        CFI_REL_OFFSET  rcx,RCX
        CFI_REL_OFFSET  rdx,RDX
        CFI_REL_OFFSET  rsi,RSI
        CFI_REL_OFFSET  rdi,RDI
        CFI_REL_OFFSET  rip,RIP
        /*CFI_REL_OFFSET        cs,CS*/
        /*CFI_REL_OFFSET        rflags,EFLAGS*/
        CFI_REL_OFFSET  rsp,RSP
        /*CFI_REL_OFFSET        ss,SS*/
        .endm
/*
 * A newly forked process directly context switches into this.
 */
/* rdi: prev */
ENTRY(ret_from_fork)
        CFI_DEFAULT_STACK
        push kernel_eflags(%rip)
        CFI_ADJUST_CFA_OFFSET 8
        popf                            # reset kernel eflags
        CFI_ADJUST_CFA_OFFSET -8
        call schedule_tail
        GET_THREAD_INFO(%rcx)
        testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
        jnz rff_trace
rff_action:
        RESTORE_REST
        testl $3,CS-ARGOFFSET(%rsp)     # from kernel_thread?
        je   int_ret_from_sys_call
        testl $_TIF_IA32,threadinfo_flags(%rcx)
        jnz  int_ret_from_sys_call
        RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
        jmp ret_from_sys_call
rff_trace:
        movq %rsp,%rdi
        call syscall_trace_leave
        GET_THREAD_INFO(%rcx)
        jmp rff_action
        CFI_ENDPROC
END(ret_from_fork)

/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3   (--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX  if we had a free scratch register we could save the RSP into the stack frame
 *      and report it properly in ps. Unfortunately we don't.
 *
 * When the user can change the frames, always force IRET. That is because
 * it deals with uncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */

ENTRY(system_call)
        CFI_STARTPROC   simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA     rsp,PDA_STACKOFFSET
        CFI_REGISTER    rip,rcx
        /*CFI_REGISTER  rflags,r11*/
        swapgs
        movq    %rsp,%gs:pda_oldrsp
        movq    %gs:pda_kernelstack,%rsp
        /*
         * No need to follow this irqs off/on section - it's straight
         * and short:
         */
        sti
        SAVE_ARGS 8,1
        movq    %rax,ORIG_RAX-ARGOFFSET(%rsp)
        movq    %rcx,RIP-ARGOFFSET(%rsp)
        CFI_REL_OFFSET rip,RIP-ARGOFFSET
        GET_THREAD_INFO(%rcx)
        testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
        CFI_REMEMBER_STATE
        jnz tracesys
        cmpq $__NR_syscall_max,%rax
        ja badsys
        movq %r10,%rcx
        call *sys_call_table(,%rax,8)   # XXX: rip relative
        movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
        .globl ret_from_sys_call
ret_from_sys_call:
        movl $_TIF_ALLWORK_MASK,%edi
        /* edi: flagmask */
sysret_check:
        GET_THREAD_INFO(%rcx)
        cli
        TRACE_IRQS_OFF
        movl threadinfo_flags(%rcx),%edx
        andl %edi,%edx
        CFI_REMEMBER_STATE
        jnz  sysret_careful
        /*
         * sysretq will re-enable interrupts:
         */
        TRACE_IRQS_ON
        movq RIP-ARGOFFSET(%rsp),%rcx
        CFI_REGISTER rip,rcx
        RESTORE_ARGS 0,-ARG_SKIP,1
        /*CFI_REGISTER rflags,r11*/
        movq    %gs:pda_oldrsp,%rsp
        swapgs
        sysretq

        /* Handle reschedules */
        /* edx: work, edi: workmask */
sysret_careful:
        CFI_RESTORE_STATE
        bt $TIF_NEED_RESCHED,%edx
        jnc sysret_signal
        TRACE_IRQS_ON
        sti
        pushq %rdi
        CFI_ADJUST_CFA_OFFSET 8
        call schedule
        popq  %rdi
        CFI_ADJUST_CFA_OFFSET -8
        jmp sysret_check

        /* Handle a signal */
sysret_signal:
        TRACE_IRQS_ON
        sti
        testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
        jz    1f

        /* Really a signal */
        /* edx: work flags (arg3) */
        leaq do_notify_resume(%rip),%rax
        leaq -ARGOFFSET(%rsp),%rdi      # &pt_regs -> arg1
        xorl %esi,%esi                  # oldset -> arg2
        call ptregscall_common
1:      movl $_TIF_NEED_RESCHED,%edi
        /* Use IRET because user could have changed frame. This
           works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
        cli
        TRACE_IRQS_OFF
        jmp int_with_check

badsys:
        movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
        jmp ret_from_sys_call

        /* Do syscall tracing */
tracesys:
        CFI_RESTORE_STATE
        SAVE_REST
        movq $-ENOSYS,RAX(%rsp)
        FIXUP_TOP_OF_STACK %rdi
        movq %rsp,%rdi
        call syscall_trace_enter
        LOAD_ARGS ARGOFFSET     /* reload args from stack in case ptrace changed it */
        RESTORE_REST
        cmpq $__NR_syscall_max,%rax
        ja   1f
        movq %r10,%rcx          /* fixup for C */
        call *sys_call_table(,%rax,8)
1:      movq %rax,RAX-ARGOFFSET(%rsp)
        /* Use IRET because user could have changed frame */
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(system_call)

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
ENTRY(int_ret_from_sys_call)
        CFI_STARTPROC   simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA     rsp,SS+8-ARGOFFSET
        /*CFI_REL_OFFSET        ss,SS-ARGOFFSET*/
        CFI_REL_OFFSET  rsp,RSP-ARGOFFSET
        /*CFI_REL_OFFSET        rflags,EFLAGS-ARGOFFSET*/
        /*CFI_REL_OFFSET        cs,CS-ARGOFFSET*/
        CFI_REL_OFFSET  rip,RIP-ARGOFFSET
        CFI_REL_OFFSET  rdx,RDX-ARGOFFSET
        CFI_REL_OFFSET  rcx,RCX-ARGOFFSET
        CFI_REL_OFFSET  rax,RAX-ARGOFFSET
        CFI_REL_OFFSET  rdi,RDI-ARGOFFSET
        CFI_REL_OFFSET  rsi,RSI-ARGOFFSET
        CFI_REL_OFFSET  r8,R8-ARGOFFSET
        CFI_REL_OFFSET  r9,R9-ARGOFFSET
        CFI_REL_OFFSET  r10,R10-ARGOFFSET
        CFI_REL_OFFSET  r11,R11-ARGOFFSET
        cli
        TRACE_IRQS_OFF
        testl $3,CS-ARGOFFSET(%rsp)
        je retint_restore_args
        movl $_TIF_ALLWORK_MASK,%edi
        /* edi: mask to check */
int_with_check:
        GET_THREAD_INFO(%rcx)
        movl threadinfo_flags(%rcx),%edx
        andl %edi,%edx
        jnz   int_careful
        andl    $~TS_COMPAT,threadinfo_status(%rcx)
        jmp   retint_swapgs

        /* Either reschedule or signal or syscall exit tracking needed. */
        /* First do a reschedule test. */
        /* edx: work, edi: workmask */
int_careful:
        bt $TIF_NEED_RESCHED,%edx
        jnc  int_very_careful
        TRACE_IRQS_ON
        sti
        pushq %rdi
        CFI_ADJUST_CFA_OFFSET 8
        call schedule
        popq %rdi
        CFI_ADJUST_CFA_OFFSET -8
        cli
        TRACE_IRQS_OFF
        jmp int_with_check

        /* handle signals and tracing -- both require a full stack frame */
int_very_careful:
        TRACE_IRQS_ON
        sti
        SAVE_REST
        /* Check for syscall exit trace */
        testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
        jz int_signal
        pushq %rdi
        CFI_ADJUST_CFA_OFFSET 8
        leaq 8(%rsp),%rdi       # &ptregs -> arg1
        call syscall_trace_leave
        popq %rdi
        CFI_ADJUST_CFA_OFFSET -8
        andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
        cli
        TRACE_IRQS_OFF
        jmp int_restore_rest

int_signal:
        testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
        jz 1f
        movq %rsp,%rdi          # &ptregs -> arg1
        xorl %esi,%esi          # oldset -> arg2
        call do_notify_resume
1:      movl $_TIF_NEED_RESCHED,%edi
int_restore_rest:
        RESTORE_REST
        cli
        TRACE_IRQS_OFF
        jmp int_with_check
        CFI_ENDPROC
END(int_ret_from_sys_call)

/*
 * Certain special system calls that need to save a complete full stack frame.
 */

        .macro PTREGSCALL label,func,arg
        .globl \label
\label:
        leaq    \func(%rip),%rax
        leaq    -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
        jmp     ptregscall_common
END(\label)
        .endm

        CFI_STARTPROC

        PTREGSCALL stub_clone, sys_clone, %r8
        PTREGSCALL stub_fork, sys_fork, %rdi
        PTREGSCALL stub_vfork, sys_vfork, %rdi
        PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
        PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
        PTREGSCALL stub_iopl, sys_iopl, %rsi
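
/*
 * For illustration, "PTREGSCALL stub_clone, sys_clone, %r8" above
 * expands to roughly:
 *
 *      .globl stub_clone
 * stub_clone:
 *      leaq    sys_clone(%rip),%rax
 *      leaq    -ARGOFFSET+8(%rsp),%r8
 *      jmp     ptregscall_common
 *
 * Each stub loads its handler into %rax and points the handler's
 * pt_regs argument register at the partial frame before sharing the
 * common tail below.
 */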

ENTRY(ptregscall_common)
        popq %r11
        CFI_ADJUST_CFA_OFFSET -8
        CFI_REGISTER rip, r11
        SAVE_REST
        movq %r11, %r15
        CFI_REGISTER rip, r15
        FIXUP_TOP_OF_STACK %r11
        call *%rax
        RESTORE_TOP_OF_STACK %r11
        movq %r15, %r11
        CFI_REGISTER rip, r11
        RESTORE_REST
        pushq %r11
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rip, 0
        ret
        CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
        CFI_STARTPROC
        popq %r11
        CFI_ADJUST_CFA_OFFSET -8
        CFI_REGISTER rip, r11
        SAVE_REST
        FIXUP_TOP_OF_STACK %r11
        call sys_execve
        RESTORE_TOP_OF_STACK %r11
        movq %rax,RAX(%rsp)
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
        CFI_STARTPROC
        addq $8, %rsp
        CFI_ADJUST_CFA_OFFSET -8
        SAVE_REST
        movq %rsp,%rdi
        FIXUP_TOP_OF_STACK %r11
        call sys_rt_sigreturn
        movq %rax,RAX(%rsp)     # fixme, this could be done at the higher layer
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(stub_rt_sigreturn)

/*
 * initial frame state for interrupts and exceptions
 */
        .macro _frame ref
        CFI_STARTPROC simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA rsp,SS+8-\ref
        /*CFI_REL_OFFSET ss,SS-\ref*/
        CFI_REL_OFFSET rsp,RSP-\ref
        /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
        /*CFI_REL_OFFSET cs,CS-\ref*/
        CFI_REL_OFFSET rip,RIP-\ref
        .endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX

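/*
 * Worked example (a sketch): for a plain interrupt the CPU pushes only
 * SS, RSP, RFLAGS, CS and RIP, so the frame starts at the RIP slot and
 * INTR_FRAME applies. For an exception such as #GP the CPU additionally
 * pushes an error code, which occupies the ORIG_RAX slot, so XCPT_FRAME
 * describes the same frame extended by one quadword.
 */
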
/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */
        .macro interrupt func
        cld
        SAVE_ARGS
        leaq -ARGOFFSET(%rsp),%rdi      # arg1 for handler
        pushq %rbp
        CFI_ADJUST_CFA_OFFSET   8
        CFI_REL_OFFSET          rbp, 0
        movq %rsp,%rbp
        CFI_DEF_CFA_REGISTER    rbp
        testl $3,CS(%rdi)
        je 1f
        swapgs
        /* irqcount is used to check if a CPU is already on an interrupt
           stack or not. While this is essentially redundant with preempt_count
           it is a little cheaper to use a separate counter in the PDA
           (short of moving irq_enter into assembly, which would be too
           much work) */
1:      incl    %gs:pda_irqcount
        cmoveq %gs:pda_irqstackptr,%rsp
        push    %rbp                    # backlink for old unwinder
        /*
         * We entered an interrupt context - irqs are off:
         */
        TRACE_IRQS_OFF
        call \func
        .endm

ENTRY(common_interrupt)
        XCPT_FRAME
        interrupt do_IRQ
        /* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
        cli
        TRACE_IRQS_OFF
        decl %gs:pda_irqcount
        leaveq
        CFI_DEF_CFA_REGISTER    rsp
        CFI_ADJUST_CFA_OFFSET   -8
exit_intr:
        GET_THREAD_INFO(%rcx)
        testl $3,CS-ARGOFFSET(%rsp)
        je retint_kernel

        /* Interrupt came from user space */
        /*
         * Has a correct top of stack, but a partial stack frame
         * %rcx: thread info. Interrupts off.
         */
retint_with_reschedule:
        movl $_TIF_WORK_MASK,%edi
retint_check:
        movl threadinfo_flags(%rcx),%edx
        andl %edi,%edx
        CFI_REMEMBER_STATE
        jnz  retint_careful
retint_swapgs:
        /*
         * The iretq could re-enable interrupts:
         */
        cli
        TRACE_IRQS_IRETQ
        swapgs
        jmp restore_args

retint_restore_args:
        cli
        /*
         * The iretq could re-enable interrupts:
         */
        TRACE_IRQS_IRETQ
restore_args:
        RESTORE_ARGS 0,8,0
iret_label:
        iretq

        .section __ex_table,"a"
        .quad iret_label,bad_iret
        .previous
        .section .fixup,"ax"
        /* force a signal here? this matches i386 behaviour */
        /* running with kernel gs */
bad_iret:
        movq $11,%rdi   /* SIGSEGV */
        TRACE_IRQS_ON
        sti
        jmp do_exit
        .previous

        /* edi: workmask, edx: work */
retint_careful:
        CFI_RESTORE_STATE
        bt    $TIF_NEED_RESCHED,%edx
        jnc   retint_signal
        TRACE_IRQS_ON
        sti
        pushq %rdi
        CFI_ADJUST_CFA_OFFSET   8
        call  schedule
        popq %rdi
        CFI_ADJUST_CFA_OFFSET   -8
        GET_THREAD_INFO(%rcx)
        cli
        TRACE_IRQS_OFF
        jmp retint_check

retint_signal:
        testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
        jz    retint_swapgs
        TRACE_IRQS_ON
        sti
        SAVE_REST
        movq $-1,ORIG_RAX(%rsp)
        xorl %esi,%esi          # oldset
        movq %rsp,%rdi          # &pt_regs
        call do_notify_resume
        RESTORE_REST
        cli
        TRACE_IRQS_OFF
        movl $_TIF_NEED_RESCHED,%edi
        GET_THREAD_INFO(%rcx)
        jmp retint_check

#ifdef CONFIG_PREEMPT
        /* Returning to kernel space. Check if we need preemption */
        /* rcx:  threadinfo. interrupts off. */
ENTRY(retint_kernel)
        cmpl $0,threadinfo_preempt_count(%rcx)
        jnz  retint_restore_args
        bt  $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
        jnc  retint_restore_args
        bt   $9,EFLAGS-ARGOFFSET(%rsp)  /* interrupts off? */
        jnc  retint_restore_args
        call preempt_schedule_irq
        jmp exit_intr
#endif

        CFI_ENDPROC
END(common_interrupt)

/*
 * APIC interrupts.
 */
        .macro apicinterrupt num,func
        INTR_FRAME
        pushq $~(\num)
        CFI_ADJUST_CFA_OFFSET 8
        interrupt \func
        jmp ret_from_intr
        CFI_ENDPROC
        .endm
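
/*
 * Note (a background assumption following the usual x86 Linux convention,
 * not something stated in this file): the vector is pushed complemented
 * ($~(\num)) so the value landing in the orig_rax slot is negative and
 * cannot be mistaken for a system call number, which is always >= 0 in
 * that slot. E.g. "apicinterrupt LOCAL_TIMER_VECTOR,
 * smp_apic_timer_interrupt" below pushes ~LOCAL_TIMER_VECTOR and then
 * enters the shared interrupt/return machinery.
 */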

ENTRY(thermal_interrupt)
        apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
        apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
        apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

        .macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
        apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
        .endm

        INVALIDATE_ENTRY 0
        INVALIDATE_ENTRY 1
        INVALIDATE_ENTRY 2
        INVALIDATE_ENTRY 3
        INVALIDATE_ENTRY 4
        INVALIDATE_ENTRY 5
        INVALIDATE_ENTRY 6
        INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
        apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
#endif

ENTRY(apic_timer_interrupt)
        apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(error_interrupt)
        apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
        apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)

/*
 * Exception entry points.
 */
        .macro zeroentry sym
        INTR_FRAME
        pushq $0        /* push error code/oldrax */
        CFI_ADJUST_CFA_OFFSET 8
        pushq %rax      /* push real oldrax to the rdi slot */
        CFI_ADJUST_CFA_OFFSET 8
        leaq  \sym(%rip),%rax
        jmp error_entry
        CFI_ENDPROC
        .endm

        .macro errorentry sym
        XCPT_FRAME
        pushq %rax
        CFI_ADJUST_CFA_OFFSET 8
        leaq  \sym(%rip),%rax
        jmp error_entry
        CFI_ENDPROC
        .endm
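
/*
 * For illustration, "zeroentry do_divide_error" (used further down)
 * expands to roughly:
 *
 *      INTR_FRAME
 *      pushq   $0
 *      pushq   %rax
 *      leaq    do_divide_error(%rip),%rax
 *      jmp     error_entry
 *
 * Exceptions without a CPU-pushed error code get a fake one, so
 * error_entry can treat every frame identically with the handler
 * address parked in %rax.
 */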

        /* error code is on the stack already */
        /* handle NMI like exceptions that can happen everywhere */
        .macro paranoidentry sym, ist=0, irqtrace=1
        SAVE_ALL
        cld
        movl $1,%ebx
        movl  $MSR_GS_BASE,%ecx
        rdmsr
        testl %edx,%edx
        js    1f
        swapgs
        xorl  %ebx,%ebx
1:
        .if \ist
        movq    %gs:pda_data_offset, %rbp
        .endif
        movq %rsp,%rdi
        movq ORIG_RAX(%rsp),%rsi
        movq $-1,ORIG_RAX(%rsp)
        .if \ist
        subq    $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
        .endif
        call \sym
        .if \ist
        addq    $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
        .endif
        cli
        .if \irqtrace
        TRACE_IRQS_OFF
        .endif
        .endm

        /*
         * "Paranoid" exit path from exception stack.
         * Paranoid because this is used by NMIs and cannot take
         * any kernel state for granted.
         * We don't do kernel preemption checks here, because only
         * NMI should be common and it does not enable IRQs and
         * cannot get reschedule ticks.
         *
         * "trace" is 0 for the NMI handler only, because irq-tracing
         * is fundamentally NMI-unsafe. (we cannot change the soft and
         * hard flags at once, atomically)
         */
        .macro paranoidexit trace=1
        /* ebx: no swapgs flag */
paranoid_exit\trace:
        testl %ebx,%ebx                 /* swapgs needed? */
        jnz paranoid_restore\trace
        testl $3,CS(%rsp)
        jnz   paranoid_userspace\trace
paranoid_swapgs\trace:
        .if \trace
        TRACE_IRQS_IRETQ 0
        .endif
        swapgs
paranoid_restore\trace:
        RESTORE_ALL 8
        iretq
paranoid_userspace\trace:
        GET_THREAD_INFO(%rcx)
        movl threadinfo_flags(%rcx),%ebx
        andl $_TIF_WORK_MASK,%ebx
        jz paranoid_swapgs\trace
        movq %rsp,%rdi                  /* &pt_regs */
        call sync_regs
        movq %rax,%rsp                  /* switch stack for scheduling */
        testl $_TIF_NEED_RESCHED,%ebx
        jnz paranoid_schedule\trace
        movl %ebx,%edx                  /* arg3: thread flags */
        .if \trace
        TRACE_IRQS_ON
        .endif
        sti
        xorl %esi,%esi                  /* arg2: oldset */
        movq %rsp,%rdi                  /* arg1: &pt_regs */
        call do_notify_resume
        cli
        .if \trace
        TRACE_IRQS_OFF
        .endif
        jmp paranoid_userspace\trace
paranoid_schedule\trace:
        .if \trace
        TRACE_IRQS_ON
        .endif
        sti
        call schedule
        cli
        .if \trace
        TRACE_IRQS_OFF
        .endif
        jmp paranoid_userspace\trace
        CFI_ENDPROC
        .endm

/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
        _frame RDI
        /* rdi slot contains rax, oldrax contains error code */
        cld
        subq  $14*8,%rsp
        CFI_ADJUST_CFA_OFFSET   (14*8)
        movq %rsi,13*8(%rsp)
        CFI_REL_OFFSET  rsi,RSI
        movq 14*8(%rsp),%rsi    /* load rax from rdi slot */
        movq %rdx,12*8(%rsp)
        CFI_REL_OFFSET  rdx,RDX
        movq %rcx,11*8(%rsp)
        CFI_REL_OFFSET  rcx,RCX
        movq %rsi,10*8(%rsp)    /* store rax */
        CFI_REL_OFFSET  rax,RAX
        movq %r8, 9*8(%rsp)
        CFI_REL_OFFSET  r8,R8
        movq %r9, 8*8(%rsp)
        CFI_REL_OFFSET  r9,R9
        movq %r10,7*8(%rsp)
        CFI_REL_OFFSET  r10,R10
        movq %r11,6*8(%rsp)
        CFI_REL_OFFSET  r11,R11
        movq %rbx,5*8(%rsp)
        CFI_REL_OFFSET  rbx,RBX
        movq %rbp,4*8(%rsp)
        CFI_REL_OFFSET  rbp,RBP
        movq %r12,3*8(%rsp)
        CFI_REL_OFFSET  r12,R12
        movq %r13,2*8(%rsp)
        CFI_REL_OFFSET  r13,R13
        movq %r14,1*8(%rsp)
        CFI_REL_OFFSET  r14,R14
        movq %r15,(%rsp)
        CFI_REL_OFFSET  r15,R15
        xorl %ebx,%ebx
        testl $3,CS(%rsp)
        je  error_kernelspace
error_swapgs:
        swapgs
error_sti:
        movq %rdi,RDI(%rsp)
        movq %rsp,%rdi
        movq ORIG_RAX(%rsp),%rsi        /* get error code */
        movq $-1,ORIG_RAX(%rsp)
        call *%rax
        /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
        movl %ebx,%eax
        RESTORE_REST
        cli
        TRACE_IRQS_OFF
        GET_THREAD_INFO(%rcx)
        testl %eax,%eax
        jne  retint_kernel
        movl  threadinfo_flags(%rcx),%edx
        movl  $_TIF_WORK_MASK,%edi
        andl  %edi,%edx
        jnz  retint_careful
        /*
         * The iret might restore flags:
         */
        TRACE_IRQS_IRETQ
        swapgs
        RESTORE_ARGS 0,8,0
        jmp iret_label
        CFI_ENDPROC

error_kernelspace:
        incl %ebx
        /* There are two places in the kernel that can potentially fault with
           usergs. Handle them here. The exception handlers after
           iret run with kernel gs again, so don't set the user space flag.
           B stepping K8s sometimes report a truncated RIP for IRET
           exceptions returning to compat mode. Check for these here too. */
        leaq iret_label(%rip),%rbp
        cmpq %rbp,RIP(%rsp)
        je   error_swapgs
        movl %ebp,%ebp  /* zero extend */
        cmpq %rbp,RIP(%rsp)
        je   error_swapgs
        cmpq $gs_change,RIP(%rsp)
        je   error_swapgs
        jmp error_sti
KPROBE_END(error_entry)

        /* Reload gs selector with exception handling */
        /* edi:  new selector */
ENTRY(load_gs_index)
        CFI_STARTPROC
        pushf
        CFI_ADJUST_CFA_OFFSET 8
        cli
        swapgs
gs_change:
        movl %edi,%gs
2:      mfence          /* workaround */
        swapgs
        popf
        CFI_ADJUST_CFA_OFFSET -8
        ret
        CFI_ENDPROC
ENDPROC(load_gs_index)

        .section __ex_table,"a"
        .align 8
        .quad gs_change,bad_gs
        .previous
        .section .fixup,"ax"
        /* running with kernelgs */
bad_gs:
        swapgs                  /* switch back to user gs */
        xorl %eax,%eax
        movl %eax,%gs
        jmp  2b
        .previous

/*
 * Create a kernel thread.
 *
 * C extern interface:
 *      extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 *      rdi: fn, rsi: arg, rdx: flags
 */
ENTRY(kernel_thread)
        CFI_STARTPROC
        FAKE_STACK_FRAME $child_rip
        SAVE_ALL

        # rdi: flags, rsi: usp, rdx: will be &pt_regs
        movq %rdx,%rdi
        orq  kernel_thread_flags(%rip),%rdi
        movq $-1, %rsi
        movq %rsp, %rdx

        xorl %r8d,%r8d
        xorl %r9d,%r9d

        # clone now
        call do_fork
        movq %rax,RAX(%rsp)
        xorl %edi,%edi

        /*
         * It isn't worth checking for reschedule here,
         * so internally to the x86_64 port you can rely on kernel_thread()
         * not to reschedule the child before returning; this avoids the need
         * for hacks, for example to fork off the per-CPU idle tasks.
         * [Hopefully no generic code relies on the reschedule -AK]
         */
        RESTORE_ALL
        UNFAKE_STACK_FRAME
        ret
        CFI_ENDPROC
ENDPROC(kernel_thread)
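
/*
 * Usage sketch (illustrative; my_thread_fn and my_arg are hypothetical):
 *
 *      long pid = kernel_thread(my_thread_fn, my_arg, CLONE_FS | CLONE_FILES);
 *
 * The flags are OR-ed with kernel_thread_flags(%rip) above before being
 * handed to do_fork, and the child starts executing in child_rip below.
 */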

child_rip:
        pushq $0                # fake return address
        CFI_STARTPROC
        /*
         * Here we are in the child and the registers are set as they were
         * at kernel_thread() invocation in the parent.
         */
        movq %rdi, %rax
        movq %rsi, %rdi
        call *%rax
        # exit
        xorl %edi, %edi
        call do_exit
        CFI_ENDPROC
ENDPROC(child_rip)

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *      extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *      rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *      extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs regs)
 *
 * do_sys_execve asm fallback arguments:
 *      rdi: name, rsi: argv, rdx: envp, fake frame on the stack
 */
ENTRY(execve)
        CFI_STARTPROC
        FAKE_STACK_FRAME $0
        SAVE_ALL
        call sys_execve
        movq %rax, RAX(%rsp)
        RESTORE_REST
        testq %rax,%rax
        je int_ret_from_sys_call
        RESTORE_ARGS
        UNFAKE_STACK_FRAME
        ret
        CFI_ENDPROC
ENDPROC(execve)

KPROBE_ENTRY(page_fault)
        errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
        zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
        zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
        zeroentry math_state_restore
END(device_not_available)

        /* runs on exception stack */
KPROBE_ENTRY(debug)
        INTR_FRAME
        pushq $0
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_debug, DEBUG_STACK
        paranoidexit
KPROBE_END(debug)

        /* runs on exception stack */
KPROBE_ENTRY(nmi)
        INTR_FRAME
        pushq $-1
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
        paranoidexit 0
#else
        jmp paranoid_exit1
        CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
        INTR_FRAME
        pushq $0
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_int3, DEBUG_STACK
        jmp paranoid_exit1
        CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
        zeroentry do_overflow
END(overflow)

ENTRY(bounds)
        zeroentry do_bounds
END(bounds)

ENTRY(invalid_op)
        zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
        zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

ENTRY(reserved)
        zeroentry do_reserved
END(reserved)

        /* runs on exception stack */
ENTRY(double_fault)
        XCPT_FRAME
        paranoidentry do_double_fault
        jmp paranoid_exit1
        CFI_ENDPROC
END(double_fault)

ENTRY(invalid_TSS)
        errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
        errorentry do_segment_not_present
END(segment_not_present)

        /* runs on exception stack */
ENTRY(stack_segment)
        XCPT_FRAME
        paranoidentry do_stack_segment
        jmp paranoid_exit1
        CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
        errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
        errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
        zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
        zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
        /* runs on exception stack */
ENTRY(machine_check)
        INTR_FRAME
        pushq $0
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_machine_check
        jmp paranoid_exit1
        CFI_ENDPROC
END(machine_check)
#endif

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
        CFI_STARTPROC
        push %rbp
        CFI_ADJUST_CFA_OFFSET   8
        CFI_REL_OFFSET rbp,0
        mov  %rsp,%rbp
        CFI_DEF_CFA_REGISTER rbp
        incl %gs:pda_irqcount
        cmove %gs:pda_irqstackptr,%rsp
        push  %rbp                      # backlink for old unwinder
        call __do_softirq
        leaveq
        CFI_DEF_CFA_REGISTER    rsp
        CFI_ADJUST_CFA_OFFSET   -8
        decl %gs:pda_irqcount
        ret
        CFI_ENDPROC
ENDPROC(call_softirq)

#ifdef CONFIG_STACK_UNWIND
ENTRY(arch_unwind_init_running)
        CFI_STARTPROC
        movq    %r15, R15(%rdi)
        movq    %r14, R14(%rdi)
        xchgq   %rsi, %rdx
        movq    %r13, R13(%rdi)
        movq    %r12, R12(%rdi)
        xorl    %eax, %eax
        movq    %rbp, RBP(%rdi)
        movq    %rbx, RBX(%rdi)
        movq    (%rsp), %rcx
        movq    %rax, R11(%rdi)
        movq    %rax, R10(%rdi)
        movq    %rax, R9(%rdi)
        movq    %rax, R8(%rdi)
        movq    %rax, RAX(%rdi)
        movq    %rax, RCX(%rdi)
        movq    %rax, RDX(%rdi)
        movq    %rax, RSI(%rdi)
        movq    %rax, RDI(%rdi)
        movq    %rax, ORIG_RAX(%rdi)
        movq    %rcx, RIP(%rdi)
        leaq    8(%rsp), %rcx
        movq    $__KERNEL_CS, CS(%rdi)
        movq    %rax, EFLAGS(%rdi)
        movq    %rcx, RSP(%rdi)
        movq    $__KERNEL_DS, SS(%rdi)
        jmpq    *%rdx
        CFI_ENDPROC
ENDPROC(arch_unwind_init_running)
#endif