git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blame - arch/x86_64/kernel/entry.S
[PATCH] x86: Revert e820 MCFG heuristics
[mirror_ubuntu-eoan-kernel.git] / arch / x86_64 / kernel / entry.S
CommitLineData
1da177e4
LT
1/*
2 * linux/arch/x86_64/entry.S
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
6 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
7 *
8 * $Id$
9 */
10
11/*
12 * entry.S contains the system-call and fault low-level handling routines.
13 *
14 * NOTE: This code handles signal-recognition, which happens every time
15 * after an interrupt and after each system call.
16 *
17 * Normal syscalls and interrupts don't save a full stack frame, this is
18 * only done for syscall tracing, signals or fork/exec et.al.
19 *
20 * A note on terminology:
21 * - top of stack: Architecture defined interrupt frame from SS to RIP
22 * at the top of the kernel process stack.
23 * - partial stack frame: partially saved registers upto R11.
24 * - full stack frame: Like partial stack frame, but all register saved.
25 *
26 * TODO:
27 * - schedule it carefully for the final hardware.
28 */
29
30#define ASSEMBLY 1
1da177e4
LT
31#include <linux/linkage.h>
32#include <asm/segment.h>
33#include <asm/smp.h>
34#include <asm/cache.h>
35#include <asm/errno.h>
36#include <asm/dwarf2.h>
37#include <asm/calling.h>
e2d5df93 38#include <asm/asm-offsets.h>
1da177e4
LT
39#include <asm/msr.h>
40#include <asm/unistd.h>
41#include <asm/thread_info.h>
42#include <asm/hw_irq.h>
5f8efbb9 43#include <asm/page.h>
2601e64d 44#include <asm/irqflags.h>
1da177e4
LT
45
46 .code64
47
dc37db4d 48#ifndef CONFIG_PREEMPT
1da177e4
LT
49#define retint_kernel retint_restore_args
50#endif
2601e64d
IM
51
52
/*
 * TRACE_IRQS_IRETQ: used just before an iretq that may re-enable
 * interrupts.  Tests bit 9 (EFLAGS.IF) in the saved EFLAGS of the
 * frame at EFLAGS-\offset(%rsp); if set, the coming iretq will turn
 * irqs back on, so report that to the irq-flags tracer now.
 * Compiles to nothing unless CONFIG_TRACE_IRQFLAGS is enabled.
 */
53.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
54#ifdef CONFIG_TRACE_IRQFLAGS
55	bt $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
56	jnc 1f				/* saved IF clear: nothing to trace */
57	TRACE_IRQS_ON
581:
59#endif
60.endm
61
1da177e4
LT
62/*
63 * C code is not supposed to know about undefined top of stack. Every time
64 * a C function with an pt_regs argument is called from the SYSCALL based
65 * fast path FIXUP_TOP_OF_STACK is needed.
66 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
67 * manipulation.
68 */
69
70 /* %rsp:at FRAMEEND */
/*
 * FIXUP_TOP_OF_STACK: rebuild the iret-style top-of-frame words that
 * the SYSCALL fast path never saved, so C code taking a pt_regs
 * argument sees valid RSP/SS/CS/EFLAGS/RCX slots.  User RSP comes
 * from %gs:pda_oldrsp; SS/CS are forced to the user selectors;
 * EFLAGS is recovered from the R11 slot (SYSCALL copied rflags into
 * r11); RCX is poisoned with -1 since its real value was consumed as
 * the syscall return address.  \tmp is a scratch register, clobbered.
 */
71 .macro FIXUP_TOP_OF_STACK tmp
72 movq %gs:pda_oldrsp,\tmp
73 movq \tmp,RSP(%rsp)
74 movq $__USER_DS,SS(%rsp)
75 movq $__USER_CS,CS(%rsp)
76 movq $-1,RCX(%rsp)
77 movq R11(%rsp),\tmp /* get eflags */
78 movq \tmp,EFLAGS(%rsp)
79 .endm
80
/*
 * RESTORE_TOP_OF_STACK: inverse of FIXUP_TOP_OF_STACK, run after the
 * C code may have modified the pt_regs.  Propagates the (possibly
 * changed) RSP slot back to %gs:pda_oldrsp and copies EFLAGS back
 * into the R11 slot so the SYSRET path restores it.  \tmp is
 * scratch; \offset accounts for a partial frame.
 */
81 .macro RESTORE_TOP_OF_STACK tmp,offset=0
82 movq RSP-\offset(%rsp),\tmp
83 movq \tmp,%gs:pda_oldrsp
84 movq EFLAGS-\offset(%rsp),\tmp
85 movq \tmp,R11-\offset(%rsp)
86 .endm
87
/*
 * FAKE_STACK_FRAME: build a synthetic interrupt frame (ss, rsp,
 * eflags with IF set, cs, rip = \child_rip, orig_rax) on the current
 * stack so a kernel thread can be started through the normal
 * interrupt-return machinery.  Clobbers %rax (zeroed, then used for
 * the ss/rsp/orig_rax slots).  Every push is paired with a
 * CFI_ADJUST_CFA_OFFSET so the unwinder tracks the frame; the
 * commented-out CFI_REL_OFFSET lines mark slots the unwinder does
 * not need.
 */
 88 .macro FAKE_STACK_FRAME child_rip
 89 /* push in order ss, rsp, eflags, cs, rip */
3829ee6b 90 xorl %eax, %eax
1da177e4
LT
 91 pushq %rax /* ss */
 92 CFI_ADJUST_CFA_OFFSET 8
7effaa88 93 /*CFI_REL_OFFSET ss,0*/
1da177e4
LT
 94 pushq %rax /* rsp */
 95 CFI_ADJUST_CFA_OFFSET 8
7effaa88 96 CFI_REL_OFFSET rsp,0
1da177e4
LT
 97 pushq $(1<<9) /* eflags - interrupts on */
 98 CFI_ADJUST_CFA_OFFSET 8
7effaa88 99 /*CFI_REL_OFFSET rflags,0*/
1da177e4
LT
 100 pushq $__KERNEL_CS /* cs */
 101 CFI_ADJUST_CFA_OFFSET 8
7effaa88 102 /*CFI_REL_OFFSET cs,0*/
1da177e4
LT
 103 pushq \child_rip /* rip */
 104 CFI_ADJUST_CFA_OFFSET 8
7effaa88 105 CFI_REL_OFFSET rip,0
1da177e4
LT
 106 pushq %rax /* orig rax */
 107 CFI_ADJUST_CFA_OFFSET 8
 108 .endm
109
/* UNFAKE_STACK_FRAME: drop the six quadwords pushed by
   FAKE_STACK_FRAME and rewind the unwinder's CFA to match. */
 110 .macro UNFAKE_STACK_FRAME
 111 addq $8*6, %rsp
 112 CFI_ADJUST_CFA_OFFSET -(6*8)
 113 .endm
114
7effaa88
JB
/*
 * CFI_DEFAULT_STACK: emit unwind annotations describing a full
 * pt_regs frame at the current %rsp.  With start=1 (the default) it
 * opens a fresh CFI region via CFI_STARTPROC simple and sets the CFA
 * to rsp+SS+8; with start=0 it only resets the CFA offset inside an
 * existing region.  One CFI_REL_OFFSET per saved register slot; the
 * cs/rflags/ss entries stay commented out because those slots are
 * not always populated.
 */
 115 .macro CFI_DEFAULT_STACK start=1
 116 .if \start
 117 CFI_STARTPROC simple
 118 CFI_DEF_CFA rsp,SS+8
 119 .else
 120 CFI_DEF_CFA_OFFSET SS+8
 121 .endif
 122 CFI_REL_OFFSET r15,R15
 123 CFI_REL_OFFSET r14,R14
 124 CFI_REL_OFFSET r13,R13
 125 CFI_REL_OFFSET r12,R12
 126 CFI_REL_OFFSET rbp,RBP
 127 CFI_REL_OFFSET rbx,RBX
 128 CFI_REL_OFFSET r11,R11
 129 CFI_REL_OFFSET r10,R10
 130 CFI_REL_OFFSET r9,R9
 131 CFI_REL_OFFSET r8,R8
 132 CFI_REL_OFFSET rax,RAX
 133 CFI_REL_OFFSET rcx,RCX
 134 CFI_REL_OFFSET rdx,RDX
 135 CFI_REL_OFFSET rsi,RSI
 136 CFI_REL_OFFSET rdi,RDI
 137 CFI_REL_OFFSET rip,RIP
 138 /*CFI_REL_OFFSET cs,CS*/
 139 /*CFI_REL_OFFSET rflags,EFLAGS*/
 140 CFI_REL_OFFSET rsp,RSP
 141 /*CFI_REL_OFFSET ss,SS*/
1da177e4
LT
 142 .endm
143/*
144 * A newly forked process directly context switches into this.
145 */
146/* rdi: prev */
/*
 * ret_from_fork: first code a newly forked task executes after being
 * context-switched to.  In: rdi = previous task (passed through to
 * schedule_tail).  If syscall trace/audit flags are set, detour via
 * rff_trace (syscall_trace_leave) first.  Then dispatch the exit
 * path: CS with CPL 0 means this came from kernel_thread and must
 * use the IRET path; TIF_IA32 tasks also take the IRET path; native
 * 64-bit user tasks return through the SYSRET fast path after
 * RESTORE_TOP_OF_STACK.
 */
147ENTRY(ret_from_fork)
1da177e4
LT
 148 CFI_DEFAULT_STACK
 149 call schedule_tail
 150 GET_THREAD_INFO(%rcx)
 151 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
 152 jnz rff_trace
 153rff_action:
 154 RESTORE_REST
 155 testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread?
 156 je int_ret_from_sys_call
 157 testl $_TIF_IA32,threadinfo_flags(%rcx)
 158 jnz int_ret_from_sys_call
 159 RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
 160 jmp ret_from_sys_call
 161rff_trace:
 162 movq %rsp,%rdi
 163 call syscall_trace_leave
 164 GET_THREAD_INFO(%rcx)
 165 jmp rff_action
 166 CFI_ENDPROC
4b787e0b 167END(ret_from_fork)
1da177e4
LT
168
169/*
170 * System call entry. Upto 6 arguments in registers are supported.
171 *
172 * SYSCALL does not save anything on the stack and does not change the
173 * stack pointer.
174 */
175
176/*
177 * Register setup:
178 * rax system call number
179 * rdi arg0
180 * rcx return address for syscall/sysret, C arg3
181 * rsi arg1
182 * rdx arg2
183 * r10 arg3 (--> moved to rcx for C)
184 * r8 arg4
185 * r9 arg5
186 * r11 eflags for syscall/sysret, temporary for C
187 * r12-r15,rbp,rbx saved by C code, not touched.
188 *
189 * Interrupts are off on entry.
190 * Only called from user space.
191 *
192 * XXX if we had a free scratch register we could save the RSP into the stack frame
193 * and report it properly in ps. Unfortunately we haven't.
7bf36bbc
AK
194 *
195 * When user can change the frames always force IRET. That is because
196 * it deals with uncanonical addresses better. SYSRET has trouble
197 * with them due to bugs in both AMD and Intel CPUs.
1da177e4
LT
198 */
199
200ENTRY(system_call)
7effaa88 201 CFI_STARTPROC simple
dffead4e 202 CFI_DEF_CFA rsp,PDA_STACKOFFSET
7effaa88
JB
203 CFI_REGISTER rip,rcx
204 /*CFI_REGISTER rflags,r11*/
1da177e4
LT
205 swapgs
206 movq %rsp,%gs:pda_oldrsp
207 movq %gs:pda_kernelstack,%rsp
2601e64d
IM
208 /*
209 * No need to follow this irqs off/on section - it's straight
210 * and short:
211 */
1da177e4
LT
212 sti
213 SAVE_ARGS 8,1
214 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
7effaa88
JB
215 movq %rcx,RIP-ARGOFFSET(%rsp)
216 CFI_REL_OFFSET rip,RIP-ARGOFFSET
1da177e4
LT
217 GET_THREAD_INFO(%rcx)
218 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
7effaa88 219 CFI_REMEMBER_STATE
1da177e4
LT
220 jnz tracesys
221 cmpq $__NR_syscall_max,%rax
222 ja badsys
223 movq %r10,%rcx
224 call *sys_call_table(,%rax,8) # XXX: rip relative
225 movq %rax,RAX-ARGOFFSET(%rsp)
226/*
227 * Syscall return path ending with SYSRET (fast path)
228 * Has incomplete stack frame and undefined top of stack.
229 */
230 .globl ret_from_sys_call
231ret_from_sys_call:
11b854b2 232 movl $_TIF_ALLWORK_MASK,%edi
1da177e4
LT
233 /* edi: flagmask */
234sysret_check:
235 GET_THREAD_INFO(%rcx)
236 cli
2601e64d 237 TRACE_IRQS_OFF
1da177e4
LT
238 movl threadinfo_flags(%rcx),%edx
239 andl %edi,%edx
7effaa88 240 CFI_REMEMBER_STATE
1da177e4 241 jnz sysret_careful
2601e64d
IM
242 /*
243 * sysretq will re-enable interrupts:
244 */
245 TRACE_IRQS_ON
1da177e4 246 movq RIP-ARGOFFSET(%rsp),%rcx
7effaa88 247 CFI_REGISTER rip,rcx
1da177e4 248 RESTORE_ARGS 0,-ARG_SKIP,1
7effaa88 249 /*CFI_REGISTER rflags,r11*/
1da177e4
LT
250 movq %gs:pda_oldrsp,%rsp
251 swapgs
252 sysretq
253
254 /* Handle reschedules */
255 /* edx: work, edi: workmask */
256sysret_careful:
7effaa88 257 CFI_RESTORE_STATE
1da177e4
LT
258 bt $TIF_NEED_RESCHED,%edx
259 jnc sysret_signal
2601e64d 260 TRACE_IRQS_ON
1da177e4
LT
261 sti
262 pushq %rdi
7effaa88 263 CFI_ADJUST_CFA_OFFSET 8
1da177e4
LT
264 call schedule
265 popq %rdi
7effaa88 266 CFI_ADJUST_CFA_OFFSET -8
1da177e4
LT
267 jmp sysret_check
268
269 /* Handle a signal */
270sysret_signal:
2601e64d 271 TRACE_IRQS_ON
1da177e4 272 sti
10ffdbb8
AK
273 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
274 jz 1f
275
276 /* Really a signal */
277 /* edx: work flags (arg3) */
1da177e4
LT
278 leaq do_notify_resume(%rip),%rax
279 leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
280 xorl %esi,%esi # oldset -> arg2
281 call ptregscall_common
10ffdbb8 2821: movl $_TIF_NEED_RESCHED,%edi
7bf36bbc
AK
283 /* Use IRET because user could have changed frame. This
284 works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
285 cli
2601e64d 286 TRACE_IRQS_OFF
7bf36bbc 287 jmp int_with_check
1da177e4 288
7effaa88
JB
289badsys:
290 movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
291 jmp ret_from_sys_call
292
1da177e4
LT
293 /* Do syscall tracing */
294tracesys:
7effaa88 295 CFI_RESTORE_STATE
1da177e4
LT
296 SAVE_REST
297 movq $-ENOSYS,RAX(%rsp)
298 FIXUP_TOP_OF_STACK %rdi
299 movq %rsp,%rdi
300 call syscall_trace_enter
301 LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
302 RESTORE_REST
303 cmpq $__NR_syscall_max,%rax
304 ja 1f
305 movq %r10,%rcx /* fixup for C */
306 call *sys_call_table(,%rax,8)
822ff019 3071: movq %rax,RAX-ARGOFFSET(%rsp)
7bf36bbc
AK
308 /* Use IRET because user could have changed frame */
309 jmp int_ret_from_sys_call
7effaa88 310 CFI_ENDPROC
4b787e0b 311END(system_call)
1da177e4 312
1da177e4
LT
313/*
314 * Syscall return path ending with IRET.
315 * Has correct top of stack, but partial stack frame.
316 */
7effaa88
JB
317ENTRY(int_ret_from_sys_call)
318 CFI_STARTPROC simple
319 CFI_DEF_CFA rsp,SS+8-ARGOFFSET
320 /*CFI_REL_OFFSET ss,SS-ARGOFFSET*/
321 CFI_REL_OFFSET rsp,RSP-ARGOFFSET
322 /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
323 /*CFI_REL_OFFSET cs,CS-ARGOFFSET*/
324 CFI_REL_OFFSET rip,RIP-ARGOFFSET
325 CFI_REL_OFFSET rdx,RDX-ARGOFFSET
326 CFI_REL_OFFSET rcx,RCX-ARGOFFSET
327 CFI_REL_OFFSET rax,RAX-ARGOFFSET
328 CFI_REL_OFFSET rdi,RDI-ARGOFFSET
329 CFI_REL_OFFSET rsi,RSI-ARGOFFSET
330 CFI_REL_OFFSET r8,R8-ARGOFFSET
331 CFI_REL_OFFSET r9,R9-ARGOFFSET
332 CFI_REL_OFFSET r10,R10-ARGOFFSET
333 CFI_REL_OFFSET r11,R11-ARGOFFSET
1da177e4 334 cli
2601e64d 335 TRACE_IRQS_OFF
1da177e4
LT
336 testl $3,CS-ARGOFFSET(%rsp)
337 je retint_restore_args
338 movl $_TIF_ALLWORK_MASK,%edi
339 /* edi: mask to check */
340int_with_check:
341 GET_THREAD_INFO(%rcx)
342 movl threadinfo_flags(%rcx),%edx
343 andl %edi,%edx
344 jnz int_careful
bf2fcc6f 345 andl $~TS_COMPAT,threadinfo_status(%rcx)
1da177e4
LT
346 jmp retint_swapgs
347
348 /* Either reschedule or signal or syscall exit tracking needed. */
349 /* First do a reschedule test. */
350 /* edx: work, edi: workmask */
351int_careful:
352 bt $TIF_NEED_RESCHED,%edx
353 jnc int_very_careful
2601e64d 354 TRACE_IRQS_ON
1da177e4
LT
355 sti
356 pushq %rdi
7effaa88 357 CFI_ADJUST_CFA_OFFSET 8
1da177e4
LT
358 call schedule
359 popq %rdi
7effaa88 360 CFI_ADJUST_CFA_OFFSET -8
cdd219cd 361 cli
2601e64d 362 TRACE_IRQS_OFF
1da177e4
LT
363 jmp int_with_check
364
365 /* handle signals and tracing -- both require a full stack frame */
366int_very_careful:
2601e64d 367 TRACE_IRQS_ON
1da177e4
LT
368 sti
369 SAVE_REST
370 /* Check for syscall exit trace */
371 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
372 jz int_signal
373 pushq %rdi
7effaa88 374 CFI_ADJUST_CFA_OFFSET 8
1da177e4
LT
375 leaq 8(%rsp),%rdi # &ptregs -> arg1
376 call syscall_trace_leave
377 popq %rdi
7effaa88 378 CFI_ADJUST_CFA_OFFSET -8
36c1104e 379 andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
be9e6870 380 cli
2601e64d 381 TRACE_IRQS_OFF
1da177e4
LT
382 jmp int_restore_rest
383
384int_signal:
385 testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
386 jz 1f
387 movq %rsp,%rdi # &ptregs -> arg1
388 xorl %esi,%esi # oldset -> arg2
389 call do_notify_resume
3901: movl $_TIF_NEED_RESCHED,%edi
391int_restore_rest:
392 RESTORE_REST
be9e6870 393 cli
2601e64d 394 TRACE_IRQS_OFF
1da177e4
LT
395 jmp int_with_check
396 CFI_ENDPROC
4b787e0b 397END(int_ret_from_sys_call)
1da177e4
LT
398
399/*
400 * Certain special system calls that need to save a complete full stack frame.
401 */
402
/*
 * PTREGSCALL: generate a stub (\label) for a syscall whose C handler
 * (\func) needs a full pt_regs.  The handler address is loaded into
 * %rax and a pointer to the frame-to-be into the handler's pt_regs
 * argument register (\arg); the heavy lifting — SAVE_REST and
 * FIXUP_TOP_OF_STACK — happens in the shared ptregscall_common.
 */
 403 .macro PTREGSCALL label,func,arg
 404 .globl \label
 405\label:
 406 leaq \func(%rip),%rax
 407 leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
 408 jmp ptregscall_common
4b787e0b 409END(\label)
1da177e4
LT
 410 .endm
411
7effaa88
JB
412 CFI_STARTPROC
413
1da177e4
LT
414 PTREGSCALL stub_clone, sys_clone, %r8
415 PTREGSCALL stub_fork, sys_fork, %rdi
416 PTREGSCALL stub_vfork, sys_vfork, %rdi
417 PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
418 PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
419 PTREGSCALL stub_iopl, sys_iopl, %rsi
420
421ENTRY(ptregscall_common)
1da177e4 422 popq %r11
7effaa88
JB
423 CFI_ADJUST_CFA_OFFSET -8
424 CFI_REGISTER rip, r11
1da177e4
LT
425 SAVE_REST
426 movq %r11, %r15
7effaa88 427 CFI_REGISTER rip, r15
1da177e4
LT
428 FIXUP_TOP_OF_STACK %r11
429 call *%rax
430 RESTORE_TOP_OF_STACK %r11
431 movq %r15, %r11
7effaa88 432 CFI_REGISTER rip, r11
1da177e4
LT
433 RESTORE_REST
434 pushq %r11
7effaa88
JB
435 CFI_ADJUST_CFA_OFFSET 8
436 CFI_REL_OFFSET rip, 0
1da177e4
LT
437 ret
438 CFI_ENDPROC
4b787e0b 439END(ptregscall_common)
1da177e4
LT
440
441ENTRY(stub_execve)
442 CFI_STARTPROC
443 popq %r11
7effaa88
JB
444 CFI_ADJUST_CFA_OFFSET -8
445 CFI_REGISTER rip, r11
1da177e4 446 SAVE_REST
1da177e4
LT
447 FIXUP_TOP_OF_STACK %r11
448 call sys_execve
1da177e4 449 RESTORE_TOP_OF_STACK %r11
1da177e4
LT
450 movq %rax,RAX(%rsp)
451 RESTORE_REST
452 jmp int_ret_from_sys_call
453 CFI_ENDPROC
4b787e0b 454END(stub_execve)
1da177e4
LT
455
456/*
457 * sigreturn is special because it needs to restore all registers on return.
458 * This cannot be done with SYSRET, so use the IRET return path instead.
459 */
/*
 * stub_rt_sigreturn: sigreturn must restore every register, which
 * SYSRET cannot do, so after sys_rt_sigreturn the task exits through
 * the IRET path (int_ret_from_sys_call).  The addq $8 discards the
 * return address pushed by the syscall dispatch before the full
 * frame is saved.
 */
460ENTRY(stub_rt_sigreturn)
 461 CFI_STARTPROC
7effaa88
JB
 462 addq $8, %rsp
 463 CFI_ADJUST_CFA_OFFSET -8
1da177e4
LT
 464 SAVE_REST
 465 movq %rsp,%rdi
 466 FIXUP_TOP_OF_STACK %r11
 467 call sys_rt_sigreturn
 468 movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
 469 RESTORE_REST
 470 jmp int_ret_from_sys_call
 471 CFI_ENDPROC
4b787e0b 472END(stub_rt_sigreturn)
1da177e4 473
7effaa88
JB
474/*
475 * initial frame state for interrupts and exceptions
476 */
/*
 * _frame: open a CFI region describing the hardware-pushed exception
 * frame.  \ref names the lowest slot already present on the stack,
 * so the CFA sits SS+8-\ref above %rsp.  Two wrappers select the two
 * hardware layouts: INTR_FRAME (\ref=RIP, no error code) and
 * XCPT_FRAME (\ref=ORIG_RAX, error code or vector already pushed).
 */
 477 .macro _frame ref
 478 CFI_STARTPROC simple
 479 CFI_DEF_CFA rsp,SS+8-\ref
 480 /*CFI_REL_OFFSET ss,SS-\ref*/
 481 CFI_REL_OFFSET rsp,RSP-\ref
 482 /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
 483 /*CFI_REL_OFFSET cs,CS-\ref*/
 484 CFI_REL_OFFSET rip,RIP-\ref
 485 .endm
 486
 487/* initial frame state for interrupts (and exceptions without error code) */
 488#define INTR_FRAME _frame RIP
 489/* initial frame state for exceptions with error code (and interrupts with
 490 vector already pushed) */
 491#define XCPT_FRAME _frame ORIG_RAX
492
1da177e4
LT
493/*
494 * Interrupt entry/exit.
495 *
496 * Interrupt entry points save only callee clobbered registers in fast path.
497 *
498 * Entry runs with interrupts off.
499 */
500
501/* 0(%rsp): interrupt number */
502 .macro interrupt func
1da177e4 503 cld
1da177e4
LT
504 SAVE_ARGS
505 leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler
1de9c3f6
JB
506 pushq %rbp
507 CFI_ADJUST_CFA_OFFSET 8
508 CFI_REL_OFFSET rbp, 0
509 movq %rsp,%rbp
510 CFI_DEF_CFA_REGISTER rbp
1da177e4
LT
511 testl $3,CS(%rdi)
512 je 1f
513 swapgs
3829ee6b 5141: incl %gs:pda_irqcount # RED-PEN should check preempt count
1de9c3f6 515 cmoveq %gs:pda_irqstackptr,%rsp
2699500b 516 push %rbp # backlink for old unwinder
2601e64d
IM
517 /*
518 * We entered an interrupt context - irqs are off:
519 */
520 TRACE_IRQS_OFF
1da177e4
LT
521 call \func
522 .endm
523
524ENTRY(common_interrupt)
7effaa88 525 XCPT_FRAME
1da177e4
LT
526 interrupt do_IRQ
527 /* 0(%rsp): oldrsp-ARGOFFSET */
7effaa88 528ret_from_intr:
1da177e4 529 cli
2601e64d 530 TRACE_IRQS_OFF
3829ee6b 531 decl %gs:pda_irqcount
1de9c3f6 532 leaveq
7effaa88 533 CFI_DEF_CFA_REGISTER rsp
1de9c3f6 534 CFI_ADJUST_CFA_OFFSET -8
7effaa88 535exit_intr:
1da177e4
LT
536 GET_THREAD_INFO(%rcx)
537 testl $3,CS-ARGOFFSET(%rsp)
538 je retint_kernel
539
540 /* Interrupt came from user space */
541 /*
542 * Has a correct top of stack, but a partial stack frame
543 * %rcx: thread info. Interrupts off.
544 */
545retint_with_reschedule:
546 movl $_TIF_WORK_MASK,%edi
7effaa88 547retint_check:
1da177e4
LT
548 movl threadinfo_flags(%rcx),%edx
549 andl %edi,%edx
7effaa88 550 CFI_REMEMBER_STATE
1da177e4
LT
551 jnz retint_careful
552retint_swapgs:
2601e64d
IM
553 /*
554 * The iretq could re-enable interrupts:
555 */
556 cli
557 TRACE_IRQS_IRETQ
1da177e4 558 swapgs
2601e64d
IM
559 jmp restore_args
560
1da177e4
LT
561retint_restore_args:
562 cli
2601e64d
IM
563 /*
564 * The iretq could re-enable interrupts:
565 */
566 TRACE_IRQS_IRETQ
567restore_args:
1da177e4
LT
568 RESTORE_ARGS 0,8,0
569iret_label:
570 iretq
571
572 .section __ex_table,"a"
573 .quad iret_label,bad_iret
574 .previous
575 .section .fixup,"ax"
576 /* force a signal here? this matches i386 behaviour */
577 /* running with kernel gs */
578bad_iret:
3076a492 579 movq $11,%rdi /* SIGSEGV */
2601e64d 580 TRACE_IRQS_ON
2391c4b5 581 sti
1da177e4
LT
582 jmp do_exit
583 .previous
584
7effaa88 585 /* edi: workmask, edx: work */
1da177e4 586retint_careful:
7effaa88 587 CFI_RESTORE_STATE
1da177e4
LT
588 bt $TIF_NEED_RESCHED,%edx
589 jnc retint_signal
2601e64d 590 TRACE_IRQS_ON
1da177e4
LT
591 sti
592 pushq %rdi
7effaa88 593 CFI_ADJUST_CFA_OFFSET 8
1da177e4
LT
594 call schedule
595 popq %rdi
7effaa88 596 CFI_ADJUST_CFA_OFFSET -8
1da177e4
LT
597 GET_THREAD_INFO(%rcx)
598 cli
2601e64d 599 TRACE_IRQS_OFF
1da177e4
LT
600 jmp retint_check
601
602retint_signal:
10ffdbb8
AK
603 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
604 jz retint_swapgs
2601e64d 605 TRACE_IRQS_ON
1da177e4
LT
606 sti
607 SAVE_REST
608 movq $-1,ORIG_RAX(%rsp)
3829ee6b 609 xorl %esi,%esi # oldset
1da177e4
LT
610 movq %rsp,%rdi # &pt_regs
611 call do_notify_resume
612 RESTORE_REST
613 cli
2601e64d 614 TRACE_IRQS_OFF
10ffdbb8 615 movl $_TIF_NEED_RESCHED,%edi
be9e6870 616 GET_THREAD_INFO(%rcx)
1da177e4
LT
617 jmp retint_check
618
619#ifdef CONFIG_PREEMPT
620 /* Returning to kernel space. Check if we need preemption */
621 /* rcx: threadinfo. interrupts off. */
622 .p2align
623retint_kernel:
624 cmpl $0,threadinfo_preempt_count(%rcx)
625 jnz retint_restore_args
626 bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
627 jnc retint_restore_args
628 bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
629 jnc retint_restore_args
630 call preempt_schedule_irq
631 jmp exit_intr
632#endif
4b787e0b 633
1da177e4 634 CFI_ENDPROC
4b787e0b 635END(common_interrupt)
1da177e4
LT
636
637/*
638 * APIC interrupts.
639 */
/*
 * apicinterrupt: entry stub for an APIC interrupt vector.  Pushes
 * the bitwise-complemented vector number (~\num) into the orig_rax
 * slot, then runs the common `interrupt' sequence with handler
 * \func and returns through ret_from_intr.
 */
 640 .macro apicinterrupt num,func
7effaa88 641 INTR_FRAME
19eadf98 642 pushq $~(\num)
7effaa88 643 CFI_ADJUST_CFA_OFFSET 8
1da177e4
LT
 644 interrupt \func
 645 jmp ret_from_intr
 646 CFI_ENDPROC
 647 .endm
648
649ENTRY(thermal_interrupt)
650 apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
4b787e0b 651END(thermal_interrupt)
1da177e4 652
89b831ef
JS
653ENTRY(threshold_interrupt)
654 apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
4b787e0b 655END(threshold_interrupt)
89b831ef 656
1da177e4
LT
657#ifdef CONFIG_SMP
658ENTRY(reschedule_interrupt)
659 apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
4b787e0b 660END(reschedule_interrupt)
1da177e4 661
e5bc8b6b
AK
662 .macro INVALIDATE_ENTRY num
663ENTRY(invalidate_interrupt\num)
664 apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
4b787e0b 665END(invalidate_interrupt\num)
e5bc8b6b
AK
666 .endm
667
668 INVALIDATE_ENTRY 0
669 INVALIDATE_ENTRY 1
670 INVALIDATE_ENTRY 2
671 INVALIDATE_ENTRY 3
672 INVALIDATE_ENTRY 4
673 INVALIDATE_ENTRY 5
674 INVALIDATE_ENTRY 6
675 INVALIDATE_ENTRY 7
1da177e4
LT
676
677ENTRY(call_function_interrupt)
678 apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
4b787e0b 679END(call_function_interrupt)
1da177e4
LT
680#endif
681
682#ifdef CONFIG_X86_LOCAL_APIC
683ENTRY(apic_timer_interrupt)
684 apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
4b787e0b 685END(apic_timer_interrupt)
1da177e4
LT
686
687ENTRY(error_interrupt)
688 apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
4b787e0b 689END(error_interrupt)
1da177e4
LT
690
691ENTRY(spurious_interrupt)
692 apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
4b787e0b 693END(spurious_interrupt)
1da177e4
LT
694#endif
695
696/*
697 * Exception entry points.
698 */
/*
 * zeroentry: entry stub for exceptions that push NO error code.  A
 * zero is pushed so the frame layout matches the error-code case,
 * %rax is parked in the rdi slot, the C handler address is loaded
 * into %rax, and control jumps to the shared error_entry.
 */
 699 .macro zeroentry sym
7effaa88 700 INTR_FRAME
1da177e4 701 pushq $0 /* push error code/oldrax */
7effaa88 702 CFI_ADJUST_CFA_OFFSET 8
1da177e4 703 pushq %rax /* push real oldrax to the rdi slot */
7effaa88 704 CFI_ADJUST_CFA_OFFSET 8
1da177e4
LT
 705 leaq \sym(%rip),%rax
 706 jmp error_entry
7effaa88 707 CFI_ENDPROC
1da177e4
LT
 708 .endm
 709
/*
 * errorentry: as zeroentry, but for exceptions where the CPU already
 * pushed an error code — only %rax needs to be saved.
 */
 710 .macro errorentry sym
7effaa88 711 XCPT_FRAME
1da177e4 712 pushq %rax
7effaa88 713 CFI_ADJUST_CFA_OFFSET 8
1da177e4
LT
 714 leaq \sym(%rip),%rax
 715 jmp error_entry
7effaa88 716 CFI_ENDPROC
1da177e4
LT
 717 .endm
718
719 /* error code is on the stack already */
720 /* handle NMI like exceptions that can happen everywhere */
2601e64d 721 .macro paranoidentry sym, ist=0, irqtrace=1
1da177e4
LT
722 SAVE_ALL
723 cld
724 movl $1,%ebx
725 movl $MSR_GS_BASE,%ecx
726 rdmsr
727 testl %edx,%edx
728 js 1f
729 swapgs
730 xorl %ebx,%ebx
b556b35e
JB
7311:
732 .if \ist
733 movq %gs:pda_data_offset, %rbp
734 .endif
735 movq %rsp,%rdi
1da177e4
LT
736 movq ORIG_RAX(%rsp),%rsi
737 movq $-1,ORIG_RAX(%rsp)
b556b35e 738 .if \ist
5f8efbb9 739 subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
b556b35e 740 .endif
1da177e4 741 call \sym
b556b35e 742 .if \ist
5f8efbb9 743 addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
b556b35e 744 .endif
6fefb0d1 745 cli
2601e64d
IM
746 .if \irqtrace
747 TRACE_IRQS_OFF
748 .endif
1da177e4 749 .endm
2601e64d
IM
750
751 /*
752 * "Paranoid" exit path from exception stack.
753 * Paranoid because this is used by NMIs and cannot take
754 * any kernel state for granted.
755 * We don't do kernel preemption checks here, because only
756 * NMI should be common and it does not enable IRQs and
757 * cannot get reschedule ticks.
758 *
759 * "trace" is 0 for the NMI handler only, because irq-tracing
760 * is fundamentally NMI-unsafe. (we cannot change the soft and
761 * hard flags at once, atomically)
762 */
763 .macro paranoidexit trace=1
764 /* ebx: no swapgs flag */
765paranoid_exit\trace:
766 testl %ebx,%ebx /* swapgs needed? */
767 jnz paranoid_restore\trace
768 testl $3,CS(%rsp)
769 jnz paranoid_userspace\trace
770paranoid_swapgs\trace:
771 TRACE_IRQS_IRETQ 0
772 swapgs
773paranoid_restore\trace:
774 RESTORE_ALL 8
775 iretq
776paranoid_userspace\trace:
777 GET_THREAD_INFO(%rcx)
778 movl threadinfo_flags(%rcx),%ebx
779 andl $_TIF_WORK_MASK,%ebx
780 jz paranoid_swapgs\trace
781 movq %rsp,%rdi /* &pt_regs */
782 call sync_regs
783 movq %rax,%rsp /* switch stack for scheduling */
784 testl $_TIF_NEED_RESCHED,%ebx
785 jnz paranoid_schedule\trace
786 movl %ebx,%edx /* arg3: thread flags */
787 .if \trace
788 TRACE_IRQS_ON
789 .endif
790 sti
791 xorl %esi,%esi /* arg2: oldset */
792 movq %rsp,%rdi /* arg1: &pt_regs */
793 call do_notify_resume
794 cli
795 .if \trace
796 TRACE_IRQS_OFF
797 .endif
798 jmp paranoid_userspace\trace
799paranoid_schedule\trace:
800 .if \trace
801 TRACE_IRQS_ON
802 .endif
803 sti
804 call schedule
805 cli
806 .if \trace
807 TRACE_IRQS_OFF
808 .endif
809 jmp paranoid_userspace\trace
810 CFI_ENDPROC
811 .endm
812
1da177e4
LT
813/*
814 * Exception entry point. This expects an error code/orig_rax on the stack
815 * and the exception handler in %rax.
816 */
817ENTRY(error_entry)
7effaa88 818 _frame RDI
1da177e4
LT
819 /* rdi slot contains rax, oldrax contains error code */
820 cld
821 subq $14*8,%rsp
822 CFI_ADJUST_CFA_OFFSET (14*8)
823 movq %rsi,13*8(%rsp)
824 CFI_REL_OFFSET rsi,RSI
825 movq 14*8(%rsp),%rsi /* load rax from rdi slot */
826 movq %rdx,12*8(%rsp)
827 CFI_REL_OFFSET rdx,RDX
828 movq %rcx,11*8(%rsp)
829 CFI_REL_OFFSET rcx,RCX
830 movq %rsi,10*8(%rsp) /* store rax */
831 CFI_REL_OFFSET rax,RAX
832 movq %r8, 9*8(%rsp)
833 CFI_REL_OFFSET r8,R8
834 movq %r9, 8*8(%rsp)
835 CFI_REL_OFFSET r9,R9
836 movq %r10,7*8(%rsp)
837 CFI_REL_OFFSET r10,R10
838 movq %r11,6*8(%rsp)
839 CFI_REL_OFFSET r11,R11
840 movq %rbx,5*8(%rsp)
841 CFI_REL_OFFSET rbx,RBX
842 movq %rbp,4*8(%rsp)
843 CFI_REL_OFFSET rbp,RBP
844 movq %r12,3*8(%rsp)
845 CFI_REL_OFFSET r12,R12
846 movq %r13,2*8(%rsp)
847 CFI_REL_OFFSET r13,R13
848 movq %r14,1*8(%rsp)
849 CFI_REL_OFFSET r14,R14
850 movq %r15,(%rsp)
851 CFI_REL_OFFSET r15,R15
852 xorl %ebx,%ebx
853 testl $3,CS(%rsp)
854 je error_kernelspace
855error_swapgs:
856 swapgs
857error_sti:
858 movq %rdi,RDI(%rsp)
859 movq %rsp,%rdi
860 movq ORIG_RAX(%rsp),%rsi /* get error code */
861 movq $-1,ORIG_RAX(%rsp)
862 call *%rax
863 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
864error_exit:
865 movl %ebx,%eax
866 RESTORE_REST
867 cli
2601e64d 868 TRACE_IRQS_OFF
1da177e4
LT
869 GET_THREAD_INFO(%rcx)
870 testl %eax,%eax
871 jne retint_kernel
872 movl threadinfo_flags(%rcx),%edx
873 movl $_TIF_WORK_MASK,%edi
874 andl %edi,%edx
875 jnz retint_careful
2601e64d
IM
876 /*
877 * The iret might restore flags:
878 */
879 TRACE_IRQS_IRETQ
1da177e4
LT
880 swapgs
881 RESTORE_ARGS 0,8,0
505cc4e1 882 jmp iret_label
1da177e4
LT
883 CFI_ENDPROC
884
885error_kernelspace:
886 incl %ebx
887 /* There are two places in the kernel that can potentially fault with
888 usergs. Handle them here. The exception handlers after
889 iret run with kernel gs again, so don't set the user space flag.
890 B stepping K8s sometimes report an truncated RIP for IRET
891 exceptions returning to compat mode. Check for these here too. */
892 leaq iret_label(%rip),%rbp
893 cmpq %rbp,RIP(%rsp)
894 je error_swapgs
895 movl %ebp,%ebp /* zero extend */
896 cmpq %rbp,RIP(%rsp)
897 je error_swapgs
898 cmpq $gs_change,RIP(%rsp)
899 je error_swapgs
900 jmp error_sti
4b787e0b 901END(error_entry)
1da177e4
LT
902
903 /* Reload gs selector with exception handling */
904 /* edi: new selector */
/*
 * load_gs_index: reload the %gs selector with exception handling.
 * In: edi = new selector.  Flags are saved and interrupts disabled
 * around the swapgs pair so the kernel GS base cannot be lost to an
 * interrupt.  If the selector load at gs_change faults, the
 * __ex_table entry below redirects to bad_gs, which loads a null
 * %gs instead.  The mfence is a documented hardware workaround.
 */
 905ENTRY(load_gs_index)
7effaa88 906 CFI_STARTPROC
1da177e4 907 pushf
7effaa88 908 CFI_ADJUST_CFA_OFFSET 8
1da177e4
LT
 909 cli
 910 swapgs
 911gs_change:
 912 movl %edi,%gs
 9132: mfence /* workaround */
 914 swapgs
 915 popf
7effaa88 916 CFI_ADJUST_CFA_OFFSET -8
1da177e4 917 ret
7effaa88 918 CFI_ENDPROC
4b787e0b 919ENDPROC(load_gs_index)
1da177e4
LT
920
921 .section __ex_table,"a"
922 .align 8
923 .quad gs_change,bad_gs
924 .previous
925 .section .fixup,"ax"
926 /* running with kernelgs */
927bad_gs:
928 swapgs /* switch back to user gs */
929 xorl %eax,%eax
930 movl %eax,%gs
931 jmp 2b
932 .previous
933
934/*
935 * Create a kernel thread.
936 *
937 * C extern interface:
938 * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
939 *
940 * asm input arguments:
941 * rdi: fn, rsi: arg, rdx: flags
942 */
943ENTRY(kernel_thread)
944 CFI_STARTPROC
945 FAKE_STACK_FRAME $child_rip
946 SAVE_ALL
947
948 # rdi: flags, rsi: usp, rdx: will be &pt_regs
949 movq %rdx,%rdi
950 orq kernel_thread_flags(%rip),%rdi
951 movq $-1, %rsi
952 movq %rsp, %rdx
953
954 xorl %r8d,%r8d
955 xorl %r9d,%r9d
956
957 # clone now
958 call do_fork
959 movq %rax,RAX(%rsp)
960 xorl %edi,%edi
961
962 /*
963 * It isn't worth to check for reschedule here,
964 * so internally to the x86_64 port you can rely on kernel_thread()
965 * not to reschedule the child before returning, this avoids the need
966 * of hacks for example to fork off the per-CPU idle tasks.
967 * [Hopefully no generic code relies on the reschedule -AK]
968 */
969 RESTORE_ALL
970 UNFAKE_STACK_FRAME
971 ret
972 CFI_ENDPROC
4b787e0b 973ENDPROC(kernel_thread)
1da177e4
LT
974
975child_rip:
976 /*
977 * Here we are in the child and the registers are set as they were
978 * at kernel_thread() invocation in the parent.
979 */
980 movq %rdi, %rax
981 movq %rsi, %rdi
982 call *%rax
983 # exit
3829ee6b 984 xorl %edi, %edi
1da177e4 985 call do_exit
4b787e0b 986ENDPROC(child_rip)
1da177e4
LT
987
988/*
989 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
990 *
991 * C extern interface:
992 * extern long execve(char *name, char **argv, char **envp)
993 *
994 * asm input arguments:
995 * rdi: name, rsi: argv, rdx: envp
996 *
997 * We want to fallback into:
998 * extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs regs)
999 *
1000 * do_sys_execve asm fallback arguments:
1001 * rdi: name, rsi: argv, rdx: envp, fake frame on the stack
1002 */
1003ENTRY(execve)
1004 CFI_STARTPROC
1005 FAKE_STACK_FRAME $0
1006 SAVE_ALL
1007 call sys_execve
1008 movq %rax, RAX(%rsp)
1009 RESTORE_REST
1010 testq %rax,%rax
1011 je int_ret_from_sys_call
1012 RESTORE_ARGS
1013 UNFAKE_STACK_FRAME
1014 ret
1015 CFI_ENDPROC
4b787e0b 1016ENDPROC(execve)
1da177e4 1017
0f2fbdcb 1018KPROBE_ENTRY(page_fault)
1da177e4 1019 errorentry do_page_fault
4b787e0b 1020END(page_fault)
0f2fbdcb 1021 .previous .text
1da177e4
LT
1022
1023ENTRY(coprocessor_error)
1024 zeroentry do_coprocessor_error
4b787e0b 1025END(coprocessor_error)
1da177e4
LT
1026
1027ENTRY(simd_coprocessor_error)
1028 zeroentry do_simd_coprocessor_error
4b787e0b 1029END(simd_coprocessor_error)
1da177e4
LT
1030
1031ENTRY(device_not_available)
1032 zeroentry math_state_restore
4b787e0b 1033END(device_not_available)
1da177e4
LT
1034
1035 /* runs on exception stack */
0f2fbdcb 1036KPROBE_ENTRY(debug)
7effaa88 1037 INTR_FRAME
1da177e4
LT
1038 pushq $0
1039 CFI_ADJUST_CFA_OFFSET 8
5f8efbb9 1040 paranoidentry do_debug, DEBUG_STACK
2601e64d 1041 paranoidexit
4b787e0b 1042END(debug)
0f2fbdcb 1043 .previous .text
1da177e4
LT
1044
1045 /* runs on exception stack */
eddb6fb9 1046KPROBE_ENTRY(nmi)
7effaa88 1047 INTR_FRAME
1da177e4 1048 pushq $-1
7effaa88 1049 CFI_ADJUST_CFA_OFFSET 8
2601e64d
IM
1050 paranoidentry do_nmi, 0, 0
1051#ifdef CONFIG_TRACE_IRQFLAGS
1052 paranoidexit 0
1053#else
1054 jmp paranoid_exit1
1055 CFI_ENDPROC
1056#endif
4b787e0b 1057END(nmi)
eddb6fb9 1058 .previous .text
6fefb0d1 1059
0f2fbdcb 1060KPROBE_ENTRY(int3)
b556b35e
JB
1061 INTR_FRAME
1062 pushq $0
1063 CFI_ADJUST_CFA_OFFSET 8
5f8efbb9 1064 paranoidentry do_int3, DEBUG_STACK
2601e64d 1065 jmp paranoid_exit1
b556b35e 1066 CFI_ENDPROC
4b787e0b 1067END(int3)
0f2fbdcb 1068 .previous .text
1da177e4
LT
1069
1070ENTRY(overflow)
1071 zeroentry do_overflow
4b787e0b 1072END(overflow)
1da177e4
LT
1073
1074ENTRY(bounds)
1075 zeroentry do_bounds
4b787e0b 1076END(bounds)
1da177e4
LT
1077
1078ENTRY(invalid_op)
1079 zeroentry do_invalid_op
4b787e0b 1080END(invalid_op)
1da177e4
LT
1081
1082ENTRY(coprocessor_segment_overrun)
1083 zeroentry do_coprocessor_segment_overrun
4b787e0b 1084END(coprocessor_segment_overrun)
1da177e4
LT
1085
1086ENTRY(reserved)
1087 zeroentry do_reserved
4b787e0b 1088END(reserved)
1da177e4
LT
1089
1090 /* runs on exception stack */
1091ENTRY(double_fault)
7effaa88 1092 XCPT_FRAME
1da177e4 1093 paranoidentry do_double_fault
2601e64d 1094 jmp paranoid_exit1
1da177e4 1095 CFI_ENDPROC
4b787e0b 1096END(double_fault)
1da177e4
LT
1097
1098ENTRY(invalid_TSS)
1099 errorentry do_invalid_TSS
4b787e0b 1100END(invalid_TSS)
1da177e4
LT
1101
1102ENTRY(segment_not_present)
1103 errorentry do_segment_not_present
4b787e0b 1104END(segment_not_present)
1da177e4
LT
1105
1106 /* runs on exception stack */
1107ENTRY(stack_segment)
7effaa88 1108 XCPT_FRAME
1da177e4 1109 paranoidentry do_stack_segment
2601e64d 1110 jmp paranoid_exit1
1da177e4 1111 CFI_ENDPROC
4b787e0b 1112END(stack_segment)
1da177e4 1113
0f2fbdcb 1114KPROBE_ENTRY(general_protection)
1da177e4 1115 errorentry do_general_protection
4b787e0b 1116END(general_protection)
0f2fbdcb 1117 .previous .text
1da177e4
LT
1118
1119ENTRY(alignment_check)
1120 errorentry do_alignment_check
4b787e0b 1121END(alignment_check)
1da177e4
LT
1122
1123ENTRY(divide_error)
1124 zeroentry do_divide_error
4b787e0b 1125END(divide_error)
1da177e4
LT
1126
1127ENTRY(spurious_interrupt_bug)
1128 zeroentry do_spurious_interrupt_bug
4b787e0b 1129END(spurious_interrupt_bug)
1da177e4
LT
1130
1131#ifdef CONFIG_X86_MCE
1132 /* runs on exception stack */
1133ENTRY(machine_check)
7effaa88 1134 INTR_FRAME
1da177e4
LT
1135 pushq $0
1136 CFI_ADJUST_CFA_OFFSET 8
1137 paranoidentry do_machine_check
2601e64d 1138 jmp paranoid_exit1
1da177e4 1139 CFI_ENDPROC
4b787e0b 1140END(machine_check)
1da177e4
LT
1141#endif
1142
2699500b 1143/* Call softirq on interrupt stack. Interrupts are off. */
/*
 * call_softirq: run __do_softirq on the per-cpu irq stack.
 * Interrupts are off on entry (see comment above).  An rbp frame is
 * set up, %gs:pda_irqcount is incremented, and the cmove switches
 * %rsp to pda_irqstackptr when the incl left ZF set — i.e. only for
 * the outermost entry (NOTE(review): this relies on pda_irqcount
 * being -1 when not on the irq stack; confirm against the pda
 * definition).  The old %rbp is pushed as a backlink for the old
 * unwinder; leaveq undoes both the push and any stack switch.
 */
ed6b676c 1144ENTRY(call_softirq)
7effaa88 1145 CFI_STARTPROC
2699500b
AK
 1146 push %rbp
 1147 CFI_ADJUST_CFA_OFFSET 8
 1148 CFI_REL_OFFSET rbp,0
 1149 mov %rsp,%rbp
 1150 CFI_DEF_CFA_REGISTER rbp
ed6b676c 1151 incl %gs:pda_irqcount
2699500b
AK
 1152 cmove %gs:pda_irqstackptr,%rsp
 1153 push %rbp # backlink for old unwinder
ed6b676c 1154 call __do_softirq
2699500b 1155 leaveq
7effaa88 1156 CFI_DEF_CFA_REGISTER rsp
2699500b 1157 CFI_ADJUST_CFA_OFFSET -8
ed6b676c 1158 decl %gs:pda_irqcount
ed6b676c 1159 ret
7effaa88 1160 CFI_ENDPROC
4b787e0b 1161ENDPROC(call_softirq)
b538ed27
JB
1162
1163#ifdef CONFIG_STACK_UNWIND
1164ENTRY(arch_unwind_init_running)
1165 CFI_STARTPROC
1166 movq %r15, R15(%rdi)
1167 movq %r14, R14(%rdi)
1168 xchgq %rsi, %rdx
1169 movq %r13, R13(%rdi)
1170 movq %r12, R12(%rdi)
1171 xorl %eax, %eax
1172 movq %rbp, RBP(%rdi)
1173 movq %rbx, RBX(%rdi)
1174 movq (%rsp), %rcx
1175 movq %rax, R11(%rdi)
1176 movq %rax, R10(%rdi)
1177 movq %rax, R9(%rdi)
1178 movq %rax, R8(%rdi)
1179 movq %rax, RAX(%rdi)
1180 movq %rax, RCX(%rdi)
1181 movq %rax, RDX(%rdi)
1182 movq %rax, RSI(%rdi)
1183 movq %rax, RDI(%rdi)
1184 movq %rax, ORIG_RAX(%rdi)
1185 movq %rcx, RIP(%rdi)
1186 leaq 8(%rsp), %rcx
1187 movq $__KERNEL_CS, CS(%rdi)
1188 movq %rax, EFLAGS(%rdi)
1189 movq %rcx, RSP(%rdi)
1190 movq $__KERNEL_DS, SS(%rdi)
1191 jmpq *%rdx
1192 CFI_ENDPROC
1193ENDPROC(arch_unwind_init_running)
1194#endif