/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture defined interrupt frame from SS to RIP
 * at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 * backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 * There are unfortunately lots of special cases where some registers are
 * not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 * Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 * frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>

	.code64

#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif


.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt	$9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * C code is not supposed to know about the undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path, FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

	/* %rsp: at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp
	movq	%gs:pda_oldrsp,\tmp
	movq	\tmp,RSP(%rsp)
	movq	$__USER_DS,SS(%rsp)
	movq	$__USER_CS,CS(%rsp)
	movq	$-1,RCX(%rsp)
	movq	R11(%rsp),\tmp	/* get eflags */
	movq	\tmp,EFLAGS(%rsp)
	.endm

	.macro RESTORE_TOP_OF_STACK tmp,offset=0
	movq	RSP-\offset(%rsp),\tmp
	movq	\tmp,%gs:pda_oldrsp
	movq	EFLAGS-\offset(%rsp),\tmp
	movq	\tmp,R11-\offset(%rsp)
	.endm
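
/*
 * For illustration (not from the original source): after FIXUP_TOP_OF_STACK
 * the top of the stack looks like a regular interrupt frame, even though
 * SYSCALL never built one:
 *
 *	SS	= __USER_DS
 *	RSP	= saved user stack pointer (%gs:pda_oldrsp)
 *	EFLAGS	= user rflags (SYSCALL stashed them in %r11)
 *	CS	= __USER_CS
 *	RIP	= user return address (stored at syscall entry from %rcx)
 *
 * RCX is set to -1 because SYSCALL itself clobbered the user value.
 */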

	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl	%eax, %eax
	pushq	%rax		/* ss */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	ss,0*/
	pushq	%rax		/* rsp */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rsp,0
	pushq	$(1<<9)		/* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	rflags,0*/
	pushq	$__KERNEL_CS	/* cs */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	cs,0*/
	pushq	\child_rip	/* rip */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rip,0
	pushq	%rax		/* orig rax */
	CFI_ADJUST_CFA_OFFSET	8
	.endm

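/*
 * Note (added for clarity): the fake frame is six qwords - ss, rsp,
 * eflags, cs, rip and orig_rax - which is why UNFAKE_STACK_FRAME below
 * pops exactly 8*6 bytes.
 */
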
	.macro UNFAKE_STACK_FRAME
	addq	$8*6, %rsp
	CFI_ADJUST_CFA_OFFSET	-(6*8)
	.endm

	.macro CFI_DEFAULT_STACK start=1
	.if \start
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,SS+8
	.else
	CFI_DEF_CFA_OFFSET SS+8
	.endif
	CFI_REL_OFFSET	r15,R15
	CFI_REL_OFFSET	r14,R14
	CFI_REL_OFFSET	r13,R13
	CFI_REL_OFFSET	r12,R12
	CFI_REL_OFFSET	rbp,RBP
	CFI_REL_OFFSET	rbx,RBX
	CFI_REL_OFFSET	r11,R11
	CFI_REL_OFFSET	r10,R10
	CFI_REL_OFFSET	r9,R9
	CFI_REL_OFFSET	r8,R8
	CFI_REL_OFFSET	rax,RAX
	CFI_REL_OFFSET	rcx,RCX
	CFI_REL_OFFSET	rdx,RDX
	CFI_REL_OFFSET	rsi,RSI
	CFI_REL_OFFSET	rdi,RDI
	CFI_REL_OFFSET	rip,RIP
	/*CFI_REL_OFFSET	cs,CS*/
	/*CFI_REL_OFFSET	rflags,EFLAGS*/
	CFI_REL_OFFSET	rsp,RSP
	/*CFI_REL_OFFSET	ss,SS*/
	.endm
/*
 * A newly forked process directly context switches into this.
 */
/* rdi:	prev */
ENTRY(ret_from_fork)
	CFI_DEFAULT_STACK
	push	kernel_eflags(%rip)
	CFI_ADJUST_CFA_OFFSET	8	/* pushq moves %rsp by 8, not 4 */
	popf				# reset kernel eflags
	CFI_ADJUST_CFA_OFFSET	-8
	call	schedule_tail
	GET_THREAD_INFO(%rcx)
	testl	$(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
	jnz	rff_trace
rff_action:
	RESTORE_REST
	testl	$3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je	int_ret_from_sys_call
	testl	$_TIF_IA32,threadinfo_flags(%rcx)
	jnz	int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
	jmp	ret_from_sys_call
rff_trace:
	movq	%rsp,%rdi
	call	syscall_trace_leave
	GET_THREAD_INFO(%rcx)
	jmp	rff_action
	CFI_ENDPROC
END(ret_from_fork)

/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the stack
 *	frame and report it properly in ps. Unfortunately we don't have one.
 *
 * When the user can change the frames, always force IRET. That is because
 * it deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */

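/*
 * For illustration (user-space side of this convention, not part of this
 * file): a write(1, buf, len) system call is set up roughly as
 *
 *	movq	$__NR_write,%rax	# system call number
 *	movq	$1,%rdi			# arg0: fd
 *	leaq	buf(%rip),%rsi		# arg1: buffer (hypothetical symbol)
 *	movq	$LEN,%rdx		# arg2: count (hypothetical constant)
 *	syscall				# rcx/r11 clobbered by the CPU
 */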
ENTRY(system_call)
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
	CFI_REGISTER	rip,rcx
	/*CFI_REGISTER	rflags,r11*/
	swapgs
	movq	%rsp,%gs:pda_oldrsp
	movq	%gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	sti
	SAVE_ARGS 8,1
	movq	%rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq	%rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl	$(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
	jnz	tracesys
	cmpq	$__NR_syscall_max,%rax
	ja	badsys
	movq	%r10,%rcx
	call	*sys_call_table(,%rax,8)	# XXX: rip relative
	movq	%rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
	movl	$_TIF_ALLWORK_MASK,%edi
	/* edi:	flagmask */
sysret_check:
	LOCKDEP_SYS_EXIT
	GET_THREAD_INFO(%rcx)
	cli
	TRACE_IRQS_OFF
	movl	threadinfo_flags(%rcx),%edx
	andl	%edi,%edx
	jnz	sysret_careful
	CFI_REMEMBER_STATE
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq	RIP-ARGOFFSET(%rsp),%rcx
	CFI_REGISTER	rip,rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER	rflags,r11*/
	movq	%gs:pda_oldrsp,%rsp
	swapgs
	sysretq

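/*
 * A note on the fast path above (added for clarity): sysretq takes the
 * return RIP from %rcx and RFLAGS from %r11, which is why RIP is reloaded
 * into %rcx just before RESTORE_ARGS; restoring RFLAGS is also what
 * re-enables interrupts on the way out.
 */
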
	CFI_RESTORE_STATE
	/* Handle reschedules */
	/* edx:	work, edi: workmask */
sysret_careful:
	bt	$TIF_NEED_RESCHED,%edx
	jnc	sysret_signal
	TRACE_IRQS_ON
	sti
	pushq	%rdi
	CFI_ADJUST_CFA_OFFSET	8
	call	schedule
	popq	%rdi
	CFI_ADJUST_CFA_OFFSET	-8
	jmp	sysret_check

	/* Handle a signal */
sysret_signal:
	TRACE_IRQS_ON
	sti
	testl	$_TIF_DO_NOTIFY_MASK,%edx
	jz	1f

	/* Really a signal */
	/* edx:	work flags (arg3) */
	leaq	do_notify_resume(%rip),%rax
	leaq	-ARGOFFSET(%rsp),%rdi	# &pt_regs -> arg1
	xorl	%esi,%esi		# oldset -> arg2
	call	ptregscall_common
1:	movl	$_TIF_NEED_RESCHED,%edi
	/* Use IRET because user could have changed frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	cli
	TRACE_IRQS_OFF
	jmp	int_with_check

badsys:
	movq	$-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp	ret_from_sys_call

	/* Do syscall tracing */
tracesys:
	SAVE_REST
	movq	$-ENOSYS,RAX(%rsp)
	FIXUP_TOP_OF_STACK %rdi
	movq	%rsp,%rdi
	call	syscall_trace_enter
	LOAD_ARGS ARGOFFSET	/* reload args from stack in case ptrace changed it */
	RESTORE_REST
	cmpq	$__NR_syscall_max,%rax
	movq	$-ENOSYS,%rcx
	cmova	%rcx,%rax
	ja	1f
	movq	%r10,%rcx	/* fixup for C */
	call	*sys_call_table(,%rax,8)
1:	movq	%rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because user could have changed frame */

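/*
 * A note on the cmpq/cmova pair above (added for clarity): when the
 * syscall number is out of range, cmova preloads %rax with -ENOSYS and
 * the ja skips the table call, so the slot written at 1: already holds
 * the error return value.
 */
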
/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
	.globl int_ret_from_sys_call
int_ret_from_sys_call:
	cli
	TRACE_IRQS_OFF
	testl	$3,CS-ARGOFFSET(%rsp)
	je	retint_restore_args
	movl	$_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
int_with_check:
	LOCKDEP_SYS_EXIT_IRQ
	GET_THREAD_INFO(%rcx)
	movl	threadinfo_flags(%rcx),%edx
	andl	%edi,%edx
	jnz	int_careful
	andl	$~TS_COMPAT,threadinfo_status(%rcx)
	jmp	retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
int_careful:
	bt	$TIF_NEED_RESCHED,%edx
	jnc	int_very_careful
	TRACE_IRQS_ON
	sti
	pushq	%rdi
	CFI_ADJUST_CFA_OFFSET	8
	call	schedule
	popq	%rdi
	CFI_ADJUST_CFA_OFFSET	-8
	cli
	TRACE_IRQS_OFF
	jmp	int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	TRACE_IRQS_ON
	sti
	SAVE_REST
	/* Check for syscall exit trace */
	testl	$(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
	jz	int_signal
	pushq	%rdi
	CFI_ADJUST_CFA_OFFSET	8
	leaq	8(%rsp),%rdi	# &ptregs -> arg1
	call	syscall_trace_leave
	popq	%rdi
	CFI_ADJUST_CFA_OFFSET	-8
	andl	$~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
	jmp	int_restore_rest

int_signal:
	testl	$_TIF_DO_NOTIFY_MASK,%edx
	jz	1f
	movq	%rsp,%rdi	# &ptregs -> arg1
	xorl	%esi,%esi	# oldset -> arg2
	call	do_notify_resume
1:	movl	$_TIF_NEED_RESCHED,%edi
int_restore_rest:
	RESTORE_REST
	cli
	TRACE_IRQS_OFF
	jmp	int_with_check
	CFI_ENDPROC
END(system_call)

/*
 * Certain special system calls need to save a complete full stack frame.
 */

	.macro PTREGSCALL label,func,arg
	.globl \label
\label:
	leaq	\func(%rip),%rax
	leaq	-ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
	jmp	ptregscall_common
END(\label)
	.endm

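/*
 * For illustration (expansion of the macro above, not literal source):
 * "PTREGSCALL stub_fork, sys_fork, %rdi" produces roughly
 *
 * stub_fork:
 *	leaq	sys_fork(%rip),%rax
 *	leaq	-ARGOFFSET+8(%rsp),%rdi
 *	jmp	ptregscall_common
 */
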
	CFI_STARTPROC

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
	popq	%r11
	CFI_ADJUST_CFA_OFFSET	-8
	CFI_REGISTER	rip, r11
	SAVE_REST
	movq	%r11, %r15
	CFI_REGISTER	rip, r15
	FIXUP_TOP_OF_STACK %r11
	call	*%rax
	RESTORE_TOP_OF_STACK %r11
	movq	%r15, %r11
	CFI_REGISTER	rip, r11
	RESTORE_REST
	pushq	%r11
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rip, 0
	ret
	CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
	CFI_STARTPROC
	popq	%r11
	CFI_ADJUST_CFA_OFFSET	-8
	CFI_REGISTER	rip, r11
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	call	sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq	%rax,RAX(%rsp)
	RESTORE_REST
	jmp	int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq	$8, %rsp
	CFI_ADJUST_CFA_OFFSET	-8
	SAVE_REST
	movq	%rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call	sys_rt_sigreturn
	movq	%rax,RAX(%rsp)	# fixme, this could be done at the higher layer
	RESTORE_REST
	jmp	int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)

/*
 * initial frame state for interrupts and exceptions
 */
	.macro _frame ref
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,SS+8-\ref
	/*CFI_REL_OFFSET ss,SS-\ref*/
	CFI_REL_OFFSET rsp,RSP-\ref
	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
	/*CFI_REL_OFFSET cs,CS-\ref*/
	CFI_REL_OFFSET rip,RIP-\ref
	.endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */
	.macro interrupt func
	cld
	SAVE_ARGS
	leaq	-ARGOFFSET(%rsp),%rdi	# arg1 for handler
	pushq	%rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rbp, 0
	movq	%rsp,%rbp
	CFI_DEF_CFA_REGISTER	rbp
	testl	$3,CS(%rdi)
	je	1f
	swapgs
	/* irqcount is used to check if a CPU is already on an interrupt
	   stack or not. While this is essentially redundant with preempt_count
	   it is a little cheaper to use a separate counter in the PDA
	   (short of moving irq_enter into assembly, which would be too
	   much work) */
1:	incl	%gs:pda_irqcount
	cmoveq	%gs:pda_irqstackptr,%rsp
	push	%rbp	# backlink for old unwinder
	/*
	 * We entered an interrupt context - irqs are off:
	 */
	TRACE_IRQS_OFF
	call	\func
	.endm

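/*
 * A note on the stack switch above (added for clarity; assumes pda_irqcount
 * starts at -1): the incl sets ZF only when this is the outermost interrupt,
 * so the cmoveq switches %rsp to the per-CPU interrupt stack exactly once;
 * nested interrupts keep running on the stack they arrived on.
 */
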
ENTRY(common_interrupt)
	XCPT_FRAME
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
	cli
	TRACE_IRQS_OFF
	decl	%gs:pda_irqcount
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl	$3,CS-ARGOFFSET(%rsp)
	je	retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl	$_TIF_WORK_MASK,%edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl	threadinfo_flags(%rcx),%edx
	andl	%edi,%edx
	CFI_REMEMBER_STATE
	jnz	retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	cli
	TRACE_IRQS_IRETQ
	swapgs
	jmp	restore_args

retint_restore_args:	/* return to kernel space */
	cli
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 0,8,0
iret_label:
	iretq

	.section __ex_table,"a"
	.quad iret_label,bad_iret
	.previous
	.section .fixup,"ax"
	/* force a signal here? this matches i386 behaviour */
	/* running with kernel gs */
bad_iret:
	movq	$11,%rdi	/* SIGSEGV */
	TRACE_IRQS_ON
	sti
	jmp	do_exit
	.previous

	/* edi: workmask, edx: work */
retint_careful:
	CFI_RESTORE_STATE
	bt	$TIF_NEED_RESCHED,%edx
	jnc	retint_signal
	TRACE_IRQS_ON
	sti
	pushq	%rdi
	CFI_ADJUST_CFA_OFFSET	8
	call	schedule
	popq	%rdi
	CFI_ADJUST_CFA_OFFSET	-8
	GET_THREAD_INFO(%rcx)
	cli
	TRACE_IRQS_OFF
	jmp	retint_check

retint_signal:
	testl	$_TIF_DO_NOTIFY_MASK,%edx
	jz	retint_swapgs
	TRACE_IRQS_ON
	sti
	SAVE_REST
	movq	$-1,ORIG_RAX(%rsp)
	xorl	%esi,%esi	# oldset
	movq	%rsp,%rdi	# &pt_regs
	call	do_notify_resume
	RESTORE_REST
	cli
	TRACE_IRQS_OFF
	movl	$_TIF_NEED_RESCHED,%edi
	GET_THREAD_INFO(%rcx)
	jmp	retint_check

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx:	threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl	$0,threadinfo_preempt_count(%rcx)
	jnz	retint_restore_args
	bt	$TIF_NEED_RESCHED,threadinfo_flags(%rcx)
	jnc	retint_restore_args
	bt	$9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc	retint_restore_args
	call	preempt_schedule_irq
	jmp	exit_intr
#endif

	CFI_ENDPROC
END(common_interrupt)

/*
 * APIC interrupts.
 */
	.macro apicinterrupt num,func
	INTR_FRAME
	pushq	$~(\num)
	CFI_ADJUST_CFA_OFFSET	8
	interrupt \func
	jmp	ret_from_intr
	CFI_ENDPROC
	.endm

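/*
 * A note on pushq $~(\num) above (added for clarity): the vector is pushed
 * bitwise-complemented so the value in the orig_rax slot is always negative
 * and cannot be confused with a system call number; the C handler undoes
 * the complement to recover the vector.
 */
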
ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
	.endm

	INVALIDATE_ENTRY 0
	INVALIDATE_ENTRY 1
	INVALIDATE_ENTRY 2
	INVALIDATE_ENTRY 3
	INVALIDATE_ENTRY 4
	INVALIDATE_ENTRY 5
	INVALIDATE_ENTRY 6
	INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
ENTRY(irq_move_cleanup_interrupt)
	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)
#endif

ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)

/*
 * Exception entry points.
 */
	.macro zeroentry sym
	INTR_FRAME
	pushq	$0	/* push error code/oldrax */
	CFI_ADJUST_CFA_OFFSET	8
	pushq	%rax	/* push real oldrax to the rdi slot */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rax,0
	leaq	\sym(%rip),%rax
	jmp	error_entry
	CFI_ENDPROC
	.endm

	.macro errorentry sym
	XCPT_FRAME
	pushq	%rax
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rax,0
	leaq	\sym(%rip),%rax
	jmp	error_entry
	CFI_ENDPROC
	.endm

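/*
 * A note on the two macros above (added for clarity): zeroentry is for
 * exceptions where the CPU pushes no error code, so a dummy 0 is pushed
 * to give both variants the identical stack layout that error_entry
 * expects; errorentry relies on the hardware-pushed error code instead.
 */
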
	/* error code is on the stack already */
	/* handle NMI like exceptions that can happen everywhere */
	.macro paranoidentry sym, ist=0, irqtrace=1
	SAVE_ALL
	cld
	movl	$1,%ebx
	movl	$MSR_GS_BASE,%ecx
	rdmsr
	testl	%edx,%edx
	js	1f
	swapgs
	xorl	%ebx,%ebx
1:
	.if \ist
	movq	%gs:pda_data_offset, %rbp
	.endif
	movq	%rsp,%rdi
	movq	ORIG_RAX(%rsp),%rsi
	movq	$-1,ORIG_RAX(%rsp)
	.if \ist
	subq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	call	\sym
	.if \ist
	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	cli
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
	.endm
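
/*
 * A note on the rdmsr test above (added for clarity): the kernel's GS base
 * points at the PDA, a kernel-half (negative) address, so a set sign bit in
 * %edx means we already run with the kernel GS and must not swapgs again;
 * %ebx records that decision for the paranoid exit path.
 */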

	/*
	 * "Paranoid" exit path from exception stack.
	 * Paranoid because this is used by NMIs and cannot take
	 * any kernel state for granted.
	 * We don't do kernel preemption checks here, because only
	 * NMI should be common and it does not enable IRQs and
	 * cannot get reschedule ticks.
	 *
	 * "trace" is 0 for the NMI handler only, because irq-tracing
	 * is fundamentally NMI-unsafe. (we cannot change the soft and
	 * hard flags at once, atomically)
	 */
	.macro paranoidexit trace=1
	/* ebx:	no swapgs flag */
paranoid_exit\trace:
	testl	%ebx,%ebx	/* swapgs needed? */
	jnz	paranoid_restore\trace
	testl	$3,CS(%rsp)
	jnz	paranoid_userspace\trace
paranoid_swapgs\trace:
	.if \trace
	TRACE_IRQS_IRETQ 0
	.endif
	swapgs
paranoid_restore\trace:
	RESTORE_ALL 8
	iretq
paranoid_userspace\trace:
	GET_THREAD_INFO(%rcx)
	movl	threadinfo_flags(%rcx),%ebx
	andl	$_TIF_WORK_MASK,%ebx
	jz	paranoid_swapgs\trace
	movq	%rsp,%rdi	/* &pt_regs */
	call	sync_regs
	movq	%rax,%rsp	/* switch stack for scheduling */
	testl	$_TIF_NEED_RESCHED,%ebx
	jnz	paranoid_schedule\trace
	movl	%ebx,%edx	/* arg3: thread flags */
	.if \trace
	TRACE_IRQS_ON
	.endif
	sti
	xorl	%esi,%esi	/* arg2: oldset */
	movq	%rsp,%rdi	/* arg1: &pt_regs */
	call	do_notify_resume
	cli
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp	paranoid_userspace\trace
paranoid_schedule\trace:
	.if \trace
	TRACE_IRQS_ON
	.endif
	sti
	call	schedule
	cli
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp	paranoid_userspace\trace
	CFI_ENDPROC
	.endm

/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
	_frame RDI
	CFI_REL_OFFSET	rax,0
	/* rdi slot contains rax, oldrax contains error code */
	cld
	subq	$14*8,%rsp
	CFI_ADJUST_CFA_OFFSET	(14*8)
	movq	%rsi,13*8(%rsp)
	CFI_REL_OFFSET	rsi,RSI
	movq	14*8(%rsp),%rsi	/* load rax from rdi slot */
	CFI_REGISTER	rax,rsi
	movq	%rdx,12*8(%rsp)
	CFI_REL_OFFSET	rdx,RDX
	movq	%rcx,11*8(%rsp)
	CFI_REL_OFFSET	rcx,RCX
	movq	%rsi,10*8(%rsp)	/* store rax */
	CFI_REL_OFFSET	rax,RAX
	movq	%r8, 9*8(%rsp)
	CFI_REL_OFFSET	r8,R8
	movq	%r9, 8*8(%rsp)
	CFI_REL_OFFSET	r9,R9
	movq	%r10,7*8(%rsp)
	CFI_REL_OFFSET	r10,R10
	movq	%r11,6*8(%rsp)
	CFI_REL_OFFSET	r11,R11
	movq	%rbx,5*8(%rsp)
	CFI_REL_OFFSET	rbx,RBX
	movq	%rbp,4*8(%rsp)
	CFI_REL_OFFSET	rbp,RBP
	movq	%r12,3*8(%rsp)
	CFI_REL_OFFSET	r12,R12
	movq	%r13,2*8(%rsp)
	CFI_REL_OFFSET	r13,R13
	movq	%r14,1*8(%rsp)
	CFI_REL_OFFSET	r14,R14
	movq	%r15,(%rsp)
	CFI_REL_OFFSET	r15,R15
	xorl	%ebx,%ebx
	testl	$3,CS(%rsp)
	je	error_kernelspace
error_swapgs:
	swapgs
error_sti:
	movq	%rdi,RDI(%rsp)
	CFI_REL_OFFSET	rdi,RDI
	movq	%rsp,%rdi
	movq	ORIG_RAX(%rsp),%rsi	/* get error code */
	movq	$-1,ORIG_RAX(%rsp)
	call	*%rax
	/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
	movl	%ebx,%eax
	RESTORE_REST
	cli
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	testl	%eax,%eax
	jne	retint_kernel
	LOCKDEP_SYS_EXIT_IRQ
	movl	threadinfo_flags(%rcx),%edx
	movl	$_TIF_WORK_MASK,%edi
	andl	%edi,%edx
	jnz	retint_careful
	jmp	retint_swapgs
	CFI_ENDPROC

error_kernelspace:
	incl	%ebx
	/* There are two places in the kernel that can potentially fault with
	   usergs. Handle them here. The exception handlers after
	   iret run with kernel gs again, so don't set the user space flag.
	   B stepping K8s sometimes report a truncated RIP for IRET
	   exceptions returning to compat mode. Check for these here too. */
	leaq	iret_label(%rip),%rbp
	cmpq	%rbp,RIP(%rsp)
	je	error_swapgs
	movl	%ebp,%ebp	/* zero extend */
	cmpq	%rbp,RIP(%rsp)
	je	error_swapgs
	cmpq	$gs_change,RIP(%rsp)
	je	error_swapgs
	jmp	error_sti
KPROBE_END(error_entry)

	/* Reload gs selector with exception handling */
	/* edi:	new selector */
ENTRY(load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET	8
	cli
	swapgs
gs_change:
	movl	%edi,%gs
2:	mfence		/* workaround */
	swapgs
	popf
	CFI_ADJUST_CFA_OFFSET	-8
	ret
	CFI_ENDPROC
ENDPROC(load_gs_index)

	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	swapgs		/* switch back to user gs */
	xorl	%eax,%eax
	movl	%eax,%gs
	jmp	2b
	.previous
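
/*
 * A note on the fixup above (added for clarity): the __ex_table entry pairs
 * the potentially faulting instruction (the %gs load at gs_change) with its
 * recovery code (bad_gs), which falls back to a null selector and resumes
 * at the 2: label.
 */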

/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq	%rdx,%rdi
	orq	kernel_thread_flags(%rip),%rdi
	movq	$-1, %rsi
	movq	%rsp, %rdx

	xorl	%r8d,%r8d
	xorl	%r9d,%r9d

	# clone now
	call	do_fork
	movq	%rax,RAX(%rsp)
	xorl	%edi,%edi

	/*
	 * It isn't worth checking for reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not to reschedule the child before returning; this avoids the need
	 * of hacks, for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_thread)

child_rip:
	pushq	$0	# fake return address
	CFI_STARTPROC
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq	%rdi, %rax
	movq	%rsi, %rdi
	call	*%rax
	# exit
	mov	%eax, %edi
	call	do_exit
	CFI_ENDPROC
ENDPROC(child_rip)

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, fake frame on the stack
 */
ENTRY(kernel_execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	call	sys_execve
	movq	%rax, RAX(%rsp)
	RESTORE_REST
	testq	%rax,%rax
	je	int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_execve)

KPROBE_ENTRY(page_fault)
	errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	zeroentry math_state_restore
END(device_not_available)

	/* runs on exception stack */
KPROBE_ENTRY(debug)
	INTR_FRAME
	pushq	$0
	CFI_ADJUST_CFA_OFFSET	8
	paranoidentry do_debug, DEBUG_STACK
	paranoidexit
KPROBE_END(debug)

	/* runs on exception stack */
KPROBE_ENTRY(nmi)
	INTR_FRAME
	pushq	$-1
	CFI_ADJUST_CFA_OFFSET	8
	paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
	paranoidexit 0
#else
	jmp	paranoid_exit1
	CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
	INTR_FRAME
	pushq	$0
	CFI_ADJUST_CFA_OFFSET	8
	paranoidentry do_int3, DEBUG_STACK
	jmp	paranoid_exit1
	CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
	zeroentry do_overflow
END(overflow)

ENTRY(bounds)
	zeroentry do_bounds
END(bounds)

ENTRY(invalid_op)
	zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

ENTRY(reserved)
	zeroentry do_reserved
END(reserved)

	/* runs on exception stack */
ENTRY(double_fault)
	XCPT_FRAME
	paranoidentry do_double_fault
	jmp	paranoid_exit1
	CFI_ENDPROC
END(double_fault)

ENTRY(invalid_TSS)
	errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
	errorentry do_segment_not_present
END(segment_not_present)

	/* runs on exception stack */
ENTRY(stack_segment)
	XCPT_FRAME
	paranoidentry do_stack_segment
	jmp	paranoid_exit1
	CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
	errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
	zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	INTR_FRAME
	pushq	$0
	CFI_ADJUST_CFA_OFFSET	8
	paranoidentry do_machine_check
	jmp	paranoid_exit1
	CFI_ENDPROC
END(machine_check)
#endif

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	CFI_STARTPROC
	push	%rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rbp,0
	mov	%rsp,%rbp
	CFI_DEF_CFA_REGISTER	rbp
	incl	%gs:pda_irqcount
	cmove	%gs:pda_irqstackptr,%rsp
	push	%rbp	# backlink for old unwinder
	call	__do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
	decl	%gs:pda_irqcount
	ret
	CFI_ENDPROC
ENDPROC(call_softirq)

KPROBE_ENTRY(ignore_sysret)
	CFI_STARTPROC
	mov	$-ENOSYS,%eax
	sysret
	CFI_ENDPROC
ENDPROC(ignore_sysret)