/*
 * Copyright (C) 1991,1992 Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout in 'syscall_exit':
 *	ptrace needs to have all registers on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process(), signal.c:do_signal(),
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 */

#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>

	.section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization. The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#ifdef CONFIG_PREEMPT
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
# define resume_kernel		restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off?
	jz	1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS and kernel only uses it for stack
 * canary which is required to be at %gs:20 by gcc. Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro PUSH_GS
	pushl	$0
.endm
.macro POP_GS pop=0
	addl	$(4 + \pop), %esp
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl	%gs
.endm

.macro POP_GS pop=0
98:	popl	%gs
  .if \pop <> 0
	add	$\pop, %esp
  .endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, (%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro PTGS_TO_GS
98:	mov	PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, PT_GS(%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm
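
/*
 * The two .fixup blocks above follow the usual exception-table pattern:
 * if the %gs load at a "98:" label faults (e.g. on a stale user selector),
 * the _ASM_EXTABLE entry redirects execution to the matching "99:" label,
 * which overwrites the offending value with 0 and retries.  Loading the
 * NULL selector cannot fault.
 */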

.macro GS_TO_REG reg
	movl	%gs, \reg
.endm
.macro REG_TO_PTGS reg
	movl	\reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
	movl	$(__KERNEL_STACK_CANARY), \reg
	movl	\reg, %gs
.endm

#endif	/* CONFIG_X86_32_LAZY_GS */

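/*
 * SAVE_ALL completes the pt_regs frame laid out in the comment at the top
 * of this file (segment registers, then the general registers, below the
 * already-pushed orig_eax slot and the hardware iret frame), and switches
 * %ds/%es to __USER_DS, %fs to the kernel per-CPU segment, and %gs to the
 * stack-canary segment when lazy GS is not in use.
 */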
.macro SAVE_ALL
	cld
	PUSH_GS
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	movl	$(__USER_DS), %edx
	movl	%edx, %ds
	movl	%edx, %es
	movl	$(__KERNEL_PERCPU), %edx
	movl	%edx, %fs
	SET_KERNEL_GS %edx
.endm

.macro RESTORE_INT_REGS
	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl	%ds
2:	popl	%es
3:	popl	%fs
	POP_GS \pop
.pushsection .fixup, "ax"
4:	movl	$0, (%esp)
	jmp	1b
5:	movl	$0, (%esp)
	jmp	2b
6:	movl	$0, (%esp)
	jmp	3b
.popsection
	_ASM_EXTABLE(1b, 4b)
	_ASM_EXTABLE(2b, 5b)
	_ASM_EXTABLE(3b, 6b)
	POP_GS_EX
.endm

ENTRY(ret_from_fork)
	pushl	%eax
	call	schedule_tail
	GET_THREAD_INFO(%ebp)
	popl	%eax
	pushl	$0x0202				# Reset kernel eflags
	popfl
	jmp	syscall_exit
END(ret_from_fork)

ENTRY(ret_from_kernel_thread)
	pushl	%eax
	call	schedule_tail
	GET_THREAD_INFO(%ebp)
	popl	%eax
	pushl	$0x0202				# Reset kernel eflags
	popfl
	movl	PT_EBP(%esp), %eax
	call	*PT_EBX(%esp)
	movl	$0, PT_EAX(%esp)
	jmp	syscall_exit
ENDPROC(ret_from_kernel_thread)

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
	GET_THREAD_INFO(%ebp)
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from a child spawned by kernel_thread().
	 */
	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax
#endif
	cmpl	$USER_RPL, %eax
	jb	resume_kernel			# not returning to v8086 or userspace

ENTRY(resume_userspace)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)		# make sure we don't miss an interrupt
						# setting need_resched or sigpending
						# between sampling and the iret
	TRACE_IRQS_OFF
	movl	TI_flags(%ebp), %ecx
	andl	$_TIF_WORK_MASK, %ecx		# is there any work to be done on
						# int/exception return?
	jne	work_pending
	jmp	restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
need_resched:
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	restore_all
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz	restore_all
	call	preempt_schedule_irq
	jmp	need_resched
END(resume_kernel)
#endif

/*
 * SYSENTER_RETURN points to after the SYSENTER instruction
 * in the vsyscall page. See vsyscall-sysentry.S, which defines
 * the symbol.
 */

	# SYSENTER call handler stub
ENTRY(entry_SYSENTER_32)
	movl	TSS_sysenter_sp0(%esp), %esp
sysenter_past_esp:
	/*
	 * Interrupts are disabled here, but we can't trace it until
	 * enough kernel state to call TRACE_IRQS_OFF can be called - but
	 * we immediately enable interrupts at that point anyway.
	 */
	pushl	$__USER_DS
	pushl	%ebp
	pushfl
	orl	$X86_EFLAGS_IF, (%esp)
	pushl	$__USER_CS
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary: TI_sysenter_return
	 * is relative to thread_info, which is at the bottom of the
	 * kernel stack page. 4*4 means the 4 words pushed above;
	 * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
	 * and THREAD_SIZE takes us to the bottom.
	 */
	pushl	((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)

	pushl	%eax
	SAVE_ALL
	ENABLE_INTERRUPTS(CLBR_NONE)

/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
	cmpl	$__PAGE_OFFSET-3, %ebp
	jae	syscall_fault
	ASM_STAC
1:	movl	(%ebp), %ebp
	ASM_CLAC
	movl	%ebp, PT_EBP(%esp)
	_ASM_EXTABLE(1b, syscall_fault)

	GET_THREAD_INFO(%ebp)

	testl	$_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
	jnz	syscall_trace_entry
sysenter_do_call:
	cmpl	$(NR_syscalls), %eax
	jae	sysenter_badsys
	call	*sys_call_table(, %eax, 4)
sysenter_after_call:
	movl	%eax, PT_EAX(%esp)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl	TI_flags(%ebp), %ecx
	testl	$_TIF_ALLWORK_MASK, %ecx
	jnz	syscall_exit_work
sysenter_exit:
/* if something modifies registers it must also disable sysexit */
	movl	PT_EIP(%esp), %edx
	movl	PT_OLDESP(%esp), %ecx
	xorl	%ebp, %ebp
	TRACE_IRQS_ON
1:	mov	PT_FS(%esp), %fs
	PTGS_TO_GS
	ENABLE_INTERRUPTS_SYSEXIT

.pushsection .fixup, "ax"
2:	movl	$0, PT_FS(%esp)
	jmp	1b
.popsection
	_ASM_EXTABLE(1b, 2b)
	PTGS_TO_GS_EX
ENDPROC(entry_SYSENTER_32)

	# system call handler stub
ENTRY(entry_INT80_32)
	ASM_CLAC
	pushl	%eax				# save orig_eax
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
						# system call tracing in operation / emulation
	testl	$_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
	jnz	syscall_trace_entry
	cmpl	$(NR_syscalls), %eax
	jae	syscall_badsys
syscall_call:
	call	*sys_call_table(, %eax, 4)
syscall_after_call:
	movl	%eax, PT_EAX(%esp)		# store the return value
syscall_exit:
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)		# make sure we don't miss an interrupt
						# setting need_resched or sigpending
						# between sampling and the iret
	TRACE_IRQS_OFF
	movl	TI_flags(%ebp), %ecx
	testl	$_TIF_ALLWORK_MASK, %ecx	# current->work
	jnz	syscall_exit_work

restore_all:
	TRACE_IRQS_IRET
restore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	/*
	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	 * are returning to the kernel.
	 * See comments in process.c:copy_thread() for details.
	 */
	movb	PT_OLDSS(%esp), %ah
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
	je	ldt_ss				# returning to user-space with LDT SS
#endif
restore_nocheck:
	RESTORE_REGS 4				# skip orig_eax/error_code
irq_return:
	INTERRUPT_RETURN
.section .fixup, "ax"
ENTRY(iret_exc)
	pushl	$0				# no error code
	pushl	$do_iret_error
	jmp	error_code
.previous
	_ASM_EXTABLE(irq_return, iret_exc)

#ifdef CONFIG_X86_ESPFIX32
ldt_ss:
#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active. Rather than try to fixup the high bits of
	 * ESP, bypass this code entirely. This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16-bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl	$0, pv_info+PARAVIRT_enabled
	jne	restore_nocheck
#endif

/*
 * Setup and switch to ESPFIX stack
 *
 * We're returning to userspace with a 16 bit stack. The CPU will not
 * restore the high word of ESP for us on executing iret... This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy. We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that matches for the difference.
 */
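/*
 * A worked example with illustrative values: if the kernel %esp is
 * 0xc1234f00 and the userspace %esp is 0x0012aa55, the new %esp below
 * becomes 0x00124f00 (user high word, kernel low word) and the ESPFIX
 * segment base is patched to the difference, 0xc1110000, so that
 * base + %esp still resolves to the kernel stack.  The low word of the
 * difference is always zero, which is why only base bits 16..23 and
 * 24..31 need to be written into the descriptor.
 */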
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov	%esp, %edx			/* load kernel esp */
	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
	mov	%dx, %ax			/* eax: new kernel esp */
	sub	%eax, %edx			/* offset (low word is 0) */
	shr	$16, %edx
	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
	pushl	$__ESPFIX_SS
	pushl	%eax				/* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss	(%esp), %esp			/* switch to espfix segment */
	jmp	restore_nocheck
#endif
ENDPROC(entry_INT80_32)

	# perform work that needs to be done immediately before resumption
	ALIGN
work_pending:
	testb	$_TIF_NEED_RESCHED, %cl
	jz	work_notifysig
work_resched:
	call	schedule
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)		# make sure we don't miss an interrupt
						# setting need_resched or sigpending
						# between sampling and the iret
	TRACE_IRQS_OFF
	movl	TI_flags(%ebp), %ecx
	andl	$_TIF_WORK_MASK, %ecx		# is there any work to be done other
						# than syscall tracing?
	jz	restore_all
	testb	$_TIF_NEED_RESCHED, %cl
	jnz	work_resched

work_notifysig:					# deal with pending signals and
						# notify-resume requests
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	movl	%esp, %eax
	xorl	%edx, %edx
	call	do_notify_resume
	jmp	resume_userspace
END(work_pending)

	# perform syscall exit tracing
	ALIGN
syscall_trace_entry:
	movl	$-ENOSYS, PT_EAX(%esp)
	movl	%esp, %eax
	call	syscall_trace_enter
	/* What it returned is what we'll actually use. */
	cmpl	$(NR_syscalls), %eax
	jnae	syscall_call
	jmp	syscall_exit
END(syscall_trace_entry)

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testl	$_TIF_WORK_SYSCALL_EXIT, %ecx
	jz	work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)		# could let syscall_trace_leave() call
						# schedule() instead
	movl	%esp, %eax
	call	syscall_trace_leave
	jmp	resume_userspace
END(syscall_exit_work)

syscall_fault:
	ASM_CLAC
	GET_THREAD_INFO(%ebp)
	movl	$-EFAULT, PT_EAX(%esp)
	jmp	resume_userspace
END(syscall_fault)

syscall_badsys:
	movl	$-ENOSYS, %eax
	jmp	syscall_after_call
END(syscall_badsys)

sysenter_badsys:
	movl	$-ENOSYS, %eax
	jmp	sysenter_after_call
END(sysenter_badsys)

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack.
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 */
#ifdef CONFIG_X86_ESPFIX32
	/* fixup the stack */
	mov	GDT_ESPFIX_SS + 4, %al		/* bits 16..23 */
	mov	GDT_ESPFIX_SS + 7, %ah		/* bits 24..31 */
	shl	$16, %eax
	addl	%esp, %eax			/* the adjusted stack pointer */
	pushl	$__KERNEL_DS
	pushl	%eax
	lss	(%esp), %esp			/* switch to the normal stack segment */
#endif
.endm
.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
	movl	%ss, %eax
	/* see if on espfix stack */
	cmpw	$__ESPFIX_SS, %ax
	jne	27f
	movl	$__KERNEL_DS, %eax
	movl	%eax, %ds
	movl	%eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
#endif
.endm

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushl	$(~vector+0x80)			/* Note: always in signed byte range */
    vector=vector+1
	jmp	common_interrupt
	.align	8
    .endr
END(irq_entries_start)
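
/*
 * An illustrative example of the encoding above: vector 0x20 is pushed as
 * ~0x20 + 0x80 = 0x5f, which fits in a signed byte and keeps each stub
 * small enough for its 8-byte slot.  common_interrupt below then adds
 * -0x80, leaving ~0x20 = -0x21 in the orig_eax slot, from which the
 * interrupt handling code can recover the original vector number.
 */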

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
	SAVE_ALL
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	do_IRQ
	jmp	ret_from_intr
ENDPROC(common_interrupt)

#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	ASM_CLAC;			\
	pushl	$~(nr);			\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl	%esp, %eax;		\
	call	fn;			\
	jmp	ret_from_intr;		\
ENDPROC(name)


#ifdef CONFIG_TRACING
# define TRACE_BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
#else
# define TRACE_BUILD_INTERRUPT(name, nr)
#endif

#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\
	TRACE_BUILD_INTERRUPT(name, nr)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

ENTRY(coprocessor_error)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_error
	jmp	error_code
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	ASM_CLAC
	pushl	$0
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
	ALTERNATIVE "pushl	$do_general_protection",	\
		    "pushl	$do_simd_coprocessor_error",	\
		    X86_FEATURE_XMM
#else
	pushl	$do_simd_coprocessor_error
#endif
	jmp	error_code
END(simd_coprocessor_error)

ENTRY(device_not_available)
	ASM_CLAC
	pushl	$-1				# mark this as an int
	pushl	$do_device_not_available
	jmp	error_code
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
	_ASM_EXTABLE(native_iret, iret_exc)
END(native_iret)

ENTRY(native_irq_enable_sysexit)
	sti
	sysexit
END(native_irq_enable_sysexit)
#endif

ENTRY(overflow)
	ASM_CLAC
	pushl	$0
	pushl	$do_overflow
	jmp	error_code
END(overflow)

ENTRY(bounds)
	ASM_CLAC
	pushl	$0
	pushl	$do_bounds
	jmp	error_code
END(bounds)

ENTRY(invalid_op)
	ASM_CLAC
	pushl	$0
	pushl	$do_invalid_op
	jmp	error_code
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_segment_overrun
	jmp	error_code
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	ASM_CLAC
	pushl	$do_invalid_TSS
	jmp	error_code
END(invalid_TSS)

ENTRY(segment_not_present)
	ASM_CLAC
	pushl	$do_segment_not_present
	jmp	error_code
END(segment_not_present)

ENTRY(stack_segment)
	ASM_CLAC
	pushl	$do_stack_segment
	jmp	error_code
END(stack_segment)

ENTRY(alignment_check)
	ASM_CLAC
	pushl	$do_alignment_check
	jmp	error_code
END(alignment_check)

ENTRY(divide_error)
	ASM_CLAC
	pushl	$0				# no error code
	pushl	$do_divide_error
	jmp	error_code
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	ASM_CLAC
	pushl	$0
	pushl	machine_check_vector
	jmp	error_code
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	ASM_CLAC
	pushl	$0
	pushl	$do_spurious_interrupt_bug
	jmp	error_code
END(spurious_interrupt_bug)

#ifdef CONFIG_XEN
/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
ENTRY(xen_sysenter_target)
	addl	$5*4, %esp			/* remove xen-provided frame */
	jmp	sysenter_past_esp

ENTRY(xen_hypervisor_callback)
	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	TRACE_IRQS_OFF

	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've reenabled
	 * events and checked for pending events. This simulates
	 * iret instruction's behaviour where it delivers a
	 * pending interrupt when enabling interrupts:
	 */
	movl	PT_EIP(%esp), %eax
	cmpl	$xen_iret_start_crit, %eax
	jb	1f
	cmpl	$xen_iret_end_crit, %eax
	jae	1f

	jmp	xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov	%esp, %eax
	call	xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
#endif
	jmp	ret_from_intr
ENDPROC(xen_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we fix up by reattempting the load, and zeroing the segment
 * register if the load fails.
 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by maintaining a status value in EAX.
 */
ENTRY(xen_failsafe_callback)
	pushl	%eax
	movl	$1, %eax
1:	mov	4(%esp), %ds
2:	mov	8(%esp), %es
3:	mov	12(%esp), %fs
4:	mov	16(%esp), %gs
	/* EAX == 0 => Category 1 (Bad segment)
	   EAX != 0 => Category 2 (Bad IRET) */
	testl	%eax, %eax
	popl	%eax
	lea	16(%esp), %esp
	jz	5f
	jmp	iret_exc
5:	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	jmp	ret_from_exception

.section .fixup, "ax"
6:	xorl	%eax, %eax
	movl	%eax, 4(%esp)
	jmp	1b
7:	xorl	%eax, %eax
	movl	%eax, 8(%esp)
	jmp	2b
8:	xorl	%eax, %eax
	movl	%eax, 12(%esp)
	jmp	3b
9:	xorl	%eax, %eax
	movl	%eax, 16(%esp)
	jmp	4b
.previous
	_ASM_EXTABLE(1b, 6b)
	_ASM_EXTABLE(2b, 7b)
	_ASM_EXTABLE(3b, 8b)
	_ASM_EXTABLE(4b, 9b)
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 xen_evtchn_do_upcall)

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)

BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 hyperv_vector_handler)

#endif /* CONFIG_HYPERV */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
	ret
END(mcount)

ENTRY(ftrace_caller)
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	$0				/* Pass NULL as regs pointer */
	movl	4*4(%esp), %eax
	movl	0x4(%ebp), %edx
	movl	function_trace_op, %ecx
	subl	$MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
	call	ftrace_stub

	addl	$4, %esp			/* skip NULL pointer */
	popl	%edx
	popl	%ecx
	popl	%eax
ftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp	ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
	ret
END(ftrace_caller)

ENTRY(ftrace_regs_caller)
	pushf				/* push flags before compare (in cs location) */

	/*
	 * i386 does not save SS and ESP when coming from kernel.
	 * Instead, to get sp, &regs->sp is used (see ptrace.h).
	 * Unfortunately, that means eflags must be at the same location
	 * as the current return ip is. We move the return ip into the
	 * ip location, and move flags into the return ip location.
	 */
	pushl	4(%esp)				/* save return ip into ip slot */

	pushl	$0				/* Load 0 into orig_ax */
	pushl	%gs
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx

	movl	13*4(%esp), %eax		/* Get the saved flags */
	movl	%eax, 14*4(%esp)		/* Move saved flags into regs->flags location */
						/* clobbering return ip */
	movl	$__KERNEL_CS, 13*4(%esp)

	movl	12*4(%esp), %eax		/* Load ip (1st parameter) */
	subl	$MCOUNT_INSN_SIZE, %eax		/* Adjust ip */
	movl	0x4(%ebp), %edx			/* Load parent ip (2nd parameter) */
	movl	function_trace_op, %ecx		/* Save ftrace_pos in 3rd parameter */
	pushl	%esp				/* Save pt_regs as 4th parameter */

GLOBAL(ftrace_regs_call)
	call	ftrace_stub

	addl	$4, %esp			/* Skip pt_regs */
	movl	14*4(%esp), %eax		/* Move flags back into cs */
	movl	%eax, 13*4(%esp)		/* Needed to keep addl from modifying flags */
	movl	12*4(%esp), %eax		/* Get return ip from regs->ip */
	movl	%eax, 14*4(%esp)		/* Put return ip back for ret */

	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
	popl	%ds
	popl	%es
	popl	%fs
	popl	%gs
	addl	$8, %esp			/* Skip orig_ax and ip */
	popf					/* Pop flags at end (no addl to corrupt flags) */
	jmp	ftrace_ret

	popf
	jmp	ftrace_stub
#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
	cmpl	$__PAGE_OFFSET, %esp
	jb	ftrace_stub			/* Paging not enabled yet? */

	cmpl	$ftrace_stub, ftrace_trace_function
	jnz	trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl	$ftrace_stub, ftrace_graph_return
	jnz	ftrace_graph_caller

	cmpl	$ftrace_graph_entry_stub, ftrace_graph_entry
	jnz	ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
	ret

	/* taken from glibc */
trace:
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	movl	0xc(%esp), %eax
	movl	0x4(%ebp), %edx
	subl	$MCOUNT_INSN_SIZE, %eax

	call	*ftrace_trace_function

	popl	%edx
	popl	%ecx
	popl	%eax
	jmp	ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	movl	0xc(%esp), %eax
	lea	0x4(%ebp), %edx
	movl	(%ebp), %ecx
	subl	$MCOUNT_INSN_SIZE, %eax
	call	prepare_ftrace_return
	popl	%edx
	popl	%ecx
	popl	%eax
	ret
END(ftrace_graph_caller)

.globl return_to_handler
return_to_handler:
	pushl	%eax
	pushl	%edx
	movl	%ebp, %eax
	call	ftrace_return_to_handler
	movl	%eax, %ecx
	popl	%edx
	popl	%eax
	jmp	*%ecx
#endif

#ifdef CONFIG_TRACING
ENTRY(trace_page_fault)
	ASM_CLAC
	pushl	$trace_do_page_fault
	jmp	error_code
END(trace_page_fault)
#endif

ENTRY(page_fault)
	ASM_CLAC
	pushl	$do_page_fault
	ALIGN
error_code:
	/* the function address is in %gs's slot on the stack */
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	cld
	movl	$(__KERNEL_PERCPU), %ecx
	movl	%ecx, %fs
	UNWIND_ESPFIX_STACK
	GS_TO_REG %ecx
	movl	PT_GS(%esp), %edi		# get the function address
	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx
	movl	$(__USER_DS), %ecx
	movl	%ecx, %ds
	movl	%ecx, %es
	TRACE_IRQS_OFF
	movl	%esp, %eax			# pt_regs pointer
	call	*%edi
	jmp	ret_from_exception
END(page_fault)

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
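/*
 * (FIX_STACK 24 in the NMI path below accounts for the nested case: the
 * CPU has already pushed three words for the debug trap, and three more
 * for the NMI on top of them.)
 */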
.macro FIX_STACK offset ok label
	cmpw	$__KERNEL_CS, 4(%esp)
	jne	\ok
\label:
	movl	TSS_sysenter_sp0 + \offset(%esp), %esp
	pushfl
	pushl	$__KERNEL_CS
	pushl	$sysenter_past_esp
.endm

ENTRY(debug)
	ASM_CLAC
	cmpl	$entry_SYSENTER_32, (%esp)
	jne	debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
	pushl	$-1				# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl	%edx, %edx			# error code 0
	movl	%esp, %eax			# pt_regs pointer
	call	do_debug
	jmp	ret_from_exception
END(debug)

/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
	ASM_CLAC
#ifdef CONFIG_X86_ESPFIX32
	pushl	%eax
	movl	%ss, %eax
	cmpw	$__ESPFIX_SS, %ax
	popl	%eax
	je	nmi_espfix_stack
#endif
	cmpl	$entry_SYSENTER_32, (%esp)
	je	nmi_stack_fixup
	pushl	%eax
	movl	%esp, %eax
	/*
	 * Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl	$(THREAD_SIZE-1), %eax
	cmpl	$(THREAD_SIZE-20), %eax
	popl	%eax
	jae	nmi_stack_correct
	cmpl	$entry_SYSENTER_32, 12(%esp)
	je	nmi_debug_stack_check
nmi_stack_correct:
	pushl	%eax
	SAVE_ALL
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	call	do_nmi
	jmp	restore_all_notrace

nmi_stack_fixup:
	FIX_STACK 12, nmi_stack_correct, 1
	jmp	nmi_stack_correct

nmi_debug_stack_check:
	cmpw	$__KERNEL_CS, 16(%esp)
	jne	nmi_stack_correct
	cmpl	$debug, (%esp)
	jb	nmi_stack_correct
	cmpl	$debug_esp_fix_insn, (%esp)
	ja	nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp	nmi_stack_correct

#ifdef CONFIG_X86_ESPFIX32
nmi_espfix_stack:
	/*
	 * create the pointer to lss back
	 */
	pushl	%ss
	pushl	%esp
	addl	$4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl	16(%esp)
	.endr
	pushl	%eax
	SAVE_ALL
	FIXUP_ESPFIX_STACK			# %eax == %esp
	xorl	%edx, %edx			# zero error code
	call	do_nmi
	RESTORE_REGS
	lss	12+4(%esp), %esp		# back to espfix stack
	jmp	irq_return
#endif
END(nmi)

ENTRY(int3)
	ASM_CLAC
	pushl	$-1				# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	call	do_int3
	jmp	ret_from_exception
END(int3)

ENTRY(general_protection)
	pushl	$do_general_protection
	jmp	error_code
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	ASM_CLAC
	pushl	$do_async_page_fault
	jmp	error_code
END(async_page_fault)
#endif