1 /*
2 * Copyright (C) 1991,1992 Linus Torvalds
3 *
4 * entry_32.S contains the system-call and low-level fault and trap handling routines.
5 *
6 * Stack layout while running C code:
7 * ptrace needs to have all registers on the stack.
8 * If the order here is changed, it needs to be
9 * updated in fork.c:copy_process(), signal.c:do_signal(),
10 * ptrace.c and ptrace.h
11 *
12 * 0(%esp) - %ebx
13 * 4(%esp) - %ecx
14 * 8(%esp) - %edx
15 * C(%esp) - %esi
16 * 10(%esp) - %edi
17 * 14(%esp) - %ebp
18 * 18(%esp) - %eax
19 * 1C(%esp) - %ds
20 * 20(%esp) - %es
21 * 24(%esp) - %fs
22 * 28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS
23 * 2C(%esp) - orig_eax
24 * 30(%esp) - %eip
25 * 34(%esp) - %cs
26 * 38(%esp) - %eflags
27 * 3C(%esp) - %oldesp
28 * 40(%esp) - %oldss
29 */
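/*
 * For reference, the offsets listed above mirror the 32-bit struct pt_regs.
 * Roughly (an illustrative sketch; the authoritative definition lives in
 * arch/x86/include/asm/ptrace.h):
 *
 *	struct pt_regs {
 *		unsigned long bx, cx, dx, si, di, bp, ax;
 *		unsigned long ds, es, fs, gs;
 *		unsigned long orig_ax;
 *		unsigned long ip, cs, flags, sp, ss;
 *	};
 *
 * The PT_* offsets used throughout this file (PT_EAX, PT_EFLAGS, ...) are
 * generated from that struct by asm-offsets.
 */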
30
31 #include <linux/linkage.h>
32 #include <linux/err.h>
33 #include <asm/thread_info.h>
34 #include <asm/irqflags.h>
35 #include <asm/errno.h>
36 #include <asm/segment.h>
37 #include <asm/smp.h>
38 #include <asm/percpu.h>
39 #include <asm/processor-flags.h>
40 #include <asm/irq_vectors.h>
41 #include <asm/cpufeatures.h>
42 #include <asm/alternative-asm.h>
43 #include <asm/asm.h>
44 #include <asm/smap.h>
45 #include <asm/frame.h>
46 #include <asm/nospec-branch.h>
47
48 .section .entry.text, "ax"
49
50 /*
51 * We use macros for low-level operations which need to be overridden
52 * for paravirtualization. The following will never clobber any registers:
53 * INTERRUPT_RETURN (aka. "iret")
54 * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
55 * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
56 *
57 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
58 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
59 * Allowing a register to be clobbered can shrink the paravirt replacement
60 * enough to patch inline, increasing performance.
61 */
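/*
 * For illustration only: with CONFIG_PARAVIRT enabled, each of these macros
 * becomes a patchable indirect-call site rather than the bare instruction.
 * Roughly (a sketch; see asm/paravirt_types.h for the real macros):
 *
 *	771:	call *pv_irq_ops.irq_disable	# DISABLE_INTERRUPTS(clobbers)
 *	772:	.pushsection .parainstructions
 *		 .long 771b; .byte type; .byte 772b-771b; .short clobbers
 *		.popsection
 *
 * plus saves/restores of any registers not declared clobberable.  At boot,
 * apply_paravirt() may patch the call site back to a plain "cli" (or a
 * hypervisor-specific sequence) when the replacement fits, which is why
 * allowing more clobbers helps.
 */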
62
63 #ifdef CONFIG_PREEMPT
64 # define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
65 #else
66 # define preempt_stop(clobbers)
67 # define resume_kernel restore_all
68 #endif
69
70 .macro TRACE_IRQS_IRET
71 #ifdef CONFIG_TRACE_IRQFLAGS
72 testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off?
73 jz 1f
74 TRACE_IRQS_ON
75 1:
76 #endif
77 .endm
78
79 /*
80 * User gs save/restore
81 *
82 * %gs is used for userland TLS and kernel only uses it for stack
83 * canary which is required to be at %gs:20 by gcc. Read the comment
84 * at the top of stackprotector.h for more info.
85 *
86 * Local labels 98 and 99 are used.
87 */
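/*
 * For illustration: with -fstack-protector, gcc's 32-bit canary accesses go
 * through %gs, along these lines (a sketch, not any particular compiler's
 * output):
 *
 *	movl	%gs:20, %eax		# load the canary
 *	movl	%eax, -12(%ebp)		# stash it in the frame
 *	...
 *	movl	-12(%ebp), %edx
 *	xorl	%gs:20, %edx		# mismatch => stack smashed
 *	jne	<path that calls __stack_chk_fail>
 *
 * which is why, when stack protector is enabled (!CONFIG_X86_32_LAZY_GS),
 * SET_KERNEL_GS below must load a segment whose base puts this CPU's
 * stack_canary at %gs:20.
 */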
88 #ifdef CONFIG_X86_32_LAZY_GS
89
90 /* unfortunately push/pop can't be no-op */
91 .macro PUSH_GS
92 pushl $0
93 .endm
94 .macro POP_GS pop=0
95 addl $(4 + \pop), %esp
96 .endm
97 .macro POP_GS_EX
98 .endm
99
100 /* all the rest are no-op */
101 .macro PTGS_TO_GS
102 .endm
103 .macro PTGS_TO_GS_EX
104 .endm
105 .macro GS_TO_REG reg
106 .endm
107 .macro REG_TO_PTGS reg
108 .endm
109 .macro SET_KERNEL_GS reg
110 .endm
111
112 #else /* CONFIG_X86_32_LAZY_GS */
113
114 .macro PUSH_GS
115 pushl %gs
116 .endm
117
118 .macro POP_GS pop=0
119 98: popl %gs
120 .if \pop <> 0
121 add $\pop, %esp
122 .endif
123 .endm
124 .macro POP_GS_EX
125 .pushsection .fixup, "ax"
126 99: movl $0, (%esp)
127 jmp 98b
128 .popsection
129 _ASM_EXTABLE(98b, 99b)
130 .endm
131
132 .macro PTGS_TO_GS
133 98: mov PT_GS(%esp), %gs
134 .endm
135 .macro PTGS_TO_GS_EX
136 .pushsection .fixup, "ax"
137 99: movl $0, PT_GS(%esp)
138 jmp 98b
139 .popsection
140 _ASM_EXTABLE(98b, 99b)
141 .endm
142
143 .macro GS_TO_REG reg
144 movl %gs, \reg
145 .endm
146 .macro REG_TO_PTGS reg
147 movl \reg, PT_GS(%esp)
148 .endm
149 .macro SET_KERNEL_GS reg
150 movl $(__KERNEL_STACK_CANARY), \reg
151 movl \reg, %gs
152 .endm
153
154 #endif /* CONFIG_X86_32_LAZY_GS */
155
156 .macro SAVE_ALL pt_regs_ax=%eax
157 cld
158 PUSH_GS
159 pushl %fs
160 pushl %es
161 pushl %ds
162 pushl \pt_regs_ax
163 pushl %ebp
164 pushl %edi
165 pushl %esi
166 pushl %edx
167 pushl %ecx
168 pushl %ebx
169 movl $(__USER_DS), %edx
170 movl %edx, %ds
171 movl %edx, %es
172 movl $(__KERNEL_PERCPU), %edx
173 movl %edx, %fs
174 SET_KERNEL_GS %edx
175 .endm
176
177 /*
178 * This is a sneaky trick to help the unwinder find pt_regs on the stack. The
179 * frame pointer is replaced with an encoded pointer to pt_regs. The encoding
180 * is just setting the LSB, which makes it an invalid stack address and is also
181 * a signal to the unwinder that it's a pt_regs pointer in disguise.
182 *
183 * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the
184 * original ebp.
185 */
186 .macro ENCODE_FRAME_POINTER
187 #ifdef CONFIG_FRAME_POINTER
188 mov %esp, %ebp
189 orl $0x1, %ebp
190 #endif
191 .endm
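/*
 * For illustration, the consumer side of this trick looks roughly like this
 * (a sketch; the real decoding lives in arch/x86/kernel/unwind_frame.c).
 * When the frame-pointer unwinder finds a saved %ebp with the low bit set,
 * it treats it as "pt_regs here" instead of a normal frame:
 *
 *	if (bp & 0x1)
 *		regs = (struct pt_regs *)(bp & ~0x1UL);	// entry frame
 *	else
 *		next_bp = *(unsigned long *)bp;		// ordinary frame
 *
 * Real stack addresses are at least 4-byte aligned, so a set LSB can never
 * be a legitimate saved frame pointer.
 */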
192
193 .macro RESTORE_INT_REGS
194 popl %ebx
195 popl %ecx
196 popl %edx
197 popl %esi
198 popl %edi
199 popl %ebp
200 popl %eax
201 .endm
202
203 .macro RESTORE_REGS pop=0
204 RESTORE_INT_REGS
205 1: popl %ds
206 2: popl %es
207 3: popl %fs
208 POP_GS \pop
209 .pushsection .fixup, "ax"
210 4: movl $0, (%esp)
211 jmp 1b
212 5: movl $0, (%esp)
213 jmp 2b
214 6: movl $0, (%esp)
215 jmp 3b
216 .popsection
217 _ASM_EXTABLE(1b, 4b)
218 _ASM_EXTABLE(2b, 5b)
219 _ASM_EXTABLE(3b, 6b)
220 POP_GS_EX
221 .endm
222
223 /*
224 * %eax: prev task
225 * %edx: next task
226 */
227 ENTRY(__switch_to_asm)
228 /*
229 * Save callee-saved registers
230 * This must match the order in struct inactive_task_frame
231 */
232 pushl %ebp
233 pushl %ebx
234 pushl %edi
235 pushl %esi
236
237 /* switch stack */
238 movl %esp, TASK_threadsp(%eax)
239 movl TASK_threadsp(%edx), %esp
240
241 #ifdef CONFIG_CC_STACKPROTECTOR
242 movl TASK_stack_canary(%edx), %ebx
243 movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
244 #endif
245
246 #ifdef CONFIG_RETPOLINE
247 /*
248 * When switching from a shallower to a deeper call stack
249 * the RSB may either underflow or use entries populated
250 * with userspace addresses. On CPUs where those concerns
251 * exist, overwrite the RSB with entries which capture
252 * speculative execution to prevent attack.
253 */
254 /* Clobbers %ebx */
255 FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
256 #endif
257
258 /* restore callee-saved registers */
259 popl %esi
260 popl %edi
261 popl %ebx
262 popl %ebp
263
264 jmp __switch_to
265 END(__switch_to_asm)
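/*
 * The four pushes above, together with the return address already on the
 * stack from the call into __switch_to_asm, are what the C side sees as
 * struct inactive_task_frame.  Roughly (a sketch; the real definition is in
 * asm/switch_to.h):
 *
 *	struct inactive_task_frame {
 *		unsigned long si;
 *		unsigned long di;
 *		unsigned long bx;
 *		unsigned long bp;
 *		unsigned long ret_addr;	// where the next task resumes
 *	};
 *
 * TASK_threadsp stores %esp pointing at this frame, so the push order above
 * must stay in sync with the struct.
 */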
266
267 /*
268 * The unwinder expects the last frame on the stack to always be at the same
269 * offset from the end of the page, which allows it to validate the stack.
270 * Calling schedule_tail() directly would break that convention because it's an
271 * asmlinkage function, so its argument has to be pushed on the stack. This
272 * wrapper creates a proper "end of stack" frame header before the call.
273 */
274 ENTRY(schedule_tail_wrapper)
275 FRAME_BEGIN
276
277 pushl %eax
278 call schedule_tail
279 popl %eax
280
281 FRAME_END
282 ret
283 ENDPROC(schedule_tail_wrapper)
284 /*
285 * A newly forked process directly context switches into this address.
286 *
287 * eax: prev task we switched from
288 * ebx: kernel thread func (NULL for user thread)
289 * edi: kernel thread arg
290 */
291 ENTRY(ret_from_fork)
292 call schedule_tail_wrapper
293
294 testl %ebx, %ebx
295 jnz 1f /* kernel threads are uncommon */
296
297 2:
298 /* When we fork, we trace the syscall return in the child, too. */
299 movl %esp, %eax
300 call syscall_return_slowpath
301 jmp restore_all
302
303 /* kernel thread */
304 1: movl %edi, %eax
305 CALL_NOSPEC %ebx
306 /*
307 * A kernel thread is allowed to return here after successfully
308 * calling do_execve(). Exit to userspace to complete the execve()
309 * syscall.
310 */
311 movl $0, PT_EAX(%esp)
312 jmp 2b
313 END(ret_from_fork)
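/*
 * The ebx/edi convention documented above is set up by copy_thread() when it
 * builds the child's switch frame.  Roughly (a sketch of process_32.c, not a
 * verbatim copy):
 *
 *	frame->ret_addr = (unsigned long) ret_from_fork;
 *	if (kernel thread) {
 *		frame->bx = fn;		// kernel thread function
 *		frame->di = arg;	// its argument
 *	} else {
 *		frame->bx = 0;		// user thread: take the "2:" path above
 *	}
 */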
314
315 /*
316 * Return to user mode is not as complex as all this looks,
317 * but we want the default path for a system call return to
318 * go as quickly as possible which is why some of this is
319 * less clear than it otherwise should be.
320 */
321
322 # userspace resumption stub bypassing syscall exit tracing
323 ALIGN
324 ret_from_exception:
325 preempt_stop(CLBR_ANY)
326 ret_from_intr:
327 #ifdef CONFIG_VM86
328 movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
329 movb PT_CS(%esp), %al
330 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
331 #else
332 /*
333 * We can be coming here from a child spawned by kernel_thread().
334 */
335 movl PT_CS(%esp), %eax
336 andl $SEGMENT_RPL_MASK, %eax
337 #endif
338 cmpl $USER_RPL, %eax
339 jb resume_kernel # not returning to v8086 or userspace
340
341 ENTRY(resume_userspace)
342 DISABLE_INTERRUPTS(CLBR_ANY)
343 TRACE_IRQS_OFF
344 movl %esp, %eax
345 call prepare_exit_to_usermode
346 jmp restore_all
347 END(ret_from_exception)
348
349 #ifdef CONFIG_PREEMPT
350 ENTRY(resume_kernel)
351 DISABLE_INTERRUPTS(CLBR_ANY)
352 .Lneed_resched:
353 cmpl $0, PER_CPU_VAR(__preempt_count)
354 jnz restore_all
355 testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
356 jz restore_all
357 call preempt_schedule_irq
358 jmp .Lneed_resched
359 END(resume_kernel)
360 #endif
361
362 GLOBAL(__begin_SYSENTER_singlestep_region)
363 /*
364 * All code from here through __end_SYSENTER_singlestep_region is subject
365 * to being single-stepped if a user program sets TF and executes SYSENTER.
366 * There is absolutely nothing that we can do to prevent this from happening
367 * (thanks Intel!). To keep our handling of this situation as simple as
368 * possible, we handle TF just like AC and NT, except that our #DB handler
369 * will ignore all of the single-step traps generated in this range.
370 */
371
372 #ifdef CONFIG_XEN
373 /*
374 * Xen doesn't set %esp to be precisely what the normal SYSENTER
375 * entry point expects, so fix it up before using the normal path.
376 */
377 ENTRY(xen_sysenter_target)
378 addl $5*4, %esp /* remove xen-provided frame */
379 jmp .Lsysenter_past_esp
380 #endif
381
382 /*
383 * 32-bit SYSENTER entry.
384 *
385 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
386 * if X86_FEATURE_SEP is available. This is the preferred system call
387 * entry on 32-bit systems.
388 *
389 * The SYSENTER instruction, in principle, should *only* occur in the
390 * vDSO. In practice, a small number of Android devices were shipped
391 * with a copy of Bionic that inlined a SYSENTER instruction. This
392 * never happened in any of Google's Bionic versions -- it only happened
393 * in a narrow range of Intel-provided versions.
394 *
395 * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs.
396 * IF and VM in RFLAGS are cleared (IOW: interrupts are off).
397 * SYSENTER does not save anything on the stack,
398 * and does not save old EIP (!!!), ESP, or EFLAGS.
399 *
400 * To avoid losing track of EFLAGS.VM (and thus potentially corrupting
401 * user and/or vm86 state), we explicitly disable the SYSENTER
402 * instruction in vm86 mode by reprogramming the MSRs.
403 *
404 * Arguments:
405 * eax system call number
406 * ebx arg1
407 * ecx arg2
408 * edx arg3
409 * esi arg4
410 * edi arg5
411 * ebp user stack
412 * 0(%ebp) arg6
413 */
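/*
 * For illustration, the vDSO side that lands here looks roughly like this
 * (a sketch of arch/x86/entry/vdso/vdso32/system_call.S, not verbatim):
 *
 *	__kernel_vsyscall:
 *		pushl	%ecx
 *		pushl	%edx
 *		pushl	%ebp
 *		movl	%esp, %ebp	# ebp = user stack, 0(%ebp) = arg6
 *		sysenter		# falls back to int $0x80 without SEP
 *	int80_landing_pad:
 *		popl	%ebp
 *		popl	%edx
 *		popl	%ecx
 *		ret
 *
 * That is why %ebp carries the user stack pointer here and why the SYSEXIT
 * path below may clobber ecx/edx: the vDSO pops them on return.
 */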
414 ENTRY(entry_SYSENTER_32)
415 movl TSS_sysenter_sp0(%esp), %esp
416 .Lsysenter_past_esp:
417 pushl $__USER_DS /* pt_regs->ss */
418 pushl %ebp /* pt_regs->sp (stashed in bp) */
419 pushfl /* pt_regs->flags (except IF = 0) */
420 orl $X86_EFLAGS_IF, (%esp) /* Fix IF */
421 pushl $__USER_CS /* pt_regs->cs */
422 pushl $0 /* pt_regs->ip = 0 (placeholder) */
423 pushl %eax /* pt_regs->orig_ax */
424 SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest */
425
426 /*
427 * SYSENTER doesn't filter flags, so we need to clear NT, AC
428 * and TF ourselves. To save a few cycles, we can check whether
429 * either was set instead of doing an unconditional popfl.
430 * This needs to happen before enabling interrupts so that
431 * we don't get preempted with NT set.
432 *
433 * If TF is set, we will single-step all the way to here -- do_debug
434 * will ignore all the traps. (Yes, this is slow, but so is
435 * single-stepping in general. This allows us to avoid having
436 * more complicated code to handle the case where a user program
437 * forces us to single-step through the SYSENTER entry code.)
438 *
439 * NB: .Lsysenter_fix_flags is a label with the code under it moved
440 * out-of-line as an optimization: NT is unlikely to be set in the
441 * majority of the cases and instead of polluting the I$ unnecessarily,
442 * we're keeping that code behind a branch which will predict as
443 * not-taken and therefore its instructions won't be fetched.
444 */
445 testl $X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
446 jnz .Lsysenter_fix_flags
447 .Lsysenter_flags_fixed:
448
449 /*
450 * User mode is traced as though IRQs are on, and SYSENTER
451 * turned them off.
452 */
453 TRACE_IRQS_OFF
454
455 movl %esp, %eax
456 call do_fast_syscall_32
457 /* XEN PV guests always use IRET path */
458 ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
459 "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
460
461 /* Opportunistic SYSEXIT */
462 TRACE_IRQS_ON /* User mode traces as IRQs on. */
463 movl PT_EIP(%esp), %edx /* pt_regs->ip */
464 movl PT_OLDESP(%esp), %ecx /* pt_regs->sp */
465 1: mov PT_FS(%esp), %fs
466 PTGS_TO_GS
467 popl %ebx /* pt_regs->bx */
468 addl $2*4, %esp /* skip pt_regs->cx and pt_regs->dx */
469 popl %esi /* pt_regs->si */
470 popl %edi /* pt_regs->di */
471 popl %ebp /* pt_regs->bp */
472 popl %eax /* pt_regs->ax */
473
474 /*
475 * Restore all flags except IF. (We restore IF separately because
476 * STI gives a one-instruction window in which we won't be interrupted,
477 * whereas POPF does not.)
478 */
479 addl $PT_EFLAGS-PT_DS, %esp /* point esp at pt_regs->flags */
480 btr $X86_EFLAGS_IF_BIT, (%esp)
481 popfl
482
483 /*
484 * Return back to the vDSO, which will pop ecx and edx.
485 * Don't bother with DS and ES (they already contain __USER_DS).
486 */
487 sti
488 sysexit
489
490 .pushsection .fixup, "ax"
491 2: movl $0, PT_FS(%esp)
492 jmp 1b
493 .popsection
494 _ASM_EXTABLE(1b, 2b)
495 PTGS_TO_GS_EX
496
497 .Lsysenter_fix_flags:
498 pushl $X86_EFLAGS_FIXED
499 popfl
500 jmp .Lsysenter_flags_fixed
501 GLOBAL(__end_SYSENTER_singlestep_region)
502 ENDPROC(entry_SYSENTER_32)
503
504 /*
505 * 32-bit legacy system call entry.
506 *
507 * 32-bit x86 Linux system calls traditionally used the INT $0x80
508 * instruction. INT $0x80 lands here.
509 *
510 * This entry point can be used by any 32-bit program to perform system calls.
511 * Instances of INT $0x80 can be found inline in various programs and
512 * libraries. It is also used by the vDSO's __kernel_vsyscall
513 * fallback for hardware that doesn't support a faster entry method.
514 * Restarted 32-bit system calls also fall back to INT $0x80
515 * regardless of what instruction was originally used to do the system
516 * call. (64-bit programs can use INT $0x80 as well, but they can
517 * only run on 64-bit kernels and therefore land in
518 * entry_INT80_compat.)
519 *
520 * This is considered a slow path. It is not used by most libc
521 * implementations on modern hardware except during process startup.
522 *
523 * Arguments:
524 * eax system call number
525 * ebx arg1
526 * ecx arg2
527 * edx arg3
528 * esi arg4
529 * edi arg5
530 * ebp arg6
531 */
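/*
 * For illustration, an inlined INT $0x80 system call from userspace looks
 * like this (a sketch of a hand-rolled _exit(0), using the 32-bit syscall
 * numbers):
 *
 *	movl	$1, %eax	# __NR_exit
 *	xorl	%ebx, %ebx	# status = 0
 *	int	$0x80		# lands at entry_INT80_32 below
 *
 * All six arguments travel in ebx/ecx/edx/esi/edi/ebp, so unlike the
 * SYSENTER path nothing needs to be fetched from the user stack.
 */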
532 ENTRY(entry_INT80_32)
533 ASM_CLAC
534 pushl %eax /* pt_regs->orig_ax */
535 SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest */
536
537 /*
538 * User mode is traced as though IRQs are on, and the interrupt gate
539 * turned them off.
540 */
541 TRACE_IRQS_OFF
542
543 movl %esp, %eax
544 call do_int80_syscall_32
545 .Lsyscall_32_done:
546
547 restore_all:
548 TRACE_IRQS_IRET
549 .Lrestore_all_notrace:
550 #ifdef CONFIG_X86_ESPFIX32
551 ALTERNATIVE "jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX
552
553 movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
554 /*
555 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
556 * are returning to the kernel.
557 * See comments in process.c:copy_thread() for details.
558 */
559 movb PT_OLDSS(%esp), %ah
560 movb PT_CS(%esp), %al
561 andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
562 cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
563 je .Lldt_ss # returning to user-space with LDT SS
564 #endif
565 .Lrestore_nocheck:
566 RESTORE_REGS 4 # skip orig_eax/error_code
567 .Lirq_return:
568 INTERRUPT_RETURN
569
570 .section .fixup, "ax"
571 ENTRY(iret_exc )
572 pushl $0 # no error code
573 pushl $do_iret_error
574 jmp common_exception
575 .previous
576 _ASM_EXTABLE(.Lirq_return, iret_exc)
577
578 #ifdef CONFIG_X86_ESPFIX32
579 .Lldt_ss:
580 /*
581 * Setup and switch to ESPFIX stack
582 *
583 * We're returning to userspace with a 16-bit stack. The CPU will not
584 * restore the high word of ESP for us on executing iret... This is an
585 * "official" bug of all the x86-compatible CPUs, which we can work
586 * around to make dosemu and wine happy. We do this by preloading the
587 * high word of ESP with the high word of the userspace ESP while
588 * compensating for the offset by changing to the ESPFIX segment with
589 * a base address that matches for the difference.
590 */
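/*
 * A worked example of the arithmetic below (illustrative numbers only):
 * with kernel %esp = 0xc1234f80 and user ESP = 0x0012ff44,
 *
 *	eax = 0x00124f80			# user high word, kernel low word
 *	edx = 0xc1234f80 - 0x00124f80 = 0xc1110000
 *	edx >> 16 = 0xc111			# goes into the ESPFIX segment base
 *
 * so the __ESPFIX_SS base becomes 0xc1110000 and base + %esp still points
 * at the original iret frame (0xc1110000 + 0x00124f80 = 0xc1234f80), while
 * the high word of %esp now matches the user's ESP.
 */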
591 #define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
592 mov %esp, %edx /* load kernel esp */
593 mov PT_OLDESP(%esp), %eax /* load userspace esp */
594 mov %dx, %ax /* eax: new kernel esp */
595 sub %eax, %edx /* offset (low word is 0) */
596 shr $16, %edx
597 mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
598 mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
599 pushl $__ESPFIX_SS
600 pushl %eax /* new kernel esp */
601 /*
602 * Disable interrupts, but do not irqtrace this section: we
603 * will soon execute iret and the tracer was already set to
604 * the irqstate after the IRET:
605 */
606 DISABLE_INTERRUPTS(CLBR_ANY)
607 lss (%esp), %esp /* switch to espfix segment */
608 jmp .Lrestore_nocheck
609 #endif
610 ENDPROC(entry_INT80_32)
611
612 .macro FIXUP_ESPFIX_STACK
613 /*
614 * Switch back from the ESPFIX stack to the normal zero-based stack
615 *
616 * We can't call C functions using the ESPFIX stack. This code reads
617 * the high word of the segment base from the GDT and switches to the
618 * normal stack and adjusts ESP with the matching offset.
619 */
620 #ifdef CONFIG_X86_ESPFIX32
621 /* fixup the stack */
622 mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
623 mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
624 shl $16, %eax
625 addl %esp, %eax /* the adjusted stack pointer */
626 pushl $__KERNEL_DS
627 pushl %eax
628 lss (%esp), %esp /* switch to the normal stack segment */
629 #endif
630 .endm
631 .macro UNWIND_ESPFIX_STACK
632 #ifdef CONFIG_X86_ESPFIX32
633 movl %ss, %eax
634 /* see if on espfix stack */
635 cmpw $__ESPFIX_SS, %ax
636 jne 27f
637 movl $__KERNEL_DS, %eax
638 movl %eax, %ds
639 movl %eax, %es
640 /* switch to normal stack */
641 FIXUP_ESPFIX_STACK
642 27:
643 #endif
644 .endm
645
646 /*
647 * Build the entry stubs with some assembler magic.
648 * We pack 1 stub into every 8-byte block.
649 */
650 .align 8
651 ENTRY(irq_entries_start)
652 vector=FIRST_EXTERNAL_VECTOR
653 .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
654 pushl $(~vector+0x80) /* Note: always in signed byte range */
655 vector=vector+1
656 jmp common_interrupt
657 .align 8
658 .endr
659 END(irq_entries_start)
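/*
 * A worked example of the vector encoding (illustrative): for vector 0x20
 * (FIRST_EXTERNAL_VECTOR),
 *
 *	~0x20 + 0x80 = 0x5f		# fits in a signed byte => 2-byte push
 *
 * and common_interrupt below undoes the bias:
 *
 *	0x5f + (-0x80) = -0x21 = ~0x20	# back to ~vector, in [-256, -1]
 *
 * so every stub fits in its 8-byte slot and do_IRQ() can recover the vector
 * as ~orig_ax.
 */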
660
661 /*
662 * the CPU automatically disables interrupts when executing an IRQ vector,
663 * so IRQ-flags tracing has to follow that:
664 */
665 .p2align CONFIG_X86_L1_CACHE_SHIFT
666 common_interrupt:
667 ASM_CLAC
668 addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */
669 SAVE_ALL
670 ENCODE_FRAME_POINTER
671 TRACE_IRQS_OFF
672 movl %esp, %eax
673 call do_IRQ
674 jmp ret_from_intr
675 ENDPROC(common_interrupt)
676
677 #define BUILD_INTERRUPT3(name, nr, fn) \
678 ENTRY(name) \
679 ASM_CLAC; \
680 pushl $~(nr); \
681 SAVE_ALL; \
682 ENCODE_FRAME_POINTER; \
683 TRACE_IRQS_OFF \
684 movl %esp, %eax; \
685 call fn; \
686 jmp ret_from_intr; \
687 ENDPROC(name)
688
689
690 #ifdef CONFIG_TRACING
691 # define TRACE_BUILD_INTERRUPT(name, nr) BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
692 #else
693 # define TRACE_BUILD_INTERRUPT(name, nr)
694 #endif
695
696 #define BUILD_INTERRUPT(name, nr) \
697 BUILD_INTERRUPT3(name, nr, smp_##name); \
698 TRACE_BUILD_INTERRUPT(name, nr)
699
700 /* The include is where all of the SMP etc. interrupts come from */
701 #include <asm/entry_arch.h>
702
703 ENTRY(coprocessor_error)
704 ASM_CLAC
705 pushl $0
706 pushl $do_coprocessor_error
707 jmp common_exception
708 END(coprocessor_error)
709
710 ENTRY(simd_coprocessor_error)
711 ASM_CLAC
712 pushl $0
713 #ifdef CONFIG_X86_INVD_BUG
714 /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
715 ALTERNATIVE "pushl $do_general_protection", \
716 "pushl $do_simd_coprocessor_error", \
717 X86_FEATURE_XMM
718 #else
719 pushl $do_simd_coprocessor_error
720 #endif
721 jmp common_exception
722 END(simd_coprocessor_error)
723
724 ENTRY(device_not_available)
725 ASM_CLAC
726 pushl $-1 # mark this as an int
727 pushl $do_device_not_available
728 jmp common_exception
729 END(device_not_available)
730
731 #ifdef CONFIG_PARAVIRT
732 ENTRY(native_iret)
733 iret
734 _ASM_EXTABLE(native_iret, iret_exc)
735 END(native_iret)
736 #endif
737
738 ENTRY(overflow)
739 ASM_CLAC
740 pushl $0
741 pushl $do_overflow
742 jmp common_exception
743 END(overflow)
744
745 ENTRY(bounds)
746 ASM_CLAC
747 pushl $0
748 pushl $do_bounds
749 jmp common_exception
750 END(bounds)
751
752 ENTRY(invalid_op)
753 ASM_CLAC
754 pushl $0
755 pushl $do_invalid_op
756 jmp common_exception
757 END(invalid_op)
758
759 ENTRY(coprocessor_segment_overrun)
760 ASM_CLAC
761 pushl $0
762 pushl $do_coprocessor_segment_overrun
763 jmp common_exception
764 END(coprocessor_segment_overrun)
765
766 ENTRY(invalid_TSS)
767 ASM_CLAC
768 pushl $do_invalid_TSS
769 jmp common_exception
770 END(invalid_TSS)
771
772 ENTRY(segment_not_present)
773 ASM_CLAC
774 pushl $do_segment_not_present
775 jmp common_exception
776 END(segment_not_present)
777
778 ENTRY(stack_segment)
779 ASM_CLAC
780 pushl $do_stack_segment
781 jmp common_exception
782 END(stack_segment)
783
784 ENTRY(alignment_check)
785 ASM_CLAC
786 pushl $do_alignment_check
787 jmp common_exception
788 END(alignment_check)
789
790 ENTRY(divide_error)
791 ASM_CLAC
792 pushl $0 # no error code
793 pushl $do_divide_error
794 jmp common_exception
795 END(divide_error)
796
797 #ifdef CONFIG_X86_MCE
798 ENTRY(machine_check)
799 ASM_CLAC
800 pushl $0
801 pushl machine_check_vector
802 jmp common_exception
803 END(machine_check)
804 #endif
805
806 ENTRY(spurious_interrupt_bug)
807 ASM_CLAC
808 pushl $0
809 pushl $do_spurious_interrupt_bug
810 jmp common_exception
811 END(spurious_interrupt_bug)
812
813 #ifdef CONFIG_XEN
814 ENTRY(xen_hypervisor_callback)
815 pushl $-1 /* orig_ax = -1 => not a system call */
816 SAVE_ALL
817 ENCODE_FRAME_POINTER
818 TRACE_IRQS_OFF
819
820 /*
821 * Check to see if we got the event in the critical
822 * region in xen_iret_direct, after we've reenabled
823 * events and checked for pending events. This simulates
824 * the iret instruction's behaviour where it delivers a
825 * pending interrupt when enabling interrupts:
826 */
827 movl PT_EIP(%esp), %eax
828 cmpl $xen_iret_start_crit, %eax
829 jb 1f
830 cmpl $xen_iret_end_crit, %eax
831 jae 1f
832
833 jmp xen_iret_crit_fixup
834
835 ENTRY(xen_do_upcall)
836 1: mov %esp, %eax
837 call xen_evtchn_do_upcall
838 #ifndef CONFIG_PREEMPT
839 call xen_maybe_preempt_hcall
840 #endif
841 jmp ret_from_intr
842 ENDPROC(xen_hypervisor_callback)
843
844 /*
845 * Hypervisor uses this for application faults while it executes.
846 * We get here for two reasons:
847 * 1. Fault while reloading DS, ES, FS or GS
848 * 2. Fault while executing IRET
849 * Category 1 we fix up by reattempting the load, and zeroing the segment
850 * register if the load fails.
851 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
852 * normal Linux return path in this case because if we use the IRET hypercall
853 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
854 * We distinguish between categories by maintaining a status value in EAX.
855 */
856 ENTRY(xen_failsafe_callback)
857 pushl %eax
858 movl $1, %eax
859 1: mov 4(%esp), %ds
860 2: mov 8(%esp), %es
861 3: mov 12(%esp), %fs
862 4: mov 16(%esp), %gs
863 /* EAX == 0 => Category 1 (Bad segment)
864 EAX != 0 => Category 2 (Bad IRET) */
865 testl %eax, %eax
866 popl %eax
867 lea 16(%esp), %esp
868 jz 5f
869 jmp iret_exc
870 5: pushl $-1 /* orig_ax = -1 => not a system call */
871 SAVE_ALL
872 ENCODE_FRAME_POINTER
873 jmp ret_from_exception
874
875 .section .fixup, "ax"
876 6: xorl %eax, %eax
877 movl %eax, 4(%esp)
878 jmp 1b
879 7: xorl %eax, %eax
880 movl %eax, 8(%esp)
881 jmp 2b
882 8: xorl %eax, %eax
883 movl %eax, 12(%esp)
884 jmp 3b
885 9: xorl %eax, %eax
886 movl %eax, 16(%esp)
887 jmp 4b
888 .previous
889 _ASM_EXTABLE(1b, 6b)
890 _ASM_EXTABLE(2b, 7b)
891 _ASM_EXTABLE(3b, 8b)
892 _ASM_EXTABLE(4b, 9b)
893 ENDPROC(xen_failsafe_callback)
894
895 BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
896 xen_evtchn_do_upcall)
897
898 #endif /* CONFIG_XEN */
899
900 #if IS_ENABLED(CONFIG_HYPERV)
901
902 BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
903 hyperv_vector_handler)
904
905 #endif /* CONFIG_HYPERV */
906
907 ENTRY(page_fault)
908 ASM_CLAC
909 pushl $do_page_fault
910 ALIGN
911 jmp common_exception
912 END(page_fault)
913
914 common_exception:
915 /* the function address is in %gs's slot on the stack */
916 pushl %fs
917 pushl %es
918 pushl %ds
919 pushl %eax
920 pushl %ebp
921 pushl %edi
922 pushl %esi
923 pushl %edx
924 pushl %ecx
925 pushl %ebx
926 ENCODE_FRAME_POINTER
927 cld
928 movl $(__KERNEL_PERCPU), %ecx
929 movl %ecx, %fs
930 UNWIND_ESPFIX_STACK
931 GS_TO_REG %ecx
932 movl PT_GS(%esp), %edi # get the function address
933 movl PT_ORIG_EAX(%esp), %edx # get the error code
934 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
935 REG_TO_PTGS %ecx
936 SET_KERNEL_GS %ecx
937 movl $(__USER_DS), %ecx
938 movl %ecx, %ds
939 movl %ecx, %es
940 TRACE_IRQS_OFF
941 movl %esp, %eax # pt_regs pointer
942 CALL_NOSPEC %edi
943 jmp ret_from_exception
944 END(common_exception)
945
946 ENTRY(debug)
947 /*
948 * #DB can happen at the first instruction of
949 * entry_SYSENTER_32 or in Xen's SYSENTER prologue. If this
950 * happens, then we will be running on a very small stack. We
951 * need to detect this condition and switch to the thread
952 * stack before calling any C code at all.
953 *
954 * If you edit this code, keep in mind that NMIs can happen in here.
955 */
956 ASM_CLAC
957 pushl $-1 # mark this as an int
958 SAVE_ALL
959 ENCODE_FRAME_POINTER
960 xorl %edx, %edx # error code 0
961 movl %esp, %eax # pt_regs pointer
962
963 /* Are we currently on the SYSENTER stack? */
964 movl PER_CPU_VAR(cpu_entry_area), %ecx
965 addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
966 subl %eax, %ecx /* ecx = (end of entry_stack) - esp */
967 cmpl $SIZEOF_entry_stack, %ecx
968 jb .Ldebug_from_sysenter_stack
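	/*
	 * The addl/subl/cmpl sequence above is an unsigned range check:
	 * (end_of_entry_stack - %esp) < SIZEOF_entry_stack holds only when
	 * %esp currently points into the entry stack; any %esp outside that
	 * window yields either a wrapped-around huge value or one that is
	 * >= SIZEOF_entry_stack, so it fails the compare.  The NMI entry
	 * below uses the same trick.
	 */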
969
970 TRACE_IRQS_OFF
971 call do_debug
972 jmp ret_from_exception
973
974 .Ldebug_from_sysenter_stack:
975 /* We're on the SYSENTER stack. Switch off. */
976 movl %esp, %ebx
977 movl PER_CPU_VAR(cpu_current_top_of_stack), %esp
978 TRACE_IRQS_OFF
979 call do_debug
980 movl %ebx, %esp
981 jmp ret_from_exception
982 END(debug)
983
984 /*
985 * NMI is doubly nasty. It can happen on the first instruction of
986 * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning
987 * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32
988 * switched stacks. We handle both conditions by simply checking whether we
989 * interrupted kernel code running on the SYSENTER stack.
990 */
991 ENTRY(nmi)
992 ASM_CLAC
993 #ifdef CONFIG_X86_ESPFIX32
994 pushl %eax
995 movl %ss, %eax
996 cmpw $__ESPFIX_SS, %ax
997 popl %eax
998 je .Lnmi_espfix_stack
999 #endif
1000
1001 pushl %eax # pt_regs->orig_ax
1002 SAVE_ALL
1003 ENCODE_FRAME_POINTER
1004 xorl %edx, %edx # zero error code
1005 movl %esp, %eax # pt_regs pointer
1006
1007 /* Are we currently on the SYSENTER stack? */
1008 movl PER_CPU_VAR(cpu_entry_area), %ecx
1009 addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
1010 subl %eax, %ecx /* ecx = (end of entry_stack) - esp */
1011 cmpl $SIZEOF_entry_stack, %ecx
1012 jb .Lnmi_from_sysenter_stack
1013
1014 /* Not on SYSENTER stack. */
1015 call do_nmi
1016 jmp .Lrestore_all_notrace
1017
1018 .Lnmi_from_sysenter_stack:
1019 /*
1020 * We're on the SYSENTER stack. Switch off. No one (not even debug)
1021 * is using the thread stack right now, so it's safe for us to use it.
1022 */
1023 movl %esp, %ebx
1024 movl PER_CPU_VAR(cpu_current_top_of_stack), %esp
1025 call do_nmi
1026 movl %ebx, %esp
1027 jmp .Lrestore_all_notrace
1028
1029 #ifdef CONFIG_X86_ESPFIX32
1030 .Lnmi_espfix_stack:
1031 /*
1032 * Build an SS:ESP far pointer to the current (espfix) stack so the lss at the end can switch back to it
1033 */
1034 pushl %ss
1035 pushl %esp
1036 addl $4, (%esp)
1037 /* copy the iret frame of 12 bytes */
1038 .rept 3
1039 pushl 16(%esp)
1040 .endr
1041 pushl %eax
1042 SAVE_ALL
1043 ENCODE_FRAME_POINTER
1044 FIXUP_ESPFIX_STACK # %eax == %esp
1045 xorl %edx, %edx # zero error code
1046 call do_nmi
1047 RESTORE_REGS
1048 lss 12+4(%esp), %esp # back to espfix stack
1049 jmp .Lirq_return
1050 #endif
1051 END(nmi)
1052
1053 ENTRY(int3)
1054 ASM_CLAC
1055 pushl $-1 # mark this as an int
1056 SAVE_ALL
1057 ENCODE_FRAME_POINTER
1058 TRACE_IRQS_OFF
1059 xorl %edx, %edx # zero error code
1060 movl %esp, %eax # pt_regs pointer
1061 call do_int3
1062 jmp ret_from_exception
1063 END(int3)
1064
1065 ENTRY(general_protection)
1066 pushl $do_general_protection
1067 jmp common_exception
1068 END(general_protection)
1069
1070 #ifdef CONFIG_KVM_GUEST
1071 ENTRY(async_page_fault)
1072 ASM_CLAC
1073 pushl $do_async_page_fault
1074 jmp common_exception
1075 END(async_page_fault)
1076 #endif
1077
1078 ENTRY(rewind_stack_do_exit)
1079 /* Prevent any naive code from trying to unwind to our caller. */
1080 xorl %ebp, %ebp
1081
1082 movl PER_CPU_VAR(cpu_current_top_of_stack), %esi
1083 leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp
1084
1085 call do_exit
1086 1: jmp 1b
1087 END(rewind_stack_do_exit)