/*
 * linux/arch/i386/entry.S
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'ret_from_system_call':
 *      ptrace needs to have all regs on the stack.
 *      if the order here is changed, it needs to be
 *      updated in fork.c:copy_process, signal.c:do_signal,
 *      ptrace.c and ptrace.h
 *
 *       0(%esp) - %ebx
 *       4(%esp) - %ecx
 *       8(%esp) - %edx
 *       C(%esp) - %esi
 *      10(%esp) - %edi
 *      14(%esp) - %ebp
 *      18(%esp) - %eax
 *      1C(%esp) - %ds
 *      20(%esp) - %es
 *      24(%esp) - orig_eax
 *      28(%esp) - %eip
 *      2C(%esp) - %cs
 *      30(%esp) - %eflags
 *      34(%esp) - %oldesp
 *      38(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/dwarf2.h>
#include "irq_vectors.h"

#define nr_syscalls ((syscall_table_size)/4)

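/*
 * syscall_table.S emits one 4-byte pointer per system call, so dividing
 * the table size by 4 yields the number of entries; %eax is range-checked
 * against this before the indirect call through sys_call_table.
 */
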
EBX             = 0x00
ECX             = 0x04
EDX             = 0x08
ESI             = 0x0C
EDI             = 0x10
EBP             = 0x14
EAX             = 0x18
DS              = 0x1C
ES              = 0x20
ORIG_EAX        = 0x24
EIP             = 0x28
CS              = 0x2C
EFLAGS          = 0x30
OLDESP          = 0x34
OLDSS           = 0x38

CF_MASK         = 0x00000001
TF_MASK         = 0x00000100
IF_MASK         = 0x00000200
DF_MASK         = 0x00000400
NT_MASK         = 0x00004000
VM_MASK         = 0x00020000

#ifdef CONFIG_PREEMPT
#define preempt_stop            cli
#else
#define preempt_stop
#define resume_kernel           restore_nocheck
#endif

#ifdef CONFIG_VM86
#define resume_userspace_sig    check_userspace
#else
#define resume_userspace_sig    resume_userspace
#endif

#define SAVE_ALL \
        cld; \
        pushl %es; \
        CFI_ADJUST_CFA_OFFSET 4;\
        /*CFI_REL_OFFSET es, 0;*/\
        pushl %ds; \
        CFI_ADJUST_CFA_OFFSET 4;\
        /*CFI_REL_OFFSET ds, 0;*/\
        pushl %eax; \
        CFI_ADJUST_CFA_OFFSET 4;\
        CFI_REL_OFFSET eax, 0;\
        pushl %ebp; \
        CFI_ADJUST_CFA_OFFSET 4;\
        CFI_REL_OFFSET ebp, 0;\
        pushl %edi; \
        CFI_ADJUST_CFA_OFFSET 4;\
        CFI_REL_OFFSET edi, 0;\
        pushl %esi; \
        CFI_ADJUST_CFA_OFFSET 4;\
        CFI_REL_OFFSET esi, 0;\
        pushl %edx; \
        CFI_ADJUST_CFA_OFFSET 4;\
        CFI_REL_OFFSET edx, 0;\
        pushl %ecx; \
        CFI_ADJUST_CFA_OFFSET 4;\
        CFI_REL_OFFSET ecx, 0;\
        pushl %ebx; \
        CFI_ADJUST_CFA_OFFSET 4;\
        CFI_REL_OFFSET ebx, 0;\
        movl $(__USER_DS), %edx; \
        movl %edx, %ds; \
        movl %edx, %es;

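/*
 * Note that SAVE_ALL pushes in the reverse of the offset table at the top
 * of this file: the last push (%ebx) lands at 0(%esp), so the resulting
 * frame matches struct pt_regs and the EBX..OLDSS constants above.
 */
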
#define RESTORE_INT_REGS \
        popl %ebx;      \
        CFI_ADJUST_CFA_OFFSET -4;\
        CFI_RESTORE ebx;\
        popl %ecx;      \
        CFI_ADJUST_CFA_OFFSET -4;\
        CFI_RESTORE ecx;\
        popl %edx;      \
        CFI_ADJUST_CFA_OFFSET -4;\
        CFI_RESTORE edx;\
        popl %esi;      \
        CFI_ADJUST_CFA_OFFSET -4;\
        CFI_RESTORE esi;\
        popl %edi;      \
        CFI_ADJUST_CFA_OFFSET -4;\
        CFI_RESTORE edi;\
        popl %ebp;      \
        CFI_ADJUST_CFA_OFFSET -4;\
        CFI_RESTORE ebp;\
        popl %eax;      \
        CFI_ADJUST_CFA_OFFSET -4;\
        CFI_RESTORE eax

#define RESTORE_REGS    \
        RESTORE_INT_REGS; \
1:      popl %ds;       \
        CFI_ADJUST_CFA_OFFSET -4;\
        /*CFI_RESTORE ds;*/\
2:      popl %es;       \
        CFI_ADJUST_CFA_OFFSET -4;\
        /*CFI_RESTORE es;*/\
.section .fixup,"ax";   \
3:      movl $0,(%esp); \
        jmp 1b;         \
4:      movl $0,(%esp); \
        jmp 2b;         \
.previous;              \
.section __ex_table,"a";\
        .align 4;       \
        .long 1b,3b;    \
        .long 2b,4b;    \
.previous

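/*
 * How the fixup above works: if popping %ds or %es faults (e.g. a stale
 * selector left over from a vm86 task), the __ex_table entry maps the
 * faulting EIP (1b/2b) to its fixup (3b/4b), which overwrites the saved
 * selector on the stack with 0 and retries the pop; loading a null
 * selector always succeeds.
 */
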
#define RING0_INT_FRAME \
        CFI_STARTPROC simple;\
        CFI_DEF_CFA esp, 3*4;\
        /*CFI_OFFSET cs, -2*4;*/\
        CFI_OFFSET eip, -3*4

#define RING0_EC_FRAME \
        CFI_STARTPROC simple;\
        CFI_DEF_CFA esp, 4*4;\
        /*CFI_OFFSET cs, -2*4;*/\
        CFI_OFFSET eip, -3*4

#define RING0_PTREGS_FRAME \
        CFI_STARTPROC simple;\
        CFI_DEF_CFA esp, OLDESP-EBX;\
        /*CFI_OFFSET cs, CS-OLDESP;*/\
        CFI_OFFSET eip, EIP-OLDESP;\
        /*CFI_OFFSET es, ES-OLDESP;*/\
        /*CFI_OFFSET ds, DS-OLDESP;*/\
        CFI_OFFSET eax, EAX-OLDESP;\
        CFI_OFFSET ebp, EBP-OLDESP;\
        CFI_OFFSET edi, EDI-OLDESP;\
        CFI_OFFSET esi, ESI-OLDESP;\
        CFI_OFFSET edx, EDX-OLDESP;\
        CFI_OFFSET ecx, ECX-OLDESP;\
        CFI_OFFSET ebx, EBX-OLDESP

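/*
 * A worked example of the CFA arithmetic: for a ring-0 interrupt without
 * an error code the CPU has pushed eip, cs and eflags (3 words), so the
 * previous stack pointer is esp+3*4 and the saved eip lives at CFA-3*4.
 * RING0_EC_FRAME accounts for one extra word of error code, hence 4*4.
 */
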
ENTRY(ret_from_fork)
        CFI_STARTPROC
        pushl %eax
        CFI_ADJUST_CFA_OFFSET 4
        call schedule_tail
        GET_THREAD_INFO(%ebp)
        popl %eax
        CFI_ADJUST_CFA_OFFSET -4
        jmp syscall_exit
        CFI_ENDPROC

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

        # userspace resumption stub bypassing syscall exit tracing
        ALIGN
        RING0_PTREGS_FRAME
ret_from_exception:
        preempt_stop
ret_from_intr:
        GET_THREAD_INFO(%ebp)
check_userspace:
        movl EFLAGS(%esp), %eax         # mix EFLAGS and CS
        movb CS(%esp), %al
        testl $(VM_MASK | 3), %eax
        jz resume_kernel
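/*
 * The test above relies on the low 2 bits of the saved CS being the
 * privilege level of the interrupted context (0 = kernel, 3 = user);
 * EFLAGS.VM is folded in because vm86 mode must be treated as user
 * space even though its segment selectors can look kernel-like.
 */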
ENTRY(resume_userspace)
        cli                             # make sure we don't miss an interrupt
                                        # setting need_resched or sigpending
                                        # between sampling and the iret
        movl TI_flags(%ebp), %ecx
        andl $_TIF_WORK_MASK, %ecx      # is there any work to be done on
                                        # int/exception return?
        jne work_pending
        jmp restore_all

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
        cli
        cmpl $0,TI_preempt_count(%ebp)  # non-zero preempt_count ?
        jnz restore_nocheck
need_resched:
        movl TI_flags(%ebp), %ecx       # need_resched set ?
        testb $_TIF_NEED_RESCHED, %cl
        jz restore_all
        testl $IF_MASK,EFLAGS(%esp)     # interrupts off (exception path) ?
        jz restore_all
        call preempt_schedule_irq
        jmp need_resched
#endif
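/*
 * The IF_MASK test above guards the exception path: if the interrupted
 * kernel context ran with interrupts disabled, preempting it here would
 * be unsafe, so preempt_schedule_irq is only called when the saved
 * EFLAGS show interrupts were enabled.
 */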
        CFI_ENDPROC

/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysentry.S, which defines the symbol.  */

        # sysenter call handler stub
ENTRY(sysenter_entry)
        CFI_STARTPROC simple
        CFI_DEF_CFA esp, 0
        CFI_REGISTER esp, ebp
        movl TSS_sysenter_esp0(%esp),%esp
sysenter_past_esp:
        sti
        pushl $(__USER_DS)
        CFI_ADJUST_CFA_OFFSET 4
        /*CFI_REL_OFFSET ss, 0*/
        pushl %ebp
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET esp, 0
        pushfl
        CFI_ADJUST_CFA_OFFSET 4
        pushl $(__USER_CS)
        CFI_ADJUST_CFA_OFFSET 4
        /*CFI_REL_OFFSET cs, 0*/
        /*
         * Push current_thread_info()->sysenter_return to the stack.
         * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
         * pushed above; +8 corresponds to copy_thread's esp0 setting.
         */
        pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET eip, 0

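/*
 * The five pushes above hand-build the frame that a hardware ring
 * transition would have created (ss, esp, eflags, cs, eip) - sysenter
 * itself saves nothing - so the slow exit paths can treat a sysenter
 * entry exactly like an int80 entry and even return with iret.
 */
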
/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
        cmpl $__PAGE_OFFSET-3,%ebp
        jae syscall_fault
1:      movl (%ebp),%ebp
.section __ex_table,"a"
        .align 4
        .long 1b,syscall_fault
.previous

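/*
 * Why __PAGE_OFFSET-3: the 4-byte load at (%ebp) touches bytes
 * %ebp..%ebp+3, so the largest safe %ebp is __PAGE_OFFSET-4; jae rejects
 * anything at or above __PAGE_OFFSET-3.  A fault within the allowed
 * window (e.g. an unmapped user page) is caught via the __ex_table entry.
 */
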
        pushl %eax
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        GET_THREAD_INFO(%ebp)

        /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
        testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
        jnz syscall_trace_entry
        cmpl $(nr_syscalls), %eax
        jae syscall_badsys
        call *sys_call_table(,%eax,4)
        movl %eax,EAX(%esp)
        cli
        movl TI_flags(%ebp), %ecx
        testw $_TIF_ALLWORK_MASK, %cx
        jne syscall_exit_work
/* if something modifies registers it must also disable sysexit */
        movl EIP(%esp), %edx
        movl OLDESP(%esp), %ecx
        xorl %ebp,%ebp
        sti
        sysexit
        CFI_ENDPROC

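/*
 * sysexit's contract: it resumes ring 3 at __USER_CS:%edx with stack
 * __USER_DS:%ecx, hence the loads of the saved EIP and OLDESP just
 * above.  Clearing %ebp also avoids leaking the thread_info pointer
 * that was cached there while in the kernel.
 */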

        # system call handler stub
ENTRY(system_call)
        RING0_INT_FRAME                 # can't unwind into user space anyway
        pushl %eax                      # save orig_eax
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        GET_THREAD_INFO(%ebp)
        testl $TF_MASK,EFLAGS(%esp)
        jz no_singlestep
        orl $_TIF_SINGLESTEP,TI_flags(%ebp)
no_singlestep:
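/*
 * The TF_MASK check above covers a user single-stepping into the system
 * call: the trap flag is still set in the saved EFLAGS, so the thread is
 * tagged with _TIF_SINGLESTEP to make the exit path deliver the SIGTRAP
 * a debugger expects once the call completes.
 */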
        # system call tracing in operation / emulation
        /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
        testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
        jnz syscall_trace_entry
        cmpl $(nr_syscalls), %eax
        jae syscall_badsys
syscall_call:
        call *sys_call_table(,%eax,4)
        movl %eax,EAX(%esp)             # store the return value
syscall_exit:
        cli                             # make sure we don't miss an interrupt
                                        # setting need_resched or sigpending
                                        # between sampling and the iret
        movl TI_flags(%ebp), %ecx
        testw $_TIF_ALLWORK_MASK, %cx   # current->work
        jne syscall_exit_work

restore_all:
        movl EFLAGS(%esp), %eax         # mix EFLAGS, SS and CS
        # Warning: OLDSS(%esp) contains the wrong/random values if we
        # are returning to the kernel.
        # See comments in process.c:copy_thread() for details.
        movb OLDSS(%esp), %ah
        movb CS(%esp), %al
        andl $(VM_MASK | (4 << 8) | 3), %eax
        cmpl $((4 << 8) | 3), %eax
        CFI_REMEMBER_STATE
        je ldt_ss                       # returning to user-space with LDT SS
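/*
 * Decoding the mix above: %al holds the saved CS (low 2 bits = RPL),
 * bit 10 of %eax (4 << 8) is the TI bit of the saved SS, and VM_MASK
 * survives from the original EFLAGS load.  The compare matches only
 * "RPL 3 and SS from the LDT and not vm86", i.e. a user return on an
 * LDT stack segment that needs the espfix path.
 */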
restore_nocheck:
        RESTORE_REGS
        addl $4, %esp
        CFI_ADJUST_CFA_OFFSET -4
1:      iret
.section .fixup,"ax"
iret_exc:
        sti
        pushl $0                        # no error code
        pushl $do_iret_error
        jmp error_code
.previous
.section __ex_table,"a"
        .align 4
        .long 1b,iret_exc
.previous

        CFI_RESTORE_STATE
ldt_ss:
        larl OLDSS(%esp), %eax
        jnz restore_nocheck
        testl $0x00400000, %eax         # returning to 32bit stack?
        jnz restore_nocheck             # all right, normal return
        /* If returning to userspace with 16bit stack,
         * try to fix the higher word of ESP, as the CPU
         * won't restore it.
         * This is an "official" bug of all the x86-compatible
         * CPUs, which we can try to work around to make
         * dosemu and wine happy. */
        subl $8, %esp                   # reserve space for switch16 pointer
        CFI_ADJUST_CFA_OFFSET 8
        cli
        movl %esp, %eax
        /* Set up the 16bit stack frame with switch32 pointer on top,
         * and a switch16 pointer on top of the current frame. */
        call setup_x86_bogus_stack
        CFI_ADJUST_CFA_OFFSET -8        # frame has moved
        RESTORE_REGS
        lss 20+4(%esp), %esp            # switch to 16bit stack
1:      iret
.section __ex_table,"a"
        .align 4
        .long 1b,iret_exc
.previous
        CFI_ENDPROC

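/*
 * Background on the espfix dance: iret restores only the low 16 bits of
 * %esp when returning to a 16-bit stack segment, leaking the kernel's
 * high ESP bits to user space and confusing dosemu/wine.  So the kernel
 * switches to a per-CPU 16-bit stack whose high word is harmless, copies
 * the iret frame there, and irets from that; FIXUP_ESPFIX_STACK (below)
 * performs the reverse switch on the next kernel entry.
 */
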
        # perform work that needs to be done immediately before resumption
        ALIGN
        RING0_PTREGS_FRAME              # can't unwind into user space anyway
work_pending:
        testb $_TIF_NEED_RESCHED, %cl
        jz work_notifysig
work_resched:
        call schedule
        cli                             # make sure we don't miss an interrupt
                                        # setting need_resched or sigpending
                                        # between sampling and the iret
        movl TI_flags(%ebp), %ecx
        andl $_TIF_WORK_MASK, %ecx      # is there any work to be done other
                                        # than syscall tracing?
        jz restore_all
        testb $_TIF_NEED_RESCHED, %cl
        jnz work_resched

work_notifysig:                         # deal with pending signals and
                                        # notify-resume requests
        testl $VM_MASK, EFLAGS(%esp)
        movl %esp, %eax
        jne work_notifysig_v86          # returning to kernel-space or
                                        # vm86-space
        xorl %edx, %edx
        call do_notify_resume
        jmp resume_userspace_sig

        ALIGN
work_notifysig_v86:
#ifdef CONFIG_VM86
        pushl %ecx                      # save ti_flags for do_notify_resume
        CFI_ADJUST_CFA_OFFSET 4
        call save_v86_state             # %eax contains pt_regs pointer
        popl %ecx
        CFI_ADJUST_CFA_OFFSET -4
        movl %eax, %esp
        xorl %edx, %edx
        call do_notify_resume
        jmp resume_userspace_sig
#endif

        # perform syscall entry tracing
        ALIGN
syscall_trace_entry:
        movl $-ENOSYS,EAX(%esp)
        movl %esp, %eax
        xorl %edx,%edx
        call do_syscall_trace
        cmpl $0, %eax
        jne resume_userspace            # ret != 0 -> running under PTRACE_SYSEMU,
                                        # so must skip actual syscall
        movl ORIG_EAX(%esp), %eax
        cmpl $(nr_syscalls), %eax
        jnae syscall_call
        jmp syscall_exit

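/*
 * EAX(%esp) is preloaded with -ENOSYS so that a tracer which suppresses
 * the call (PTRACE_SYSEMU) leaves a sane return value behind.  The
 * syscall number is reloaded from ORIG_EAX afterwards because the tracer
 * may have rewritten it, which is why it is range-checked a second time.
 */
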
        # perform syscall exit tracing
        ALIGN
syscall_exit_work:
        testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
        jz work_pending
        sti                             # could let do_syscall_trace() call
                                        # schedule() instead
        movl %esp, %eax
        movl $1, %edx
        call do_syscall_trace
        jmp resume_userspace
        CFI_ENDPROC

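/*
 * do_syscall_trace() evidently takes a direction flag as its second
 * argument: %edx is 0 on the entry side above and 1 here on the exit
 * side.
 */
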
        RING0_INT_FRAME                 # can't unwind into user space anyway
syscall_fault:
        pushl %eax                      # save orig_eax
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        GET_THREAD_INFO(%ebp)
        movl $-EFAULT,EAX(%esp)
        jmp resume_userspace

syscall_badsys:
        movl $-ENOSYS,EAX(%esp)
        jmp resume_userspace
        CFI_ENDPROC

#define FIXUP_ESPFIX_STACK \
        movl %esp, %eax; \
        /* switch to 32bit stack using the pointer on top of 16bit stack */ \
        lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
        /* copy data from 16bit stack to 32bit stack */ \
        call fixup_x86_bogus_stack; \
        /* put ESP to the proper location */ \
        movl %eax, %esp;
#define UNWIND_ESPFIX_STACK \
        pushl %eax; \
        CFI_ADJUST_CFA_OFFSET 4; \
        movl %ss, %eax; \
        /* see if on 16bit stack */ \
        cmpw $__ESPFIX_SS, %ax; \
        je 28f; \
27:     popl %eax; \
        CFI_ADJUST_CFA_OFFSET -4; \
.section .fixup,"ax"; \
28:     movl $__KERNEL_DS, %eax; \
        movl %eax, %ds; \
        movl %eax, %es; \
        /* switch to 32bit stack */ \
        FIXUP_ESPFIX_STACK; \
        jmp 27b; \
.previous

/*
 * Build the entry stubs and pointer table with
 * some assembler magic.
 */
.data
ENTRY(interrupt)
.text

vector=0
ENTRY(irq_entries_start)
        RING0_INT_FRAME
.rept NR_IRQS
        ALIGN
 .if vector
        CFI_ADJUST_CFA_OFFSET -4
 .endif
1:      pushl $~(vector)
        CFI_ADJUST_CFA_OFFSET 4
        jmp common_interrupt
.data
        .long 1b
.text
vector=vector+1
.endr
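/*
 * Each .rept iteration above emits a stub that pushes the bitwise NOT of
 * its vector number and jumps to common_interrupt, while the .data/.text
 * switching drops the stub's address into the interrupt[] pointer table.
 * Pushing ~vector keeps orig_eax negative, so the exit paths can tell an
 * interrupt frame from a system call (which stores a positive orig_eax).
 */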

        ALIGN
common_interrupt:
        SAVE_ALL
        movl %esp,%eax
        call do_IRQ
        jmp ret_from_intr
        CFI_ENDPROC

#define BUILD_INTERRUPT(name, nr)       \
ENTRY(name)                             \
        RING0_INT_FRAME;                \
        pushl $~(nr);                   \
        CFI_ADJUST_CFA_OFFSET 4;        \
        SAVE_ALL;                       \
        movl %esp,%eax;                 \
        call smp_/**/name;              \
        jmp ret_from_intr;              \
        CFI_ENDPROC
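/*
 * The empty comment in the "call smp_" line above is old-style cpp token
 * pasting: it splices the smp_ prefix onto the macro argument, so
 * BUILD_INTERRUPT(foo, N) ends up calling smp_foo().
 */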

/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"

ENTRY(divide_error)
        RING0_INT_FRAME
        pushl $0                        # no error code
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_divide_error
        CFI_ADJUST_CFA_OFFSET 4
        ALIGN
error_code:
        pushl %ds
        CFI_ADJUST_CFA_OFFSET 4
        /*CFI_REL_OFFSET ds, 0*/
        pushl %eax
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET eax, 0
        xorl %eax, %eax
        pushl %ebp
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET ebp, 0
        pushl %edi
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET edi, 0
        pushl %esi
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET esi, 0
        pushl %edx
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET edx, 0
        decl %eax                       # eax = -1
        pushl %ecx
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET ecx, 0
        pushl %ebx
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET ebx, 0
        cld
        pushl %es
        CFI_ADJUST_CFA_OFFSET 4
        /*CFI_REL_OFFSET es, 0*/
        UNWIND_ESPFIX_STACK
        popl %ecx
        CFI_ADJUST_CFA_OFFSET -4
        /*CFI_REGISTER es, ecx*/
        movl ES(%esp), %edi             # get the function address
        movl ORIG_EAX(%esp), %edx       # get the error code
        movl %eax, ORIG_EAX(%esp)
        movl %ecx, ES(%esp)
        /*CFI_REL_OFFSET es, ES*/
        movl $(__USER_DS), %ecx
        movl %ecx, %ds
        movl %ecx, %es
        movl %esp,%eax                  # pt_regs pointer
        call *%edi
        jmp ret_from_exception
        CFI_ENDPROC

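/*
 * What the shuffle above achieves: each trap stub pushes an error code
 * (or a dummy $0) and then its C handler's address before jumping here.
 * After error_code saves %ds..%ebx, those two words happen to occupy the
 * ES and ORIG_EAX slots of pt_regs.  The handler address and error code
 * are fetched from there, ORIG_EAX is rewritten to -1 ("not a syscall"),
 * and the genuine %es value (pushed, then popped into %ecx around the
 * espfix check) is stored back into its proper slot.
 */
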
ENTRY(coprocessor_error)
        RING0_INT_FRAME
        pushl $0
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_coprocessor_error
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC

ENTRY(simd_coprocessor_error)
        RING0_INT_FRAME
        pushl $0
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_simd_coprocessor_error
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC

ENTRY(device_not_available)
        RING0_INT_FRAME
        pushl $-1                       # mark this as an int
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        movl %cr0, %eax
        testl $0x4, %eax                # EM (math emulation bit)
        jne device_not_available_emulate
        preempt_stop
        call math_state_restore
        jmp ret_from_exception
device_not_available_emulate:
        pushl $0                        # temporary storage for ORIG_EIP
        CFI_ADJUST_CFA_OFFSET 4
        call math_emulate
        addl $4, %esp
        CFI_ADJUST_CFA_OFFSET -4
        jmp ret_from_exception
        CFI_ENDPROC

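/*
 * device_not_available is the lazy-FPU trap: the kernel leaves CR0.TS
 * set after a context switch, so a task's first FPU instruction faults
 * here and math_state_restore reloads that task's FPU context.  Only
 * when CR0.EM is set (no usable hardware FPU) is math_emulate called
 * instead.
 */
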
/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
#define FIX_STACK(offset, ok, label)            \
        cmpw $__KERNEL_CS,4(%esp);              \
        jne ok;                                 \
label:                                          \
        movl TSS_sysenter_esp0+offset(%esp),%esp;       \
        pushfl;                                 \
        pushl $__KERNEL_CS;                     \
        pushl $sysenter_past_esp

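/*
 * The offset argument counts the words already on the faulting stack:
 * 12 (3 words) when a single debug trap or NMI hit on the sysenter
 * instruction, 24 (6 words) for the nested case below where an NMI
 * arrived while the debug handler was still fixing up a sysenter frame.
 */
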
KPROBE_ENTRY(debug)
        RING0_INT_FRAME
        cmpl $sysenter_entry,(%esp)
        jne debug_stack_correct
        FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
debug_stack_correct:
        pushl $-1                       # mark this as an int
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        xorl %edx,%edx                  # error code 0
        movl %esp,%eax                  # pt_regs pointer
        call do_debug
        jmp ret_from_exception
        CFI_ENDPROC
        .previous .text
/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
        RING0_INT_FRAME
        pushl %eax
        CFI_ADJUST_CFA_OFFSET 4
        movl %ss, %eax
        cmpw $__ESPFIX_SS, %ax
        popl %eax
        CFI_ADJUST_CFA_OFFSET -4
        je nmi_16bit_stack
        cmpl $sysenter_entry,(%esp)
        je nmi_stack_fixup
        pushl %eax
        CFI_ADJUST_CFA_OFFSET 4
        movl %esp,%eax
        /* Do not access memory above the end of our stack page,
         * it might not exist.
         */
        andl $(THREAD_SIZE-1),%eax
        cmpl $(THREAD_SIZE-20),%eax
        popl %eax
        CFI_ADJUST_CFA_OFFSET -4
        jae nmi_stack_correct
        cmpl $sysenter_entry,12(%esp)
        je nmi_debug_stack_check
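/*
 * The THREAD_SIZE-20 test above guarantees that the probes around here
 * stay inside our stack page: the deepest access is the word at
 * 16(%esp), so 20 bytes (the 3-word iret frame plus the two peeked
 * words) must all lie before the end of the page.
 */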
nmi_stack_correct:
        pushl %eax
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        xorl %edx,%edx                  # zero error code
        movl %esp,%eax                  # pt_regs pointer
        call do_nmi
        jmp restore_all
        CFI_ENDPROC

nmi_stack_fixup:
        FIX_STACK(12,nmi_stack_correct, 1)
        jmp nmi_stack_correct
nmi_debug_stack_check:
        cmpw $__KERNEL_CS,16(%esp)
        jne nmi_stack_correct
        cmpl $debug,(%esp)
        jb nmi_stack_correct
        cmpl $debug_esp_fix_insn,(%esp)
        ja nmi_stack_correct
        FIX_STACK(24,nmi_stack_correct, 1)
        jmp nmi_stack_correct

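/*
 * The two cmpl instructions above bracket the saved return EIP: only if
 * the NMI interrupted the debug handler between the "debug" entry point
 * and debug_esp_fix_insn (i.e. before the debug handler finished its own
 * stack fixup) is the 24-byte FIX_STACK variant needed.
 */
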
nmi_16bit_stack:
        RING0_INT_FRAME
        /* create the pointer to lss back */
        pushl %ss
        CFI_ADJUST_CFA_OFFSET 4
        pushl %esp
        CFI_ADJUST_CFA_OFFSET 4
        movzwl %sp, %esp
        addw $4, (%esp)
        /* copy the iret frame of 12 bytes */
        .rept 3
        pushl 16(%esp)
        CFI_ADJUST_CFA_OFFSET 4
        .endr
        pushl %eax
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        FIXUP_ESPFIX_STACK              # %eax == %esp
        CFI_ADJUST_CFA_OFFSET -20       # the frame has now moved
        xorl %edx,%edx                  # zero error code
        call do_nmi
        RESTORE_REGS
        lss 12+4(%esp), %esp            # back to 16bit stack
1:      iret
        CFI_ENDPROC
.section __ex_table,"a"
        .align 4
        .long 1b,iret_exc
.previous

KPROBE_ENTRY(int3)
        RING0_INT_FRAME
        pushl $-1                       # mark this as an int
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        xorl %edx,%edx                  # zero error code
        movl %esp,%eax                  # pt_regs pointer
        call do_int3
        jmp ret_from_exception
        CFI_ENDPROC
        .previous .text

ENTRY(overflow)
        RING0_INT_FRAME
        pushl $0
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_overflow
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC

ENTRY(bounds)
        RING0_INT_FRAME
        pushl $0
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_bounds
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC

ENTRY(invalid_op)
        RING0_INT_FRAME
        pushl $0
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_invalid_op
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC

ENTRY(coprocessor_segment_overrun)
        RING0_INT_FRAME
        pushl $0
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_coprocessor_segment_overrun
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC

ENTRY(invalid_TSS)
        RING0_EC_FRAME
        pushl $do_invalid_TSS
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC

ENTRY(segment_not_present)
        RING0_EC_FRAME
        pushl $do_segment_not_present
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC

ENTRY(stack_segment)
        RING0_EC_FRAME
        pushl $do_stack_segment
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC

KPROBE_ENTRY(general_protection)
        RING0_EC_FRAME
        pushl $do_general_protection
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
        .previous .text

ENTRY(alignment_check)
        RING0_EC_FRAME
        pushl $do_alignment_check
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC

KPROBE_ENTRY(page_fault)
        RING0_EC_FRAME
        pushl $do_page_fault
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
        .previous .text

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
        RING0_INT_FRAME
        pushl $0
        CFI_ADJUST_CFA_OFFSET 4
        pushl machine_check_vector
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
#endif

ENTRY(spurious_interrupt_bug)
        RING0_INT_FRAME
        pushl $0
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_spurious_interrupt_bug
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC

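/*
 * Note the pattern above: faults that supply a hardware error code use
 * RING0_EC_FRAME and push only their handler, while the rest push a
 * dummy $0 first so error_code always sees the same layout.  The odd one
 * out is machine_check, which pushes the value stored at
 * machine_check_vector - a function pointer, presumably installed at
 * init time for the detected MCE flavour - rather than an immediate.
 */
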
#ifdef CONFIG_STACK_UNWIND
ENTRY(arch_unwind_init_running)
        CFI_STARTPROC
        movl    4(%esp), %edx
        movl    (%esp), %ecx
        leal    4(%esp), %eax
        movl    %ebx, EBX(%edx)
        xorl    %ebx, %ebx
        movl    %ebx, ECX(%edx)
        movl    %ebx, EDX(%edx)
        movl    %esi, ESI(%edx)
        movl    %edi, EDI(%edx)
        movl    %ebp, EBP(%edx)
        movl    %ebx, EAX(%edx)
        movl    $__USER_DS, DS(%edx)
        movl    $__USER_DS, ES(%edx)
        movl    %ebx, ORIG_EAX(%edx)
        movl    %ecx, EIP(%edx)
        movl    12(%esp), %ecx
        movl    $__KERNEL_CS, CS(%edx)
        movl    %ebx, EFLAGS(%edx)
        movl    %eax, OLDESP(%edx)
        movl    8(%esp), %eax
        movl    %ecx, 8(%esp)
        movl    EBX(%edx), %ebx
        movl    $__KERNEL_DS, OLDSS(%edx)
        jmpl    *%eax
        CFI_ENDPROC
ENDPROC(arch_unwind_init_running)
#endif
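/*
 * arch_unwind_init_running(info, callback, arg) snapshots the caller's
 * state into the pt_regs embedded in the unwind info at 4(%esp): the
 * return address becomes EIP, the caller's stack becomes OLDESP, and the
 * scratch registers are zeroed.  It then rewrites its own stack so the
 * callback receives (info, arg) and tail-jumps to it, giving the
 * unwinder a live, self-consistent starting frame.
 */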

.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)