/*
 *  linux/arch/i386/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'ret_from_system_call':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %gs
 *	28(%esp) - orig_eax
 *	2C(%esp) - %eip
 *	30(%esp) - %cs
 *	34(%esp) - %eflags
 *	38(%esp) - %oldesp
 *	3C(%esp) - %oldss
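 *
 *	These offsets correspond to the PT_EBX..PT_OLDSS constants
 *	generated by asm-offsets.c and used below to index the saved frame.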
 *
 * "current" is in register %ebx during any slow entries.
 */

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include "irq_vectors.h"

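/*
 * syscall_table_size is the byte size of sys_call_table, computed as
 * (.-sys_call_table) where the table is assembled; each entry is a
 * 4-byte function pointer, so dividing by 4 yields the syscall count.
 */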
#define nr_syscalls ((syscall_table_size)/4)

CF_MASK		= 0x00000001
TF_MASK		= 0x00000100
IF_MASK		= 0x00000200
DF_MASK		= 0x00000400
NT_MASK		= 0x00004000
VM_MASK		= 0x00020000

#ifdef CONFIG_PREEMPT
#define preempt_stop		DISABLE_INTERRUPTS; TRACE_IRQS_OFF
#else
#define preempt_stop
#define resume_kernel		restore_nocheck
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $IF_MASK,PT_EFLAGS(%esp)	# interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm

#ifdef CONFIG_VM86
#define resume_userspace_sig	check_userspace
#else
#define resume_userspace_sig	resume_userspace
#endif

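/*
 * SAVE_ALL finishes the pt_regs frame described in the header comment:
 * the CPU and the entry stub have already pushed everything from the
 * iret frame down to orig_eax, and SAVE_ALL adds the segment and
 * general-purpose registers before loading the kernel data segments.
 * RESTORE_REGS below is its inverse; the .fixup entries turn a faulting
 * segment-register pop into a pop of 0 instead of an oops.
 */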
#define SAVE_ALL \
	cld; \
	pushl %gs; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET gs, 0;*/\
	pushl %es; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET es, 0;*/\
	pushl %ds; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET ds, 0;*/\
	pushl %eax; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET eax, 0;\
	pushl %ebp; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebp, 0;\
	pushl %edi; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edi, 0;\
	pushl %esi; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET esi, 0;\
	pushl %edx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edx, 0;\
	pushl %ecx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ecx, 0;\
	pushl %ebx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebx, 0;\
	movl $(__USER_DS), %edx; \
	movl %edx, %ds; \
	movl %edx, %es; \
	movl $(__KERNEL_PDA), %edx; \
	movl %edx, %gs

#define RESTORE_INT_REGS \
	popl %ebx;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ebx;\
	popl %ecx;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ecx;\
	popl %edx;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE edx;\
	popl %esi;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE esi;\
	popl %edi;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE edi;\
	popl %ebp;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ebp;\
	popl %eax;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE eax

#define RESTORE_REGS	\
	RESTORE_INT_REGS; \
1:	popl %ds;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE ds;*/\
2:	popl %es;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE es;*/\
3:	popl %gs;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE gs;*/\
.pushsection .fixup,"ax";	\
4:	movl $0,(%esp);	\
	jmp 1b;		\
5:	movl $0,(%esp);	\
	jmp 2b;		\
6:	movl $0,(%esp);	\
	jmp 3b;		\
.section __ex_table,"a";\
	.align 4;	\
	.long 1b,4b;	\
	.long 2b,5b;	\
	.long 3b,6b;	\
.popsection

#define RING0_INT_FRAME \
	CFI_STARTPROC simple;\
	CFI_SIGNAL_FRAME;\
	CFI_DEF_CFA esp, 3*4;\
	/*CFI_OFFSET cs, -2*4;*/\
	CFI_OFFSET eip, -3*4

#define RING0_EC_FRAME \
	CFI_STARTPROC simple;\
	CFI_SIGNAL_FRAME;\
	CFI_DEF_CFA esp, 4*4;\
	/*CFI_OFFSET cs, -2*4;*/\
	CFI_OFFSET eip, -3*4

#define RING0_PTREGS_FRAME \
	CFI_STARTPROC simple;\
	CFI_SIGNAL_FRAME;\
	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
	CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
	CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
	CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
	CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
	CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
	CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
	CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
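/*
 * The RING0_*_FRAME macros describe the three entry-frame shapes to the
 * DWARF unwinder: an interrupt frame without an error code (three words:
 * eflags, cs, eip), one with an error code (four words), and a full
 * pt_regs frame.
 */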

ENTRY(ret_from_fork)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	pushl $0x0202			# Reset kernel eflags: IF on,
					# reserved bit 1 set
	CFI_ADJUST_CFA_OFFSET 4
	popfl
	CFI_ADJUST_CFA_OFFSET -4
	jmp syscall_exit
	CFI_ENDPROC

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
	RING0_PTREGS_FRAME
ret_from_exception:
	preempt_stop
ret_from_intr:
	GET_THREAD_INFO(%ebp)
check_userspace:
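	# Put the saved CS selector in the low byte of the saved EFLAGS
	# so one masked compare can test both: kernel CS has RPL 0, user
	# CS has RPL 3, and v8086 mode is flagged by VM in EFLAGS.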
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
	cmpl $USER_RPL, %eax
	jb resume_kernel		# not returning to v8086 or userspace

ENTRY(resume_userspace)
	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_nocheck
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $IF_MASK,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
#endif
	CFI_ENDPROC

/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol.  */

	# sysenter call handler stub
ENTRY(sysenter_entry)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 0
	CFI_REGISTER esp, ebp
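	/*
	 * The SYSENTER_ESP MSR (set up in enable_sep_cpu()) points just
	 * past this CPU's TSS, so TSS_sysenter_esp0(%esp) addresses
	 * tss->esp0, the task's real kernel stack pointer.
	 */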
	movl TSS_sysenter_esp0(%esp),%esp
sysenter_past_esp:
	/*
	 * No need to follow this irqs on/off section: the syscall
	 * disabled irqs and here we enable them straight after entry:
	 */
	ENABLE_INTERRUPTS
	pushl $(__USER_DS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ss, 0*/
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esp, 0
	pushfl
	CFI_ADJUST_CFA_OFFSET 4
	pushl $(__USER_CS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
	 */
	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0

	/*
	 * Load the potential sixth argument from user stack.
	 * Careful about security.
	 */
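	/*
	 * %ebp still holds the user stack pointer here; comparing it
	 * against __PAGE_OFFSET-3 keeps the whole 4-byte load below the
	 * kernel boundary, and the __ex_table entry diverts a faulting
	 * load to syscall_fault.
	 */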
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
1:	movl (%ebp),%ebp
.section __ex_table,"a"
	.align 4
	.long 1b,syscall_fault
.previous

	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)

	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)
	DISABLE_INTERRUPTS
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx
	jne syscall_exit_work
/* if something modifies registers it must also disable sysexit */
	movl PT_EIP(%esp), %edx
	movl PT_OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	TRACE_IRQS_ON
1:	mov  PT_GS(%esp), %gs
	ENABLE_INTERRUPTS_SYSEXIT
	CFI_ENDPROC
.pushsection .fixup,"ax"
2:	movl $0,PT_GS(%esp)
	jmp 1b
.section __ex_table,"a"
	.align 4
	.long 1b,2b
.popsection

	# system call handler stub
ENTRY(system_call)
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
	testl $TF_MASK,PT_EFLAGS(%esp)
	jz no_singlestep
	orl $_TIF_SINGLESTEP,TI_flags(%ebp)
no_singlestep:
					# system call tracing in operation / emulation
	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)		# store the return value
syscall_exit:
	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx	# current->work
	jne syscall_exit_work

restore_all:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	CFI_REMEMBER_STATE
	je ldt_ss			# returning to user-space with LDT SS
restore_nocheck:
	TRACE_IRQS_IRET
restore_nocheck_notrace:
	RESTORE_REGS
	addl $4, %esp			# skip orig_eax/error_code
	CFI_ADJUST_CFA_OFFSET -4
1:	INTERRUPT_RETURN
.section .fixup,"ax"
iret_exc:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous

	CFI_RESTORE_STATE
ldt_ss:
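	/*
	 * lar fetches the access-rights dword of the saved SS selector:
	 * ZF clear means the selector is invalid, and bit 22 is the D/B
	 * bit, set for a 32-bit stack segment and clear for a 16-bit one.
	 */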
	larl PT_OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# all right, normal return

#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active.  Rather than try to fixup the high bits of
	 * ESP, bypass this code entirely.  This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16-bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl $0, paravirt_ops+PARAVIRT_enabled
	jne restore_nocheck
#endif

	/* If returning to userspace with 16bit stack,
	 * try to fix the higher word of ESP, as the CPU
	 * won't restore it.
	 * This is an "official" bug of all the x86-compatible
	 * CPUs, which we can try to work around to make
	 * dosemu and wine happy. */
	movl PT_OLDESP(%esp), %eax
	movl %esp, %edx
	call patch_espfix_desc
	pushl $__ESPFIX_SS
	CFI_ADJUST_CFA_OFFSET 4
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	DISABLE_INTERRUPTS
	TRACE_IRQS_OFF
	lss (%esp), %esp
	CFI_ADJUST_CFA_OFFSET -8
	jmp restore_nocheck
	CFI_ENDPROC

	# perform work that needs to be done immediately before resumption
	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
#ifdef CONFIG_VM86
	testl $VM_MASK, PT_EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	ALIGN
work_notifysig_v86:
	pushl %ecx			# save ti_flags for do_notify_resume
	CFI_ADJUST_CFA_OFFSET 4
	call save_v86_state		# %eax contains pt_regs pointer
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	movl %eax, %esp
#else
	movl %esp, %eax
#endif
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	# perform syscall entry tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,PT_EAX(%esp)
	movl %esp, %eax
	xorl %edx,%edx
	call do_syscall_trace
	cmpl $0, %eax
	jne resume_userspace		# ret != 0 -> running under PTRACE_SYSEMU,
					# so must skip actual syscall
	movl PT_ORIG_EAX(%esp), %eax
	cmpl $(nr_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
	jz work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS		# could let do_syscall_trace() call
					# schedule() instead
	movl %esp, %eax
	movl $1, %edx			# flag syscall-exit tracing to
					# do_syscall_trace (entry passes 0)
	call do_syscall_trace
	jmp resume_userspace
	CFI_ENDPROC

	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,PT_EAX(%esp)
	jmp resume_userspace

syscall_badsys:
	movl $-ENOSYS,PT_EAX(%esp)
	jmp resume_userspace
	CFI_ENDPROC

#define FIXUP_ESPFIX_STACK \
	/* since we are on a wrong stack, we can't make it C code :( */ \
	movl %gs:PDA_cpu, %ebx; \
	PER_CPU(cpu_gdt_descr, %ebx); \
	movl GDS_address(%ebx), %ebx; \
	GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
	addl %esp, %eax; \
	pushl $__KERNEL_DS; \
	CFI_ADJUST_CFA_OFFSET 4; \
	pushl %eax; \
	CFI_ADJUST_CFA_OFFSET 4; \
	lss (%esp), %esp; \
	CFI_ADJUST_CFA_OFFSET -8;
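/*
 * FIXUP_ESPFIX_STACK reads the base of this CPU's espfix GDT entry,
 * adds the segment-relative %esp to recover the flat linear address of
 * the stack, and reloads SS:ESP with __KERNEL_DS so the kernel runs on
 * a flat stack again.
 */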
#define UNWIND_ESPFIX_STACK \
	movl %ss, %eax; \
	/* see if on espfix stack */ \
	cmpw $__ESPFIX_SS, %ax; \
	jne 27f; \
	movl $__KERNEL_DS, %eax; \
	movl %eax, %ds; \
	movl %eax, %es; \
	/* switch to normal stack */ \
	FIXUP_ESPFIX_STACK; \
27:;

/*
 * Build the entry stubs and pointer table with
 * some assembler magic.
 */
.data
ENTRY(interrupt)
.text

vector=0
ENTRY(irq_entries_start)
	RING0_INT_FRAME
.rept NR_IRQS
	ALIGN
 .if vector
	CFI_ADJUST_CFA_OFFSET -4
 .endif
1:	pushl $~(vector)
	CFI_ADJUST_CFA_OFFSET 4
	jmp common_interrupt
.data
	.long 1b
.text
vector=vector+1
.endr
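/*
 * Each stub pushes the ones' complement of its vector number, so
 * orig_eax is negative for hardware interrupts; do_IRQ recovers the
 * vector as ~orig_eax, and the signal code can tell interrupts apart
 * from system calls, whose orig_eax is non-negative.
 */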

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	ALIGN
common_interrupt:
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr
	CFI_ENDPROC

#define BUILD_INTERRUPT(name, nr)	\
ENTRY(name)				\
	RING0_INT_FRAME;		\
	pushl $~(nr);			\
	CFI_ADJUST_CFA_OFFSET 4;	\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl %esp,%eax;			\
	call smp_/**/name;		\
	jmp ret_from_intr;		\
	CFI_ENDPROC

/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"

KPROBE_ENTRY(page_fault)
	RING0_EC_FRAME
	pushl $do_page_fault
	CFI_ADJUST_CFA_OFFSET 4
	ALIGN
error_code:
	/* the function address is in %gs's slot on the stack */
	pushl %es
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0*/
	pushl %ds
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0*/
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	pushl %edi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	pushl %esi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	pushl %edx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	pushl %ecx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	cld
	pushl %gs
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET gs, 0*/
	movl $(__KERNEL_PDA), %ecx
	movl %ecx, %gs
	UNWIND_ESPFIX_STACK
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_REGISTER es, ecx*/
	movl PT_GS(%esp), %edi		# get the function address
	movl PT_ORIG_EAX(%esp), %edx	# get the error code
	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
	mov  %ecx, PT_GS(%esp)
	/*CFI_REL_OFFSET gs, ES*/
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
	CFI_ENDPROC
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(simd_coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_simd_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(device_not_available)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_CR0_INTO_EAX
	testl $0x4, %eax		# EM (math emulation bit)
	jne device_not_available_emulate
	preempt_stop
	call math_state_restore
	jmp ret_from_exception
device_not_available_emulate:
	pushl $0			# temporary storage for ORIG_EIP
	CFI_ADJUST_CFA_OFFSET 4
	call math_emulate
	addl $4, %esp
	CFI_ADJUST_CFA_OFFSET -4
	jmp ret_from_exception
	CFI_ENDPROC

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack.  Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
#define FIX_STACK(offset, ok, label)		\
	cmpw $__KERNEL_CS,4(%esp);		\
	jne ok;					\
label:						\
	movl TSS_sysenter_esp0+offset(%esp),%esp;	\
	CFI_DEF_CFA esp, 0;			\
	CFI_UNDEFINED eip;			\
	pushfl;					\
	CFI_ADJUST_CFA_OFFSET 4;		\
	pushl $__KERNEL_CS;			\
	CFI_ADJUST_CFA_OFFSET 4;		\
	pushl $sysenter_past_esp;		\
	CFI_ADJUST_CFA_OFFSET 4;		\
	CFI_REL_OFFSET eip, 0

KPROBE_ENTRY(debug)
	RING0_INT_FRAME
	cmpl $sysenter_entry,(%esp)
	jne debug_stack_correct
	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
debug_stack_correct:
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	CFI_ENDPROC
KPROBE_END(debug)

/*
 * NMI is doubly nasty.  It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack.  So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.