/*
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'syscall_exit':
 * 	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - orig_eax
 *	2C(%esp) - %eip
 *	30(%esp) - %cs
 *	34(%esp) - %eflags
 *	38(%esp) - %oldesp
 *	3C(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */
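
/*
 * Illustrative cross-reference (a sketch, not the authoritative source --
 * see <asm/ptrace.h>): the table above is exactly the user-visible
 * struct pt_regs, laid out bottom-of-stack first:
 *
 *	struct pt_regs {
 *		long ebx, ecx, edx, esi, edi, ebp, eax;
 *		int  xds, xes, xfs;
 *		long orig_eax, eip;
 *		int  xcs;
 *		long eflags, esp;
 *		int  xss;
 *	};
 *
 * The PT_EBX..PT_OLDSS offsets used throughout this file are generated
 * from that layout, which is why SAVE_ALL below must push in exactly
 * the reverse of this order.
 */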

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386		(EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE		0x40000000

#ifndef CONFIG_AUDITSYSCALL
#define sysenter_audit	syscall_trace_entry
#define sysexit_audit	syscall_exit_work
#endif

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */
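
/*
 * A minimal sketch of what this means in practice (illustrative, not the
 * literal paravirt expansion): on native hardware
 *
 *	DISABLE_INTERRUPTS(CLBR_ANY)	patches down to	"cli"
 *	ENABLE_INTERRUPTS(CLBR_ANY)	patches down to	"sti"
 *	INTERRUPT_RETURN		patches down to	"iret"
 *
 * while under a hypervisor each site becomes an indirect call into
 * pv_irq_ops/pv_cpu_ops, which may clobber exactly the registers named
 * by the CLBR_* argument.
 */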

#define nr_syscalls ((syscall_table_size)/4)

#ifdef CONFIG_PREEMPT
#define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
#define preempt_stop(clobbers)
#define resume_kernel		restore_nocheck
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)     # interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm

#ifdef CONFIG_VM86
#define resume_userspace_sig	check_userspace
#else
#define resume_userspace_sig	resume_userspace
#endif

.macro SAVE_ALL
	cld
	pushl %fs
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET fs, 0;*/
	pushl %es
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0;*/
	pushl %ds
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0;*/
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	pushl %edi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	pushl %esi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	pushl %edx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	pushl %ecx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	movl $(__USER_DS), %edx
	movl %edx, %ds
	movl %edx, %es
	movl $(__KERNEL_PERCPU), %edx
	movl %edx, %fs
.endm
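
/*
 * A note on ordering (illustrative): SAVE_ALL pushes in the reverse of
 * the stack-layout table at the top of this file -- %fs first, %ebx
 * last -- so that when it finishes, %esp points at the PT_EBX slot and
 * the frame lines up with struct pt_regs.  The CFI_* directives only
 * describe the frame to the DWARF unwinder; when unwind annotations are
 * configured out, <asm/dwarf2.h> defines them to nothing.
 */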

.macro RESTORE_INT_REGS
	popl %ebx
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE ebx
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE ecx
	popl %edx
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE edx
	popl %esi
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE esi
	popl %edi
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE edi
	popl %ebp
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE ebp
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE eax
.endm

.macro RESTORE_REGS
	RESTORE_INT_REGS
1:	popl %ds
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_RESTORE ds;*/
2:	popl %es
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_RESTORE es;*/
3:	popl %fs
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_RESTORE fs;*/
.pushsection .fixup, "ax"
4:	movl $0, (%esp)
	jmp 1b
5:	movl $0, (%esp)
	jmp 2b
6:	movl $0, (%esp)
	jmp 3b
.section __ex_table, "a"
	.align 4
	.long 1b, 4b
	.long 2b, 5b
	.long 3b, 6b
.popsection
.endm
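
/*
 * How the fixups above work (a sketch): popping a segment register
 * faults in kernel mode if the saved selector is bogus (userspace can
 * arrange that via ptrace or a signal frame).  The __ex_table entries
 * send such a fault to the matching fixup, which overwrites the bad
 * value on the stack with 0 -- the null selector always loads -- and
 * retries the pop.
 */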

.macro RING0_INT_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 3*4
	/*CFI_OFFSET cs, -2*4;*/
	CFI_OFFSET eip, -3*4
.endm

.macro RING0_EC_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 4*4
	/*CFI_OFFSET cs, -2*4;*/
	CFI_OFFSET eip, -3*4
.endm

.macro RING0_PTREGS_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
	CFI_OFFSET eip, PT_EIP-PT_OLDESP
	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
	CFI_OFFSET eax, PT_EAX-PT_OLDESP
	CFI_OFFSET ebp, PT_EBP-PT_OLDESP
	CFI_OFFSET edi, PT_EDI-PT_OLDESP
	CFI_OFFSET esi, PT_ESI-PT_OLDESP
	CFI_OFFSET edx, PT_EDX-PT_OLDESP
	CFI_OFFSET ecx, PT_ECX-PT_OLDESP
	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
.endm

ENTRY(ret_from_fork)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	pushl $0x0202			# Reset kernel eflags
	CFI_ADJUST_CFA_OFFSET 4
	popfl
	CFI_ADJUST_CFA_OFFSET -4
	jmp syscall_exit
	CFI_ENDPROC
END(ret_from_fork)
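	# Spelled out (illustrative): $0x0202 = X86_EFLAGS_IF (bit 9) plus
	# the always-set reserved bit 1 -- interrupts enabled, every other
	# flag clear -- a known-clean EFLAGS for the new kernel context.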

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
	RING0_PTREGS_FRAME
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
	GET_THREAD_INFO(%ebp)
check_userspace:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
	cmpl $USER_RPL, %eax
	jb resume_kernel		# not returning to v8086 or userspace
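	# A worked example of the check above: %eax now holds the saved
	# EFLAGS with its low byte replaced by the low byte of the saved
	# CS, and the andl keeps only X86_EFLAGS_VM (bit 17) plus the CS
	# RPL bits.  A kernel return has VM clear and RPL 0, so the value
	# is 0 < USER_RPL and "jb" takes resume_kernel; RPL 3 or a set VM
	# bit compares >= USER_RPL and falls through to the user path.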

ENTRY(resume_userspace)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_nocheck
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
END(resume_kernel)
#endif
	CFI_ENDPROC

/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol.  */

	# sysenter call handler stub
ENTRY(ia32_sysenter_target)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 0
	CFI_REGISTER esp, ebp
	movl TSS_sysenter_sp0(%esp),%esp
sysenter_past_esp:
	/*
	 * Interrupts are disabled here, but we can't trace that fact until
	 * we have set up enough kernel state to call TRACE_IRQS_OFF - and
	 * we immediately enable interrupts at that point anyway.
	 */
	pushl $(__USER_DS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ss, 0*/
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esp, 0
	pushfl
	orl $X86_EFLAGS_IF, (%esp)
	CFI_ADJUST_CFA_OFFSET 4
	pushl $(__USER_CS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
	 */
	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0
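	/*
	 * The arithmetic above, spelled out (illustrative): the four
	 * pushes plus copy_thread's 8-byte gap below sp0 mean that
	 * %esp + 4*4 + 8 is the top of the THREAD_SIZE-aligned stack,
	 * which is also &thread_info + THREAD_SIZE.  Adding
	 * TI_sysenter_return therefore addresses the field directly,
	 * with no scratch register needed.
	 */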

	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	ENABLE_INTERRUPTS(CLBR_NONE)

/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
1:	movl (%ebp),%ebp
	movl %ebp,PT_EBP(%esp)
.section __ex_table,"a"
	.align 4
	.long 1b,syscall_fault
.previous
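	/*
	 * Why "-3" (illustrative): the 4-byte load at (%ebp) touches
	 * %ebp..%ebp+3, so __PAGE_OFFSET-4 is the last pointer that stays
	 * entirely below the kernel boundary; "jae" with __PAGE_OFFSET-3
	 * rejects everything above it.  The __ex_table entry then catches
	 * the remaining legal-but-unmapped user addresses.
	 */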

	GET_THREAD_INFO(%ebp)

	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz sysenter_audit
sysenter_do_call:
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx
	jne sysexit_audit
sysenter_exit:
/* if something modifies registers it must also disable sysexit */
	movl PT_EIP(%esp), %edx
	movl PT_OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	TRACE_IRQS_ON
1:	mov  PT_FS(%esp), %fs
	ENABLE_INTERRUPTS_SYSEXIT
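	/*
	 * A note on the two loads above: "sysexit" takes its user state
	 * from registers, not the stack -- %edx becomes the user %eip and
	 * %ecx the user %esp -- which is why any path that modifies those
	 * pt_regs slots must fall back to the slower iret path.  The
	 * "sti" paired with sysexit in ENABLE_INTERRUPTS_SYSEXIT runs in
	 * the one-instruction interrupt shadow, so no interrupt can hit
	 * between the two.
	 */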

#ifdef CONFIG_AUDITSYSCALL
sysenter_audit:
	testw $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	addl $4,%esp
	CFI_ADJUST_CFA_OFFSET -4
	/* %esi already in 8(%esp)	   6th arg: 4th syscall arg */
	/* %edx already in 4(%esp)	   5th arg: 3rd syscall arg */
	/* %ecx already in 0(%esp)	   4th arg: 2nd syscall arg */
	movl %ebx,%ecx			/* 3rd arg: 1st syscall arg */
	movl %eax,%edx			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_I386,%eax	/* 1st arg: audit arch */
	call audit_syscall_entry
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	movl PT_EAX(%esp),%eax		/* reload syscall number */
	jmp sysenter_do_call

sysexit_audit:
	testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx
	jne syscall_exit_work
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	movl %eax,%edx		/* second arg, syscall return value */
	cmpl $0,%eax		/* is it < 0? */
	setl %al		/* 1 if so, 0 if not */
	movzbl %al,%eax		/* zero-extend that */
	inc %eax		/* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
	call audit_syscall_exit
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx
	jne syscall_exit_work
	movl PT_EAX(%esp),%eax	/* reload syscall return value */
	jmp sysenter_exit
#endif

	CFI_ENDPROC
.pushsection .fixup,"ax"
2:	movl $0,PT_FS(%esp)
	jmp 1b
.section __ex_table,"a"
	.align 4
	.long 1b,2b
.popsection
ENDPROC(ia32_sysenter_target)

	# system call handler stub
ENTRY(system_call)
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
					# system call tracing in operation / emulation
	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)		# store the return value
syscall_exit:
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx	# current->work
	jne syscall_exit_work

restore_all:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	CFI_REMEMBER_STATE
	je ldt_ss			# returning to user-space with LDT SS
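	# The packed compare above, spelled out: bits 16+ of %eax hold the
	# saved EFLAGS (only X86_EFLAGS_VM survives the andl), bits 8-15
	# the SS low byte (its TI bit selects LDT vs GDT), bits 0-7 the CS
	# low byte (RPL).  Equality therefore means exactly "RPL 3, SS in
	# the LDT, not vm86" -- the one combination needing the espfix
	# path at ldt_ss.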
restore_nocheck:
	TRACE_IRQS_IRET
restore_nocheck_notrace:
	RESTORE_REGS
	addl $4, %esp			# skip orig_eax/error_code
	CFI_ADJUST_CFA_OFFSET -4
irq_return:
	INTERRUPT_RETURN
.section .fixup,"ax"
ENTRY(iret_exc)
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
.section __ex_table,"a"
	.align 4
	.long irq_return,iret_exc
.previous

	CFI_RESTORE_STATE
ldt_ss:
	larl PT_OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# all right, normal return
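	# Note (illustrative): "larl" fetches the access-rights word of
	# the saved SS selector and sets ZF only if the selector is valid,
	# so the first "jnz" bails out on garbage.  Bit 22 of that word is
	# the descriptor's D/B bit -- set for a 32-bit stack segment --
	# and only the 16-bit case needs the fixup below.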

#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active.  Rather than try to fixup the high bits of
	 * ESP, bypass this code entirely.  This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16-bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl $0, pv_info+PARAVIRT_enabled
	jne restore_nocheck
#endif

	/* If returning to userspace with 16bit stack,
	 * try to fix the higher word of ESP, as the CPU
	 * won't restore it.
	 * This is an "official" bug of all the x86-compatible
	 * CPUs, which we can try to work around to make
	 * dosemu and wine happy. */
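	/*
	 * A sketch of the workaround that follows: with a 16-bit SS,
	 * "iret" restores only the low word of ESP and leaves the high
	 * word as whatever the kernel had there.  patch_espfix_desc()
	 * rewrites the __ESPFIX_SS GDT descriptor so its base makes up
	 * the difference, and the "lss" below switches to that stack, so
	 * the flat address userspace ends up with is the one it expects.
	 */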
	movl PT_OLDESP(%esp), %eax
	movl %esp, %edx
	call patch_espfix_desc
	pushl $__ESPFIX_SS
	CFI_ADJUST_CFA_OFFSET 4
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	DISABLE_INTERRUPTS(CLBR_EAX)
	TRACE_IRQS_OFF
	lss (%esp), %esp
	CFI_ADJUST_CFA_OFFSET -8
	jmp restore_nocheck
	CFI_ENDPROC
ENDPROC(system_call)

	# perform work that needs to be done immediately before resumption
	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
#ifdef CONFIG_VM86
	testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	ALIGN
work_notifysig_v86:
	pushl %ecx			# save ti_flags for do_notify_resume
	CFI_ADJUST_CFA_OFFSET 4
	call save_v86_state		# %eax contains pt_regs pointer
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	movl %eax, %esp
#else
	movl %esp, %eax
#endif
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig
END(work_pending)

	# perform syscall entry tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,PT_EAX(%esp)
	movl %esp, %eax
	call syscall_trace_enter
	/* What it returned is what we'll actually use.  */
	cmpl $(nr_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit
END(syscall_trace_entry)

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testb $_TIF_WORK_SYSCALL_EXIT, %cl
	jz work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)	# could let syscall_trace_leave() call
					# schedule() instead
	movl %esp, %eax
	call syscall_trace_leave
	jmp resume_userspace
END(syscall_exit_work)
	CFI_ENDPROC

	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_fault)

syscall_badsys:
	movl $-ENOSYS,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_badsys)
	CFI_ENDPROC

.macro FIXUP_ESPFIX_STACK
	/* since we are on the wrong stack, we can't do this in C :( */
	PER_CPU(gdt_page, %ebx)
	GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah)
	addl %esp, %eax
	pushl $__KERNEL_DS
	CFI_ADJUST_CFA_OFFSET 4
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	lss (%esp), %esp
	CFI_ADJUST_CFA_OFFSET -8
.endm
.macro UNWIND_ESPFIX_STACK
	movl %ss, %eax
	/* see if on espfix stack */
	cmpw $__ESPFIX_SS, %ax
	jne 27f
	movl $__KERNEL_DS, %eax
	movl %eax, %ds
	movl %eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
.endm

/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
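/*
 * The byte budget, spelled out (assuming the short encodings): each
 * stub is a 2-byte "pushl $imm8", plus a 2-byte short jmp for all but
 * the last slot of a chunk -- six 4-byte stubs, one 2-byte stub and a
 * ~5-byte trailing jmp to common_interrupt, about 31 bytes per
 * .balign 32 chunk.  Pushing $(~vector+0x80) keeps every immediate in
 * signed-byte range, which is what makes the 2-byte push possible.
 */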
.section .init.rodata,"a"
ENTRY(interrupt)
.text
	.p2align 5
	.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
	RING0_INT_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
	.balign 32
  .rept	7
    .if vector < NR_VECTORS
      .if vector <> FIRST_EXTERNAL_VECTOR
	CFI_ADJUST_CFA_OFFSET -4
      .endif
1:	pushl $(~vector+0x80)	/* Note: always in signed byte range */
	CFI_ADJUST_CFA_OFFSET 4
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
	jmp 2f
      .endif
      .previous
	.long 1b
      .text
vector=vector+1
    .endif
  .endr
2:	jmp common_interrupt
.endr
END(irq_entries_start)

.previous
END(interrupt)
.previous

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	addl $-0x80,(%esp)	/* Adjust vector into the [-256,-1] range */
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr
ENDPROC(common_interrupt)
	CFI_ENDPROC

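/*
 * A worked example for common_interrupt above: vector 0x31 is pushed
 * by its stub as ~0x31+0x80 = 0x4e (one signed byte); the addl turns
 * that back into ~0x31, so the handler sees orig_eax == ~vector and
 * can recover the vector with a single "not".  Keeping the stored
 * value negative also ensures it can't be mistaken for a syscall's
 * positive orig_eax.
 */
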
#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	RING0_INT_FRAME;		\
	pushl $~(nr);			\
	CFI_ADJUST_CFA_OFFSET 4;	\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl %esp,%eax;			\
	call fn;			\
	jmp ret_from_intr;		\
	CFI_ENDPROC;			\
ENDPROC(name)

#define BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(name, nr, smp_##name)

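/*
 * Illustrative expansion: entry_arch.h contains lines such as
 *
 *	BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
 *
 * which the macros above turn into an ENTRY(reschedule_interrupt) stub
 * that pushes $~(RESCHEDULE_VECTOR), saves registers, and calls
 * smp_reschedule_interrupt() with the pt_regs pointer in %eax.
 */
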
/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"

ENTRY(coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_simd_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(simd_coprocessor_error)

ENTRY(device_not_available)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_device_not_available
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
.section __ex_table,"a"
	.align 4
	.long native_iret, iret_exc
.previous
END(native_iret)

ENTRY(native_irq_enable_sysexit)
	sti
	sysexit
END(native_irq_enable_sysexit)
#endif

ENTRY(overflow)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_overflow
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(overflow)

ENTRY(bounds)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_bounds
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(bounds)

ENTRY(invalid_op)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_invalid_op
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_segment_overrun
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	RING0_EC_FRAME
	pushl $do_invalid_TSS
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(invalid_TSS)

ENTRY(segment_not_present)
	RING0_EC_FRAME
	pushl $do_segment_not_present
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(segment_not_present)

ENTRY(stack_segment)
	RING0_EC_FRAME
	pushl $do_stack_segment
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(stack_segment)

ENTRY(alignment_check)
	RING0_EC_FRAME
	pushl $do_alignment_check
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(alignment_check)

ENTRY(divide_error)
	RING0_INT_FRAME
	pushl $0			# no error code
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_divide_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl machine_check_vector
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_spurious_interrupt_bug
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(spurious_interrupt_bug)

ENTRY(kernel_thread_helper)
	pushl $0		# fake return address for unwinder
	CFI_STARTPROC
	movl %edx,%eax
	push %edx
	CFI_ADJUST_CFA_OFFSET 4
	call *%ebx
	push %eax
	CFI_ADJUST_CFA_OFFSET 4
	call do_exit
	ud2			# padding for call trace
	CFI_ENDPROC
ENDPROC(kernel_thread_helper)

#ifdef CONFIG_XEN
/* Xen doesn't set %esp to be precisely what the normal sysenter
   entry point expects, so fix it up before using the normal path. */
ENTRY(xen_sysenter_target)
	RING0_INT_FRAME
	addl $5*4, %esp		/* remove xen-provided frame */
	CFI_ADJUST_CFA_OFFSET -5*4
	jmp sysenter_past_esp
	CFI_ENDPROC

ENTRY(xen_hypervisor_callback)
	CFI_STARTPROC
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	TRACE_IRQS_OFF

	/* Check to see if we got the event in the critical
	   region in xen_iret_direct, after we've re-enabled
	   events and checked for pending events.  This simulates
	   the iret instruction's behaviour where it delivers a
	   pending interrupt when enabling interrupts. */
	movl PT_EIP(%esp),%eax
	cmpl $xen_iret_start_crit,%eax
	jb   1f
	cmpl $xen_iret_end_crit,%eax
	jae  1f

	jmp  xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov %esp, %eax
	call xen_evtchn_do_upcall
	jmp  ret_from_intr
	CFI_ENDPROC
ENDPROC(xen_hypervisor_callback)

# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we fix up by reattempting the load, and zeroing the segment
# register if the load fails.
# Category 2 we fix up by jumping to do_iret_error. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
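# How the status value works (spelled out): %eax starts at 1; if any of
# the four segment reloads faults, its fixup below (6:-9:) zeroes %eax
# and the offending stack slot, then retries the load.  So %eax == 0
# afterwards means "some segment was bad" (Category 1), while %eax
# still 1 means all reloads succeeded and the original fault must have
# been the IRET itself (Category 2).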
ENTRY(xen_failsafe_callback)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl $1,%eax
1:	mov 4(%esp),%ds
2:	mov 8(%esp),%es
3:	mov 12(%esp),%fs
4:	mov 16(%esp),%gs
	testl %eax,%eax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	lea 16(%esp),%esp
	CFI_ADJUST_CFA_OFFSET -16
	jz 5f
	addl $16,%esp
	jmp iret_exc		# EAX != 0 => Category 2 (Bad IRET)
5:	pushl $0		# EAX == 0 => Category 1 (Bad segment)
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	jmp ret_from_exception
	CFI_ENDPROC

.section .fixup,"ax"
6:	xorl %eax,%eax
	movl %eax,4(%esp)
	jmp 1b
7:	xorl %eax,%eax
	movl %eax,8(%esp)
	jmp 2b
8:	xorl %eax,%eax
	movl %eax,12(%esp)
	jmp 3b
9:	xorl %eax,%eax
	movl %eax,16(%esp)
	jmp 4b
.previous
.section __ex_table,"a"
	.align 4
	.long 1b,6b
	.long 2b,7b
	.long 3b,8b
	.long 4b,9b
.previous
ENDPROC(xen_failsafe_callback)

#endif	/* CONFIG_XEN */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
	ret
END(mcount)

ENTRY(ftrace_caller)
	cmpl $0, function_trace_stop
	jne  ftrace_stub

	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
	call ftrace_stub

	popl %edx
	popl %ecx
	popl %eax
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
	ret
END(ftrace_caller)
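
/*
 * A note on the argument setup in ftrace_caller (and the identical
 * sequence in "trace" below): after the three pushes, 0xc(%esp) is
 * mcount's own return address, i.e. a point just past the "call mcount"
 * inside the traced function, and 0x4(%ebp) is the traced function's
 * return address, i.e. its caller.  Subtracting MCOUNT_INSN_SIZE
 * rewinds the former to the call site itself, which is the ip the
 * tracer records (and, with DYNAMIC_FTRACE, the site that gets patched).
 */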

#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
	cmpl $0, function_trace_stop
	jne  ftrace_stub

	cmpl $ftrace_stub, ftrace_trace_function
	jnz trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller

	cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
	ret

	/* taken from glibc */
trace:
	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax

	call *ftrace_trace_function

	popl %edx
	popl %ecx
	popl %eax
	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %edx
	lea 0x4(%ebp), %eax
	subl $MCOUNT_INSN_SIZE, %edx
	call prepare_ftrace_return
	popl %edx
	popl %ecx
	popl %eax
	ret
END(ftrace_graph_caller)

.globl return_to_handler
return_to_handler:
	pushl $0
	pushl %eax
	pushl %ecx
	pushl %edx
	call ftrace_return_to_handler
	movl %eax, 0xc(%esp)
	popl %edx
	popl %ecx
	popl %eax
	ret
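	/*
	 * The trick above, spelled out: the "pushl $0" reserves the slot
	 * that sits at 0xc(%esp) after the three register pushes;
	 * ftrace_return_to_handler returns the original return address
	 * in %eax, which is written into that slot, so the final "ret"
	 * transparently resumes the traced function's real caller.
	 */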
#endif

.section .rodata,"a"
#include "syscall_table_32.S"

syscall_table_size=(.-sys_call_table)

/*
 * Some functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"

ENTRY(page_fault)
	RING0_EC_FRAME
	pushl $do_page_fault
	CFI_ADJUST_CFA_OFFSET 4
	ALIGN
error_code:
	/* the function address is in %fs's slot on the stack */
	pushl %es
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0*/
	pushl %ds
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0*/
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	pushl %edi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	pushl %esi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	pushl %edx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	pushl %ecx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	cld
	pushl %fs
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET fs, 0*/
	movl $(__KERNEL_PERCPU), %ecx
	movl %ecx, %fs
	UNWIND_ESPFIX_STACK
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_REGISTER es, ecx*/
	movl PT_FS(%esp), %edi		# get the function address
	movl PT_ORIG_EAX(%esp), %edx	# get the error code
	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
	mov  %ecx, PT_FS(%esp)
	/*CFI_REL_OFFSET fs, ES*/
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	TRACE_IRQS_OFF
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
	CFI_ENDPROC
END(page_fault)

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
.macro FIX_STACK offset ok label
	cmpw $__KERNEL_CS, 4(%esp)
	jne \ok
\label:
	movl TSS_sysenter_sp0 + \offset(%esp), %esp
	CFI_DEF_CFA esp, 0
	CFI_UNDEFINED eip
	pushfl
	CFI_ADJUST_CFA_OFFSET 4
	pushl $__KERNEL_CS
	CFI_ADJUST_CFA_OFFSET 4
	pushl $sysenter_past_esp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0
.endm

ENTRY(debug)
	RING0_INT_FRAME
	cmpl $ia32_sysenter_target,(%esp)
	jne debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	CFI_ENDPROC
END(debug)

/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
	RING0_INT_FRAME
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	je nmi_espfix_stack
	cmpl $ia32_sysenter_target,(%esp)
	je nmi_stack_fixup
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %esp,%eax
	/* Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	jae nmi_stack_correct
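	# Note (illustrative): the masked value is %esp's offset within
	# its THREAD_SIZE-aligned stack area.  The checks below peek as
	# far as 16(%esp) -- five words -- so if %esp sits within 20
	# bytes of the top of the area those reads could run off the
	# mapped stack; in that case we conservatively take
	# nmi_stack_correct instead of probing.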
	cmpl $ia32_sysenter_target,12(%esp)
	je nmi_debug_stack_check
nmi_stack_correct:
	/* We have a RING0_INT_FRAME here */
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_nmi
	jmp restore_nocheck_notrace
	CFI_ENDPROC

nmi_stack_fixup:
	RING0_INT_FRAME
	FIX_STACK 12, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_debug_stack_check:
	/* We have a RING0_INT_FRAME here */
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug,(%esp)
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	ja nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_espfix_stack:
	/* We have a RING0_INT_FRAME here.
	 *
	 * Construct the SS:ESP pointer that "lss" will use to switch
	 * back to the espfix stack afterwards.
	 */
	pushl %ss
	CFI_ADJUST_CFA_OFFSET 4
	pushl %esp
	CFI_ADJUST_CFA_OFFSET 4
	addw $4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl 16(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	.endr
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	FIXUP_ESPFIX_STACK		# %eax == %esp
	xorl %edx,%edx			# zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp		# back to espfix stack
	CFI_ADJUST_CFA_OFFSET -24
	jmp irq_return
	CFI_ENDPROC
END(nmi)

ENTRY(int3)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_int3
	jmp ret_from_exception
	CFI_ENDPROC
END(int3)

ENTRY(general_protection)
	RING0_EC_FRAME
	pushl $do_general_protection
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(general_protection)

/*
 * End of kprobes section
 */
	.popsection