/*
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'syscall_exit':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */
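
/*
 * (Illustrative note, not in the original file: with the layout above,
 * the saved %eax slot sits 0x18 bytes into the frame, so the generated
 * asm-offsets constant PT_EAX is 0x18 and "movl %eax,PT_EAX(%esp)" in
 * the syscall exit path below stores the return value exactly where
 * ptrace and signal delivery expect to find it.)
 */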

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeature.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386		(EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE		0x40000000

#ifndef CONFIG_AUDITSYSCALL
#define sysenter_audit		syscall_trace_entry
#define sysexit_audit		syscall_exit_work
#endif

	.section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#define nr_syscalls ((syscall_table_size)/4)
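
/*
 * (Illustrative note: syscall_table_size is defined at the bottom of
 * this file as (.-sys_call_table), the table's size in bytes; every
 * entry is a 4-byte function pointer, hence the division by 4.)
 */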

#ifdef CONFIG_PREEMPT
#define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
#define preempt_stop(clobbers)
#define resume_kernel		restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm

#ifdef CONFIG_VM86
#define resume_userspace_sig	check_userspace
#else
#define resume_userspace_sig	resume_userspace
#endif

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS and kernel only uses it for stack
 * canary which is required to be at %gs:20 by gcc.  Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
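
/*
 * (Illustrative sketch, not from the original file: with
 * -fstack-protector, gcc on 32-bit x86 emits prologue code roughly like
 *
 *	movl %gs:20, %eax	# load the canary
 *	movl %eax, -4(%ebp)	# stash it in the frame
 *
 * and re-checks the slot against %gs:20 before returning, calling
 * __stack_chk_fail on a mismatch.  That is why %gs must point at a
 * segment whose offset 20 holds the canary whenever kernel C code runs.)
 */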
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro PUSH_GS
	pushl_cfi $0
.endm
.macro POP_GS pop=0
	addl $(4 + \pop), %esp
	CFI_ADJUST_CFA_OFFSET -(4 + \pop)
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl_cfi %gs
	/*CFI_REL_OFFSET gs, 0*/
.endm

.macro POP_GS pop=0
98:	popl_cfi %gs
	/*CFI_RESTORE gs*/
	.if \pop <> 0
	add $\pop, %esp
	CFI_ADJUST_CFA_OFFSET -\pop
	.endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, (%esp)
	jmp 98b
.section __ex_table, "a"
	.align 4
	.long 98b, 99b
.popsection
.endm

.macro PTGS_TO_GS
98:	mov PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, PT_GS(%esp)
	jmp 98b
.section __ex_table, "a"
	.align 4
	.long 98b, 99b
.popsection
.endm

.macro GS_TO_REG reg
	movl %gs, \reg
	/*CFI_REGISTER gs, \reg*/
.endm
.macro REG_TO_PTGS reg
	movl \reg, PT_GS(%esp)
	/*CFI_REL_OFFSET gs, PT_GS*/
.endm
.macro SET_KERNEL_GS reg
	movl $(__KERNEL_STACK_CANARY), \reg
	movl \reg, %gs
.endm

#endif	/* CONFIG_X86_32_LAZY_GS */

.macro SAVE_ALL
	cld
	PUSH_GS
	pushl_cfi %fs
	/*CFI_REL_OFFSET fs, 0;*/
	pushl_cfi %es
	/*CFI_REL_OFFSET es, 0;*/
	pushl_cfi %ds
	/*CFI_REL_OFFSET ds, 0;*/
	pushl_cfi %eax
	CFI_REL_OFFSET eax, 0
	pushl_cfi %ebp
	CFI_REL_OFFSET ebp, 0
	pushl_cfi %edi
	CFI_REL_OFFSET edi, 0
	pushl_cfi %esi
	CFI_REL_OFFSET esi, 0
	pushl_cfi %edx
	CFI_REL_OFFSET edx, 0
	pushl_cfi %ecx
	CFI_REL_OFFSET ecx, 0
	pushl_cfi %ebx
	CFI_REL_OFFSET ebx, 0
	movl $(__USER_DS), %edx
	movl %edx, %ds
	movl %edx, %es
	movl $(__KERNEL_PERCPU), %edx
	movl %edx, %fs
	SET_KERNEL_GS %edx
.endm

.macro RESTORE_INT_REGS
	popl_cfi %ebx
	CFI_RESTORE ebx
	popl_cfi %ecx
	CFI_RESTORE ecx
	popl_cfi %edx
	CFI_RESTORE edx
	popl_cfi %esi
	CFI_RESTORE esi
	popl_cfi %edi
	CFI_RESTORE edi
	popl_cfi %ebp
	CFI_RESTORE ebp
	popl_cfi %eax
	CFI_RESTORE eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl_cfi %ds
	/*CFI_RESTORE ds;*/
2:	popl_cfi %es
	/*CFI_RESTORE es;*/
3:	popl_cfi %fs
	/*CFI_RESTORE fs;*/
	POP_GS \pop
.pushsection .fixup, "ax"
4:	movl $0, (%esp)
	jmp 1b
5:	movl $0, (%esp)
	jmp 2b
6:	movl $0, (%esp)
	jmp 3b
.section __ex_table, "a"
	.align 4
	.long 1b, 4b
	.long 2b, 5b
	.long 3b, 6b
.popsection
	POP_GS_EX
.endm

.macro RING0_INT_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 3*4
	/*CFI_OFFSET cs, -2*4;*/
	CFI_OFFSET eip, -3*4
.endm

.macro RING0_EC_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 4*4
	/*CFI_OFFSET cs, -2*4;*/
	CFI_OFFSET eip, -3*4
.endm

.macro RING0_PTREGS_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
	CFI_OFFSET eip, PT_EIP-PT_OLDESP
	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
	CFI_OFFSET eax, PT_EAX-PT_OLDESP
	CFI_OFFSET ebp, PT_EBP-PT_OLDESP
	CFI_OFFSET edi, PT_EDI-PT_OLDESP
	CFI_OFFSET esi, PT_ESI-PT_OLDESP
	CFI_OFFSET edx, PT_EDX-PT_OLDESP
	CFI_OFFSET ecx, PT_ECX-PT_OLDESP
	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
.endm

ENTRY(ret_from_fork)
	CFI_STARTPROC
	pushl_cfi %eax
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl_cfi %eax
	pushl_cfi $0x0202		# Reset kernel eflags
	popfl_cfi
	jmp syscall_exit
	CFI_ENDPROC
END(ret_from_fork)

/*
 * Interrupt exit functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
	RING0_PTREGS_FRAME
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
	GET_THREAD_INFO(%ebp)
check_userspace:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
	cmpl $USER_RPL, %eax
	jb resume_kernel		# not returning to v8086 or userspace

ENTRY(resume_userspace)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_all
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
END(resume_kernel)
#endif
	CFI_ENDPROC
/*
 * End of kprobes section
 */
	.popsection

/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol. */

	# sysenter call handler stub
ENTRY(ia32_sysenter_target)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 0
	CFI_REGISTER esp, ebp
	movl TSS_sysenter_sp0(%esp),%esp
sysenter_past_esp:
	/*
	 * Interrupts are disabled here, but we can't trace that until
	 * enough kernel state has been set up to call TRACE_IRQS_OFF -
	 * and we immediately enable interrupts at that point anyway.
	 */
	pushl_cfi $__USER_DS
	/*CFI_REL_OFFSET ss, 0*/
	pushl_cfi %ebp
	CFI_REL_OFFSET esp, 0
	pushfl_cfi
	orl $X86_EFLAGS_IF, (%esp)
	pushl_cfi $__USER_CS
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
	 */
	pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
	CFI_REL_OFFSET eip, 0
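	/*
	 * (Illustrative arithmetic, an editorial inference from the
	 * comment above: the kernel stack ends at thread_info +
	 * THREAD_SIZE, copy_thread leaves esp0 8 bytes short of that
	 * end, and the 4 words just pushed put %esp another 4*4 bytes
	 * lower, so thread_info = %esp + 4*4 + 8 - THREAD_SIZE and
	 * adding TI_sysenter_return addresses the field from %esp.)
	 */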

	pushl_cfi %eax
	SAVE_ALL
	ENABLE_INTERRUPTS(CLBR_NONE)

/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
1:	movl (%ebp),%ebp
	movl %ebp,PT_EBP(%esp)
.section __ex_table,"a"
	.align 4
	.long 1b,syscall_fault
.previous
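/*
 * (Illustrative note: the 4-byte load at 1: touches %ebp..%ebp+3, so
 * the highest legal %ebp is __PAGE_OFFSET-4.  The unsigned check
 * "cmpl $__PAGE_OFFSET-3,%ebp; jae syscall_fault" therefore rejects
 * exactly the pointers whose load would spill into kernel addresses.)
 */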

	GET_THREAD_INFO(%ebp)

	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz sysenter_audit
sysenter_do_call:
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx
	jne sysexit_audit
sysenter_exit:
/* if something modifies registers it must also disable sysexit */
	movl PT_EIP(%esp), %edx
	movl PT_OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	TRACE_IRQS_ON
1:	mov  PT_FS(%esp), %fs
	PTGS_TO_GS
	ENABLE_INTERRUPTS_SYSEXIT

#ifdef CONFIG_AUDITSYSCALL
sysenter_audit:
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	addl $4,%esp
	CFI_ADJUST_CFA_OFFSET -4
	/* %esi already in 8(%esp)	   6th arg: 4th syscall arg */
	/* %edx already in 4(%esp)	   5th arg: 3rd syscall arg */
	/* %ecx already in 0(%esp)	   4th arg: 2nd syscall arg */
	movl %ebx,%ecx			/* 3rd arg: 1st syscall arg */
	movl %eax,%edx			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_I386,%eax	/* 1st arg: audit arch */
	call audit_syscall_entry
	pushl_cfi %ebx
	movl PT_EAX(%esp),%eax		/* reload syscall number */
	jmp sysenter_do_call

sysexit_audit:
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jne syscall_exit_work
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	movl %eax,%edx		/* second arg, syscall return value */
	cmpl $0,%eax		/* is it < 0? */
	setl %al		/* 1 if so, 0 if not */
	movzbl %al,%eax		/* zero-extend that */
	inc %eax	/* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
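	/*
	 * (Worked example: a return of -EFAULT sets %al = 1, which inc
	 * turns into 2 = AUDITSC_FAILURE; a return of 0 or any positive
	 * value sets %al = 0, which inc turns into 1 = AUDITSC_SUCCESS.)
	 */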
	call audit_syscall_exit
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jne syscall_exit_work
	movl PT_EAX(%esp),%eax	/* reload syscall return value */
	jmp sysenter_exit
#endif

	CFI_ENDPROC
.pushsection .fixup,"ax"
2:	movl $0,PT_FS(%esp)
	jmp 1b
.section __ex_table,"a"
	.align 4
	.long 1b,2b
.popsection
	PTGS_TO_GS_EX
ENDPROC(ia32_sysenter_target)

/*
 * syscall stub including irq exit should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
	# system call handler stub
ENTRY(system_call)
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl_cfi %eax			# save orig_eax
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
					# system call tracing in operation / emulation
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)		# store the return value
syscall_exit:
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx	# current->work
	jne syscall_exit_work

restore_all:
	TRACE_IRQS_IRET
restore_all_notrace:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	CFI_REMEMBER_STATE
	je ldt_ss			# returning to user-space with LDT SS
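	/*
	 * (Illustrative note on the packed test above: after the two
	 * movb's, %al holds the saved CS with its RPL in bits 0-1, %ah
	 * holds the saved SS so its TI bit lands in bit 10 of %eax, and
	 * bit 17 still carries EFLAGS.VM.  The compare thus matches only
	 * "ring-3 return, SS from the LDT, not vm86" - the one case that
	 * needs the espfix path below.)
	 */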
restore_nocheck:
	RESTORE_REGS 4			# skip orig_eax/error_code
irq_return:
	INTERRUPT_RETURN
.section .fixup,"ax"
ENTRY(iret_exc)
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
.section __ex_table,"a"
	.align 4
	.long irq_return,iret_exc
.previous

	CFI_RESTORE_STATE
ldt_ss:
	larl PT_OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# all right, normal return

#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active.  Rather than try to fix up the high bits of
	 * ESP, bypass this code entirely.  This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16-bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl $0, pv_info+PARAVIRT_enabled
	jne restore_nocheck
#endif

/*
 * Setup and switch to ESPFIX stack
 *
 * We're returning to userspace with a 16 bit stack. The CPU will not
 * restore the high word of ESP for us on executing iret... This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy. We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that matches for the difference.
 */
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov %esp, %edx			/* load kernel esp */
	mov PT_OLDESP(%esp), %eax	/* load userspace esp */
	mov %dx, %ax			/* eax: new kernel esp */
	sub %eax, %edx			/* offset (low word is 0) */
	shr $16, %edx
	mov %dl, GDT_ESPFIX_SS + 4	/* bits 16..23 */
	mov %dh, GDT_ESPFIX_SS + 7	/* bits 24..31 */
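	/*
	 * (Worked example, editorial: if the kernel %esp is 0xc15e3f40
	 * and the userspace %esp is 0x0037ff40, the code above builds
	 * %eax = 0x00373f40 (user high word, kernel low word) and
	 * patches the __ESPFIX_SS base to 0xc15e3f40 - 0x00373f40 =
	 * 0xc1270000, so base + new %esp still hits the kernel stack
	 * even though iret will only restore the low 16 bits of %esp.)
	 */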
	pushl_cfi $__ESPFIX_SS
	pushl_cfi %eax			/* new kernel esp */
	/* Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the iret */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss (%esp), %esp		/* switch to espfix segment */
	CFI_ADJUST_CFA_OFFSET -8
	jmp restore_nocheck
	CFI_ENDPROC
ENDPROC(system_call)

	# perform work that needs to be done immediately before resumption
	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
#ifdef CONFIG_VM86
	testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	ALIGN
work_notifysig_v86:
	pushl_cfi %ecx			# save ti_flags for do_notify_resume
	call save_v86_state		# %eax contains pt_regs pointer
	popl_cfi %ecx
	movl %eax, %esp
#else
	movl %esp, %eax
#endif
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig
END(work_pending)

	# perform syscall exit tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,PT_EAX(%esp)
	movl %esp, %eax
	call syscall_trace_enter
	/* What it returned is what we'll actually use.  */
	cmpl $(nr_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit
END(syscall_trace_entry)

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testl $_TIF_WORK_SYSCALL_EXIT, %ecx
	jz work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)	# could let syscall_trace_leave() call
					# schedule() instead
	movl %esp, %eax
	call syscall_trace_leave
	jmp resume_userspace
END(syscall_exit_work)
	CFI_ENDPROC

	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_fault)

syscall_badsys:
	movl $-ENOSYS,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_badsys)
	CFI_ENDPROC
/*
 * End of kprobes section
 */
	.popsection

/*
 * System calls that need a pt_regs pointer.
 */
#define PTREGSCALL0(name) \
	ALIGN; \
ptregs_##name: \
	leal 4(%esp),%eax; \
	jmp sys_##name;

#define PTREGSCALL1(name) \
	ALIGN; \
ptregs_##name: \
	leal 4(%esp),%edx; \
	movl (PT_EBX+4)(%esp),%eax; \
	jmp sys_##name;

#define PTREGSCALL2(name) \
	ALIGN; \
ptregs_##name: \
	leal 4(%esp),%ecx; \
	movl (PT_ECX+4)(%esp),%edx; \
	movl (PT_EBX+4)(%esp),%eax; \
	jmp sys_##name;

#define PTREGSCALL3(name) \
	ALIGN; \
ptregs_##name: \
	CFI_STARTPROC; \
	leal 4(%esp),%eax; \
	pushl_cfi %eax; \
	movl PT_EDX(%eax),%ecx; \
	movl PT_ECX(%eax),%edx; \
	movl PT_EBX(%eax),%eax; \
	call sys_##name; \
	addl $4,%esp; \
	CFI_ADJUST_CFA_OFFSET -4; \
	ret; \
	CFI_ENDPROC; \
ENDPROC(ptregs_##name)

PTREGSCALL1(iopl)
PTREGSCALL0(fork)
PTREGSCALL0(vfork)
PTREGSCALL3(execve)
PTREGSCALL2(sigaltstack)
PTREGSCALL0(sigreturn)
PTREGSCALL0(rt_sigreturn)
PTREGSCALL2(vm86)
PTREGSCALL1(vm86old)

/* Clone is an oddball.  The 4th arg is in %edi */
	ALIGN;
ptregs_clone:
	CFI_STARTPROC
	leal 4(%esp),%eax
	pushl_cfi %eax
	pushl_cfi PT_EDI(%eax)
	movl PT_EDX(%eax),%ecx
	movl PT_ECX(%eax),%edx
	movl PT_EBX(%eax),%eax
	call sys_clone
	addl $8,%esp
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC
ENDPROC(ptregs_clone)

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zerobased stack
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 */
	/* fixup the stack */
	mov GDT_ESPFIX_SS + 4, %al	/* bits 16..23 */
	mov GDT_ESPFIX_SS + 7, %ah	/* bits 24..31 */
	shl $16, %eax
	addl %esp, %eax			/* the adjusted stack pointer */
	pushl_cfi $__KERNEL_DS
	pushl_cfi %eax
	lss (%esp), %esp		/* switch to the normal stack segment */
	CFI_ADJUST_CFA_OFFSET -8
.endm
.macro UNWIND_ESPFIX_STACK
	movl %ss, %eax
	/* see if on espfix stack */
	cmpw $__ESPFIX_SS, %ax
	jne 27f
	movl $__KERNEL_DS, %eax
	movl %eax, %ds
	movl %eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
.endm

/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
.section .init.rodata,"a"
ENTRY(interrupt)
.section .entry.text, "ax"
	.p2align 5
	.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
	RING0_INT_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
	.balign 32
  .rept	7
    .if vector < NR_VECTORS
      .if vector <> FIRST_EXTERNAL_VECTOR
	CFI_ADJUST_CFA_OFFSET -4
      .endif
1:	pushl_cfi $(~vector+0x80)	/* Note: always in signed byte range */
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
	jmp 2f
      .endif
      .previous
	.long 1b
      .section .entry.text, "ax"
vector=vector+1
    .endif
  .endr
2:	jmp common_interrupt
.endr
END(irq_entries_start)

.previous
END(interrupt)
.previous
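
/*
 * (Worked example of the vector encoding: for vector 0x20 the stub
 * pushes ~0x20+0x80 = 0x5f, which fits in a signed byte and keeps the
 * push at 2 bytes; common_interrupt's "addl $-0x80,(%esp)" then yields
 * 0x5f-0x80 = -0x21 = ~0x20, i.e. -(vector+1), which do_IRQ inverts
 * to recover the vector.)
 */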

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	addl $-0x80,(%esp)	/* Adjust vector into the [-256,-1] range */
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr
ENDPROC(common_interrupt)
	CFI_ENDPROC

/*
 * Irq entries should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	RING0_INT_FRAME;		\
	pushl_cfi $~(nr);		\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl %esp,%eax;			\
	call fn;			\
	jmp ret_from_intr;		\
	CFI_ENDPROC;			\
ENDPROC(name)

#define BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(name, nr, smp_##name)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

ENTRY(coprocessor_error)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_coprocessor_error
	jmp error_code
	CFI_ENDPROC
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	RING0_INT_FRAME
	pushl_cfi $0
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
661:	pushl_cfi $do_general_protection
662:
.section .altinstructions,"a"
	.balign 4
	.long 661b
	.long 663f
	.word X86_FEATURE_XMM
	.byte 662b-661b
	.byte 664f-663f
.previous
.section .altinstr_replacement,"ax"
663:	pushl $do_simd_coprocessor_error
664:
.previous
#else
	pushl_cfi $do_simd_coprocessor_error
#endif
	jmp error_code
	CFI_ENDPROC
END(simd_coprocessor_error)

ENTRY(device_not_available)
	RING0_INT_FRAME
	pushl_cfi $-1			# mark this as an int
	pushl_cfi $do_device_not_available
	jmp error_code
	CFI_ENDPROC
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
.section __ex_table,"a"
	.align 4
	.long native_iret, iret_exc
.previous
END(native_iret)

ENTRY(native_irq_enable_sysexit)
	sti
	sysexit
END(native_irq_enable_sysexit)
#endif

ENTRY(overflow)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_overflow
	jmp error_code
	CFI_ENDPROC
END(overflow)

ENTRY(bounds)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_bounds
	jmp error_code
	CFI_ENDPROC
END(bounds)

ENTRY(invalid_op)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_invalid_op
	jmp error_code
	CFI_ENDPROC
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_coprocessor_segment_overrun
	jmp error_code
	CFI_ENDPROC
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	RING0_EC_FRAME
	pushl_cfi $do_invalid_TSS
	jmp error_code
	CFI_ENDPROC
END(invalid_TSS)

ENTRY(segment_not_present)
	RING0_EC_FRAME
	pushl_cfi $do_segment_not_present
	jmp error_code
	CFI_ENDPROC
END(segment_not_present)

ENTRY(stack_segment)
	RING0_EC_FRAME
	pushl_cfi $do_stack_segment
	jmp error_code
	CFI_ENDPROC
END(stack_segment)

ENTRY(alignment_check)
	RING0_EC_FRAME
	pushl_cfi $do_alignment_check
	jmp error_code
	CFI_ENDPROC
END(alignment_check)

ENTRY(divide_error)
	RING0_INT_FRAME
	pushl_cfi $0			# no error code
	pushl_cfi $do_divide_error
	jmp error_code
	CFI_ENDPROC
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi machine_check_vector
	jmp error_code
	CFI_ENDPROC
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_spurious_interrupt_bug
	jmp error_code
	CFI_ENDPROC
END(spurious_interrupt_bug)
/*
 * End of kprobes section
 */
	.popsection

ENTRY(kernel_thread_helper)
	pushl $0		# fake return address for unwinder
	CFI_STARTPROC
	movl %edi,%eax
	call *%esi
	call do_exit
	ud2			# padding for call trace
	CFI_ENDPROC
ENDPROC(kernel_thread_helper)

#ifdef CONFIG_XEN
/* Xen doesn't set %esp to be precisely what the normal sysenter
   entrypoint expects, so fix it up before using the normal path. */
ENTRY(xen_sysenter_target)
	RING0_INT_FRAME
	addl $5*4, %esp		/* remove xen-provided frame */
	CFI_ADJUST_CFA_OFFSET -5*4
	jmp sysenter_past_esp
	CFI_ENDPROC

ENTRY(xen_hypervisor_callback)
	CFI_STARTPROC
	pushl_cfi $0
	SAVE_ALL
	TRACE_IRQS_OFF

	/* Check to see if we got the event in the critical
	   region in xen_iret_direct, after we've reenabled
	   events and checked for pending events.  This simulates
	   iret instruction's behaviour where it delivers a
	   pending interrupt when enabling interrupts. */
	movl PT_EIP(%esp),%eax
	cmpl $xen_iret_start_crit,%eax
	jb   1f
	cmpl $xen_iret_end_crit,%eax
	jae  1f

	jmp  xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov %esp, %eax
	call xen_evtchn_do_upcall
	jmp  ret_from_intr
	CFI_ENDPROC
ENDPROC(xen_hypervisor_callback)

# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we fix up by reattempting the load, and zeroing the segment
# register if the load fails.
# Category 2 we fix up by jumping to do_iret_error. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
ENTRY(xen_failsafe_callback)
	CFI_STARTPROC
	pushl_cfi %eax
	movl $1,%eax
1:	mov 4(%esp),%ds
2:	mov 8(%esp),%es
3:	mov 12(%esp),%fs
4:	mov 16(%esp),%gs
	testl %eax,%eax
	popl_cfi %eax
	lea 16(%esp),%esp
	CFI_ADJUST_CFA_OFFSET -16
	jz 5f
	addl $16,%esp
	jmp iret_exc		# EAX != 0 => Category 2 (Bad IRET)
5:	pushl_cfi $0		# EAX == 0 => Category 1 (Bad segment)
	SAVE_ALL
	jmp ret_from_exception
	CFI_ENDPROC

.section .fixup,"ax"
6:	xorl %eax,%eax
	movl %eax,4(%esp)
	jmp 1b
7:	xorl %eax,%eax
	movl %eax,8(%esp)
	jmp 2b
8:	xorl %eax,%eax
	movl %eax,12(%esp)
	jmp 3b
9:	xorl %eax,%eax
	movl %eax,16(%esp)
	jmp 4b
.previous
.section __ex_table,"a"
	.align 4
	.long 1b,6b
	.long 2b,7b
	.long 3b,8b
	.long 4b,9b
.previous
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
		xen_evtchn_do_upcall)

#endif	/* CONFIG_XEN */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
	ret
END(mcount)

ENTRY(ftrace_caller)
	cmpl $0, function_trace_stop
	jne  ftrace_stub

	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax
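	/*
	 * (Illustrative note: after the three pushes, the return address
	 * of the mcount call sits at 0xc(%esp); subtracting the call
	 * instruction's size (MCOUNT_INSN_SIZE) turns it into the
	 * address of the mcount call site in the traced function, while
	 * 0x4(%ebp) fetches the caller's return address from the
	 * standard frame.)
	 */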

.globl ftrace_call
ftrace_call:
	call ftrace_stub

	popl %edx
	popl %ecx
	popl %eax
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
	ret
END(ftrace_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
	cmpl $0, function_trace_stop
	jne  ftrace_stub

	cmpl $ftrace_stub, ftrace_trace_function
	jnz trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller

	cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
	ret

	/* taken from glibc */
trace:
	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax

	call *ftrace_trace_function

	popl %edx
	popl %ecx
	popl %eax
	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %edx
	lea 0x4(%ebp), %eax
	movl (%ebp), %ecx
	subl $MCOUNT_INSN_SIZE, %edx
	call prepare_ftrace_return
	popl %edx
	popl %ecx
	popl %eax
	ret
END(ftrace_graph_caller)

.globl return_to_handler
return_to_handler:
	pushl %eax
	pushl %edx
	movl %ebp, %eax
	call ftrace_return_to_handler
	movl %eax, %ecx
	popl %edx
	popl %eax
	jmp *%ecx
#endif

.section .rodata,"a"
#include "syscall_table_32.S"

syscall_table_size=(.-sys_call_table)

/*
 * Some functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"

ENTRY(page_fault)
	RING0_EC_FRAME
	pushl_cfi $do_page_fault
	ALIGN
error_code:
	/* the function address is in %gs's slot on the stack */
	pushl_cfi %fs
	/*CFI_REL_OFFSET fs, 0*/
	pushl_cfi %es
	/*CFI_REL_OFFSET es, 0*/
	pushl_cfi %ds
	/*CFI_REL_OFFSET ds, 0*/
	pushl_cfi %eax
	CFI_REL_OFFSET eax, 0
	pushl_cfi %ebp
	CFI_REL_OFFSET ebp, 0
	pushl_cfi %edi
	CFI_REL_OFFSET edi, 0
	pushl_cfi %esi
	CFI_REL_OFFSET esi, 0
	pushl_cfi %edx
	CFI_REL_OFFSET edx, 0
	pushl_cfi %ecx
	CFI_REL_OFFSET ecx, 0
	pushl_cfi %ebx
	CFI_REL_OFFSET ebx, 0
	cld
	movl $(__KERNEL_PERCPU), %ecx
	movl %ecx, %fs
	UNWIND_ESPFIX_STACK
	GS_TO_REG %ecx
	movl PT_GS(%esp), %edi		# get the function address
	movl PT_ORIG_EAX(%esp), %edx	# get the error code
	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	TRACE_IRQS_OFF
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
	CFI_ENDPROC
END(page_fault)

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
.macro FIX_STACK offset ok label
	cmpw $__KERNEL_CS, 4(%esp)
	jne \ok
\label:
	movl TSS_sysenter_sp0 + \offset(%esp), %esp
	CFI_DEF_CFA esp, 0
	CFI_UNDEFINED eip
	pushfl_cfi
	pushl_cfi $__KERNEL_CS
	pushl_cfi $sysenter_past_esp
	CFI_REL_OFFSET eip, 0
.endm
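
/*
 * (Illustrative note on the \offset parameter: the debug path below
 * uses offset 12 because the trap itself pushed eflags, cs and eip -
 * three words - on top of the stack that held TSS_sysenter_sp0; the
 * nested NMI-over-debug path uses 24 because two such 3-word frames
 * have been pushed by then.)
 */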

ENTRY(debug)
	RING0_INT_FRAME
	cmpl $ia32_sysenter_target,(%esp)
	jne debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
	pushl_cfi $-1			# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	CFI_ENDPROC
END(debug)

/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
	RING0_INT_FRAME
	pushl_cfi %eax
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax
	popl_cfi %eax
	je nmi_espfix_stack
	cmpl $ia32_sysenter_target,(%esp)
	je nmi_stack_fixup
	pushl_cfi %eax
	movl %esp,%eax
	/* Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	popl_cfi %eax
	jae nmi_stack_correct
	cmpl $ia32_sysenter_target,12(%esp)
	je nmi_debug_stack_check
nmi_stack_correct:
	/* We have a RING0_INT_FRAME here */
	pushl_cfi %eax
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_nmi
	jmp restore_all_notrace
	CFI_ENDPROC

nmi_stack_fixup:
	RING0_INT_FRAME
	FIX_STACK 12, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_debug_stack_check:
	/* We have a RING0_INT_FRAME here */
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug,(%esp)
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	ja nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_espfix_stack:
	/* We have a RING0_INT_FRAME here.
	 *
	 * create the pointer to lss back
	 */
	pushl_cfi %ss
	pushl_cfi %esp
	addl $4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl_cfi 16(%esp)
	.endr
	pushl_cfi %eax
	SAVE_ALL
	FIXUP_ESPFIX_STACK		# %eax == %esp
	xorl %edx,%edx			# zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp		# back to espfix stack
	CFI_ADJUST_CFA_OFFSET -24
	jmp irq_return
	CFI_ENDPROC
END(nmi)

ENTRY(int3)
	RING0_INT_FRAME
	pushl_cfi $-1			# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_int3
	jmp ret_from_exception
	CFI_ENDPROC
END(int3)

ENTRY(general_protection)
	RING0_EC_FRAME
	pushl_cfi $do_general_protection
	jmp error_code
	CFI_ENDPROC
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	RING0_EC_FRAME
	pushl_cfi $do_async_page_fault
	jmp error_code
	CFI_ENDPROC
END(async_page_fault)
#endif

/*
 * End of kprobes section
 */
	.popsection