Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
1da177e4 LT |
2 | * |
3 | * Copyright (C) 1991, 1992 Linus Torvalds | |
4 | */ | |
5 | ||
6 | /* | |
7 | * entry.S contains the system-call and fault low-level handling routines. | |
8 | * This also contains the timer-interrupt handler, as well as all interrupts | |
9 | * and faults that can result in a task-switch. | |
10 | * | |
11 | * NOTE: This code handles signal-recognition, which happens every time | |
12 | * after a timer-interrupt and after each system call. | |
13 | * | |
14 | * I changed all the .align's to 4 (16 byte alignment), as that's faster | |
15 | * on a 486. | |
16 | * | |
889f21ce | 17 | * Stack layout in 'syscall_exit': |
1da177e4 LT |
18 | * ptrace needs to have all regs on the stack. |
19 | * if the order here is changed, it needs to be | |
20 | * updated in fork.c:copy_process, signal.c:do_signal, | |
21 | * ptrace.c and ptrace.h | |
22 | * | |
23 | * 0(%esp) - %ebx | |
24 | * 4(%esp) - %ecx | |
25 | * 8(%esp) - %edx | |
26 | * C(%esp) - %esi | |
27 | * 10(%esp) - %edi | |
28 | * 14(%esp) - %ebp | |
29 | * 18(%esp) - %eax | |
30 | * 1C(%esp) - %ds | |
31 | * 20(%esp) - %es | |
464d1a78 | 32 | * 24(%esp) - %fs |
ccbeed3a TH |
33 | * 28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS |
34 | * 2C(%esp) - orig_eax | |
35 | * 30(%esp) - %eip | |
36 | * 34(%esp) - %cs | |
37 | * 38(%esp) - %eflags | |
38 | * 3C(%esp) - %oldesp | |
39 | * 40(%esp) - %oldss | |
1da177e4 LT |
40 | * |
41 | * "current" is in register %ebx during any slow entries. | |
42 | */ | |
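For orientation, the offsets listed above are exactly the 32-bit saved-register frame (pt_regs) that the rest of this file manipulates. A minimal stand-alone sketch of that layout, as an illustration only, assuming a 32-bit build where `unsigned long` is 4 bytes and using the field names of the x86-32 pt_regs of this era:

```c
#include <stddef.h>

/* One 32-bit word per slot, in the layout given in the comment above. */
struct pt_regs_sketch {
	unsigned long bx, cx, dx, si, di, bp, ax;	/* 0x00 .. 0x18 */
	unsigned long ds, es, fs, gs;			/* 0x1C .. 0x28 */
	unsigned long orig_ax;				/* 0x2C */
	unsigned long ip, cs, flags, sp, ss;		/* 0x30 .. 0x40 */
};

/* Compile-time check that, e.g., orig_ax really sits at 0x2C as listed above. */
typedef char check_orig_ax[(offsetof(struct pt_regs_sketch, orig_ax) == 0x2C) ? 1 : -1];
```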
43 | ||
1da177e4 LT |
44 | #include <linux/linkage.h> |
45 | #include <asm/thread_info.h> | |
55f327fa | 46 | #include <asm/irqflags.h> |
1da177e4 LT |
47 | #include <asm/errno.h> |
48 | #include <asm/segment.h> | |
49 | #include <asm/smp.h> | |
0341c14d | 50 | #include <asm/page_types.h> |
be44d2aa | 51 | #include <asm/percpu.h> |
fe7cacc1 | 52 | #include <asm/dwarf2.h> |
ab68ed98 | 53 | #include <asm/processor-flags.h> |
395a59d0 | 54 | #include <asm/ftrace.h> |
9b7dc567 | 55 | #include <asm/irq_vectors.h> |
40d2e763 | 56 | #include <asm/cpufeature.h> |
1da177e4 | 57 | |
af0575bb RM |
58 | /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ |
59 | #include <linux/elf-em.h> | |
60 | #define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE) | |
61 | #define __AUDIT_ARCH_LE 0x40000000 | |
62 | ||
63 | #ifndef CONFIG_AUDITSYSCALL | |
64 | #define sysenter_audit syscall_trace_entry | |
65 | #define sysexit_audit syscall_exit_work | |
66 | #endif | |
67 | ||
139ec7c4 RR |
68 | /* |
69 | * We use macros for low-level operations which need to be overridden | |
70 | * for paravirtualization. The following will never clobber any registers: | |
71 | * INTERRUPT_RETURN (aka. "iret") | |
72 | * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax") | |
d75cd22f | 73 | * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit"). |
139ec7c4 RR |
74 | * |
75 | * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must | |
76 | * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY). | |
77 | * Allowing a register to be clobbered can shrink the paravirt replacement | |
78 | * enough to patch inline, increasing performance. | |
79 | */ | |
80 | ||
1da177e4 LT |
81 | #define nr_syscalls ((syscall_table_size)/4) |
82 | ||
1da177e4 | 83 | #ifdef CONFIG_PREEMPT |
139ec7c4 | 84 | #define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF |
1da177e4 | 85 | #else |
139ec7c4 | 86 | #define preempt_stop(clobbers) |
2e04bc76 | 87 | #define resume_kernel restore_all |
1da177e4 LT |
88 | #endif |
89 | ||
55f327fa IM |
90 | .macro TRACE_IRQS_IRET |
91 | #ifdef CONFIG_TRACE_IRQFLAGS | |
ab68ed98 | 92 | testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off? |
55f327fa IM |
93 | jz 1f |
94 | TRACE_IRQS_ON | |
95 | 1: | |
96 | #endif | |
97 | .endm | |
98 | ||
4031ff38 AG |
99 | #ifdef CONFIG_VM86 |
100 | #define resume_userspace_sig check_userspace | |
101 | #else | |
102 | #define resume_userspace_sig resume_userspace | |
103 | #endif | |
104 | ||
ccbeed3a TH |
105 | /* |
106 | * User gs save/restore | |
107 | * | |
108 | * %gs is used for userland TLS; the kernel only uses it for the stack | |
109 | * canary, which gcc requires to be at %gs:20. Read the comment | |
110 | * at the top of stackprotector.h for more info. | |
111 | * | |
112 | * Local labels 98 and 99 are used. | |
113 | */ | |
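A minimal sketch of why the canary must sit exactly at %gs:20, as the comment above says: 32-bit gcc's -fstack-protector emits reads of the canary from the fixed address %gs:20, so whatever per-cpu object %gs is based on has to place the canary word 20 bytes in. Roughly mirroring the structure that stackprotector.h describes:

```c
/* Sketch of the %gs-based object used when !CONFIG_X86_32_LAZY_GS. */
struct stack_canary_sketch {
	char pad[20];		/* filler so the canary lands at offset 20 */
	unsigned long canary;	/* value gcc's stack-protector code compares */
};
```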
114 | #ifdef CONFIG_X86_32_LAZY_GS | |
115 | ||
116 | /* unfortunately push/pop can't be no-op */ | |
117 | .macro PUSH_GS | |
df5d1874 | 118 | pushl_cfi $0 |
ccbeed3a TH |
119 | .endm |
120 | .macro POP_GS pop=0 | |
121 | addl $(4 + \pop), %esp | |
122 | CFI_ADJUST_CFA_OFFSET -(4 + \pop) | |
123 | .endm | |
124 | .macro POP_GS_EX | |
125 | .endm | |
126 | ||
127 | /* all the rest are no-op */ | |
128 | .macro PTGS_TO_GS | |
129 | .endm | |
130 | .macro PTGS_TO_GS_EX | |
131 | .endm | |
132 | .macro GS_TO_REG reg | |
133 | .endm | |
134 | .macro REG_TO_PTGS reg | |
135 | .endm | |
136 | .macro SET_KERNEL_GS reg | |
137 | .endm | |
138 | ||
139 | #else /* CONFIG_X86_32_LAZY_GS */ | |
140 | ||
141 | .macro PUSH_GS | |
df5d1874 | 142 | pushl_cfi %gs |
ccbeed3a TH |
143 | /*CFI_REL_OFFSET gs, 0*/ |
144 | .endm | |
145 | ||
146 | .macro POP_GS pop=0 | |
df5d1874 | 147 | 98: popl_cfi %gs |
ccbeed3a TH |
148 | /*CFI_RESTORE gs*/ |
149 | .if \pop <> 0 | |
150 | add $\pop, %esp | |
151 | CFI_ADJUST_CFA_OFFSET -\pop | |
152 | .endif | |
153 | .endm | |
154 | .macro POP_GS_EX | |
155 | .pushsection .fixup, "ax" | |
156 | 99: movl $0, (%esp) | |
157 | jmp 98b | |
158 | .section __ex_table, "a" | |
159 | .align 4 | |
160 | .long 98b, 99b | |
161 | .popsection | |
162 | .endm | |
163 | ||
164 | .macro PTGS_TO_GS | |
165 | 98: mov PT_GS(%esp), %gs | |
166 | .endm | |
167 | .macro PTGS_TO_GS_EX | |
168 | .pushsection .fixup, "ax" | |
169 | 99: movl $0, PT_GS(%esp) | |
170 | jmp 98b | |
171 | .section __ex_table, "a" | |
172 | .align 4 | |
173 | .long 98b, 99b | |
174 | .popsection | |
175 | .endm | |
176 | ||
177 | .macro GS_TO_REG reg | |
178 | movl %gs, \reg | |
179 | /*CFI_REGISTER gs, \reg*/ | |
180 | .endm | |
181 | .macro REG_TO_PTGS reg | |
182 | movl \reg, PT_GS(%esp) | |
183 | /*CFI_REL_OFFSET gs, PT_GS*/ | |
184 | .endm | |
185 | .macro SET_KERNEL_GS reg | |
60a5317f | 186 | movl $(__KERNEL_STACK_CANARY), \reg |
ccbeed3a TH |
187 | movl \reg, %gs |
188 | .endm | |
189 | ||
190 | #endif /* CONFIG_X86_32_LAZY_GS */ | |
191 | ||
f0d96110 TH |
192 | .macro SAVE_ALL |
193 | cld | |
ccbeed3a | 194 | PUSH_GS |
df5d1874 | 195 | pushl_cfi %fs |
f0d96110 | 196 | /*CFI_REL_OFFSET fs, 0;*/ |
df5d1874 | 197 | pushl_cfi %es |
f0d96110 | 198 | /*CFI_REL_OFFSET es, 0;*/ |
df5d1874 | 199 | pushl_cfi %ds |
f0d96110 | 200 | /*CFI_REL_OFFSET ds, 0;*/ |
df5d1874 | 201 | pushl_cfi %eax |
f0d96110 | 202 | CFI_REL_OFFSET eax, 0 |
df5d1874 | 203 | pushl_cfi %ebp |
f0d96110 | 204 | CFI_REL_OFFSET ebp, 0 |
df5d1874 | 205 | pushl_cfi %edi |
f0d96110 | 206 | CFI_REL_OFFSET edi, 0 |
df5d1874 | 207 | pushl_cfi %esi |
f0d96110 | 208 | CFI_REL_OFFSET esi, 0 |
df5d1874 | 209 | pushl_cfi %edx |
f0d96110 | 210 | CFI_REL_OFFSET edx, 0 |
df5d1874 | 211 | pushl_cfi %ecx |
f0d96110 | 212 | CFI_REL_OFFSET ecx, 0 |
df5d1874 | 213 | pushl_cfi %ebx |
f0d96110 TH |
214 | CFI_REL_OFFSET ebx, 0 |
215 | movl $(__USER_DS), %edx | |
216 | movl %edx, %ds | |
217 | movl %edx, %es | |
218 | movl $(__KERNEL_PERCPU), %edx | |
464d1a78 | 219 | movl %edx, %fs |
ccbeed3a | 220 | SET_KERNEL_GS %edx |
f0d96110 | 221 | .endm |
1da177e4 | 222 | |
f0d96110 | 223 | .macro RESTORE_INT_REGS |
df5d1874 | 224 | popl_cfi %ebx |
f0d96110 | 225 | CFI_RESTORE ebx |
df5d1874 | 226 | popl_cfi %ecx |
f0d96110 | 227 | CFI_RESTORE ecx |
df5d1874 | 228 | popl_cfi %edx |
f0d96110 | 229 | CFI_RESTORE edx |
df5d1874 | 230 | popl_cfi %esi |
f0d96110 | 231 | CFI_RESTORE esi |
df5d1874 | 232 | popl_cfi %edi |
f0d96110 | 233 | CFI_RESTORE edi |
df5d1874 | 234 | popl_cfi %ebp |
f0d96110 | 235 | CFI_RESTORE ebp |
df5d1874 | 236 | popl_cfi %eax |
fe7cacc1 | 237 | CFI_RESTORE eax |
f0d96110 | 238 | .endm |
1da177e4 | 239 | |
ccbeed3a | 240 | .macro RESTORE_REGS pop=0 |
f0d96110 | 241 | RESTORE_INT_REGS |
df5d1874 | 242 | 1: popl_cfi %ds |
f0d96110 | 243 | /*CFI_RESTORE ds;*/ |
df5d1874 | 244 | 2: popl_cfi %es |
f0d96110 | 245 | /*CFI_RESTORE es;*/ |
df5d1874 | 246 | 3: popl_cfi %fs |
f0d96110 | 247 | /*CFI_RESTORE fs;*/ |
ccbeed3a | 248 | POP_GS \pop |
f0d96110 TH |
249 | .pushsection .fixup, "ax" |
250 | 4: movl $0, (%esp) | |
251 | jmp 1b | |
252 | 5: movl $0, (%esp) | |
253 | jmp 2b | |
254 | 6: movl $0, (%esp) | |
255 | jmp 3b | |
256 | .section __ex_table, "a" | |
257 | .align 4 | |
258 | .long 1b, 4b | |
259 | .long 2b, 5b | |
260 | .long 3b, 6b | |
f95d47ca | 261 | .popsection |
ccbeed3a | 262 | POP_GS_EX |
f0d96110 | 263 | .endm |
1da177e4 | 264 | |
f0d96110 TH |
265 | .macro RING0_INT_FRAME |
266 | CFI_STARTPROC simple | |
267 | CFI_SIGNAL_FRAME | |
268 | CFI_DEF_CFA esp, 3*4 | |
269 | /*CFI_OFFSET cs, -2*4;*/ | |
fe7cacc1 | 270 | CFI_OFFSET eip, -3*4 |
f0d96110 | 271 | .endm |
fe7cacc1 | 272 | |
f0d96110 TH |
273 | .macro RING0_EC_FRAME |
274 | CFI_STARTPROC simple | |
275 | CFI_SIGNAL_FRAME | |
276 | CFI_DEF_CFA esp, 4*4 | |
277 | /*CFI_OFFSET cs, -2*4;*/ | |
fe7cacc1 | 278 | CFI_OFFSET eip, -3*4 |
f0d96110 | 279 | .endm |
fe7cacc1 | 280 | |
f0d96110 TH |
281 | .macro RING0_PTREGS_FRAME |
282 | CFI_STARTPROC simple | |
283 | CFI_SIGNAL_FRAME | |
284 | CFI_DEF_CFA esp, PT_OLDESP-PT_EBX | |
285 | /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/ | |
286 | CFI_OFFSET eip, PT_EIP-PT_OLDESP | |
287 | /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/ | |
288 | /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/ | |
289 | CFI_OFFSET eax, PT_EAX-PT_OLDESP | |
290 | CFI_OFFSET ebp, PT_EBP-PT_OLDESP | |
291 | CFI_OFFSET edi, PT_EDI-PT_OLDESP | |
292 | CFI_OFFSET esi, PT_ESI-PT_OLDESP | |
293 | CFI_OFFSET edx, PT_EDX-PT_OLDESP | |
294 | CFI_OFFSET ecx, PT_ECX-PT_OLDESP | |
eb5b7b9d | 295 | CFI_OFFSET ebx, PT_EBX-PT_OLDESP |
f0d96110 | 296 | .endm |
1da177e4 LT |
297 | |
298 | ENTRY(ret_from_fork) | |
fe7cacc1 | 299 | CFI_STARTPROC |
df5d1874 | 300 | pushl_cfi %eax |
1da177e4 LT |
301 | call schedule_tail |
302 | GET_THREAD_INFO(%ebp) | |
df5d1874 JB |
303 | popl_cfi %eax |
304 | pushl_cfi $0x0202 # Reset kernel eflags | |
305 | popfl_cfi | |
1da177e4 | 306 | jmp syscall_exit |
fe7cacc1 | 307 | CFI_ENDPROC |
47a55cd7 | 308 | END(ret_from_fork) |
1da177e4 | 309 | |
a00e817f MH |
310 | /* |
311 | * Interrupt exit functions should be protected against kprobes | |
312 | */ | |
313 | .pushsection .kprobes.text, "ax" | |
1da177e4 LT |
314 | /* |
315 | * Return to user mode is not as complex as all this looks, | |
316 | * but we want the default path for a system call return to | |
317 | * go as quickly as possible, which is why some of this is | |
318 | * less clear than it otherwise should be. | |
319 | */ | |
320 | ||
321 | # userspace resumption stub bypassing syscall exit tracing | |
322 | ALIGN | |
fe7cacc1 | 323 | RING0_PTREGS_FRAME |
1da177e4 | 324 | ret_from_exception: |
139ec7c4 | 325 | preempt_stop(CLBR_ANY) |
1da177e4 LT |
326 | ret_from_intr: |
327 | GET_THREAD_INFO(%ebp) | |
4031ff38 | 328 | check_userspace: |
eb5b7b9d JF |
329 | movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS |
330 | movb PT_CS(%esp), %al | |
ab68ed98 | 331 | andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax |
78be3706 RR |
332 | cmpl $USER_RPL, %eax |
333 | jb resume_kernel # not returning to v8086 or userspace | |
f95d47ca | 334 | |
1da177e4 | 335 | ENTRY(resume_userspace) |
c7e872e7 | 336 | LOCKDEP_SYS_EXIT |
139ec7c4 | 337 | DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt |
1da177e4 LT |
338 | # setting need_resched or sigpending |
339 | # between sampling and the iret | |
e32e58a9 | 340 | TRACE_IRQS_OFF |
1da177e4 LT |
341 | movl TI_flags(%ebp), %ecx |
342 | andl $_TIF_WORK_MASK, %ecx # is there any work to be done on | |
343 | # int/exception return? | |
344 | jne work_pending | |
345 | jmp restore_all | |
47a55cd7 | 346 | END(ret_from_exception) |
1da177e4 LT |
347 | |
348 | #ifdef CONFIG_PREEMPT | |
349 | ENTRY(resume_kernel) | |
139ec7c4 | 350 | DISABLE_INTERRUPTS(CLBR_ANY) |
1da177e4 | 351 | cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ? |
2e04bc76 | 352 | jnz restore_all |
1da177e4 LT |
353 | need_resched: |
354 | movl TI_flags(%ebp), %ecx # need_resched set ? | |
355 | testb $_TIF_NEED_RESCHED, %cl | |
356 | jz restore_all | |
ab68ed98 | 357 | testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ? |
1da177e4 LT |
358 | jz restore_all |
359 | call preempt_schedule_irq | |
360 | jmp need_resched | |
47a55cd7 | 361 | END(resume_kernel) |
1da177e4 | 362 | #endif |
fe7cacc1 | 363 | CFI_ENDPROC |
a00e817f MH |
364 | /* |
365 | * End of kprobes section | |
366 | */ | |
367 | .popsection | |
1da177e4 LT |
368 | |
369 | /* SYSENTER_RETURN points to after the "sysenter" instruction in | |
370 | the vsyscall page. See vsyscall-sysenter.S, which defines the symbol. */ | |
371 | ||
372 | # sysenter call handler stub | |
0aa97fb2 | 373 | ENTRY(ia32_sysenter_target) |
fe7cacc1 | 374 | CFI_STARTPROC simple |
adf14236 | 375 | CFI_SIGNAL_FRAME |
fe7cacc1 JB |
376 | CFI_DEF_CFA esp, 0 |
377 | CFI_REGISTER esp, ebp | |
faca6227 | 378 | movl TSS_sysenter_sp0(%esp),%esp |
1da177e4 | 379 | sysenter_past_esp: |
55f327fa | 380 | /* |
d93c870b JF |
381 | * Interrupts are disabled here, but we can't trace it until |
382 | * enough kernel state to call TRACE_IRQS_OFF can be called - but | |
383 | * we immediately enable interrupts at that point anyway. | |
55f327fa | 384 | */ |
3234282f | 385 | pushl_cfi $__USER_DS |
fe7cacc1 | 386 | /*CFI_REL_OFFSET ss, 0*/ |
df5d1874 | 387 | pushl_cfi %ebp |
fe7cacc1 | 388 | CFI_REL_OFFSET esp, 0 |
df5d1874 | 389 | pushfl_cfi |
d93c870b | 390 | orl $X86_EFLAGS_IF, (%esp) |
3234282f | 391 | pushl_cfi $__USER_CS |
fe7cacc1 | 392 | /*CFI_REL_OFFSET cs, 0*/ |
e6e5494c IM |
393 | /* |
394 | * Push current_thread_info()->sysenter_return to the stack. | |
395 | * A tiny bit of offset fixup is necessary - 4*4 means the 4 words | |
396 | * pushed above; +8 corresponds to copy_thread's esp0 setting. | |
397 | */ | |
96e612ff | 398 | pushl_cfi ((TI_sysenter_return)-THREAD_SIZE_asm+8+4*4)(%esp) |
fe7cacc1 | 399 | CFI_REL_OFFSET eip, 0 |
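As the comment above notes, 4*4 covers the four words just pushed and +8 comes from copy_thread's esp0 setting; putting the pieces together: with esp0 left 8 bytes below the top of the kernel stack, the stack top here is %esp + 4*4 + 8, and thread_info lives THREAD_SIZE below that top, so &current_thread_info()->sysenter_return = %esp + 4*4 + 8 - THREAD_SIZE_asm + TI_sysenter_return, which is exactly the displacement used by the pushl_cfi above.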
1da177e4 | 400 | |
df5d1874 | 401 | pushl_cfi %eax |
d93c870b JF |
402 | SAVE_ALL |
403 | ENABLE_INTERRUPTS(CLBR_NONE) | |
404 | ||
1da177e4 LT |
405 | /* |
406 | * Load the potential sixth argument from user stack. | |
407 | * Careful about security. | |
408 | */ | |
409 | cmpl $__PAGE_OFFSET-3,%ebp | |
410 | jae syscall_fault | |
411 | 1: movl (%ebp),%ebp | |
d93c870b | 412 | movl %ebp,PT_EBP(%esp) |
1da177e4 LT |
413 | .section __ex_table,"a" |
414 | .align 4 | |
415 | .long 1b,syscall_fault | |
416 | .previous | |
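The `cmpl $__PAGE_OFFSET-3,%ebp` / `jae syscall_fault` bound above is easy to misread; a small stand-alone sketch of the check it performs, as an illustration, assuming the default 3G/1G split where __PAGE_OFFSET is 0xC0000000:

```c
#include <stdbool.h>
#include <stdint.h>

#define PAGE_OFFSET 0xC0000000u	/* assumption: default CONFIG_PAGE_OFFSET */

/*
 * The sixth argument is a 4-byte load from the user-supplied %ebp, so the
 * last byte touched is ebp+3; requiring ebp < PAGE_OFFSET-3 keeps the whole
 * access below the start of kernel addresses.
 */
static bool sixth_arg_load_allowed(uint32_t ebp)
{
	return ebp < PAGE_OFFSET - 3;	/* jae syscall_fault fires when ebp >= PAGE_OFFSET-3 */
}
```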
417 | ||
1da177e4 LT |
418 | GET_THREAD_INFO(%ebp) |
419 | ||
88200bc2 | 420 | testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp) |
af0575bb RM |
421 | jnz sysenter_audit |
422 | sysenter_do_call: | |
1da177e4 LT |
423 | cmpl $(nr_syscalls), %eax |
424 | jae syscall_badsys | |
425 | call *sys_call_table(,%eax,4) | |
eb5b7b9d | 426 | movl %eax,PT_EAX(%esp) |
c7e872e7 | 427 | LOCKDEP_SYS_EXIT |
42c24fa2 | 428 | DISABLE_INTERRUPTS(CLBR_ANY) |
55f327fa | 429 | TRACE_IRQS_OFF |
1da177e4 | 430 | movl TI_flags(%ebp), %ecx |
88200bc2 | 431 | testl $_TIF_ALLWORK_MASK, %ecx |
af0575bb RM |
432 | jne sysexit_audit |
433 | sysenter_exit: | |
1da177e4 | 434 | /* if something modifies registers it must also disable sysexit */ |
eb5b7b9d JF |
435 | movl PT_EIP(%esp), %edx |
436 | movl PT_OLDESP(%esp), %ecx | |
1da177e4 | 437 | xorl %ebp,%ebp |
55f327fa | 438 | TRACE_IRQS_ON |
464d1a78 | 439 | 1: mov PT_FS(%esp), %fs |
ccbeed3a | 440 | PTGS_TO_GS |
d75cd22f | 441 | ENABLE_INTERRUPTS_SYSEXIT |
af0575bb RM |
442 | |
443 | #ifdef CONFIG_AUDITSYSCALL | |
444 | sysenter_audit: | |
88200bc2 | 445 | testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp) |
af0575bb RM |
446 | jnz syscall_trace_entry |
447 | addl $4,%esp | |
448 | CFI_ADJUST_CFA_OFFSET -4 | |
449 | /* %esi already in 8(%esp) 6th arg: 4th syscall arg */ | |
450 | /* %edx already in 4(%esp) 5th arg: 3rd syscall arg */ | |
451 | /* %ecx already in 0(%esp) 4th arg: 2nd syscall arg */ | |
452 | movl %ebx,%ecx /* 3rd arg: 1st syscall arg */ | |
453 | movl %eax,%edx /* 2nd arg: syscall number */ | |
454 | movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */ | |
455 | call audit_syscall_entry | |
df5d1874 | 456 | pushl_cfi %ebx |
af0575bb RM |
457 | movl PT_EAX(%esp),%eax /* reload syscall number */ |
458 | jmp sysenter_do_call | |
459 | ||
460 | sysexit_audit: | |
88200bc2 | 461 | testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx |
af0575bb RM |
462 | jne syscall_exit_work |
463 | TRACE_IRQS_ON | |
464 | ENABLE_INTERRUPTS(CLBR_ANY) | |
465 | movl %eax,%edx /* second arg, syscall return value */ | |
466 | cmpl $0,%eax /* is it < 0? */ | |
467 | setl %al /* 1 if so, 0 if not */ | |
468 | movzbl %al,%eax /* zero-extend that */ | |
469 | inc %eax /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */ | |
470 | call audit_syscall_exit | |
471 | DISABLE_INTERRUPTS(CLBR_ANY) | |
472 | TRACE_IRQS_OFF | |
473 | movl TI_flags(%ebp), %ecx | |
88200bc2 | 474 | testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx |
af0575bb RM |
475 | jne syscall_exit_work |
476 | movl PT_EAX(%esp),%eax /* reload syscall return value */ | |
477 | jmp sysenter_exit | |
478 | #endif | |
479 | ||
fe7cacc1 | 480 | CFI_ENDPROC |
f95d47ca | 481 | .pushsection .fixup,"ax" |
464d1a78 | 482 | 2: movl $0,PT_FS(%esp) |
f95d47ca JF |
483 | jmp 1b |
484 | .section __ex_table,"a" | |
485 | .align 4 | |
486 | .long 1b,2b | |
487 | .popsection | |
ccbeed3a | 488 | PTGS_TO_GS_EX |
0aa97fb2 | 489 | ENDPROC(ia32_sysenter_target) |
1da177e4 | 490 | |
a00e817f MH |
491 | /* |
492 | * syscall stub including irq exit should be protected against kprobes | |
493 | */ | |
494 | .pushsection .kprobes.text, "ax" | |
1da177e4 LT |
495 | # system call handler stub |
496 | ENTRY(system_call) | |
fe7cacc1 | 497 | RING0_INT_FRAME # can't unwind into user space anyway |
df5d1874 | 498 | pushl_cfi %eax # save orig_eax |
1da177e4 LT |
499 | SAVE_ALL |
500 | GET_THREAD_INFO(%ebp) | |
ed75e8d5 | 501 | # system call tracing in operation / emulation |
88200bc2 | 502 | testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp) |
1da177e4 LT |
503 | jnz syscall_trace_entry |
504 | cmpl $(nr_syscalls), %eax | |
505 | jae syscall_badsys | |
506 | syscall_call: | |
507 | call *sys_call_table(,%eax,4) | |
eb5b7b9d | 508 | movl %eax,PT_EAX(%esp) # store the return value |
1da177e4 | 509 | syscall_exit: |
c7e872e7 | 510 | LOCKDEP_SYS_EXIT |
139ec7c4 | 511 | DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt |
1da177e4 LT |
512 | # setting need_resched or sigpending |
513 | # between sampling and the iret | |
55f327fa | 514 | TRACE_IRQS_OFF |
1da177e4 | 515 | movl TI_flags(%ebp), %ecx |
88200bc2 | 516 | testl $_TIF_ALLWORK_MASK, %ecx # current->work |
1da177e4 LT |
517 | jne syscall_exit_work |
518 | ||
519 | restore_all: | |
2e04bc76 AH |
520 | TRACE_IRQS_IRET |
521 | restore_all_notrace: | |
eb5b7b9d JF |
522 | movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS |
523 | # Warning: PT_OLDSS(%esp) contains the wrong/random values if we | |
5df24082 SS |
524 | # are returning to the kernel. |
525 | # See comments in process.c:copy_thread() for details. | |
eb5b7b9d JF |
526 | movb PT_OLDSS(%esp), %ah |
527 | movb PT_CS(%esp), %al | |
ab68ed98 | 528 | andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax |
78be3706 | 529 | cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax |
fe7cacc1 | 530 | CFI_REMEMBER_STATE |
1da177e4 LT |
531 | je ldt_ss # returning to user-space with LDT SS |
532 | restore_nocheck: | |
ccbeed3a | 533 | RESTORE_REGS 4 # skip orig_eax/error_code |
f7f3d791 | 534 | irq_return: |
3701d863 | 535 | INTERRUPT_RETURN |
1da177e4 | 536 | .section .fixup,"ax" |
90e9f536 | 537 | ENTRY(iret_exc) |
a879cbbb LT |
538 | pushl $0 # no error code |
539 | pushl $do_iret_error | |
540 | jmp error_code | |
1da177e4 LT |
541 | .previous |
542 | .section __ex_table,"a" | |
543 | .align 4 | |
3701d863 | 544 | .long irq_return,iret_exc |
1da177e4 LT |
545 | .previous |
546 | ||
fe7cacc1 | 547 | CFI_RESTORE_STATE |
1da177e4 | 548 | ldt_ss: |
eb5b7b9d | 549 | larl PT_OLDSS(%esp), %eax |
1da177e4 LT |
550 | jnz restore_nocheck |
551 | testl $0x00400000, %eax # returning to 32bit stack? | |
552 | jnz restore_nocheck # all right, normal return | |
d3561b7f RR |
553 | |
554 | #ifdef CONFIG_PARAVIRT | |
555 | /* | |
556 | * The kernel can't run on a non-flat stack if paravirt mode | |
557 | * is active. Rather than try to fixup the high bits of | |
558 | * ESP, bypass this code entirely. This may break DOSemu | |
559 | * and/or Wine support in a paravirt VM, although the option | |
560 | * is still available to implement the setting of the high | |
561 | * 16-bits in the INTERRUPT_RETURN paravirt-op. | |
562 | */ | |
93b1eab3 | 563 | cmpl $0, pv_info+PARAVIRT_enabled |
d3561b7f RR |
564 | jne restore_nocheck |
565 | #endif | |
566 | ||
dc4c2a0a AH |
567 | /* |
568 | * Setup and switch to ESPFIX stack | |
569 | * | |
570 | * We're returning to userspace with a 16 bit stack. The CPU will not | |
571 | * restore the high word of ESP for us on executing iret... This is an | |
572 | * "official" bug of all the x86-compatible CPUs, which we can work | |
573 | * around to make dosemu and wine happy. We do this by preloading the | |
574 | * high word of ESP with the high word of the userspace ESP while | |
575 | * compensating for the offset by changing to the ESPFIX segment with | |
576 | * a base address that matches for the difference. | |
577 | */ | |
72c511dd | 578 | #define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8) |
dc4c2a0a AH |
579 | mov %esp, %edx /* load kernel esp */ |
580 | mov PT_OLDESP(%esp), %eax /* load userspace esp */ | |
581 | mov %dx, %ax /* eax: new kernel esp */ | |
582 | sub %eax, %edx /* offset (low word is 0) */ | |
dc4c2a0a | 583 | shr $16, %edx |
72c511dd BG |
584 | mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */ |
585 | mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */ | |
df5d1874 JB |
586 | pushl_cfi $__ESPFIX_SS |
587 | pushl_cfi %eax /* new kernel esp */ | |
2e04bc76 AH |
588 | /* Disable interrupts, but do not irqtrace this section: we |
589 | * will soon execute iret and the tracer was already set to | |
590 | * the irqstate after the iret */ | |
139ec7c4 | 591 | DISABLE_INTERRUPTS(CLBR_EAX) |
dc4c2a0a | 592 | lss (%esp), %esp /* switch to espfix segment */ |
be44d2aa SS |
593 | CFI_ADJUST_CFA_OFFSET -8 |
594 | jmp restore_nocheck | |
fe7cacc1 | 595 | CFI_ENDPROC |
47a55cd7 | 596 | ENDPROC(system_call) |
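A stand-alone arithmetic sketch of the invariant the ldt_ss path above establishes before the `lss`; illustration only, the two starting values are made-up examples, not anything the kernel uses:

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t kernel_esp = 0xc1234f80;	/* example: %esp at ldt_ss */
	uint32_t user_esp   = 0x0005ffe0;	/* example: PT_OLDESP(%esp) */

	/* "mov %dx,%ax": keep the user's high word, take the kernel's low word */
	uint32_t new_esp = (user_esp & 0xffff0000u) | (kernel_esp & 0x0000ffffu);

	/* segment base patched into the ESPFIX GDT slot (its low word is zero) */
	uint32_t base = kernel_esp - new_esp;

	/* after "lss (%esp),%esp" the linear address still hits the old stack */
	assert(base + new_esp == kernel_esp);

	/* and ESP's high word already matches userspace, so the 16-bit-SS iret
	 * (which leaves the high word alone) yields the correct user ESP */
	assert((new_esp >> 16) == (user_esp >> 16));
	return 0;
}
```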
1da177e4 LT |
597 | |
598 | # perform work that needs to be done immediately before resumption | |
599 | ALIGN | |
fe7cacc1 | 600 | RING0_PTREGS_FRAME # can't unwind into user space anyway |
1da177e4 LT |
601 | work_pending: |
602 | testb $_TIF_NEED_RESCHED, %cl | |
603 | jz work_notifysig | |
604 | work_resched: | |
605 | call schedule | |
c7e872e7 | 606 | LOCKDEP_SYS_EXIT |
139ec7c4 | 607 | DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt |
1da177e4 LT |
608 | # setting need_resched or sigpending |
609 | # between sampling and the iret | |
55f327fa | 610 | TRACE_IRQS_OFF |
1da177e4 LT |
611 | movl TI_flags(%ebp), %ecx |
612 | andl $_TIF_WORK_MASK, %ecx # is there any work to be done other | |
613 | # than syscall tracing? | |
614 | jz restore_all | |
615 | testb $_TIF_NEED_RESCHED, %cl | |
616 | jnz work_resched | |
617 | ||
618 | work_notifysig: # deal with pending signals and | |
619 | # notify-resume requests | |
74b47a78 | 620 | #ifdef CONFIG_VM86 |
ab68ed98 | 621 | testl $X86_EFLAGS_VM, PT_EFLAGS(%esp) |
1da177e4 LT |
622 | movl %esp, %eax |
623 | jne work_notifysig_v86 # returning to kernel-space or | |
624 | # vm86-space | |
625 | xorl %edx, %edx | |
626 | call do_notify_resume | |
4031ff38 | 627 | jmp resume_userspace_sig |
1da177e4 LT |
628 | |
629 | ALIGN | |
630 | work_notifysig_v86: | |
df5d1874 | 631 | pushl_cfi %ecx # save ti_flags for do_notify_resume |
1da177e4 | 632 | call save_v86_state # %eax contains pt_regs pointer |
df5d1874 | 633 | popl_cfi %ecx |
1da177e4 | 634 | movl %eax, %esp |
74b47a78 JK |
635 | #else |
636 | movl %esp, %eax | |
637 | #endif | |
1da177e4 LT |
638 | xorl %edx, %edx |
639 | call do_notify_resume | |
4031ff38 | 640 | jmp resume_userspace_sig |
47a55cd7 | 641 | END(work_pending) |
1da177e4 LT |
642 | |
643 | # perform syscall exit tracing | |
644 | ALIGN | |
645 | syscall_trace_entry: | |
eb5b7b9d | 646 | movl $-ENOSYS,PT_EAX(%esp) |
1da177e4 | 647 | movl %esp, %eax |
d4d67150 RM |
648 | call syscall_trace_enter |
649 | /* What it returned is what we'll actually use. */ | |
1da177e4 LT |
650 | cmpl $(nr_syscalls), %eax |
651 | jnae syscall_call | |
652 | jmp syscall_exit | |
47a55cd7 | 653 | END(syscall_trace_entry) |
1da177e4 LT |
654 | |
655 | # perform syscall exit tracing | |
656 | ALIGN | |
657 | syscall_exit_work: | |
88200bc2 | 658 | testl $_TIF_WORK_SYSCALL_EXIT, %ecx |
1da177e4 | 659 | jz work_pending |
55f327fa | 660 | TRACE_IRQS_ON |
d4d67150 | 661 | ENABLE_INTERRUPTS(CLBR_ANY) # could let syscall_trace_leave() call |
1da177e4 LT |
662 | # schedule() instead |
663 | movl %esp, %eax | |
d4d67150 | 664 | call syscall_trace_leave |
1da177e4 | 665 | jmp resume_userspace |
47a55cd7 | 666 | END(syscall_exit_work) |
fe7cacc1 | 667 | CFI_ENDPROC |
1da177e4 | 668 | |
fe7cacc1 | 669 | RING0_INT_FRAME # can't unwind into user space anyway |
1da177e4 | 670 | syscall_fault: |
1da177e4 | 671 | GET_THREAD_INFO(%ebp) |
eb5b7b9d | 672 | movl $-EFAULT,PT_EAX(%esp) |
1da177e4 | 673 | jmp resume_userspace |
47a55cd7 | 674 | END(syscall_fault) |
1da177e4 | 675 | |
1da177e4 | 676 | syscall_badsys: |
eb5b7b9d | 677 | movl $-ENOSYS,PT_EAX(%esp) |
1da177e4 | 678 | jmp resume_userspace |
47a55cd7 | 679 | END(syscall_badsys) |
fe7cacc1 | 680 | CFI_ENDPROC |
a00e817f MH |
681 | /* |
682 | * End of kprobes section | |
683 | */ | |
684 | .popsection | |
1da177e4 | 685 | |
253f29a4 BG |
686 | /* |
687 | * System calls that need a pt_regs pointer. | |
688 | */ | |
e258e4e0 | 689 | #define PTREGSCALL0(name) \ |
253f29a4 BG |
690 | ALIGN; \ |
691 | ptregs_##name: \ | |
692 | leal 4(%esp),%eax; \ | |
693 | jmp sys_##name; | |
694 | ||
e258e4e0 BG |
695 | #define PTREGSCALL1(name) \ |
696 | ALIGN; \ | |
697 | ptregs_##name: \ | |
698 | leal 4(%esp),%edx; \ | |
ce9119ad | 699 | movl (PT_EBX+4)(%esp),%eax; \ |
e258e4e0 BG |
700 | jmp sys_##name; |
701 | ||
702 | #define PTREGSCALL2(name) \ | |
703 | ALIGN; \ | |
704 | ptregs_##name: \ | |
705 | leal 4(%esp),%ecx; \ | |
ce9119ad PA |
706 | movl (PT_ECX+4)(%esp),%edx; \ |
707 | movl (PT_EBX+4)(%esp),%eax; \ | |
e258e4e0 BG |
708 | jmp sys_##name; |
709 | ||
710 | #define PTREGSCALL3(name) \ | |
711 | ALIGN; \ | |
712 | ptregs_##name: \ | |
a34107b5 | 713 | CFI_STARTPROC; \ |
e258e4e0 | 714 | leal 4(%esp),%eax; \ |
a34107b5 | 715 | pushl_cfi %eax; \ |
e258e4e0 BG |
716 | movl PT_EDX(%eax),%ecx; \ |
717 | movl PT_ECX(%eax),%edx; \ | |
718 | movl PT_EBX(%eax),%eax; \ | |
719 | call sys_##name; \ | |
720 | addl $4,%esp; \ | |
a34107b5 JB |
721 | CFI_ADJUST_CFA_OFFSET -4; \ |
722 | ret; \ | |
723 | CFI_ENDPROC; \ | |
724 | ENDPROC(ptregs_##name) | |
e258e4e0 | 725 | |
27f59559 | 726 | PTREGSCALL1(iopl) |
e258e4e0 | 727 | PTREGSCALL0(fork) |
e258e4e0 | 728 | PTREGSCALL0(vfork) |
11cf88bd | 729 | PTREGSCALL3(execve) |
052acad4 | 730 | PTREGSCALL2(sigaltstack) |
e258e4e0 BG |
731 | PTREGSCALL0(sigreturn) |
732 | PTREGSCALL0(rt_sigreturn) | |
f1382f15 BG |
733 | PTREGSCALL2(vm86) |
734 | PTREGSCALL1(vm86old) | |
253f29a4 | 735 | |
f839bbc5 BG |
736 | /* Clone is an oddball. The 4th arg is in %edi */ |
737 | ALIGN; | |
738 | ptregs_clone: | |
a34107b5 | 739 | CFI_STARTPROC |
f839bbc5 | 740 | leal 4(%esp),%eax |
a34107b5 JB |
741 | pushl_cfi %eax |
742 | pushl_cfi PT_EDI(%eax) | |
f839bbc5 BG |
743 | movl PT_EDX(%eax),%ecx |
744 | movl PT_ECX(%eax),%edx | |
745 | movl PT_EBX(%eax),%eax | |
746 | call sys_clone | |
747 | addl $8,%esp | |
a34107b5 | 748 | CFI_ADJUST_CFA_OFFSET -8 |
f839bbc5 | 749 | ret |
a34107b5 JB |
750 | CFI_ENDPROC |
751 | ENDPROC(ptregs_clone) | |
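A hedged C-level view of the PTREGSCALLn pattern above, assuming the 32-bit kernel's regparm(3) convention (integer arguments 1-3 travel in %eax, %edx, %ecx): each stub reloads the user's syscall arguments from the saved frame and appends a pointer to that frame as one extra argument; the `4(%esp)` merely skips the return address pushed by the `call *sys_call_table(,%eax,4)` dispatch, so the result points at the saved pt_regs. The names below (sys_example, ptregs_example_in_c) are illustrative, not real kernel symbols:

```c
struct pt_regs;		/* the saved frame laid out near the top of this file */

/* What a PTREGSCALL2-style target roughly looks like on the C side. */
long sys_example(unsigned long arg1, unsigned long arg2, struct pt_regs *regs);

/*
 * What the assembly stub effectively does before tail-jumping to sys_example:
 *   %eax <- saved %ebx  (user's 1st syscall argument)
 *   %edx <- saved %ecx  (user's 2nd syscall argument)
 *   %ecx <- &pt_regs    (the appended frame pointer)
 */
static long ptregs_example_in_c(struct pt_regs *regs,
				unsigned long user_bx, unsigned long user_cx)
{
	return sys_example(user_bx, user_cx, regs);
}
```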
f839bbc5 | 752 | |
f0d96110 | 753 | .macro FIXUP_ESPFIX_STACK |
dc4c2a0a AH |
754 | /* |
755 | * Switch back from the ESPFIX stack to the normal zero-based stack | |
756 | * | |
757 | * We can't call C functions using the ESPFIX stack. This code reads | |
758 | * the high word of the segment base from the GDT and switches to the | |
759 | * normal stack and adjusts ESP with the matching offset. | |
760 | */ | |
761 | /* fixup the stack */ | |
72c511dd BG |
762 | mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */ |
763 | mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */ | |
dc4c2a0a AH |
764 | shl $16, %eax |
765 | addl %esp, %eax /* the adjusted stack pointer */ | |
df5d1874 JB |
766 | pushl_cfi $__KERNEL_DS |
767 | pushl_cfi %eax | |
dc4c2a0a | 768 | lss (%esp), %esp /* switch to the normal stack segment */ |
f0d96110 TH |
769 | CFI_ADJUST_CFA_OFFSET -8 |
770 | .endm | |
771 | .macro UNWIND_ESPFIX_STACK | |
772 | movl %ss, %eax | |
773 | /* see if on espfix stack */ | |
774 | cmpw $__ESPFIX_SS, %ax | |
775 | jne 27f | |
776 | movl $__KERNEL_DS, %eax | |
777 | movl %eax, %ds | |
778 | movl %eax, %es | |
779 | /* switch to normal stack */ | |
780 | FIXUP_ESPFIX_STACK | |
781 | 27: | |
782 | .endm | |
1da177e4 LT |
783 | |
784 | /* | |
b7c6244f PA |
785 | * Build the entry stubs and pointer table with some assembler magic. |
786 | * We pack 7 stubs into a single 32-byte chunk, which will fit in a | |
787 | * single cache line on all modern x86 implementations. | |
1da177e4 | 788 | */ |
4687518c | 789 | .section .init.rodata,"a" |
1da177e4 LT |
790 | ENTRY(interrupt) |
791 | .text | |
b7c6244f PA |
792 | .p2align 5 |
793 | .p2align CONFIG_X86_L1_CACHE_SHIFT | |
1da177e4 | 794 | ENTRY(irq_entries_start) |
fe7cacc1 | 795 | RING0_INT_FRAME |
4687518c | 796 | vector=FIRST_EXTERNAL_VECTOR |
b7c6244f PA |
797 | .rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7 |
798 | .balign 32 | |
799 | .rept 7 | |
800 | .if vector < NR_VECTORS | |
8665596e | 801 | .if vector <> FIRST_EXTERNAL_VECTOR |
fe7cacc1 | 802 | CFI_ADJUST_CFA_OFFSET -4 |
b7c6244f | 803 | .endif |
df5d1874 | 804 | 1: pushl_cfi $(~vector+0x80) /* Note: always in signed byte range */ |
8665596e | 805 | .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6 |
b7c6244f PA |
806 | jmp 2f |
807 | .endif | |
808 | .previous | |
1da177e4 | 809 | .long 1b |
b7c6244f | 810 | .text |
1da177e4 | 811 | vector=vector+1 |
b7c6244f PA |
812 | .endif |
813 | .endr | |
814 | 2: jmp common_interrupt | |
1da177e4 | 815 | .endr |
47a55cd7 JB |
816 | END(irq_entries_start) |
817 | ||
818 | .previous | |
819 | END(interrupt) | |
820 | .previous | |
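A rough size check of the 32-byte packing described above, assuming the usual encodings (a `pushl` of a signed 8-bit immediate is 2 bytes, a short `jmp 2f` is 2 bytes, and the shared near `jmp common_interrupt` is 5 bytes): six push+short-jmp stubs take 6 x 4 = 24 bytes, the seventh stub's push adds 2, and the shared near jump adds 5, for 31 bytes total, which is why exactly 7 stubs fit a 32-byte chunk; the `~vector+0x80` trick keeps every pushed value in signed 8-bit range so each push really is 2 bytes.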
1da177e4 | 821 | |
55f327fa IM |
822 | /* |
823 | * the CPU automatically disables interrupts when executing an IRQ vector, | |
824 | * so IRQ-flags tracing has to follow that: | |
825 | */ | |
b7c6244f | 826 | .p2align CONFIG_X86_L1_CACHE_SHIFT |
1da177e4 | 827 | common_interrupt: |
b7c6244f | 828 | addl $-0x80,(%esp) /* Adjust vector into the [-256,-1] range */ |
1da177e4 | 829 | SAVE_ALL |
55f327fa | 830 | TRACE_IRQS_OFF |
1da177e4 LT |
831 | movl %esp,%eax |
832 | call do_IRQ | |
833 | jmp ret_from_intr | |
47a55cd7 | 834 | ENDPROC(common_interrupt) |
fe7cacc1 | 835 | CFI_ENDPROC |
1da177e4 | 836 | |
a00e817f MH |
837 | /* |
838 | * Irq entries should be protected against kprobes | |
839 | */ | |
840 | .pushsection .kprobes.text, "ax" | |
02cf94c3 | 841 | #define BUILD_INTERRUPT3(name, nr, fn) \ |
1da177e4 | 842 | ENTRY(name) \ |
fe7cacc1 | 843 | RING0_INT_FRAME; \ |
df5d1874 | 844 | pushl_cfi $~(nr); \ |
fe7cacc1 | 845 | SAVE_ALL; \ |
55f327fa | 846 | TRACE_IRQS_OFF \ |
1da177e4 | 847 | movl %esp,%eax; \ |
02cf94c3 | 848 | call fn; \ |
55f327fa | 849 | jmp ret_from_intr; \ |
47a55cd7 JB |
850 | CFI_ENDPROC; \ |
851 | ENDPROC(name) | |
1da177e4 | 852 | |
02cf94c3 TH |
853 | #define BUILD_INTERRUPT(name, nr) BUILD_INTERRUPT3(name, nr, smp_##name) |
854 | ||
1da177e4 | 855 | /* The include is where all of the SMP etc. interrupts come from */ |
1164dd00 | 856 | #include <asm/entry_arch.h> |
1da177e4 | 857 | |
1da177e4 | 858 | ENTRY(coprocessor_error) |
fe7cacc1 | 859 | RING0_INT_FRAME |
df5d1874 JB |
860 | pushl_cfi $0 |
861 | pushl_cfi $do_coprocessor_error | |
1da177e4 | 862 | jmp error_code |
fe7cacc1 | 863 | CFI_ENDPROC |
47a55cd7 | 864 | END(coprocessor_error) |
1da177e4 LT |
865 | |
866 | ENTRY(simd_coprocessor_error) | |
fe7cacc1 | 867 | RING0_INT_FRAME |
df5d1874 | 868 | pushl_cfi $0 |
40d2e763 BG |
869 | #ifdef CONFIG_X86_INVD_BUG |
870 | /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */ | |
df5d1874 | 871 | 661: pushl_cfi $do_general_protection |
40d2e763 BG |
872 | 662: |
873 | .section .altinstructions,"a" | |
874 | .balign 4 | |
875 | .long 661b | |
876 | .long 663f | |
83a7a2ad | 877 | .word X86_FEATURE_XMM |
40d2e763 BG |
878 | .byte 662b-661b |
879 | .byte 664f-663f | |
880 | .previous | |
881 | .section .altinstr_replacement,"ax" | |
882 | 663: pushl $do_simd_coprocessor_error | |
883 | 664: | |
884 | .previous | |
885 | #else | |
df5d1874 | 886 | pushl_cfi $do_simd_coprocessor_error |
40d2e763 | 887 | #endif |
1da177e4 | 888 | jmp error_code |
fe7cacc1 | 889 | CFI_ENDPROC |
47a55cd7 | 890 | END(simd_coprocessor_error) |
1da177e4 LT |
891 | |
892 | ENTRY(device_not_available) | |
fe7cacc1 | 893 | RING0_INT_FRAME |
df5d1874 JB |
894 | pushl_cfi $-1 # mark this as an int |
895 | pushl_cfi $do_device_not_available | |
7643e9b9 | 896 | jmp error_code |
fe7cacc1 | 897 | CFI_ENDPROC |
47a55cd7 | 898 | END(device_not_available) |
1da177e4 | 899 | |
d3561b7f RR |
900 | #ifdef CONFIG_PARAVIRT |
901 | ENTRY(native_iret) | |
3701d863 | 902 | iret |
d3561b7f RR |
903 | .section __ex_table,"a" |
904 | .align 4 | |
3701d863 | 905 | .long native_iret, iret_exc |
d3561b7f | 906 | .previous |
47a55cd7 | 907 | END(native_iret) |
d3561b7f | 908 | |
d75cd22f | 909 | ENTRY(native_irq_enable_sysexit) |
d3561b7f RR |
910 | sti |
911 | sysexit | |
d75cd22f | 912 | END(native_irq_enable_sysexit) |
d3561b7f RR |
913 | #endif |
914 | ||
1da177e4 | 915 | ENTRY(overflow) |
fe7cacc1 | 916 | RING0_INT_FRAME |
df5d1874 JB |
917 | pushl_cfi $0 |
918 | pushl_cfi $do_overflow | |
1da177e4 | 919 | jmp error_code |
fe7cacc1 | 920 | CFI_ENDPROC |
47a55cd7 | 921 | END(overflow) |
1da177e4 LT |
922 | |
923 | ENTRY(bounds) | |
fe7cacc1 | 924 | RING0_INT_FRAME |
df5d1874 JB |
925 | pushl_cfi $0 |
926 | pushl_cfi $do_bounds | |
1da177e4 | 927 | jmp error_code |
fe7cacc1 | 928 | CFI_ENDPROC |
47a55cd7 | 929 | END(bounds) |
1da177e4 LT |
930 | |
931 | ENTRY(invalid_op) | |
fe7cacc1 | 932 | RING0_INT_FRAME |
df5d1874 JB |
933 | pushl_cfi $0 |
934 | pushl_cfi $do_invalid_op | |
1da177e4 | 935 | jmp error_code |
fe7cacc1 | 936 | CFI_ENDPROC |
47a55cd7 | 937 | END(invalid_op) |
1da177e4 LT |
938 | |
939 | ENTRY(coprocessor_segment_overrun) | |
fe7cacc1 | 940 | RING0_INT_FRAME |
df5d1874 JB |
941 | pushl_cfi $0 |
942 | pushl_cfi $do_coprocessor_segment_overrun | |
1da177e4 | 943 | jmp error_code |
fe7cacc1 | 944 | CFI_ENDPROC |
47a55cd7 | 945 | END(coprocessor_segment_overrun) |
1da177e4 LT |
946 | |
947 | ENTRY(invalid_TSS) | |
fe7cacc1 | 948 | RING0_EC_FRAME |
df5d1874 | 949 | pushl_cfi $do_invalid_TSS |
1da177e4 | 950 | jmp error_code |
fe7cacc1 | 951 | CFI_ENDPROC |
47a55cd7 | 952 | END(invalid_TSS) |
1da177e4 LT |
953 | |
954 | ENTRY(segment_not_present) | |
fe7cacc1 | 955 | RING0_EC_FRAME |
df5d1874 | 956 | pushl_cfi $do_segment_not_present |
1da177e4 | 957 | jmp error_code |
fe7cacc1 | 958 | CFI_ENDPROC |
47a55cd7 | 959 | END(segment_not_present) |
1da177e4 LT |
960 | |
961 | ENTRY(stack_segment) | |
fe7cacc1 | 962 | RING0_EC_FRAME |
df5d1874 | 963 | pushl_cfi $do_stack_segment |
1da177e4 | 964 | jmp error_code |
fe7cacc1 | 965 | CFI_ENDPROC |
47a55cd7 | 966 | END(stack_segment) |
1da177e4 | 967 | |
1da177e4 | 968 | ENTRY(alignment_check) |
fe7cacc1 | 969 | RING0_EC_FRAME |
df5d1874 | 970 | pushl_cfi $do_alignment_check |
1da177e4 | 971 | jmp error_code |
fe7cacc1 | 972 | CFI_ENDPROC |
47a55cd7 | 973 | END(alignment_check) |
1da177e4 | 974 | |
d28c4393 P |
975 | ENTRY(divide_error) |
976 | RING0_INT_FRAME | |
df5d1874 JB |
977 | pushl_cfi $0 # no error code |
978 | pushl_cfi $do_divide_error | |
1da177e4 | 979 | jmp error_code |
fe7cacc1 | 980 | CFI_ENDPROC |
47a55cd7 | 981 | END(divide_error) |
1da177e4 LT |
982 | |
983 | #ifdef CONFIG_X86_MCE | |
984 | ENTRY(machine_check) | |
fe7cacc1 | 985 | RING0_INT_FRAME |
df5d1874 JB |
986 | pushl_cfi $0 |
987 | pushl_cfi machine_check_vector | |
1da177e4 | 988 | jmp error_code |
fe7cacc1 | 989 | CFI_ENDPROC |
47a55cd7 | 990 | END(machine_check) |
1da177e4 LT |
991 | #endif |
992 | ||
993 | ENTRY(spurious_interrupt_bug) | |
fe7cacc1 | 994 | RING0_INT_FRAME |
df5d1874 JB |
995 | pushl_cfi $0 |
996 | pushl_cfi $do_spurious_interrupt_bug | |
1da177e4 | 997 | jmp error_code |
fe7cacc1 | 998 | CFI_ENDPROC |
47a55cd7 | 999 | END(spurious_interrupt_bug) |
a00e817f MH |
1000 | /* |
1001 | * End of kprobes section | |
1002 | */ | |
1003 | .popsection | |
1da177e4 | 1004 | |
02ba1a32 AK |
1005 | ENTRY(kernel_thread_helper) |
1006 | pushl $0 # fake return address for unwinder | |
1007 | CFI_STARTPROC | |
e840227c BG |
1008 | movl %edi,%eax |
1009 | call *%esi | |
02ba1a32 | 1010 | call do_exit |
5f5db591 | 1011 | ud2 # padding for call trace |
02ba1a32 AK |
1012 | CFI_ENDPROC |
1013 | ENDPROC(kernel_thread_helper) | |
1014 | ||
5ead97c8 | 1015 | #ifdef CONFIG_XEN |
e2a81baf JF |
1016 | /* Xen doesn't set %esp to be precisely what the normal sysenter |
1017 | entrypoint expects, so fix it up before using the normal path. */ | |
1018 | ENTRY(xen_sysenter_target) | |
1019 | RING0_INT_FRAME | |
1020 | addl $5*4, %esp /* remove xen-provided frame */ | |
2ddf9b7b | 1021 | CFI_ADJUST_CFA_OFFSET -5*4 |
e2a81baf | 1022 | jmp sysenter_past_esp |
557d7d4e | 1023 | CFI_ENDPROC |
e2a81baf | 1024 | |
5ead97c8 JF |
1025 | ENTRY(xen_hypervisor_callback) |
1026 | CFI_STARTPROC | |
df5d1874 | 1027 | pushl_cfi $0 |
5ead97c8 JF |
1028 | SAVE_ALL |
1029 | TRACE_IRQS_OFF | |
9ec2b804 JF |
1030 | |
1031 | /* Check to see if we got the event in the critical | |
1032 | region in xen_iret_direct, after we've reenabled | |
1033 | events and checked for pending events. This simulates | |
1034 | iret instruction's behaviour where it delivers a | |
1035 | pending interrupt when enabling interrupts. */ | |
1036 | movl PT_EIP(%esp),%eax | |
1037 | cmpl $xen_iret_start_crit,%eax | |
1038 | jb 1f | |
1039 | cmpl $xen_iret_end_crit,%eax | |
1040 | jae 1f | |
1041 | ||
0f2c8769 | 1042 | jmp xen_iret_crit_fixup |
e2a81baf | 1043 | |
e2a81baf | 1044 | ENTRY(xen_do_upcall) |
b77797fb | 1045 | 1: mov %esp, %eax |
5ead97c8 JF |
1046 | call xen_evtchn_do_upcall |
1047 | jmp ret_from_intr | |
1048 | CFI_ENDPROC | |
1049 | ENDPROC(xen_hypervisor_callback) | |
1050 | ||
1051 | # Hypervisor uses this for application faults while it executes. | |
1052 | # We get here for two reasons: | |
1053 | # 1. Fault while reloading DS, ES, FS or GS | |
1054 | # 2. Fault while executing IRET | |
1055 | # Category 1 we fix up by reattempting the load, and zeroing the segment | |
1056 | # register if the load fails. | |
1057 | # Category 2 we fix up by jumping to do_iret_error. We cannot use the | |
1058 | # normal Linux return path in this case because if we use the IRET hypercall | |
1059 | # to pop the stack frame we end up in an infinite loop of failsafe callbacks. | |
1060 | # We distinguish between categories by maintaining a status value in EAX. | |
1061 | ENTRY(xen_failsafe_callback) | |
1062 | CFI_STARTPROC | |
df5d1874 | 1063 | pushl_cfi %eax |
5ead97c8 JF |
1064 | movl $1,%eax |
1065 | 1: mov 4(%esp),%ds | |
1066 | 2: mov 8(%esp),%es | |
1067 | 3: mov 12(%esp),%fs | |
1068 | 4: mov 16(%esp),%gs | |
1069 | testl %eax,%eax | |
df5d1874 | 1070 | popl_cfi %eax |
5ead97c8 JF |
1071 | lea 16(%esp),%esp |
1072 | CFI_ADJUST_CFA_OFFSET -16 | |
1073 | jz 5f | |
1074 | addl $16,%esp | |
1075 | jmp iret_exc # EAX != 0 => Category 2 (Bad IRET) | |
df5d1874 | 1076 | 5: pushl_cfi $0 # EAX == 0 => Category 1 (Bad segment) |
5ead97c8 JF |
1077 | SAVE_ALL |
1078 | jmp ret_from_exception | |
1079 | CFI_ENDPROC | |
1080 | ||
1081 | .section .fixup,"ax" | |
1082 | 6: xorl %eax,%eax | |
1083 | movl %eax,4(%esp) | |
1084 | jmp 1b | |
1085 | 7: xorl %eax,%eax | |
1086 | movl %eax,8(%esp) | |
1087 | jmp 2b | |
1088 | 8: xorl %eax,%eax | |
1089 | movl %eax,12(%esp) | |
1090 | jmp 3b | |
1091 | 9: xorl %eax,%eax | |
1092 | movl %eax,16(%esp) | |
1093 | jmp 4b | |
1094 | .previous | |
1095 | .section __ex_table,"a" | |
1096 | .align 4 | |
1097 | .long 1b,6b | |
1098 | .long 2b,7b | |
1099 | .long 3b,8b | |
1100 | .long 4b,9b | |
1101 | .previous | |
1102 | ENDPROC(xen_failsafe_callback) | |
1103 | ||
38e20b07 SY |
1104 | BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK, |
1105 | xen_evtchn_do_upcall) | |
1106 | ||
5ead97c8 JF |
1107 | #endif /* CONFIG_XEN */ |
1108 | ||
606576ce | 1109 | #ifdef CONFIG_FUNCTION_TRACER |
d61f82d0 SR |
1110 | #ifdef CONFIG_DYNAMIC_FTRACE |
1111 | ||
1112 | ENTRY(mcount) | |
d61f82d0 SR |
1113 | ret |
1114 | END(mcount) | |
1115 | ||
1116 | ENTRY(ftrace_caller) | |
60a7ecf4 SR |
1117 | cmpl $0, function_trace_stop |
1118 | jne ftrace_stub | |
1119 | ||
d61f82d0 SR |
1120 | pushl %eax |
1121 | pushl %ecx | |
1122 | pushl %edx | |
1123 | movl 0xc(%esp), %eax | |
1124 | movl 0x4(%ebp), %edx | |
395a59d0 | 1125 | subl $MCOUNT_INSN_SIZE, %eax |
d61f82d0 SR |
1126 | |
1127 | .globl ftrace_call | |
1128 | ftrace_call: | |
1129 | call ftrace_stub | |
1130 | ||
1131 | popl %edx | |
1132 | popl %ecx | |
1133 | popl %eax | |
5a45cfe1 SR |
1134 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1135 | .globl ftrace_graph_call | |
1136 | ftrace_graph_call: | |
1137 | jmp ftrace_stub | |
1138 | #endif | |
d61f82d0 SR |
1139 | |
1140 | .globl ftrace_stub | |
1141 | ftrace_stub: | |
1142 | ret | |
1143 | END(ftrace_caller) | |
1144 | ||
1145 | #else /* ! CONFIG_DYNAMIC_FTRACE */ | |
1146 | ||
16444a8a | 1147 | ENTRY(mcount) |
60a7ecf4 SR |
1148 | cmpl $0, function_trace_stop |
1149 | jne ftrace_stub | |
1150 | ||
16444a8a ACM |
1151 | cmpl $ftrace_stub, ftrace_trace_function |
1152 | jnz trace | |
fb52607a | 1153 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
c2324b69 | 1154 | cmpl $ftrace_stub, ftrace_graph_return |
fb52607a | 1155 | jnz ftrace_graph_caller |
e49dc19c SR |
1156 | |
1157 | cmpl $ftrace_graph_entry_stub, ftrace_graph_entry | |
1158 | jnz ftrace_graph_caller | |
caf4b323 | 1159 | #endif |
16444a8a ACM |
1160 | .globl ftrace_stub |
1161 | ftrace_stub: | |
1162 | ret | |
1163 | ||
1164 | /* taken from glibc */ | |
1165 | trace: | |
1166 | pushl %eax | |
1167 | pushl %ecx | |
1168 | pushl %edx | |
1169 | movl 0xc(%esp), %eax | |
1170 | movl 0x4(%ebp), %edx | |
395a59d0 | 1171 | subl $MCOUNT_INSN_SIZE, %eax |
16444a8a | 1172 | |
d61f82d0 | 1173 | call *ftrace_trace_function |
16444a8a ACM |
1174 | |
1175 | popl %edx | |
1176 | popl %ecx | |
1177 | popl %eax | |
16444a8a ACM |
1178 | jmp ftrace_stub |
1179 | END(mcount) | |
d61f82d0 | 1180 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
606576ce | 1181 | #endif /* CONFIG_FUNCTION_TRACER */ |
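A hedged sketch of what the register setup in the mcount/ftrace_caller stubs above hands to the tracer, assuming gcc -pg instrumentation (every traced function starts with `call mcount`, with frame pointers enabled) and the two-argument tracer callback used in this era: after the three pushes, 0xc(%esp) is mcount's own return address inside the traced function, and 0x4(%ebp) is the traced function's return address in its caller.

```c
/* Signature assumed for ftrace_trace_function in this era. */
typedef void (*trace_fn)(unsigned long ip, unsigned long parent_ip);

/* Illustrative tracer, not a real kernel symbol. */
static void example_tracer(unsigned long ip, unsigned long parent_ip)
{
	/*
	 * ip        = call site inside the traced function (the assembly above
	 *             subtracts MCOUNT_INSN_SIZE to point back at the call itself)
	 * parent_ip = address the traced function will return to
	 */
	(void)ip;
	(void)parent_ip;
}
```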
16444a8a | 1182 | |
fb52607a FW |
1183 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1184 | ENTRY(ftrace_graph_caller) | |
e7d3737e FW |
1185 | cmpl $0, function_trace_stop |
1186 | jne ftrace_stub | |
1187 | ||
caf4b323 FW |
1188 | pushl %eax |
1189 | pushl %ecx | |
1190 | pushl %edx | |
1dc1c6ad | 1191 | movl 0xc(%esp), %edx |
caf4b323 | 1192 | lea 0x4(%ebp), %eax |
71e308a2 | 1193 | movl (%ebp), %ecx |
bb4304c7 | 1194 | subl $MCOUNT_INSN_SIZE, %edx |
caf4b323 | 1195 | call prepare_ftrace_return |
caf4b323 FW |
1196 | popl %edx |
1197 | popl %ecx | |
1198 | popl %eax | |
e7d3737e | 1199 | ret |
fb52607a | 1200 | END(ftrace_graph_caller) |
caf4b323 FW |
1201 | |
1202 | .globl return_to_handler | |
1203 | return_to_handler: | |
caf4b323 | 1204 | pushl %eax |
caf4b323 | 1205 | pushl %edx |
71e308a2 | 1206 | movl %ebp, %eax |
caf4b323 | 1207 | call ftrace_return_to_handler |
194ec341 | 1208 | movl %eax, %ecx |
caf4b323 | 1209 | popl %edx |
caf4b323 | 1210 | popl %eax |
194ec341 | 1211 | jmp *%ecx |
e7d3737e | 1212 | #endif |
16444a8a | 1213 | |
bb152f53 | 1214 | .section .rodata,"a" |
541054d9 | 1215 | #include "syscall_table_32.S" |
1da177e4 LT |
1216 | |
1217 | syscall_table_size=(.-sys_call_table) | |
d211af05 AH |
1218 | |
1219 | /* | |
1220 | * Some functions should be protected against kprobes | |
1221 | */ | |
1222 | .pushsection .kprobes.text, "ax" | |
1223 | ||
1224 | ENTRY(page_fault) | |
1225 | RING0_EC_FRAME | |
df5d1874 | 1226 | pushl_cfi $do_page_fault |
d211af05 AH |
1227 | ALIGN |
1228 | error_code: | |
ccbeed3a | 1229 | /* the function address is in %gs's slot on the stack */ |
df5d1874 | 1230 | pushl_cfi %fs |
ccbeed3a | 1231 | /*CFI_REL_OFFSET fs, 0*/ |
df5d1874 | 1232 | pushl_cfi %es |
d211af05 | 1233 | /*CFI_REL_OFFSET es, 0*/ |
df5d1874 | 1234 | pushl_cfi %ds |
d211af05 | 1235 | /*CFI_REL_OFFSET ds, 0*/ |
df5d1874 | 1236 | pushl_cfi %eax |
d211af05 | 1237 | CFI_REL_OFFSET eax, 0 |
df5d1874 | 1238 | pushl_cfi %ebp |
d211af05 | 1239 | CFI_REL_OFFSET ebp, 0 |
df5d1874 | 1240 | pushl_cfi %edi |
d211af05 | 1241 | CFI_REL_OFFSET edi, 0 |
df5d1874 | 1242 | pushl_cfi %esi |
d211af05 | 1243 | CFI_REL_OFFSET esi, 0 |
df5d1874 | 1244 | pushl_cfi %edx |
d211af05 | 1245 | CFI_REL_OFFSET edx, 0 |
df5d1874 | 1246 | pushl_cfi %ecx |
d211af05 | 1247 | CFI_REL_OFFSET ecx, 0 |
df5d1874 | 1248 | pushl_cfi %ebx |
d211af05 AH |
1249 | CFI_REL_OFFSET ebx, 0 |
1250 | cld | |
d211af05 AH |
1251 | movl $(__KERNEL_PERCPU), %ecx |
1252 | movl %ecx, %fs | |
1253 | UNWIND_ESPFIX_STACK | |
ccbeed3a TH |
1254 | GS_TO_REG %ecx |
1255 | movl PT_GS(%esp), %edi # get the function address | |
d211af05 AH |
1256 | movl PT_ORIG_EAX(%esp), %edx # get the error code |
1257 | movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart | |
ccbeed3a TH |
1258 | REG_TO_PTGS %ecx |
1259 | SET_KERNEL_GS %ecx | |
d211af05 AH |
1260 | movl $(__USER_DS), %ecx |
1261 | movl %ecx, %ds | |
1262 | movl %ecx, %es | |
1263 | TRACE_IRQS_OFF | |
1264 | movl %esp,%eax # pt_regs pointer | |
1265 | call *%edi | |
1266 | jmp ret_from_exception | |
1267 | CFI_ENDPROC | |
1268 | END(page_fault) | |
1269 | ||
1270 | /* | |
1271 | * Debug traps and NMI can happen at the one SYSENTER instruction | |
1272 | * that sets up the real kernel stack. Check here, since we can't | |
1273 | * allow the wrong stack to be used. | |
1274 | * | |
1275 | * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have | |
1276 | * already pushed 3 words if it hits on the sysenter instruction: | |
1277 | * eflags, cs and eip. | |
1278 | * | |
1279 | * We just load the right stack, and push the three (known) values | |
1280 | * by hand onto the new stack - while updating the return eip past | |
1281 | * the instruction that would have done it for sysenter. | |
1282 | */ | |
f0d96110 TH |
1283 | .macro FIX_STACK offset ok label |
1284 | cmpw $__KERNEL_CS, 4(%esp) | |
1285 | jne \ok | |
1286 | \label: | |
1287 | movl TSS_sysenter_sp0 + \offset(%esp), %esp | |
1288 | CFI_DEF_CFA esp, 0 | |
1289 | CFI_UNDEFINED eip | |
df5d1874 JB |
1290 | pushfl_cfi |
1291 | pushl_cfi $__KERNEL_CS | |
1292 | pushl_cfi $sysenter_past_esp | |
d211af05 | 1293 | CFI_REL_OFFSET eip, 0 |
f0d96110 | 1294 | .endm |
d211af05 AH |
1295 | |
1296 | ENTRY(debug) | |
1297 | RING0_INT_FRAME | |
1298 | cmpl $ia32_sysenter_target,(%esp) | |
1299 | jne debug_stack_correct | |
f0d96110 | 1300 | FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn |
d211af05 | 1301 | debug_stack_correct: |
df5d1874 | 1302 | pushl_cfi $-1 # mark this as an int |
d211af05 AH |
1303 | SAVE_ALL |
1304 | TRACE_IRQS_OFF | |
1305 | xorl %edx,%edx # error code 0 | |
1306 | movl %esp,%eax # pt_regs pointer | |
1307 | call do_debug | |
1308 | jmp ret_from_exception | |
1309 | CFI_ENDPROC | |
1310 | END(debug) | |
1311 | ||
1312 | /* | |
1313 | * NMI is doubly nasty. It can happen _while_ we're handling | |
1314 | * a debug fault, and the debug fault hasn't yet been able to | |
1315 | * clear up the stack. So we first check whether we got an | |
1316 | * NMI on the sysenter entry path, but after that we need to | |
1317 | * check whether we got an NMI on the debug path where the debug | |
1318 | * fault happened on the sysenter path. | |
1319 | */ | |
1320 | ENTRY(nmi) | |
1321 | RING0_INT_FRAME | |
df5d1874 | 1322 | pushl_cfi %eax |
d211af05 AH |
1323 | movl %ss, %eax |
1324 | cmpw $__ESPFIX_SS, %ax | |
df5d1874 | 1325 | popl_cfi %eax |
d211af05 AH |
1326 | je nmi_espfix_stack |
1327 | cmpl $ia32_sysenter_target,(%esp) | |
1328 | je nmi_stack_fixup | |
df5d1874 | 1329 | pushl_cfi %eax |
d211af05 AH |
1330 | movl %esp,%eax |
1331 | /* Do not access memory above the end of our stack page, | |
1332 | * it might not exist. | |
1333 | */ | |
1334 | andl $(THREAD_SIZE-1),%eax | |
1335 | cmpl $(THREAD_SIZE-20),%eax | |
df5d1874 | 1336 | popl_cfi %eax |
d211af05 AH |
1337 | jae nmi_stack_correct |
1338 | cmpl $ia32_sysenter_target,12(%esp) | |
1339 | je nmi_debug_stack_check | |
1340 | nmi_stack_correct: | |
1341 | /* We have a RING0_INT_FRAME here */ | |
df5d1874 | 1342 | pushl_cfi %eax |
d211af05 | 1343 | SAVE_ALL |
d211af05 AH |
1344 | xorl %edx,%edx # zero error code |
1345 | movl %esp,%eax # pt_regs pointer | |
1346 | call do_nmi | |
2e04bc76 | 1347 | jmp restore_all_notrace |
d211af05 AH |
1348 | CFI_ENDPROC |
1349 | ||
1350 | nmi_stack_fixup: | |
1351 | RING0_INT_FRAME | |
f0d96110 | 1352 | FIX_STACK 12, nmi_stack_correct, 1 |
d211af05 AH |
1353 | jmp nmi_stack_correct |
1354 | ||
1355 | nmi_debug_stack_check: | |
1356 | /* We have a RING0_INT_FRAME here */ | |
1357 | cmpw $__KERNEL_CS,16(%esp) | |
1358 | jne nmi_stack_correct | |
1359 | cmpl $debug,(%esp) | |
1360 | jb nmi_stack_correct | |
1361 | cmpl $debug_esp_fix_insn,(%esp) | |
1362 | ja nmi_stack_correct | |
f0d96110 | 1363 | FIX_STACK 24, nmi_stack_correct, 1 |
d211af05 AH |
1364 | jmp nmi_stack_correct |
1365 | ||
1366 | nmi_espfix_stack: | |
1367 | /* We have a RING0_INT_FRAME here. | |
1368 | * | |
1369 | * create the pointer to lss back | |
1370 | */ | |
df5d1874 JB |
1371 | pushl_cfi %ss |
1372 | pushl_cfi %esp | |
bda3a897 | 1373 | addl $4, (%esp) |
d211af05 AH |
1374 | /* copy the iret frame of 12 bytes */ |
1375 | .rept 3 | |
df5d1874 | 1376 | pushl_cfi 16(%esp) |
d211af05 | 1377 | .endr |
df5d1874 | 1378 | pushl_cfi %eax |
d211af05 | 1379 | SAVE_ALL |
d211af05 AH |
1380 | FIXUP_ESPFIX_STACK # %eax == %esp |
1381 | xorl %edx,%edx # zero error code | |
1382 | call do_nmi | |
1383 | RESTORE_REGS | |
1384 | lss 12+4(%esp), %esp # back to espfix stack | |
1385 | CFI_ADJUST_CFA_OFFSET -24 | |
1386 | jmp irq_return | |
1387 | CFI_ENDPROC | |
1388 | END(nmi) | |
1389 | ||
1390 | ENTRY(int3) | |
1391 | RING0_INT_FRAME | |
df5d1874 | 1392 | pushl_cfi $-1 # mark this as an int |
d211af05 AH |
1393 | SAVE_ALL |
1394 | TRACE_IRQS_OFF | |
1395 | xorl %edx,%edx # zero error code | |
1396 | movl %esp,%eax # pt_regs pointer | |
1397 | call do_int3 | |
1398 | jmp ret_from_exception | |
1399 | CFI_ENDPROC | |
1400 | END(int3) | |
1401 | ||
1402 | ENTRY(general_protection) | |
1403 | RING0_EC_FRAME | |
df5d1874 | 1404 | pushl_cfi $do_general_protection |
d211af05 AH |
1405 | jmp error_code |
1406 | CFI_ENDPROC | |
1407 | END(general_protection) | |
1408 | ||
1409 | /* | |
1410 | * End of kprobes section | |
1411 | */ | |
1412 | .popsection |