/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.txt
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture-defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers are
 *   not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>
#include <asm/percpu.h>
#include <linux/err.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT	0x80000000
#define __AUDIT_ARCH_LE		0x40000000
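
/*
 * For reference: EM_X86_64 is 62 (0x3e), so AUDIT_ARCH_X86_64 above
 * evaluates to 0xc000003e - a 64-bit, little-endian x86-64 audit arch.
 */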

	.code64
	.section .entry.text, "ax"

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
	retq
END(mcount)

ENTRY(ftrace_caller)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	MCOUNT_SAVE_FRAME

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi

GLOBAL(ftrace_call)
	call ftrace_stub

	MCOUNT_RESTORE_FRAME

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
GLOBAL(ftrace_graph_call)
	jmp ftrace_stub
#endif

GLOBAL(ftrace_stub)
	retq
END(ftrace_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	cmpq $ftrace_stub, ftrace_trace_function
	jnz trace

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpq $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller

	cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller
#endif

GLOBAL(ftrace_stub)
	retq

trace:
	MCOUNT_SAVE_FRAME

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi

	call *ftrace_trace_function

	MCOUNT_RESTORE_FRAME

	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	MCOUNT_SAVE_FRAME

	leaq 8(%rbp), %rdi
	movq 0x38(%rsp), %rsi
	movq (%rbp), %rdx
	subq $MCOUNT_INSN_SIZE, %rsi

	call prepare_ftrace_return

	MCOUNT_RESTORE_FRAME

	retq
END(ftrace_graph_caller)

GLOBAL(return_to_handler)
	subq $24, %rsp

	/* Save the return values */
	movq %rax, (%rsp)
	movq %rdx, 8(%rsp)
	movq %rbp, %rdi

	call ftrace_return_to_handler

	movq %rax, %rdi
	movq 8(%rsp), %rdx
	movq (%rsp), %rax
	addq $24, %rsp
	jmp *%rdi
#endif
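
/*
 * On return_to_handler above: the graph tracer has replaced the traced
 * function's return address with that stub, so control arrives there in
 * place of the real caller; ftrace_return_to_handler() hands back the
 * original return address, which the final jmp *%rdi then resumes.
 */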


#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
ENDPROC(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */


.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt   $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc  1f
	TRACE_IRQS_ON
1:
#endif
.endm
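
/*
 * Bit 9 of EFLAGS is IF, so the bt in TRACE_IRQS_IRETQ checks whether the
 * frame we are about to iretq to will run with interrupts enabled; only
 * then is TRACE_IRQS_ON appropriate.
 */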

/*
 * C code is not supposed to know about undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

	/* %rsp:at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp offset=0
	movq PER_CPU_VAR(old_rsp),\tmp
	movq \tmp,RSP+\offset(%rsp)
	movq $__USER_DS,SS+\offset(%rsp)
	movq $__USER_CS,CS+\offset(%rsp)
	movq $-1,RCX+\offset(%rsp)
	movq R11+\offset(%rsp),\tmp	/* get eflags */
	movq \tmp,EFLAGS+\offset(%rsp)
	.endm

	.macro RESTORE_TOP_OF_STACK tmp offset=0
	movq RSP+\offset(%rsp),\tmp
	movq \tmp,PER_CPU_VAR(old_rsp)
	movq EFLAGS+\offset(%rsp),\tmp
	movq \tmp,R11+\offset(%rsp)
	.endm
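
	/*
	 * Why these fields: SYSCALL pushes nothing, but it does stash the
	 * return RIP in %rcx and RFLAGS in %r11. FIXUP_TOP_OF_STACK therefore
	 * synthesizes the SS/RSP/CS slots from known user values, copies the
	 * saved %r11 into the EFLAGS slot, and writes -1 into the RCX slot as
	 * a "not a valid user value" marker, since %rcx was clobbered by the
	 * SYSCALL instruction itself.
	 */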

	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl %eax, %eax
	pushq_cfi $__KERNEL_DS /* ss */
	/*CFI_REL_OFFSET ss,0*/
	pushq_cfi %rax /* rsp */
	CFI_REL_OFFSET rsp,0
	pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_BIT1) /* eflags - interrupts on */
	/*CFI_REL_OFFSET rflags,0*/
	pushq_cfi $__KERNEL_CS /* cs */
	/*CFI_REL_OFFSET cs,0*/
	pushq_cfi \child_rip /* rip */
	CFI_REL_OFFSET rip,0
	pushq_cfi %rax /* orig rax */
	.endm

	.macro UNFAKE_STACK_FRAME
	addq $8*6, %rsp
	CFI_ADJUST_CFA_OFFSET	-(6*8)
	.endm
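
	/*
	 * FAKE_STACK_FRAME pushes six quadwords (ss, rsp, eflags, cs, rip,
	 * orig_rax), which is why UNFAKE_STACK_FRAME pops exactly 8*6 bytes.
	 */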

/*
 * initial frame state for interrupts (and exceptions without error code)
 */
	.macro EMPTY_FRAME start=1 offset=0
	.if \start
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,8+\offset
	.else
	CFI_DEF_CFA_OFFSET 8+\offset
	.endif
	.endm

/*
 * initial frame state for interrupts (and exceptions without error code)
 */
	.macro INTR_FRAME start=1 offset=0
	EMPTY_FRAME \start, SS+8+\offset-RIP
	/*CFI_REL_OFFSET ss, SS+\offset-RIP*/
	CFI_REL_OFFSET rsp, RSP+\offset-RIP
	/*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/
	/*CFI_REL_OFFSET cs, CS+\offset-RIP*/
	CFI_REL_OFFSET rip, RIP+\offset-RIP
	.endm

/*
 * initial frame state for exceptions with error code (and interrupts
 * with vector already pushed)
 */
	.macro XCPT_FRAME start=1 offset=0
	INTR_FRAME \start, RIP+\offset-ORIG_RAX
	/*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/
	.endm

/*
 * frame that enables calling into C.
 */
	.macro PARTIAL_FRAME start=1 offset=0
	XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET
	CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET
	CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET
	CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET
	CFI_REL_OFFSET rcx, RCX+\offset-ARGOFFSET
	CFI_REL_OFFSET rax, RAX+\offset-ARGOFFSET
	CFI_REL_OFFSET r8, R8+\offset-ARGOFFSET
	CFI_REL_OFFSET r9, R9+\offset-ARGOFFSET
	CFI_REL_OFFSET r10, R10+\offset-ARGOFFSET
	CFI_REL_OFFSET r11, R11+\offset-ARGOFFSET
	.endm

/*
 * frame that enables passing a complete pt_regs to a C function.
 */
	.macro DEFAULT_FRAME start=1 offset=0
	PARTIAL_FRAME \start, R11+\offset-R15
	CFI_REL_OFFSET rbx, RBX+\offset
	CFI_REL_OFFSET rbp, RBP+\offset
	CFI_REL_OFFSET r12, R12+\offset
	CFI_REL_OFFSET r13, R13+\offset
	CFI_REL_OFFSET r14, R14+\offset
	CFI_REL_OFFSET r15, R15+\offset
	.endm
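
/*
 * The frame macros above nest: EMPTY_FRAME sets up the bare CFA,
 * INTR_FRAME adds the hardware iret frame (rip/rsp), XCPT_FRAME adds the
 * error code slot, PARTIAL_FRAME adds the caller-clobbered registers, and
 * DEFAULT_FRAME adds the callee-saved ones - one layer per level of state
 * saved on the kernel stack.
 */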

/* save partial stack frame */
	.macro SAVE_ARGS_IRQ
	cld
	/* start from rbp in pt_regs and jump over */
	movq_cfi rdi, RDI-RBP
	movq_cfi rsi, RSI-RBP
	movq_cfi rdx, RDX-RBP
	movq_cfi rcx, RCX-RBP
	movq_cfi rax, RAX-RBP
	movq_cfi  r8,  R8-RBP
	movq_cfi  r9,  R9-RBP
	movq_cfi r10, R10-RBP
	movq_cfi r11, R11-RBP

	/* Save rbp so that we can unwind from get_irq_regs() */
	movq_cfi rbp, 0

	/* Save previous stack value */
	movq %rsp, %rsi

	leaq -RBP(%rsp),%rdi	/* arg1 for handler */
	testl $3, CS(%rdi)
	je 1f
	SWAPGS
	/*
	 * irq_count is used to check if a CPU is already on an interrupt stack
	 * or not. While this is essentially redundant with preempt_count it is
	 * a little cheaper to use a separate counter in the PDA (short of
	 * moving irq_enter into assembly, which would be too much work)
	 */
1:	incl PER_CPU_VAR(irq_count)
	jne 2f
	mov PER_CPU_VAR(irq_stack_ptr),%rsp
	CFI_DEF_CFA_REGISTER	rsi

2:	/* Store previous stack value */
	pushq %rsi
	CFI_ESCAPE	0x0f /* DW_CFA_def_cfa_expression */, 6, \
			0x77 /* DW_OP_breg7 */, 0, \
			0x06 /* DW_OP_deref */, \
			0x08 /* DW_OP_const1u */, SS+8-RBP, \
			0x22 /* DW_OP_plus */
	/* We entered an interrupt context - irqs are off: */
	TRACE_IRQS_OFF
	.endm
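
	/*
	 * The CFI_ESCAPE expression above says: the CFA is found by taking
	 * %rsp (DW_OP_breg7 + 0), dereferencing it to recover the previous
	 * stack pointer we just pushed, and adding SS+8-RBP - i.e. the
	 * unwinder follows the saved stack link rather than a fixed offset.
	 */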

ENTRY(save_rest)
	PARTIAL_FRAME 1 REST_SKIP+8
	movq 5*8+16(%rsp), %r11	/* save return address */
	movq_cfi rbx, RBX+16
	movq_cfi rbp, RBP+16
	movq_cfi r12, R12+16
	movq_cfi r13, R13+16
	movq_cfi r14, R14+16
	movq_cfi r15, R15+16
	movq %r11, 8(%rsp)	/* return address */
	FIXUP_TOP_OF_STACK %r11, 16
	ret
	CFI_ENDPROC
END(save_rest)

/* save complete stack frame */
	.pushsection .kprobes.text, "ax"
ENTRY(save_paranoid)
	XCPT_FRAME 1 RDI+8
	cld
	movq_cfi rdi, RDI+8
	movq_cfi rsi, RSI+8
	movq_cfi rdx, RDX+8
	movq_cfi rcx, RCX+8
	movq_cfi rax, RAX+8
	movq_cfi r8, R8+8
	movq_cfi r9, R9+8
	movq_cfi r10, R10+8
	movq_cfi r11, R11+8
	movq_cfi rbx, RBX+8
	movq_cfi rbp, RBP+8
	movq_cfi r12, R12+8
	movq_cfi r13, R13+8
	movq_cfi r14, R14+8
	movq_cfi r15, R15+8
	movl $1,%ebx
	movl $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx
	js 1f	/* negative -> in kernel */
	SWAPGS
	xorl %ebx,%ebx
1:	ret
	CFI_ENDPROC
END(save_paranoid)
	.popsection

/*
 * A newly forked process directly context switches into this address.
 *
 * rdi: prev task we switched from
 */
ENTRY(ret_from_fork)
	DEFAULT_FRAME

	LOCK ; btr $TIF_FORK,TI_flags(%r8)

	pushq_cfi kernel_eflags(%rip)
	popfq_cfi				# reset kernel eflags

	call schedule_tail			# rdi: 'prev' task parameter

	GET_THREAD_INFO(%rcx)

	RESTORE_REST

	testl $3, CS-ARGOFFSET(%rsp)		# from kernel_thread?
	jz   retint_restore_args

	testl $_TIF_IA32, TI_flags(%rcx)	# 32-bit compat task needs IRET
	jnz  int_ret_from_sys_call

	RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
	jmp ret_from_sys_call			# go to the SYSRET fastpath

	CFI_ENDPROC
END(ret_from_fork)

/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the stack frame
 *      and report it properly in ps. Unfortunately we haven't.
 *
 * When the user can change the frames, always force IRET. That is because
 * it deals with uncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
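
/*
 * For orientation (illustrative only): a user-space write(2) arrives here
 * roughly as
 *	movl	$1, %edi		# fd
 *	movq	$buf, %rsi		# buffer
 *	movq	$count, %rdx		# length
 *	movl	$__NR_write, %eax	# syscall number (1 on x86-64)
 *	syscall				# rcx <- return RIP, r11 <- rflags
 * with the kernel entered at system_call below.
 */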

ENTRY(system_call)
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,KERNEL_STACK_OFFSET
	CFI_REGISTER	rip,rcx
	/*CFI_REGISTER	rflags,r11*/
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
GLOBAL(system_call_after_swapgs)

	movq %rsp,PER_CPU_VAR(old_rsp)
	movq PER_CPU_VAR(kernel_stack),%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_ARGS 8,0
	movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
	jnz tracesys
system_call_fastpath:
#if __SYSCALL_MASK == ~0
	cmpq $__NR_syscall_max,%rax
#else
	andl $__SYSCALL_MASK,%eax
	cmpl $__NR_syscall_max,%eax
#endif
	ja badsys
	movq %r10,%rcx
	call *sys_call_table(,%rax,8)	# XXX: rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	flagmask */
sysret_check:
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
	andl %edi,%edx
	jnz  sysret_careful
	CFI_REMEMBER_STATE
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq RIP-ARGOFFSET(%rsp),%rcx
	CFI_REGISTER	rip,rcx
	RESTORE_ARGS 1,-ARG_SKIP,0
	/*CFI_REGISTER	rflags,r11*/
	movq	PER_CPU_VAR(old_rsp), %rsp
	USERGS_SYSRET64

	CFI_RESTORE_STATE
	/* Handle reschedules */
	/* edx:	work, edi: workmask */
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq_cfi %rdi
	call schedule
	popq_cfi %rdi
	jmp sysret_check

	/* Handle a signal */
sysret_signal:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
#ifdef CONFIG_AUDITSYSCALL
	bt $TIF_SYSCALL_AUDIT,%edx
	jc sysret_audit
#endif
	/*
	 * We have a signal, or exit tracing, or single-step.
	 * These all wind up with the iret return path anyway,
	 * so just join that path right now.
	 */
	FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
	jmp int_check_syscall_exit_work

badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call

#ifdef CONFIG_AUDITSYSCALL
	/*
	 * Fast path for syscall audit without full syscall trace.
	 * We just call __audit_syscall_entry() directly, and then
	 * jump back to the normal fast path.
	 */
auditsys:
	movq %r10,%r9			/* 6th arg: 4th syscall arg */
	movq %rdx,%r8			/* 5th arg: 3rd syscall arg */
	movq %rsi,%rcx			/* 4th arg: 2nd syscall arg */
	movq %rdi,%rdx			/* 3rd arg: 1st syscall arg */
	movq %rax,%rsi			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_X86_64,%edi	/* 1st arg: audit arch */
	call __audit_syscall_entry
	LOAD_ARGS 0			/* reload call-clobbered registers */
	jmp system_call_fastpath

	/*
	 * Return fast path for syscall audit. Call __audit_syscall_exit()
	 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
	 * masked off.
	 */
sysret_audit:
	movq RAX-ARGOFFSET(%rsp),%rsi	/* second arg, syscall return value */
	cmpq $-MAX_ERRNO,%rsi		/* is it < -MAX_ERRNO? */
	setbe %al			/* 1 if so, 0 if not */
	movzbl %al,%edi			/* zero-extend that into %edi */
	call __audit_syscall_exit
	movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
	jmp sysret_check
#endif /* CONFIG_AUDITSYSCALL */

	/* Do syscall tracing */
tracesys:
#ifdef CONFIG_AUDITSYSCALL
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
	jz auditsys
#endif
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	/*
	 * Reload arg registers from stack in case ptrace changed them.
	 * We don't reload %rax because syscall_trace_enter() returned
	 * the value it wants us to use in the table lookup.
	 */
	LOAD_ARGS ARGOFFSET, 1
	RESTORE_REST
#if __SYSCALL_MASK == ~0
	cmpq $__NR_syscall_max,%rax
#else
	andl $__SYSCALL_MASK,%eax
	cmpl $__NR_syscall_max,%eax
#endif
	ja   int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
	movq %r10,%rcx			/* fixup for C */
	call *sys_call_table(,%rax,8)
	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because user could have changed frame */

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
GLOBAL(int_ret_from_sys_call)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
GLOBAL(int_with_check)
	LOCKDEP_SYS_EXIT_IRQ
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz  int_careful
	andl $~TS_COMPAT,TI_status(%rcx)
	jmp  retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc  int_very_careful
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq_cfi %rdi
	call schedule
	popq_cfi %rdi
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
int_check_syscall_exit_work:
	SAVE_REST
	/* Check for syscall exit trace */
	testl $_TIF_WORK_SYSCALL_EXIT,%edx
	jz int_signal
	pushq_cfi %rdi
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
	call syscall_trace_leave
	popq_cfi %rdi
	andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
	jmp int_restore_rest

int_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_WORK_MASK,%edi
int_restore_rest:
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check
	CFI_ENDPROC
END(system_call)

/*
 * Certain special system calls that need to save a complete full stack frame.
 */
	.macro PTREGSCALL label,func,arg
ENTRY(\label)
	PARTIAL_FRAME 1 8		/* offset 8: return address */
	subq $REST_SKIP, %rsp
	CFI_ADJUST_CFA_OFFSET REST_SKIP
	call save_rest
	DEFAULT_FRAME 0 8		/* offset 8: return address */
	leaq 8(%rsp), \arg		/* pt_regs pointer */
	call \func
	jmp ptregscall_common
	CFI_ENDPROC
END(\label)
	.endm

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
	DEFAULT_FRAME 1 8	/* offset 8: return address */
	RESTORE_TOP_OF_STACK %r11, 8
	movq_cfi_restore R15+8, r15
	movq_cfi_restore R14+8, r14
	movq_cfi_restore R13+8, r13
	movq_cfi_restore R12+8, r12
	movq_cfi_restore RBP+8, rbp
	movq_cfi_restore RBX+8, rbx
	ret $REST_SKIP		/* pop extended registers */
	CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
	CFI_STARTPROC
	addq $8, %rsp
	PARTIAL_FRAME 0
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	movq %rsp, %rcx
	call sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
	PARTIAL_FRAME 0
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)

#ifdef CONFIG_X86_X32_ABI
	PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx

ENTRY(stub_x32_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
	PARTIAL_FRAME 0
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys32_x32_rt_sigreturn
	movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_x32_rt_sigreturn)

ENTRY(stub_x32_execve)
	CFI_STARTPROC
	addq $8, %rsp
	PARTIAL_FRAME 0
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	movq %rsp, %rcx
	call sys32_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_x32_execve)

#endif

/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
	.section .init.rodata,"a"
ENTRY(interrupt)
	.section .entry.text
	.p2align 5
	.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
	INTR_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
	.balign 32
  .rept	7
    .if vector < NR_VECTORS
      .if vector <> FIRST_EXTERNAL_VECTOR
	CFI_ADJUST_CFA_OFFSET -8
      .endif
1:	pushq_cfi $(~vector+0x80)	/* Note: always in signed byte range */
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
	jmp 2f
      .endif
      .previous
	.quad 1b
      .section .entry.text
vector=vector+1
    .endif
  .endr
2:	jmp common_interrupt
.endr
	CFI_ENDPROC
END(irq_entries_start)

.previous
END(interrupt)
.previous

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
	.macro interrupt func
	/* reserve pt_regs for scratch regs and rbp */
	subq $ORIG_RAX-RBP, %rsp
	CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
	SAVE_ARGS_IRQ
	call \func
	.endm

/*
 * Interrupt entry/exit should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
	/*
	 * The interrupt stubs push (~vector+0x80) onto the stack and
	 * then jump to common_interrupt.
	 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	XCPT_FRAME
	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
	interrupt do_IRQ
	/* 0(%rsp): old_rsp-ARGOFFSET */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl PER_CPU_VAR(irq_count)

	/* Restore saved previous stack */
	popq %rsi
	CFI_DEF_CFA_REGISTER	rsi
	leaq ARGOFFSET-RBP(%rsi), %rsp
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	RBP-ARGOFFSET

exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	CFI_REMEMBER_STATE
	jnz  retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ
	SWAPGS
	jmp restore_args

retint_restore_args:	/* return to kernel space */
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 1,8,1

irq_return:
	INTERRUPT_RETURN

	.section __ex_table, "a"
	.quad irq_return, bad_iret
	.previous

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iretq

	.section __ex_table,"a"
	.quad native_iret, bad_iret
	.previous
#endif

	.section .fixup,"ax"
bad_iret:
	/*
	 * The iret traps when the %cs or %ss being restored is bogus.
	 * We've lost the original trap vector and error code.
	 * #GPF is the most likely one to get for an invalid selector.
	 * So pretend we completed the iret and took the #GPF in user mode.
	 *
	 * We are now running with the kernel GS after exception recovery.
	 * But error_entry expects us to have user GS to match the user %cs,
	 * so swap back.
	 */
	pushq $0

	SWAPGS
	jmp general_protection

	.previous

	/* edi: workmask, edx: work */
retint_careful:
	CFI_RESTORE_STATE
	bt    $TIF_NEED_RESCHED,%edx
	jnc   retint_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq_cfi %rdi
	call  schedule
	popq_cfi %rdi
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp retint_check

retint_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz    retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	jmp retint_with_reschedule

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx:	threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl $0,TI_preempt_count(%rcx)
	jnz  retint_restore_args
	bt  $TIF_NEED_RESCHED,TI_flags(%rcx)
	jnc  retint_restore_args
	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc  retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif

	CFI_ENDPROC
END(common_interrupt)
/*
 * End of kprobes section
 */
	.popsection

/*
 * APIC interrupts.
 */
.macro apicinterrupt num sym do_sym
ENTRY(\sym)
	INTR_FRAME
	pushq_cfi $~(\num)
.Lcommon_\sym:
	interrupt \do_sym
	jmp ret_from_intr
	CFI_ENDPROC
END(\sym)
.endm
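
/*
 * E.g. "apicinterrupt LOCAL_TIMER_VECTOR apic_timer_interrupt
 * smp_apic_timer_interrupt" below expands to an apic_timer_interrupt
 * entry that pushes ~LOCAL_TIMER_VECTOR and hands control to
 * smp_apic_timer_interrupt via the common interrupt path.
 */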

#ifdef CONFIG_SMP
apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \
	irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt
apicinterrupt REBOOT_VECTOR \
	reboot_interrupt smp_reboot_interrupt
#endif

#ifdef CONFIG_X86_UV
apicinterrupt UV_BAU_MESSAGE \
	uv_bau_message_intr1 uv_bau_message_interrupt
#endif
apicinterrupt LOCAL_TIMER_VECTOR \
	apic_timer_interrupt smp_apic_timer_interrupt
apicinterrupt X86_PLATFORM_IPI_VECTOR \
	x86_platform_ipi smp_x86_platform_ipi

#ifdef CONFIG_SMP
	ALIGN
	INTR_FRAME
.irp idx,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
	16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
.if NUM_INVALIDATE_TLB_VECTORS > \idx
ENTRY(invalidate_interrupt\idx)
	pushq_cfi $~(INVALIDATE_TLB_VECTOR_START+\idx)
	jmp .Lcommon_invalidate_interrupt0
	CFI_ADJUST_CFA_OFFSET -8
END(invalidate_interrupt\idx)
.endif
.endr
	CFI_ENDPROC
apicinterrupt INVALIDATE_TLB_VECTOR_START, \
	invalidate_interrupt0, smp_invalidate_interrupt
#endif

apicinterrupt THRESHOLD_APIC_VECTOR \
	threshold_interrupt smp_threshold_interrupt
apicinterrupt THERMAL_APIC_VECTOR \
	thermal_interrupt smp_thermal_interrupt

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \
	call_function_single_interrupt smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR \
	call_function_interrupt smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR \
	reschedule_interrupt smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR \
	error_interrupt smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR \
	spurious_interrupt smp_spurious_interrupt

#ifdef CONFIG_IRQ_WORK
apicinterrupt IRQ_WORK_VECTOR \
	irq_work_interrupt smp_irq_work_interrupt
#endif

/*
 * Exception entry points.
 */
.macro zeroentry sym do_sym
ENTRY(\sym)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
	subq $ORIG_RAX-R15, %rsp
	CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
	call error_entry
	DEFAULT_FRAME 0
	movq %rsp,%rdi		/* pt_regs pointer */
	xorl %esi,%esi		/* no error code */
	call \do_sym
	jmp error_exit		/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm

.macro paranoidzeroentry sym do_sym
ENTRY(\sym)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
	subq $ORIG_RAX-R15, %rsp
	CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
	call save_paranoid
	TRACE_IRQS_OFF
	movq %rsp,%rdi		/* pt_regs pointer */
	xorl %esi,%esi		/* no error code */
	call \do_sym
	jmp paranoid_exit	/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm

#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
.macro paranoidzeroentry_ist sym do_sym ist
ENTRY(\sym)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
	subq $ORIG_RAX-R15, %rsp
	CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
	call save_paranoid
	TRACE_IRQS_OFF
	movq %rsp,%rdi		/* pt_regs pointer */
	xorl %esi,%esi		/* no error code */
	subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
	call \do_sym
	addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
	jmp paranoid_exit	/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm
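
/*
 * The subq/addq pair on INIT_TSS_IST(\ist) above temporarily moves the
 * TSS's IST entry down one exception stack's worth while the handler
 * runs, so that a recursive exception on the same IST vector lands on a
 * fresh stack instead of clobbering the frame we are still using. (TSS
 * IST slots are numbered from 1, hence the (x) - 1 in INIT_TSS_IST.)
 */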

.macro errorentry sym do_sym
ENTRY(\sym)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	subq $ORIG_RAX-R15, %rsp
	CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
	call error_entry
	DEFAULT_FRAME 0
	movq %rsp,%rdi			/* pt_regs pointer */
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
	call \do_sym
	jmp error_exit			/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm

	/* error code is on the stack already */
.macro paranoiderrorentry sym do_sym
ENTRY(\sym)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	subq $ORIG_RAX-R15, %rsp
	CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
	call save_paranoid
	DEFAULT_FRAME 0
	TRACE_IRQS_OFF
	movq %rsp,%rdi			/* pt_regs pointer */
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
	call \do_sym
	jmp paranoid_exit		/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm
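
/*
 * Summary of the four entry flavors: "zero" variants push a fake -1 error
 * code, "error" variants expect the CPU-supplied one; "paranoid" variants
 * use save_paranoid, which inspects MSR_GS_BASE instead of the saved %cs
 * to decide whether swapgs is needed, for exceptions (NMI, MCE, debug)
 * that can fire at any point, including inside the kernel's own entry
 * code.
 */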

zeroentry divide_error do_divide_error
zeroentry overflow do_overflow
zeroentry bounds do_bounds
zeroentry invalid_op do_invalid_op
zeroentry device_not_available do_device_not_available
paranoiderrorentry double_fault do_double_fault
zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun
errorentry invalid_TSS do_invalid_TSS
errorentry segment_not_present do_segment_not_present
zeroentry spurious_interrupt_bug do_spurious_interrupt_bug
zeroentry coprocessor_error do_coprocessor_error
errorentry alignment_check do_alignment_check
zeroentry simd_coprocessor_error do_simd_coprocessor_error


	/* Reload gs selector with exception handling */
	/* edi:	new selector */
ENTRY(native_load_gs_index)
	CFI_STARTPROC
	pushfq_cfi
	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
	SWAPGS
gs_change:
	movl %edi,%gs
2:	mfence		/* workaround */
	SWAPGS
	popfq_cfi
	ret
	CFI_ENDPROC
END(native_load_gs_index)

	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS			/* switch back to user gs */
	xorl %eax,%eax
	movl %eax,%gs
	jmp  2b
	.previous

ENTRY(kernel_thread_helper)
	pushq $0		# fake return address
	CFI_STARTPROC
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	call *%rsi
	# exit
	mov %eax, %edi
	call do_exit
	ud2			# padding for call trace
	CFI_ENDPROC
END(kernel_thread_helper)

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	extern long execve(const char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(const char *name, char **argv,char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
ENTRY(kernel_execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	movq %rsp,%rcx
	call sys_execve
	movq %rax, RAX(%rsp)
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
END(kernel_execve)

/* Call softirq on interrupt stack. Interrupts are off. */
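/*
 * Note on the stack switch below: irq_count is initialized to -1, so the
 * incl yields zero (ZF set) only on the first, outermost entry; the cmove
 * therefore switches to the per-CPU interrupt stack exactly once, and
 * nested calls stay on the stack they are already on.
 */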
ENTRY(call_softirq)
	CFI_STARTPROC
	pushq_cfi %rbp
	CFI_REL_OFFSET rbp,0
	mov  %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl PER_CPU_VAR(irq_count)
	cmove PER_CPU_VAR(irq_stack_ptr),%rsp
	push  %rbp		# backlink for old unwinder
	call __do_softirq
	leaveq
	CFI_RESTORE		rbp
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
	decl PER_CPU_VAR(irq_count)
	ret
	CFI_ENDPROC
END(call_softirq)

#ifdef CONFIG_XEN
zeroentry xen_hypervisor_callback xen_do_hypervisor_callback

/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct pt_regs *)
	CFI_STARTPROC
	/*
	 * Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *)
	 * will see the correct pointer to the pt_regs
	 */
	movq %rdi, %rsp		# we don't return, adjust the stack frame
	CFI_ENDPROC
	DEFAULT_FRAME
11:	incl PER_CPU_VAR(irq_count)
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
	pushq %rbp		# backlink for old unwinder
	call xen_evtchn_do_upcall
	popq %rsp
	CFI_DEF_CFA_REGISTER rsp
	decl PER_CPU_VAR(irq_count)
	jmp  error_exit
	CFI_ENDPROC
END(xen_do_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
ENTRY(xen_failsafe_callback)
	INTR_FRAME 1 (6*8)
	/*CFI_REL_OFFSET gs,GS*/
	/*CFI_REL_OFFSET fs,FS*/
	/*CFI_REL_OFFSET es,ES*/
	/*CFI_REL_OFFSET ds,DS*/
	CFI_REL_OFFSET r11,8
	CFI_REL_OFFSET rcx,0
	movw %ds,%cx
	cmpw %cx,0x10(%rsp)
	CFI_REMEMBER_STATE
	jne 1f
	movw %es,%cx
	cmpw %cx,0x18(%rsp)
	jne 1f
	movw %fs,%cx
	cmpw %cx,0x20(%rsp)
	jne 1f
	movw %gs,%cx
	cmpw %cx,0x28(%rsp)
	jne 1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq_cfi $0	/* RIP */
	pushq_cfi %r11
	pushq_cfi %rcx
	jmp general_protection
	CFI_RESTORE_STATE
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq_cfi $0
	SAVE_ALL
	jmp error_exit
	CFI_ENDPROC
END(xen_failsafe_callback)

apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
	xen_hvm_callback_vector xen_evtchn_do_upcall

#endif /* CONFIG_XEN */

/*
 * Some functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"

paranoidzeroentry_ist debug do_debug DEBUG_STACK
paranoidzeroentry_ist int3 do_int3 DEBUG_STACK
paranoiderrorentry stack_segment do_stack_segment
#ifdef CONFIG_XEN
zeroentry xen_debug do_debug
zeroentry xen_int3 do_int3
errorentry xen_stack_segment do_stack_segment
#endif
errorentry general_protection do_general_protection
errorentry page_fault do_page_fault
#ifdef CONFIG_KVM_GUEST
errorentry async_page_fault do_async_page_fault
#endif
#ifdef CONFIG_X86_MCE
paranoidzeroentry machine_check *machine_check_vector(%rip)
#endif

/*
 * "Paranoid" exit path from exception stack.
 * Paranoid because this is used by NMIs and cannot take
 * any kernel state for granted.
 * We don't do kernel preemption checks here, because only
 * NMI should be common and it does not enable IRQs and
 * cannot get reschedule ticks.
 *
 * "trace" is 0 for the NMI handler only, because irq-tracing
 * is fundamentally NMI-unsafe. (we cannot change the soft and
 * hard flags at once, atomically)
 */

	/* ebx:	no swapgs flag */
ENTRY(paranoid_exit)
	DEFAULT_FRAME
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl %ebx,%ebx				/* swapgs needed? */
	jnz paranoid_restore
	testl $3,CS(%rsp)
	jnz   paranoid_userspace
paranoid_swapgs:
	TRACE_IRQS_IRETQ 0
	SWAPGS_UNSAFE_STACK
	RESTORE_ALL 8
	jmp irq_return
paranoid_restore:
	TRACE_IRQS_IRETQ 0
	RESTORE_ALL 8
	jmp irq_return
paranoid_userspace:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule
	movl %ebx,%edx			/* arg3: thread flags */
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi			/* arg2: oldset */
	movq %rsp,%rdi			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp paranoid_userspace
paranoid_schedule:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	jmp paranoid_userspace
	CFI_ENDPROC
END(paranoid_exit)

/*
 * Exception entry point. This expects an error code/orig_rax on the stack.
 * It returns the "no swapgs" flag in %ebx.
 */
ENTRY(error_entry)
	XCPT_FRAME
	CFI_ADJUST_CFA_OFFSET 15*8
	/* oldrax contains error code */
	cld
	movq_cfi rdi, RDI+8
	movq_cfi rsi, RSI+8
	movq_cfi rdx, RDX+8
	movq_cfi rcx, RCX+8
	movq_cfi rax, RAX+8
	movq_cfi  r8,  R8+8
	movq_cfi  r9,  R9+8
	movq_cfi r10, R10+8
	movq_cfi r11, R11+8
	movq_cfi rbx, RBX+8
	movq_cfi rbp, RBP+8
	movq_cfi r12, R12+8
	movq_cfi r13, R13+8
	movq_cfi r14, R14+8
	movq_cfi r15, R15+8
	xorl %ebx,%ebx
	testl $3,CS+8(%rsp)
	je error_kernelspace
error_swapgs:
	SWAPGS
error_sti:
	TRACE_IRQS_OFF
	ret

/*
 * There are two places in the kernel that can potentially fault with
 * usergs. Handle them here. The exception handlers after iret run with
 * kernel gs again, so don't set the user space flag. B stepping K8s
 * sometimes report a truncated RIP for IRET exceptions returning to
 * compat mode. Check for these here too.
 */
error_kernelspace:
	incl %ebx
	leaq irq_return(%rip),%rcx
	cmpq %rcx,RIP+8(%rsp)
	je error_swapgs
	movl %ecx,%eax	/* zero extend */
	cmpq %rax,RIP+8(%rsp)
	je bstep_iret
	cmpq $gs_change,RIP+8(%rsp)
	je error_swapgs
	jmp error_sti

bstep_iret:
	/* Fix truncated RIP */
	movq %rcx,RIP+8(%rsp)
	jmp error_swapgs
	CFI_ENDPROC
END(error_entry)


/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
ENTRY(error_exit)
	DEFAULT_FRAME
	movl %ebx,%eax
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne retint_kernel
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	movl $_TIF_WORK_MASK,%edi
	andl %edi,%edx
	jnz retint_careful
	jmp retint_swapgs
	CFI_ENDPROC
END(error_exit)

/*
 * Test if a given stack is an NMI stack or not.
 */
	.macro test_in_nmi reg stack nmi_ret normal_ret
	cmpq %\reg, \stack
	ja \normal_ret
	subq $EXCEPTION_STKSZ, %\reg
	cmpq %\reg, \stack
	jb \normal_ret
	jmp \nmi_ret
	.endm
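
	/*
	 * I.e. test_in_nmi jumps to \nmi_ret exactly when
	 * \reg - EXCEPTION_STKSZ <= \stack <= \reg, meaning \stack points
	 * within one exception-stack size below the top-of-NMI-stack value
	 * passed in \reg.
	 */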

	/* runs on exception stack */
ENTRY(nmi)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	/*
	 * We allow breakpoints in NMIs. If a breakpoint occurs, then
	 * the iretq it performs will take us out of NMI context.
	 * This means that we can have nested NMIs where the next
	 * NMI is using the top of the stack of the previous NMI. We
	 * can't let it execute because the nested NMI will corrupt the
	 * stack of the previous NMI. NMI handlers are not re-entrant
	 * anyway.
	 *
	 * To handle this case we do the following:
	 * Check a special location on the stack that contains
	 * a variable that is set when NMIs are executing.
	 * The interrupted task's stack is also checked to see if it
	 * is an NMI stack.
	 * If the variable is not set and the stack is not the NMI
	 * stack then:
	 *   o Set the special variable on the stack
	 *   o Copy the interrupt frame into a "saved" location on the stack
	 *   o Copy the interrupt frame into a "copy" location on the stack
	 *   o Continue processing the NMI
	 * If the variable is set or the previous stack is the NMI stack:
	 *   o Modify the "copy" location to jump to repeat_nmi
	 *   o return back to the first NMI
	 *
	 * Now on exit of the first NMI, we first clear the stack variable.
	 * The NMI stack will tell any nested NMIs at that point that it is
	 * nested. Then we pop the stack normally with iret, and if there was
	 * a nested NMI that updated the copy interrupt stack frame, a
	 * jump will be made to the repeat_nmi code that will handle the second
	 * NMI.
	 */

	/* Use %rdx as our temp variable throughout */
	pushq_cfi %rdx

	/*
	 * Check the special variable on the stack to see if NMIs are
	 * executing.
	 */
	cmpl $1, -8(%rsp)
	je nested_nmi

	/*
	 * Now test if the previous stack was an NMI stack.
	 * We need the double check. We check the NMI stack to satisfy the
	 * race when the first NMI clears the variable before returning.
	 * We check the variable because the first NMI could be in a
	 * breakpoint routine using a breakpoint stack.
	 */
	lea 6*8(%rsp), %rdx
	test_in_nmi rdx, 4*8(%rsp), nested_nmi, first_nmi

nested_nmi:
	/*
	 * Do nothing if we interrupted the fixup in repeat_nmi.
	 * It's about to repeat the NMI handler, so we are fine
	 * with ignoring this one.
	 */
	movq $repeat_nmi, %rdx
	cmpq 8(%rsp), %rdx
	ja 1f
	movq $end_repeat_nmi, %rdx
	cmpq 8(%rsp), %rdx
	ja nested_nmi_out

1:
	/* Set up the interrupted NMI's stack to jump to repeat_nmi */
	leaq -6*8(%rsp), %rdx
	movq %rdx, %rsp
	CFI_ADJUST_CFA_OFFSET 6*8
	pushq_cfi $__KERNEL_DS
	pushq_cfi %rdx
	pushfq_cfi
	pushq_cfi $__KERNEL_CS
	pushq_cfi $repeat_nmi

	/* Put stack back */
	addq $(11*8), %rsp
	CFI_ADJUST_CFA_OFFSET -11*8

nested_nmi_out:
	popq_cfi %rdx

	/* No need to check faults here */
	INTERRUPT_RETURN

first_nmi:
	/*
	 * Because nested NMIs will use the pushed location that we
	 * stored in rdx, we must keep that space available.
	 * Here's what our stack frame will look like:
	 * +-------------------------+
	 * | original SS             |
	 * | original Return RSP     |
	 * | original RFLAGS         |
	 * | original CS             |
	 * | original RIP            |
	 * +-------------------------+
	 * | temp storage for rdx    |
	 * +-------------------------+
	 * | NMI executing variable  |
	 * +-------------------------+
	 * | Saved SS                |
	 * | Saved Return RSP        |
	 * | Saved RFLAGS            |
	 * | Saved CS                |
	 * | Saved RIP               |
	 * +-------------------------+
	 * | copied SS               |
	 * | copied Return RSP       |
	 * | copied RFLAGS           |
	 * | copied CS               |
	 * | copied RIP              |
	 * +-------------------------+
	 * | pt_regs                 |
	 * +-------------------------+
	 *
	 * The saved RIP is used to fix up the copied RIP that a nested
	 * NMI may zero out. The original stack frame and the temp storage
	 * are also used by nested NMIs and cannot be trusted on exit.
	 */
	/* Set the NMI executing variable on the stack. */
	pushq_cfi $1

	/* Copy the stack frame to the Saved frame */
	.rept 5
	pushq_cfi 6*8(%rsp)
	.endr

	/* Make another copy, this one may be modified by nested NMIs */
	.rept 5
	pushq_cfi 4*8(%rsp)
	.endr

	/* Do not pop rdx, nested NMIs will corrupt it */
	movq 11*8(%rsp), %rdx

	/*
	 * Everything below this point can be preempted by a nested
	 * NMI if the first NMI took an exception. Repeated NMIs
	 * caused by an exception and nested NMI will start here, and
	 * can still be preempted by another NMI.
	 */
restart_nmi:
	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
	subq $ORIG_RAX-R15, %rsp
	CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
	/*
	 * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
	 * as we should not be calling schedule in NMI context.
	 * Even with normal interrupts enabled. An NMI should not be
	 * setting NEED_RESCHED or anything that normal interrupts and
	 * exceptions might do.
	 */
	call save_paranoid
	DEFAULT_FRAME 0
	/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
	movq %rsp,%rdi
	movq $-1,%rsi
	call do_nmi
	testl %ebx,%ebx			/* swapgs needed? */
	jnz nmi_restore
nmi_swapgs:
	SWAPGS_UNSAFE_STACK
nmi_restore:
	RESTORE_ALL 8
	/* Clear the NMI executing stack variable */
	movq $0, 10*8(%rsp)
	jmp irq_return
	CFI_ENDPROC
END(nmi)

/*
 * If an NMI hit an iret because of an exception or breakpoint,
 * it can lose its NMI context, and a nested NMI may come in.
 * In that case, the nested NMI will change the preempted NMI's
 * stack to jump to here when it does the final iret.
 */
repeat_nmi:
	INTR_FRAME
	/* Update the stack variable to say we are still in NMI */
	movq $1, 5*8(%rsp)

	/* copy the saved stack back to copy stack */
	.rept 5
	pushq_cfi 4*8(%rsp)
	.endr

	jmp restart_nmi
	CFI_ENDPROC
end_repeat_nmi:

ENTRY(ignore_sysret)
	CFI_STARTPROC
	mov $-ENOSYS,%eax
	sysret
	CFI_ENDPROC
END(ignore_sysret)

/*
 * End of kprobes section
 */
	.popsection