/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.txt
 *
 * A note on terminology:
 * - iret frame:	Architecture defined interrupt frame from SS to RIP
 *			at the top of the kernel process stack.
 *
 * Some macro usage:
 * - ENTRY/END:		Define functions in the symbol table.
 * - TRACE_IRQ_*:	Trace hardirq state for lock debugging.
 * - idtentry:		Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include "calling.h"
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/context_tracking.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <linux/err.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT	0x80000000
#define __AUDIT_ARCH_LE		0x40000000

.code64
.section .entry.text, "ax"

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
ENDPROC(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */

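/*
 * Note: bit 9 of RFLAGS is IF, the interrupt-enable flag, so the
 * "bt $9, EFLAGS(%rsp)" tests in the macros below check whether
 * interrupts were enabled in the saved flags image.
 */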
.macro TRACE_IRQS_IRETQ
#ifdef CONFIG_TRACE_IRQFLAGS
	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * When the dynamic function tracer is enabled it will add a breakpoint
 * to all locations that it is about to modify, sync CPUs, update
 * all the code, sync CPUs, then remove the breakpoints. During this
 * time, if lockdep is enabled, it might jump back into the debug handler
 * outside the updating of the IST protection (TRACE_IRQS_ON/OFF).
 *
 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
 * make sure the stack pointer does not get reset back to the top
 * of the debug stack, and instead just reuses the current stack.
 */
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)

.macro TRACE_IRQS_OFF_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_OFF
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_ON_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_ON
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_IRETQ_DEBUG
	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON_DEBUG
1:
.endm

#else
# define TRACE_IRQS_OFF_DEBUG		TRACE_IRQS_OFF
# define TRACE_IRQS_ON_DEBUG		TRACE_IRQS_ON
# define TRACE_IRQS_IRETQ_DEBUG		TRACE_IRQS_IRETQ
#endif

/*
 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax	system call number
 * rcx	return address
 * r11	saved rflags (note: r11 is a callee-clobbered register in the C ABI)
 * rdi	arg0
 * rsi	arg1
 * rdx	arg2
 * r10	arg3 (needs to be moved to rcx to conform to the C ABI)
 * r8	arg4
 * r9	arg5
 * (note: r12-r15, rbp, rbx are callee-preserved in the C ABI)
 *
 * Only called from user space.
 *
 * When the user can change pt_regs->foo, always force IRET. That is
 * because it deals with uncanonical addresses better. SYSRET has
 * trouble with them due to bugs in both AMD and Intel CPUs.
 */
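
/*
 * Purely illustrative: a raw user-space invocation that enters through
 * this path could look like
 *
 *	movl	$__NR_getpid, %eax	# system call number in rax
 *	syscall				# rcx := return rip, r11 := rflags
 *					# rax holds the result afterwards
 *
 * with up to six arguments in rdi, rsi, rdx, r10, r8 and r9,
 * exactly as tabulated above.
 */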

ENTRY(entry_SYSCALL_64)
	/*
	 * Interrupts are off on entry.
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
GLOBAL(entry_SYSCALL_64_after_swapgs)

	movq	%rsp, PER_CPU_VAR(rsp_scratch)
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/* Construct struct pt_regs on stack */
	pushq	$__USER_DS			/* pt_regs->ss */
	pushq	PER_CPU_VAR(rsp_scratch)	/* pt_regs->sp */
	/*
	 * Re-enable interrupts.
	 * We use 'rsp_scratch' as a scratch space, hence the irq-off block
	 * above must execute atomically in the face of possible
	 * interrupt-driven task preemption. We must enable interrupts only
	 * after we're done with using rsp_scratch:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq	%r11				/* pt_regs->flags */
	pushq	$__USER_CS			/* pt_regs->cs */
	pushq	%rcx				/* pt_regs->ip */
	pushq	%rax				/* pt_regs->orig_ax */
	pushq	%rdi				/* pt_regs->di */
	pushq	%rsi				/* pt_regs->si */
	pushq	%rdx				/* pt_regs->dx */
	pushq	%rcx				/* pt_regs->cx */
	pushq	$-ENOSYS			/* pt_regs->ax */
	pushq	%r8				/* pt_regs->r8 */
	pushq	%r9				/* pt_regs->r9 */
	pushq	%r10				/* pt_regs->r10 */
	pushq	%r11				/* pt_regs->r11 */
	sub	$(6*8), %rsp			/* pt_regs->bp, bx, r12-15 not saved */
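	/*
	 * At this point the frame matches struct pt_regs from ss down:
	 * ss, rsp, rflags, cs, rip, orig_ax, di, si, dx, cx,
	 * ax (pre-set to -ENOSYS), r8, r9, r10 and r11 are filled in,
	 * while bp, bx and r12-r15 are merely allocated and stay
	 * uninitialized until SAVE_EXTRA_REGS runs on a slow path.
	 */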

	testl	$_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	tracesys
entry_SYSCALL_64_fastpath:
#if __SYSCALL_MASK == ~0
	cmpq	$__NR_syscall_max, %rax
#else
	andl	$__SYSCALL_MASK, %eax
	cmpl	$__NR_syscall_max, %eax
#endif
	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
	movq	%r10, %rcx
	call	*sys_call_table(, %rax, 8)
	movq	%rax, RAX(%rsp)
1:
/*
 * Syscall return path ending with SYSRET (fast path).
 * Has incompletely filled pt_regs.
 */
	LOCKDEP_SYS_EXIT
	/*
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */
	DISABLE_INTERRUPTS(CLBR_NONE)

	/*
	 * We must check ti flags with interrupts (or at least preemption)
	 * off because we must *never* return to userspace without
	 * processing exit work that is enqueued if we're preempted here.
	 * In particular, returning to userspace with any of the one-shot
	 * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
	 * very bad.
	 */
	testl	$_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	int_ret_from_sys_call_irqs_off	/* Go to the slow path */

	RESTORE_C_REGS_EXCEPT_RCX_R11
	movq	RIP(%rsp), %rcx
	movq	EFLAGS(%rsp), %r11
	movq	RSP(%rsp), %rsp
	/*
	 * 64-bit SYSRET restores rip from rcx,
	 * rflags from r11 (but RF and VM bits are forced to 0),
	 * cs and ss are loaded from MSRs.
	 * Restoration of rflags re-enables interrupts.
	 *
	 * NB: On AMD CPUs with the X86_BUG_SYSRET_SS_ATTRS bug, the ss
	 * descriptor is not reinitialized. This means that we should
	 * avoid SYSRET with SS == NULL, which could happen if we schedule,
	 * exit the kernel, and re-enter using an interrupt vector. (All
	 * interrupt entries on x86_64 set SS to NULL.) We prevent that
	 * from happening by reloading SS in __switch_to. (Actually
	 * detecting the failure in 64-bit userspace is tricky but can be
	 * done.)
	 */
	USERGS_SYSRET64

GLOBAL(int_ret_from_sys_call_irqs_off)
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	jmp	int_ret_from_sys_call

/* Do syscall entry tracing */
tracesys:
	movq	%rsp, %rdi
	movl	$AUDIT_ARCH_X86_64, %esi
	call	syscall_trace_enter_phase1
	test	%rax, %rax
	jnz	tracesys_phase2			/* if needed, run the slow path */
	RESTORE_C_REGS_EXCEPT_RAX		/* else restore clobbered regs */
	movq	ORIG_RAX(%rsp), %rax
	jmp	entry_SYSCALL_64_fastpath	/* and return to the fast path */

tracesys_phase2:
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	movl	$AUDIT_ARCH_X86_64, %esi
	movq	%rax, %rdx
	call	syscall_trace_enter_phase2

	/*
	 * Reload registers from stack in case ptrace changed them.
	 * We don't reload %rax because syscall_trace_enter_phase2() returned
	 * the value it wants us to use in the table lookup.
	 */
	RESTORE_C_REGS_EXCEPT_RAX
	RESTORE_EXTRA_REGS
#if __SYSCALL_MASK == ~0
	cmpq	$__NR_syscall_max, %rax
#else
	andl	$__SYSCALL_MASK, %eax
	cmpl	$__NR_syscall_max, %eax
#endif
	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
	movq	%r10, %rcx			/* fixup for C */
	call	*sys_call_table(, %rax, 8)
	movq	%rax, RAX(%rsp)
1:
	/* Use IRET because user could have changed pt_regs->foo */

/*
 * Syscall return path ending with IRET.
 * Has correct iret frame.
 */
GLOBAL(int_ret_from_sys_call)
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	call	syscall_return_slowpath		/* returns with IRQs disabled */
	RESTORE_EXTRA_REGS
	TRACE_IRQS_IRETQ			/* we're about to change IF */

	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context.
	 */
	movq	RCX(%rsp), %rcx
	movq	RIP(%rsp), %r11
	cmpq	%rcx, %r11			/* RCX == RIP */
	jne	opportunistic_sysret_failed

	/*
	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
	 * in kernel space. This essentially lets the user take over
	 * the kernel, since userspace controls RSP.
	 *
	 * If width of "canonical tail" ever becomes variable, this will need
	 * to be updated to remain correct on both old and new CPUs.
	 */
	.ifne __VIRTUAL_MASK_SHIFT - 47
	.error "virtual address width changed -- SYSRET checks need update"
	.endif

	/* Change top 16 bits to be the sign-extension of 47th bit */
	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
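	/*
	 * Worked example (with __VIRTUAL_MASK_SHIFT == 47, so a 16-bit
	 * shift): a canonical RIP such as 0x00007fffffffe000 survives the
	 * shl/sar pair unchanged, while a non-canonical value such as
	 * 0x0000800000000000 becomes 0xffff800000000000 and is caught by
	 * the comparison below.
	 */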

	/* If this changed %rcx, it was not canonical */
	cmpq	%rcx, %r11
	jne	opportunistic_sysret_failed

	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
	jne	opportunistic_sysret_failed

	movq	R11(%rsp), %r11
	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
	jne	opportunistic_sysret_failed

	/*
	 * SYSRET can't restore RF. SYSRET can restore TF, but unlike IRET,
	 * restoring TF results in a trap from userspace immediately after
	 * SYSRET. This would cause an infinite loop whenever #DB happens
	 * with register state that satisfies the opportunistic SYSRET
	 * conditions. For example, single-stepping this user code:
	 *
	 *           movq	$stuck_here, %rcx
	 *           pushfq
	 *           popq	%r11
	 *   stuck_here:
	 *
	 * would never get past 'stuck_here'.
	 */
	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
	jnz	opportunistic_sysret_failed

	/* nothing to check for RSP */

	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
	jne	opportunistic_sysret_failed

	/*
	 * We win! This label is here just for ease of understanding
	 * perf profiles. Nothing jumps here.
	 */
syscall_return_via_sysret:
	/* rcx and r11 are already restored (see code above) */
	RESTORE_C_REGS_EXCEPT_RCX_R11
	movq	RSP(%rsp), %rsp
	USERGS_SYSRET64

opportunistic_sysret_failed:
	SWAPGS
	jmp	restore_c_regs_and_iret
END(entry_SYSCALL_64)


	.macro FORK_LIKE func
ENTRY(stub_\func)
	SAVE_EXTRA_REGS 8
	jmp	sys_\func
END(stub_\func)
	.endm
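
	/*
	 * For instance, "FORK_LIKE clone" below expands to:
	 *
	 *	ENTRY(stub_clone)
	 *		SAVE_EXTRA_REGS 8
	 *		jmp	sys_clone
	 *	END(stub_clone)
	 *
	 * i.e. complete the pt_regs frame (offset 8 skips the return
	 * address pushed by the dispatching call), then tail-call the
	 * C handler.
	 */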

	FORK_LIKE  clone
	FORK_LIKE  fork
	FORK_LIKE  vfork

ENTRY(stub_execve)
	call	sys_execve
return_from_execve:
	testl	%eax, %eax
	jz	1f
	/* exec failed, can use fast SYSRET code path in this case */
	ret
1:
	/* must use IRET code path (pt_regs->cs may have changed) */
	addq	$8, %rsp
	ZERO_EXTRA_REGS
	movq	%rax, RAX(%rsp)
	jmp	int_ret_from_sys_call
END(stub_execve)
/*
 * Remaining execve stubs are only 7 bytes long.
 * ENTRY() often aligns to 16 bytes, which in this case has no benefits.
 */
	.align	8
GLOBAL(stub_execveat)
	call	sys_execveat
	jmp	return_from_execve
END(stub_execveat)

#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
	.align	8
GLOBAL(stub_x32_execve)
GLOBAL(stub32_execve)
	call	compat_sys_execve
	jmp	return_from_execve
END(stub32_execve)
END(stub_x32_execve)
	.align	8
GLOBAL(stub_x32_execveat)
GLOBAL(stub32_execveat)
	call	compat_sys_execveat
	jmp	return_from_execve
END(stub32_execveat)
END(stub_x32_execveat)
#endif

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	/*
	 * SAVE_EXTRA_REGS result is not normally needed:
	 * sigreturn overwrites all pt_regs->GPREGS.
	 * But sigreturn can fail (!), and there is no easy way to detect that.
	 * To make sure RESTORE_EXTRA_REGS doesn't restore garbage on error,
	 * we SAVE_EXTRA_REGS here.
	 */
	SAVE_EXTRA_REGS 8
	call	sys_rt_sigreturn
return_from_stub:
	addq	$8, %rsp
	RESTORE_EXTRA_REGS
	movq	%rax, RAX(%rsp)
	jmp	int_ret_from_sys_call
END(stub_rt_sigreturn)

#ifdef CONFIG_X86_X32_ABI
ENTRY(stub_x32_rt_sigreturn)
	SAVE_EXTRA_REGS 8
	call	sys32_x32_rt_sigreturn
	jmp	return_from_stub
END(stub_x32_rt_sigreturn)
#endif

/*
 * A newly forked process directly context switches into this address.
 *
 * rdi: prev task we switched from
 */
ENTRY(ret_from_fork)

	LOCK ; btr $TIF_FORK, TI_flags(%r8)

	pushq	$0x0002
	popfq					/* reset kernel eflags */

	call	schedule_tail			/* rdi: 'prev' task parameter */

	RESTORE_EXTRA_REGS

	testb	$3, CS(%rsp)			/* from kernel_thread? */

	/*
	 * By the time we get here, we have no idea whether our pt_regs,
	 * ti flags, and ti status came from the 64-bit SYSCALL fast path,
	 * the slow path, or one of the 32-bit compat paths.
	 * Use IRET code path to return, since it can safely handle
	 * all of the above.
	 */
	jnz	int_ret_from_sys_call

	/*
	 * We came from kernel_thread
	 * nb: we depend on RESTORE_EXTRA_REGS above
	 */
	movq	%rbp, %rdi
	call	*%rbx
	movl	$0, RAX(%rsp)
	RESTORE_EXTRA_REGS
	jmp	int_ret_from_sys_call
END(ret_from_fork)

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
    vector=vector+1
	jmp	common_interrupt
	.align	8
    .endr
END(irq_entries_start)
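
/*
 * Encoding example: for vector 0x20 (FIRST_EXTERNAL_VECTOR) the stub
 * pushes ~0x20 + 0x80 = 0x5f, which fits in a signed byte and keeps
 * each stub within its 8-byte slot.  common_interrupt then adds -0x80,
 * yielding ~0x20 again, from which do_IRQ recovers the vector number.
 */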

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
	.macro interrupt func
	cld
	ALLOC_PT_GPREGS_ON_STACK
	SAVE_C_REGS
	SAVE_EXTRA_REGS

	testb	$3, CS(%rsp)
	jz	1f
	SWAPGS
1:
	/*
	 * Save previous stack pointer, optionally switch to interrupt stack.
	 * irq_count is used to check if a CPU is already on an interrupt stack
	 * or not. While this is essentially redundant with preempt_count it is
	 * a little cheaper to use a separate counter in the PDA (short of
	 * moving irq_enter into assembly, which would be too much work)
	 */
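	/*
	 * irq_count is -1 whenever no interrupt stack is in use, so the
	 * incl below sets ZF only on the outermost entry; cmovzq then
	 * installs the per-CPU interrupt stack in exactly that case.
	 */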
	movq	%rsp, %rdi
	incl	PER_CPU_VAR(irq_count)
	cmovzq	PER_CPU_VAR(irq_stack_ptr), %rsp
	pushq	%rdi
	/* We entered an interrupt context - irqs are off: */
	TRACE_IRQS_OFF

	call	\func				/* rdi points to pt_regs */
	.endm

/*
 * The interrupt stubs push (~vector+0x80) onto the stack and
 * then jump to common_interrupt.
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
	interrupt do_IRQ
	/* 0(%rsp): old RSP */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl	PER_CPU_VAR(irq_count)

	/* Restore saved previous stack */
	popq	%rsp

	testb	$3, CS(%rsp)
	jz	retint_kernel
/* Interrupt came from user space */
GLOBAL(retint_user)
	GET_THREAD_INFO(%rcx)

	/* %rcx: thread info. Interrupts are off. */
retint_with_reschedule:
	movl	$_TIF_WORK_MASK, %edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl	TI_flags(%rcx), %edx
	andl	%edi, %edx
	jnz	retint_careful

retint_swapgs:					/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ

	SWAPGS
	jmp	restore_regs_and_iret

/* Returning to kernel space */
retint_kernel:
#ifdef CONFIG_PREEMPT
	/* Interrupts are off */
	/* Check if we need preemption */
	bt	$9, EFLAGS(%rsp)		/* were interrupts off? */
	jnc	1f
0:	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	1f
	call	preempt_schedule_irq
	jmp	0b
1:
#endif
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ

/*
 * At this label, code paths which return to kernel and to user,
 * which come from interrupts/exception and from syscalls, merge.
 */
restore_regs_and_iret:
	RESTORE_EXTRA_REGS
restore_c_regs_and_iret:
	RESTORE_C_REGS
	REMOVE_PT_GPREGS_FROM_STACK 8
	INTERRUPT_RETURN

ENTRY(native_iret)
	/*
	 * Are we returning to a stack segment from the LDT? Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
	testb	$4, (SS-RIP)(%rsp)
	jnz	native_irq_return_ldt
#endif

.global native_irq_return_iret
native_irq_return_iret:
	/*
	 * This may fault. Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret. These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in do_double_fault.
	 * Other faults here are fatal.
	 */
	iretq

#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
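	/*
	 * The iret frame (RIP, CS, RFLAGS, RSP, SS) plus a scratch slot
	 * for RAX is copied out through the per-CPU espfix_waddr alias
	 * below, and RSP is then rebuilt from PER_CPU_VAR(espfix_stack),
	 * so the iretq runs from the espfix area instead of a stack whose
	 * high RSP bits would leak to a 16-bit user stack segment.
	 */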
	pushq	%rax
	pushq	%rdi
	SWAPGS
	movq	PER_CPU_VAR(espfix_waddr), %rdi
	movq	%rax, (0*8)(%rdi)		/* RAX */
	movq	(2*8)(%rsp), %rax		/* RIP */
	movq	%rax, (1*8)(%rdi)
	movq	(3*8)(%rsp), %rax		/* CS */
	movq	%rax, (2*8)(%rdi)
	movq	(4*8)(%rsp), %rax		/* RFLAGS */
	movq	%rax, (3*8)(%rdi)
	movq	(6*8)(%rsp), %rax		/* SS */
	movq	%rax, (5*8)(%rdi)
	movq	(5*8)(%rsp), %rax		/* RSP */
	movq	%rax, (4*8)(%rdi)
	andl	$0xffff0000, %eax
	popq	%rdi
	orq	PER_CPU_VAR(espfix_stack), %rax
	SWAPGS
	movq	%rax, %rsp
	popq	%rax
	jmp	native_irq_return_iret
#endif

	/* edi: workmask, edx: work */
retint_careful:
	bt	$TIF_NEED_RESCHED, %edx
	jnc	retint_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq	%rdi
	SCHEDULE_USER
	popq	%rdi
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp	retint_check

retint_signal:
	testl	$_TIF_DO_NOTIFY_MASK, %edx
	jz	retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	movq	$-1, ORIG_RAX(%rsp)
	xorl	%esi, %esi			/* oldset */
	movq	%rsp, %rdi			/* &pt_regs */
	call	do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	jmp	retint_with_reschedule

END(common_interrupt)

/*
 * APIC interrupts.
 */
.macro apicinterrupt3 num sym do_sym
ENTRY(\sym)
	ASM_CLAC
	pushq	$~(\num)
.Lcommon_\sym:
	interrupt \do_sym
	jmp	ret_from_intr
END(\sym)
.endm

#ifdef CONFIG_TRACING
#define trace(sym) trace_##sym
#define smp_trace(sym) smp_trace_##sym

.macro trace_apicinterrupt num sym
apicinterrupt3 \num trace(\sym) smp_trace(\sym)
.endm
#else
.macro trace_apicinterrupt num sym do_sym
.endm
#endif

.macro apicinterrupt num sym do_sym
apicinterrupt3 \num \sym \do_sym
trace_apicinterrupt \num \sym
.endm

#ifdef CONFIG_SMP
apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR		irq_move_cleanup_interrupt	smp_irq_move_cleanup_interrupt
apicinterrupt3 REBOOT_VECTOR			reboot_interrupt		smp_reboot_interrupt
#endif

#ifdef CONFIG_X86_UV
apicinterrupt3 UV_BAU_MESSAGE			uv_bau_message_intr1		uv_bau_message_interrupt
#endif

apicinterrupt LOCAL_TIMER_VECTOR		apic_timer_interrupt		smp_apic_timer_interrupt
apicinterrupt X86_PLATFORM_IPI_VECTOR		x86_platform_ipi		smp_x86_platform_ipi

#ifdef CONFIG_HAVE_KVM
apicinterrupt3 POSTED_INTR_VECTOR		kvm_posted_intr_ipi		smp_kvm_posted_intr_ipi
apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR	kvm_posted_intr_wakeup_ipi	smp_kvm_posted_intr_wakeup_ipi
#endif

#ifdef CONFIG_X86_MCE_THRESHOLD
apicinterrupt THRESHOLD_APIC_VECTOR		threshold_interrupt		smp_threshold_interrupt
#endif

#ifdef CONFIG_X86_MCE_AMD
apicinterrupt DEFERRED_ERROR_VECTOR		deferred_error_interrupt	smp_deferred_error_interrupt
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
apicinterrupt THERMAL_APIC_VECTOR		thermal_interrupt		smp_thermal_interrupt
#endif

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR	call_function_single_interrupt	smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR		call_function_interrupt		smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR			reschedule_interrupt		smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR			error_interrupt			smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR		spurious_interrupt		smp_spurious_interrupt

#ifdef CONFIG_IRQ_WORK
apicinterrupt IRQ_WORK_VECTOR			irq_work_interrupt		smp_irq_work_interrupt
#endif

/*
 * Exception entry points.
 */
#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
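/*
 * CPU_TSS_IST(x) resolves to the address of this CPU's tss.ist[x - 1]:
 * TSS_ist is the offset of the first Interrupt Stack Table slot within
 * the TSS, and each of the seven IST slots holds an 8-byte stack pointer.
 */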

.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
ENTRY(\sym)
	/* Sanity check */
	.if \shift_ist != -1 && \paranoid == 0
	.error "using shift_ist requires paranoid=1"
	.endif

	ASM_CLAC
	PARAVIRT_ADJUST_EXCEPTION_FRAME

	.ifeq \has_error_code
	pushq	$-1				/* ORIG_RAX: no syscall to restart */
	.endif

	ALLOC_PT_GPREGS_ON_STACK

	.if \paranoid
	.if \paranoid == 1
	testb	$3, CS(%rsp)			/* If coming from userspace, switch stacks */
	jnz	1f
	.endif
	call	paranoid_entry
	.else
	call	error_entry
	.endif
	/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */

	.if \paranoid
	.if \shift_ist != -1
	TRACE_IRQS_OFF_DEBUG			/* reload IDT in case of recursion */
	.else
	TRACE_IRQS_OFF
	.endif
	.endif

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	.if \shift_ist != -1
	subq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	call	\do_sym

	.if \shift_ist != -1
	addq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	/* these procedures expect "no swapgs" flag in ebx */
	.if \paranoid
	jmp	paranoid_exit
	.else
	jmp	error_exit
	.endif

	.if \paranoid == 1
	/*
	 * Paranoid entry from userspace. Switch stacks and treat it
	 * as a normal entry. This means that paranoid handlers
	 * run in real process context if user_mode(regs).
	 */
1:
	call	error_entry

	movq	%rsp, %rdi			/* pt_regs pointer */
	call	sync_regs
	movq	%rax, %rsp			/* switch stack */

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	call	\do_sym

	jmp	error_exit			/* %ebx: no swapgs flag */
	.endif
END(\sym)
.endm

#ifdef CONFIG_TRACING
.macro trace_idtentry sym do_sym has_error_code:req
idtentry trace(\sym) trace(\do_sym) has_error_code=\has_error_code
idtentry \sym \do_sym has_error_code=\has_error_code
.endm
#else
.macro trace_idtentry sym do_sym has_error_code:req
idtentry \sym \do_sym has_error_code=\has_error_code
.endm
#endif

idtentry divide_error			do_divide_error			has_error_code=0
idtentry overflow			do_overflow			has_error_code=0
idtentry bounds				do_bounds			has_error_code=0
idtentry invalid_op			do_invalid_op			has_error_code=0
idtentry device_not_available		do_device_not_available		has_error_code=0
idtentry double_fault			do_double_fault			has_error_code=1 paranoid=2
idtentry coprocessor_segment_overrun	do_coprocessor_segment_overrun	has_error_code=0
idtentry invalid_TSS			do_invalid_TSS			has_error_code=1
idtentry segment_not_present		do_segment_not_present		has_error_code=1
idtentry spurious_interrupt_bug		do_spurious_interrupt_bug	has_error_code=0
idtentry coprocessor_error		do_coprocessor_error		has_error_code=0
idtentry alignment_check		do_alignment_check		has_error_code=1
idtentry simd_coprocessor_error		do_simd_coprocessor_error	has_error_code=0


	/*
	 * Reload gs selector with exception handling
	 * edi: new selector
	 */
ENTRY(native_load_gs_index)
	pushfq
	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
	SWAPGS
gs_change:
	movl	%edi, %gs
2:	mfence					/* workaround */
	SWAPGS
	popfq
	ret
END(native_load_gs_index)

	_ASM_EXTABLE(gs_change, bad_gs)
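	/*
	 * _ASM_EXTABLE records a (faulting insn, fixup) pair in the
	 * exception table: if the "movl %edi, %gs" at gs_change faults,
	 * the fault handler transfers control to bad_gs below instead
	 * of treating the fault as fatal.
	 */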
	.section .fixup, "ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS					/* switch back to user gs */
	xorl	%eax, %eax
	movl	%eax, %gs
	jmp	2b
	.previous

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack)
	pushq	%rbp
	mov	%rsp, %rbp
	incl	PER_CPU_VAR(irq_count)
	cmove	PER_CPU_VAR(irq_stack_ptr), %rsp
	push	%rbp				/* frame pointer backlink */
	call	__do_softirq
	leaveq
	decl	PER_CPU_VAR(irq_count)
	ret
END(do_softirq_own_stack)

#ifdef CONFIG_XEN
idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0

/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
ENTRY(xen_do_hypervisor_callback)		/* do_hypervisor_callback(struct pt_regs *) */

	/*
	 * Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *)
	 * will see the correct pointer to the pt_regs
	 */
	movq	%rdi, %rsp			/* we don't return, adjust the stack frame */
11:	incl	PER_CPU_VAR(irq_count)
	movq	%rsp, %rbp
	cmovzq	PER_CPU_VAR(irq_stack_ptr), %rsp
	pushq	%rbp				/* frame pointer backlink */
	call	xen_evtchn_do_upcall
	popq	%rsp
	decl	PER_CPU_VAR(irq_count)
#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
#endif
	jmp	error_exit
END(xen_do_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
ENTRY(xen_failsafe_callback)
	movl	%ds, %ecx
	cmpw	%cx, 0x10(%rsp)
	jne	1f
	movl	%es, %ecx
	cmpw	%cx, 0x18(%rsp)
	jne	1f
	movl	%fs, %ecx
	cmpw	%cx, 0x20(%rsp)
	jne	1f
	movl	%gs, %ecx
	cmpw	%cx, 0x28(%rsp)
	jne	1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	pushq	$0				/* RIP */
	pushq	%r11
	pushq	%rcx
	jmp	general_protection
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	pushq	$-1				/* orig_ax = -1 => not a system call */
	ALLOC_PT_GPREGS_ON_STACK
	SAVE_C_REGS
	SAVE_EXTRA_REGS
	jmp	error_exit
END(xen_failsafe_callback)

apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	xen_hvm_callback_vector xen_evtchn_do_upcall

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	hyperv_callback_vector hyperv_vector_handler
#endif /* CONFIG_HYPERV */

idtentry debug			do_debug		has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
idtentry int3			do_int3			has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
idtentry stack_segment		do_stack_segment	has_error_code=1

#ifdef CONFIG_XEN
idtentry xen_debug		do_debug		has_error_code=0
idtentry xen_int3		do_int3			has_error_code=0
idtentry xen_stack_segment	do_stack_segment	has_error_code=1
#endif

idtentry general_protection	do_general_protection	has_error_code=1
trace_idtentry page_fault	do_page_fault		has_error_code=1

#ifdef CONFIG_KVM_GUEST
idtentry async_page_fault	do_async_page_fault	has_error_code=1
#endif

#ifdef CONFIG_X86_MCE
idtentry machine_check					has_error_code=0	paranoid=1 do_sym=*machine_check_vector(%rip)
#endif

/*
 * Save all registers in pt_regs, and switch gs if needed.
 * Use slow, but surefire "are we in kernel?" check.
 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
 */
ENTRY(paranoid_entry)
	cld
	SAVE_C_REGS 8
	SAVE_EXTRA_REGS 8
	movl	$1, %ebx
	movl	$MSR_GS_BASE, %ecx
	rdmsr
	testl	%edx, %edx
	js	1f				/* negative -> in kernel */
	SWAPGS
	xorl	%ebx, %ebx
1:	ret
END(paranoid_entry)

/*
 * "Paranoid" exit path from exception stack. This is invoked
 * only on return from non-NMI IST interrupts that came
 * from kernel space.
 *
 * We may be returning to very strange contexts (e.g. very early
 * in syscall entry), so checking for preemption here would
 * be complicated. Fortunately, there's no good reason to try
 * to handle preemption here.
 *
 * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
 */
ENTRY(paranoid_exit)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF_DEBUG
	testl	%ebx, %ebx			/* swapgs needed? */
	jnz	paranoid_exit_no_swapgs
	TRACE_IRQS_IRETQ
	SWAPGS_UNSAFE_STACK
	jmp	paranoid_exit_restore
paranoid_exit_no_swapgs:
	TRACE_IRQS_IRETQ_DEBUG
paranoid_exit_restore:
	RESTORE_EXTRA_REGS
	RESTORE_C_REGS
	REMOVE_PT_GPREGS_FROM_STACK 8
	INTERRUPT_RETURN
END(paranoid_exit)

/*
 * Save all registers in pt_regs, and switch gs if needed.
 * Return: EBX=0: came from user mode; EBX=1: otherwise
 */
ENTRY(error_entry)
	cld
	SAVE_C_REGS 8
	SAVE_EXTRA_REGS 8
	xorl	%ebx, %ebx
	testb	$3, CS+8(%rsp)
	jz	.Lerror_kernelspace

.Lerror_entry_from_usermode_swapgs:
	/*
	 * We entered from user mode or we're pretending to have entered
	 * from user mode due to an IRET fault.
	 */
	SWAPGS

.Lerror_entry_from_usermode_after_swapgs:
.Lerror_entry_done:
	TRACE_IRQS_OFF
	ret

	/*
	 * There are two places in the kernel that can potentially fault with
	 * usergs. Handle them here. B stepping K8s sometimes report a
	 * truncated RIP for IRET exceptions returning to compat mode. Check
	 * for these here too.
	 */
.Lerror_kernelspace:
	incl	%ebx
	leaq	native_irq_return_iret(%rip), %rcx
	cmpq	%rcx, RIP+8(%rsp)
	je	.Lerror_bad_iret
	movl	%ecx, %eax			/* zero extend */
	cmpq	%rax, RIP+8(%rsp)
	je	.Lbstep_iret
	cmpq	$gs_change, RIP+8(%rsp)
	jne	.Lerror_entry_done

	/*
	 * hack: gs_change can fail with user gsbase. If this happens, fix up
	 * gsbase and proceed. We'll fix up the exception and land in
	 * gs_change's error handler with kernel gsbase.
	 */
	jmp	.Lerror_entry_from_usermode_swapgs

.Lbstep_iret:
	/* Fix truncated RIP */
	movq	%rcx, RIP+8(%rsp)
	/* fall through */

.Lerror_bad_iret:
	/*
	 * We came from an IRET to user mode, so we have user gsbase.
	 * Switch to kernel gsbase:
	 */
	SWAPGS

	/*
	 * Pretend that the exception came from user mode: set up pt_regs
	 * as if we faulted immediately after IRET and clear EBX so that
	 * error_exit knows that we will be returning to user mode.
	 */
	mov	%rsp, %rdi
	call	fixup_bad_iret
	mov	%rax, %rsp
	decl	%ebx
	jmp	.Lerror_entry_from_usermode_after_swapgs
END(error_entry)


/*
 * On entry, EBX is a "return to kernel mode" flag:
 *   1: already in kernel mode, don't need SWAPGS
 *   0: user gsbase is loaded, we need SWAPGS and standard preparation
 *      for return to usermode
 */
ENTRY(error_exit)
	movl	%ebx, %eax
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl	%eax, %eax
	jnz	retint_kernel
	jmp	retint_user
END(error_exit)

/* Runs on exception stack */
ENTRY(nmi)
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	/*
	 * We allow breakpoints in NMIs. If a breakpoint occurs, then
	 * the iretq it performs will take us out of NMI context.
	 * This means that we can have nested NMIs where the next
	 * NMI is using the top of the stack of the previous NMI. We
	 * can't let it execute because the nested NMI will corrupt the
	 * stack of the previous NMI. NMI handlers are not re-entrant
	 * anyway.
	 *
	 * To handle this case we do the following:
	 *   Check a special location on the stack that contains a
	 *   variable that is set when NMIs are executing.
	 *   The interrupted task's stack is also checked to see if it
	 *   is an NMI stack.
	 *   If the variable is not set and the stack is not the NMI
	 *   stack then:
	 *     o Set the special variable on the stack
	 *     o Copy the interrupt frame into a "saved" location on the stack
	 *     o Copy the interrupt frame into a "copy" location on the stack
	 *     o Continue processing the NMI
	 *   If the variable is set or the previous stack is the NMI stack:
	 *     o Modify the "copy" location to jump to repeat_nmi
	 *     o return back to the first NMI
	 *
	 * Now on exit of the first NMI, we first clear the stack variable.
	 * The NMI stack will tell any nested NMIs at that point that it is
	 * nested. Then we pop the stack normally with iret, and if there was
	 * a nested NMI that updated the copy interrupt stack frame, a
	 * jump will be made to the repeat_nmi code that will handle the
	 * second NMI.
	 */

	/* Use %rdx as our temp variable throughout */
	pushq	%rdx

	/*
	 * If %cs was not the kernel segment, then the NMI triggered in user
	 * space, which means it is definitely not nested.
	 */
	cmpl	$__KERNEL_CS, 16(%rsp)
	jne	first_nmi

	/*
	 * Check the special variable on the stack to see if NMIs are
	 * executing.
	 */
	cmpl	$1, -8(%rsp)
	je	nested_nmi

	/*
	 * Now test if the previous stack was an NMI stack.
	 * We need the double check. We check the NMI stack to satisfy the
	 * race when the first NMI clears the variable before returning.
	 * We check the variable because the first NMI could be in a
	 * breakpoint routine using a breakpoint stack.
	 */
	lea	6*8(%rsp), %rdx
	/* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
	cmpq	%rdx, 4*8(%rsp)
	/* If the stack pointer is above the NMI stack, this is a normal NMI */
	ja	first_nmi

	subq	$EXCEPTION_STKSZ, %rdx
	cmpq	%rdx, 4*8(%rsp)
	/* If it is below the NMI stack, it is a normal NMI */
	jb	first_nmi
	/* Ah, it is within the NMI stack, treat it as nested */

nested_nmi:
	/*
	 * Do nothing if we interrupted the fixup in repeat_nmi.
	 * It's about to repeat the NMI handler, so we are fine
	 * with ignoring this one.
	 */
	movq	$repeat_nmi, %rdx
	cmpq	8(%rsp), %rdx
	ja	1f
	movq	$end_repeat_nmi, %rdx
	cmpq	8(%rsp), %rdx
	ja	nested_nmi_out

1:
	/* Set up the interrupted NMIs stack to jump to repeat_nmi */
	leaq	-1*8(%rsp), %rdx
	movq	%rdx, %rsp
	leaq	-10*8(%rsp), %rdx
	pushq	$__KERNEL_DS
	pushq	%rdx
	pushfq
	pushq	$__KERNEL_CS
	pushq	$repeat_nmi

	/* Put stack back */
	addq	$(6*8), %rsp

nested_nmi_out:
	popq	%rdx

	/* No need to check faults here */
	INTERRUPT_RETURN

first_nmi:
	/*
	 * Because nested NMIs will use the pushed location that we
	 * stored in rdx, we must keep that space available.
	 * Here's what our stack frame will look like:
	 * +-------------------------+
	 * | original SS             |
	 * | original Return RSP     |
	 * | original RFLAGS         |
	 * | original CS             |
	 * | original RIP            |
	 * +-------------------------+
	 * | temp storage for rdx    |
	 * +-------------------------+
	 * | NMI executing variable  |
	 * +-------------------------+
	 * | copied SS               |
	 * | copied Return RSP       |
	 * | copied RFLAGS           |
	 * | copied CS               |
	 * | copied RIP              |
	 * +-------------------------+
	 * | Saved SS                |
	 * | Saved Return RSP        |
	 * | Saved RFLAGS            |
	 * | Saved CS                |
	 * | Saved RIP               |
	 * +-------------------------+
	 * | pt_regs                 |
	 * +-------------------------+
	 *
	 * The saved stack frame is used to fix up the copied stack frame
	 * that a nested NMI may change to make the interrupted NMI iret
	 * jump to repeat_nmi. The original stack frame and the temp storage
	 * is also used by nested NMIs and can not be trusted on exit.
	 */
	/* Do not pop rdx, nested NMIs will corrupt that part of the stack */
	movq	(%rsp), %rdx

	/* Set the NMI executing variable on the stack. */
	pushq	$1

	/* Leave room for the "copied" frame */
	subq	$(5*8), %rsp

	/* Copy the stack frame to the Saved frame */
	.rept 5
	pushq	11*8(%rsp)
	.endr

	/* Everything up to here is safe from nested NMIs */

	/*
	 * If there was a nested NMI, the first NMI's iret will return
	 * here. But NMIs are still enabled and we can take another
	 * nested NMI. The nested NMI checks the interrupted RIP to see
	 * if it is between repeat_nmi and end_repeat_nmi, and if so
	 * it will just return, as we are about to repeat an NMI anyway.
	 * This makes it safe to copy to the stack frame that a nested
	 * NMI will update.
	 */
repeat_nmi:
	/*
	 * Update the stack variable to say we are still in NMI (the update
	 * is benign for the non-repeat case, where 1 was pushed just above
	 * to this very stack slot).
	 */
	movq	$1, 10*8(%rsp)

	/* Make another copy, this one may be modified by nested NMIs */
	addq	$(10*8), %rsp
	.rept 5
	pushq	-6*8(%rsp)
	.endr
	subq	$(5*8), %rsp
end_repeat_nmi:

	/*
	 * Everything below this point can be preempted by a nested
	 * NMI if the first NMI took an exception and reset our iret stack
	 * so that we repeat another NMI.
	 */
	pushq	$-1				/* ORIG_RAX: no syscall to restart */
	ALLOC_PT_GPREGS_ON_STACK

	/*
	 * Use paranoid_entry to handle SWAPGS, but no need to use
	 * paranoid_exit as we should not be calling schedule in NMI
	 * context. Even with normal interrupts enabled, an NMI should
	 * not be setting NEED_RESCHED or anything that normal interrupts
	 * and exceptions might do.
	 */
	call	paranoid_entry

	/*
	 * Save off the CR2 register. If we take a page fault in the NMI then
	 * it could corrupt the CR2 value. If the NMI preempts a page fault
	 * handler before it was able to read the CR2 register, and then the
	 * NMI itself takes a page fault, the page fault that was preempted
	 * will read the information from the NMI page fault and not the
	 * origin fault. Save it off and restore it if it changes.
	 * Use the r12 callee-saved register.
	 */
	movq	%cr2, %r12

	/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
	movq	%rsp, %rdi
	movq	$-1, %rsi
	call	do_nmi

	/* Did the NMI take a page fault? Restore cr2 if it did */
	movq	%cr2, %rcx
	cmpq	%rcx, %r12
	je	1f
	movq	%r12, %cr2
1:
	testl	%ebx, %ebx			/* swapgs needed? */
	jnz	nmi_restore
nmi_swapgs:
	SWAPGS_UNSAFE_STACK
nmi_restore:
	RESTORE_EXTRA_REGS
	RESTORE_C_REGS
	/* Pop the extra iret frame at once */
	REMOVE_PT_GPREGS_FROM_STACK 6*8

	/* Clear the NMI executing stack variable */
	movq	$0, 5*8(%rsp)
	INTERRUPT_RETURN
END(nmi)

ENTRY(ignore_sysret)
	mov	$-ENOSYS, %eax
	sysret
END(ignore_sysret)