/*
 * Compatibility mode system call entry point for x86-64.
 *
 * Copyright 2000-2002 Andi Kleen, SuSE Labs.
 */
d36f9479 | 6 | #include "calling.h" |
e2d5df93 | 7 | #include <asm/asm-offsets.h> |
1da177e4 LT |
8 | #include <asm/current.h> |
9 | #include <asm/errno.h> | |
54ad726c IM |
10 | #include <asm/ia32_unistd.h> |
11 | #include <asm/thread_info.h> | |
1da177e4 | 12 | #include <asm/segment.h> |
2601e64d | 13 | #include <asm/irqflags.h> |
1ce6f868 | 14 | #include <asm/asm.h> |
63bcff2a | 15 | #include <asm/smap.h> |
d7eb5f9e | 16 | #include <asm/spec_ctrl.h> |
1da177e4 | 17 | #include <linux/linkage.h> |
d7e7528b | 18 | #include <linux/err.h> |
1da177e4 | 19 | |
ea714547 JO |
20 | .section .entry.text, "ax" |
21 | ||
/*
 * 32-bit SYSENTER entry.
 *
 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 * on 64-bit kernels running on Intel CPUs.
 *
 * The SYSENTER instruction, in principle, should *only* occur in the
 * vDSO.  In practice, a small number of Android devices were shipped
 * with a copy of Bionic that inlined a SYSENTER instruction.  This
 * never happened in any of Google's Bionic versions -- it only happened
 * in a narrow range of Intel-provided versions.
 *
 * SYSENTER loads SS, RSP, CS, and RIP from previously programmed MSRs.
 * IF and VM in RFLAGS are cleared (IOW: interrupts are off).
 * SYSENTER does not save anything on the stack,
 * and does not save old RIP (!!!), RSP, or RFLAGS.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  user stack
 * 0(%ebp) arg6
 */
ENTRY(entry_SYSENTER_compat)
	/* Interrupts are off on entry. */
	SWAPGS

	/* We are about to clobber %rsp anyway, clobbering here is OK */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp

	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/*
	 * User tracing code (ptrace or signal handlers) might assume that
	 * the saved RAX contains a 32-bit number when we're invoking a 32-bit
	 * syscall.  Just in case the high bits are nonzero, zero-extend
	 * the syscall number.  (This could almost certainly be deleted
	 * with no ill effects.)
	 */
	movl	%eax, %eax

	/* Construct struct pt_regs on stack */
	pushq	$__USER32_DS		/* pt_regs->ss */
	pushq	%rbp			/* pt_regs->sp (stashed in bp) */

	/*
	 * Push flags.  This is nasty.  First, interrupts are currently
	 * off, but we need pt_regs->flags to have IF set.  Second, even
	 * if TF was set when SYSENTER started, it's clear by now.  We fix
	 * that later using TIF_SINGLESTEP.
	 */
	pushfq				/* pt_regs->flags (except IF = 0) */
	orl	$X86_EFLAGS_IF, (%rsp)	/* Fix saved flags */
	pushq	$__USER32_CS		/* pt_regs->cs */
	pushq	$0			/* pt_regs->ip = 0 (placeholder) */
	pushq	%rax			/* pt_regs->orig_ax */
	pushq	%rdi			/* pt_regs->di */
	pushq	%rsi			/* pt_regs->si */
	pushq	%rdx			/* pt_regs->dx */
	pushq	%rcx			/* pt_regs->cx */
	pushq	$-ENOSYS		/* pt_regs->ax */
	pushq	$0			/* pt_regs->r8  = 0 */
	pushq	$0			/* pt_regs->r9  = 0 */
	pushq	$0			/* pt_regs->r10 = 0 */
	pushq	$0			/* pt_regs->r11 = 0 */
	pushq	%rbx			/* pt_regs->rbx */
	pushq	%rbp			/* pt_regs->rbp (will be overwritten) */
	pushq	$0			/* pt_regs->r12 = 0 */
	pushq	$0			/* pt_regs->r13 = 0 */
	pushq	$0			/* pt_regs->r14 = 0 */
	pushq	$0			/* pt_regs->r15 = 0 */
	cld

	ENABLE_IBRS

	/*
	 * SYSENTER doesn't filter flags, so we need to clear NT and AC
	 * ourselves.  To save a few cycles, we can check whether
	 * either was set instead of doing an unconditional popfq.
	 * This needs to happen before enabling interrupts so that
	 * we don't get preempted with NT set.
	 *
	 * If TF is set, we will single-step all the way to here -- do_debug
	 * will ignore all the traps.  (Yes, this is slow, but so is
	 * single-stepping in general.  This allows us to avoid having
	 * a more complicated code to handle the case where a user program
	 * forces us to single-step through the SYSENTER entry code.)
	 *
	 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
	 * out-of-line as an optimization: NT is unlikely to be set in the
	 * majority of the cases and instead of polluting the I$ unnecessarily,
	 * we're keeping that code behind a branch which will predict as
	 * not-taken and therefore its instructions won't be fetched.
	 */
	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, EFLAGS(%rsp)
	jnz	.Lsysenter_fix_flags
.Lsysenter_flags_fixed:

	/*
	 * User mode is traced as though IRQs are on, and SYSENTER
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movq	%rsp, %rdi
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
	jmp	sysret32_from_system_call

.Lsysenter_fix_flags:
	pushq	$X86_EFLAGS_FIXED
	popfq
	jmp	.Lsysenter_flags_fixed
GLOBAL(__end_entry_SYSENTER_compat)
ENDPROC(entry_SYSENTER_compat)
1da177e4 LT |
143 | |
/*
 * 32-bit SYSCALL entry.
 *
 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 * on 64-bit kernels running on AMD CPUs.
 *
 * The SYSCALL instruction, in principle, should *only* occur in the
 * vDSO.  In practice, it appears that this really is the case.
 * As evidence:
 *
 *  - The calling convention for SYSCALL has changed several times without
 *    anyone noticing.
 *
 *  - Prior to the in-kernel X86_BUG_SYSRET_SS_ATTRS fixup, any
 *    user task that did SYSCALL without immediately reloading SS
 *    would randomly crash.
 *
 *  - Most programmers do not directly target AMD CPUs, and the 32-bit
 *    SYSCALL instruction does not exist on Intel CPUs.  Even on AMD
 *    CPUs, Linux disables the SYSCALL instruction on 32-bit kernels
 *    because the SYSCALL instruction in legacy/native 32-bit mode (as
 *    opposed to compat mode) is sufficiently poorly designed as to be
 *    essentially unusable.
 *
 * 32-bit SYSCALL saves RIP to RCX, clears RFLAGS.RF, then saves
 * RFLAGS to R11, then loads new SS, CS, and RIP from previously
 * programmed MSRs.  RFLAGS gets masked by a value from another MSR
 * (so CLD and CLAC are not needed).  SYSCALL does not save anything on
 * the stack and does not change RSP.
 *
 * Note: RFLAGS saving+masking-with-MSR happens only in Long mode
 * (in legacy 32-bit mode, IF, RF and VM bits are cleared and that's it).
 * Don't get confused: RFLAGS saving+masking depends on Long Mode Active bit
 * (EFER.LMA=1), NOT on bitness of userspace where SYSCALL executes
 * or target CS descriptor's L bit (SYSCALL does not read segment descriptors).
 *
 * Arguments:
 * eax  system call number
 * ecx  return address
 * ebx  arg1
 * ebp  arg2	(note: not saved in the stack frame, should not be touched)
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * esp  user stack
 * 0(%esp) arg6
 */
ENTRY(entry_SYSCALL_compat)
	/* Interrupts are off on entry. */
	swapgs

	/* Stash user ESP. */
	movl	%esp, %r8d

	/* Use %rsp as scratch reg. User ESP is stashed in r8 */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
	ENABLE_IBRS

	/* Switch to the kernel stack */
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/* Construct struct pt_regs on stack */
	pushq	$__USER32_DS		/* pt_regs->ss */
	pushq	%r8			/* pt_regs->sp */
	pushq	%r11			/* pt_regs->flags */
	pushq	$__USER32_CS		/* pt_regs->cs */
	pushq	%rcx			/* pt_regs->ip */
GLOBAL(entry_SYSCALL_compat_after_hwframe)
	movl	%eax, %eax		/* discard orig_ax high bits */
	pushq	%rax			/* pt_regs->orig_ax */
	pushq	%rdi			/* pt_regs->di */
	pushq	%rsi			/* pt_regs->si */
	pushq	%rdx			/* pt_regs->dx */
	pushq	%rbp			/* pt_regs->cx (stashed in bp) */
	pushq	$-ENOSYS		/* pt_regs->ax */
	pushq	$0			/* pt_regs->r8  = 0 */
	pushq	$0			/* pt_regs->r9  = 0 */
	pushq	$0			/* pt_regs->r10 = 0 */
	pushq	$0			/* pt_regs->r11 = 0 */
	pushq	%rbx			/* pt_regs->rbx */
	pushq	%rbp			/* pt_regs->rbp (will be overwritten) */
	pushq	$0			/* pt_regs->r12 = 0 */
	pushq	$0			/* pt_regs->r13 = 0 */
	pushq	$0			/* pt_regs->r14 = 0 */
	pushq	$0			/* pt_regs->r15 = 0 */

	/*
	 * User mode is traced as though IRQs are on, and SYSENTER
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movq	%rsp, %rdi
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV

	/* Opportunistic SYSRET */
sysret32_from_system_call:
	TRACE_IRQS_ON			/* User mode traces as IRQs on. */
	movq	RBX(%rsp), %rbx		/* pt_regs->rbx */
	movq	RBP(%rsp), %rbp		/* pt_regs->rbp */
	movq	EFLAGS(%rsp), %r11	/* pt_regs->flags (in r11) */
	movq	RIP(%rsp), %rcx		/* pt_regs->ip (in rcx) */
	addq	$RAX, %rsp		/* Skip r8-r15 */
	popq	%rax			/* pt_regs->rax */
	popq	%rdx			/* Skip pt_regs->cx */
	popq	%rdx			/* pt_regs->dx */
	popq	%rsi			/* pt_regs->si */
	popq	%rdi			/* pt_regs->di */

	DISABLE_IBRS
	/*
	 * USERGS_SYSRET32 does:
	 *  GSBASE = user's GS base
	 *  EIP = ECX
	 *  RFLAGS = R11
	 *  CS = __USER32_CS
	 *  SS = __USER_DS
	 *
	 * ECX will not match pt_regs->cx, but we're returning to a vDSO
	 * trampoline that will fix up RCX, so this is okay.
	 *
	 * R12-R15 are callee-saved, so they contain whatever was in them
	 * when the system call started, which is already known to user
	 * code.  We zero R8-R10 to avoid info leaks.
	 */
	movq	RSP-ORIG_RAX(%rsp), %rsp

	/*
	 * The original userspace %rsp (RSP-ORIG_RAX(%rsp)) is stored
	 * on the process stack which is not mapped to userspace and
	 * not readable after we SWITCH_TO_USER_CR3.  Delay the CR3
	 * switch until after the last reference to the process
	 * stack.
	 *
	 * %r8/%r9 are zeroed before the sysret, thus safe to clobber.
	 */
	SWITCH_TO_USER_CR3_NOSTACK scratch_reg=%r8 scratch_reg2=%r9

	xorq	%r8, %r8
	xorq	%r9, %r9
	xorq	%r10, %r10
	swapgs
	sysretl
END(entry_SYSCALL_compat)
54ad726c | 291 | |
/*
 * 32-bit legacy system call entry.
 *
 * 32-bit x86 Linux system calls traditionally used the INT $0x80
 * instruction.  INT $0x80 lands here.
 *
 * This entry point can be used by 32-bit and 64-bit programs to perform
 * 32-bit system calls.  Instances of INT $0x80 can be found inline in
 * various programs and libraries.  It is also used by the vDSO's
 * __kernel_vsyscall fallback for hardware that doesn't support a faster
 * entry method.  Restarted 32-bit system calls also fall back to INT
 * $0x80 regardless of what instruction was originally used to do the
 * system call.
 *
 * This is considered a slow path.  It is not used by most libc
 * implementations on modern hardware except during process startup.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  arg6
 */
ENTRY(entry_INT80_compat)
	/*
	 * Interrupts are off on entry.
	 */
	ASM_CLAC			/* Do this early to minimize exposure */
	SWAPGS

	/*
	 * User tracing code (ptrace or signal handlers) might assume that
	 * the saved RAX contains a 32-bit number when we're invoking a 32-bit
	 * syscall.  Just in case the high bits are nonzero, zero-extend
	 * the syscall number.  (This could almost certainly be deleted
	 * with no ill effects.)
	 */
	movl	%eax, %eax

	pushq	%rax			/* pt_regs->orig_ax */

	/* switch to thread stack expects orig_ax to be pushed */
	call	switch_to_thread_stack

	pushq	%rdi			/* pt_regs->di */
	pushq	%rsi			/* pt_regs->si */
	pushq	%rdx			/* pt_regs->dx */
	pushq	%rcx			/* pt_regs->cx */
	pushq	$-ENOSYS		/* pt_regs->ax */
	pushq	$0			/* pt_regs->r8  = 0 */
	pushq	$0			/* pt_regs->r9  = 0 */
	pushq	$0			/* pt_regs->r10 = 0 */
	pushq	$0			/* pt_regs->r11 = 0 */
	pushq	%rbx			/* pt_regs->rbx */
	pushq	%rbp			/* pt_regs->rbp */
	pushq	%r12			/* pt_regs->r12 */
	pushq	%r13			/* pt_regs->r13 */
	pushq	%r14			/* pt_regs->r14 */
	pushq	%r15			/* pt_regs->r15 */
	cld

	ENABLE_IBRS

	/*
	 * User mode is traced as though IRQs are on, and the interrupt
	 * gate turned them off.
	 */
	TRACE_IRQS_OFF

	movq	%rsp, %rdi
	call	do_int80_syscall_32
.Lsyscall_32_done:

	/* Go back to user mode. */
	TRACE_IRQS_ON
	jmp	swapgs_restore_regs_and_return_to_usermode
END(entry_INT80_compat)
1da177e4 | 372 | |
1d4b4b29 AV |
373 | ALIGN |
374 | GLOBAL(stub32_clone) | |
5cdc683b | 375 | /* |
7a5a9824 DV |
376 | * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr). |
377 | * The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val). | |
378 | * | |
379 | * The native 64-bit kernel's sys_clone() implements the latter, | |
380 | * so we need to swap arguments here before calling it: | |
5cdc683b | 381 | */ |
7a5a9824 | 382 | xchg %r8, %rcx |
8169aff6 | 383 | jmp sys_clone |