]>
Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4 | 2 | /* |
54ad726c IM |
3 | * Compatibility mode system call entry point for x86-64. |
4 | * | |
1da177e4 | 5 | * Copyright 2000-2002 Andi Kleen, SuSE Labs. |
54ad726c | 6 | */ |
d36f9479 | 7 | #include "calling.h" |
e2d5df93 | 8 | #include <asm/asm-offsets.h> |
1da177e4 LT |
9 | #include <asm/current.h> |
10 | #include <asm/errno.h> | |
54ad726c IM |
11 | #include <asm/ia32_unistd.h> |
12 | #include <asm/thread_info.h> | |
1da177e4 | 13 | #include <asm/segment.h> |
2601e64d | 14 | #include <asm/irqflags.h> |
1ce6f868 | 15 | #include <asm/asm.h> |
63bcff2a | 16 | #include <asm/smap.h> |
1da177e4 | 17 | #include <linux/linkage.h> |
d7e7528b | 18 | #include <linux/err.h> |
1da177e4 | 19 | |
ea714547 JO |
20 | .section .entry.text, "ax" |
21 | ||
1da177e4 | 22 | /* |
fda57b22 | 23 | * 32-bit SYSENTER entry. |
1da177e4 | 24 | * |
fda57b22 AL |
25 | * 32-bit system calls through the vDSO's __kernel_vsyscall enter here |
26 | * on 64-bit kernels running on Intel CPUs. | |
27 | * | |
28 | * The SYSENTER instruction, in principle, should *only* occur in the | |
29 | * vDSO. In practice, a small number of Android devices were shipped | |
30 | * with a copy of Bionic that inlined a SYSENTER instruction. This | |
31 | * never happened in any of Google's Bionic versions -- it only happened | |
32 | * in a narrow range of Intel-provided versions. | |
33 | * | |
34 | * SYSENTER loads SS, RSP, CS, and RIP from previously programmed MSRs. | |
35 | * IF and VM in RFLAGS are cleared (IOW: interrupts are off). | |
b87cf63e | 36 | * SYSENTER does not save anything on the stack, |
fda57b22 | 37 | * and does not save old RIP (!!!), RSP, or RFLAGS. |
b87cf63e | 38 | * |
1da177e4 | 39 | * Arguments: |
b87cf63e DV |
40 | * eax system call number |
41 | * ebx arg1 | |
42 | * ecx arg2 | |
43 | * edx arg3 | |
44 | * esi arg4 | |
45 | * edi arg5 | |
46 | * ebp user stack | |
47 | * 0(%ebp) arg6 | |
b87cf63e | 48 | */ |
ENTRY(entry_SYSENTER_compat)
	/* Interrupts are off on entry (SYSENTER cleared IF; see header). */
	SWAPGS

	/* We are about to clobber %rsp anyway, clobbering here is OK */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp

	/* Load this task's kernel stack pointer; user RSP was not saved by HW. */
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/*
	 * User tracing code (ptrace or signal handlers) might assume that
	 * the saved RAX contains a 32-bit number when we're invoking a 32-bit
	 * syscall.  Just in case the high bits are nonzero, zero-extend
	 * the syscall number.  (This could almost certainly be deleted
	 * with no ill effects.)
	 */
	movl	%eax, %eax

	/* Construct struct pt_regs on stack */
	pushq	$__USER32_DS		/* pt_regs->ss */
	pushq	%rbp			/* pt_regs->sp (stashed in bp) */

	/*
	 * Push flags.  This is nasty.  First, interrupts are currently
	 * off, but we need pt_regs->flags to have IF set.  Second, even
	 * if TF was set when SYSENTER started, it's clear by now.  We fix
	 * that later using TIF_SINGLESTEP.
	 */
	pushfq				/* pt_regs->flags (except IF = 0) */
	orl	$X86_EFLAGS_IF, (%rsp)	/* Fix saved flags */
	pushq	$__USER32_CS		/* pt_regs->cs */
	pushq	$0			/* pt_regs->ip = 0 (placeholder) */
	pushq	%rax			/* pt_regs->orig_ax */
	pushq	%rdi			/* pt_regs->di */
	pushq	%rsi			/* pt_regs->si */
	pushq	%rdx			/* pt_regs->dx */
	pushq	%rcx			/* pt_regs->cx */
	pushq	$-ENOSYS		/* pt_regs->ax */
	pushq	$0			/* pt_regs->r8  = 0 */
	pushq	$0			/* pt_regs->r9  = 0 */
	pushq	$0			/* pt_regs->r10 = 0 */
	pushq	$0			/* pt_regs->r11 = 0 */
	pushq	%rbx			/* pt_regs->rbx */
	pushq	%rbp			/* pt_regs->rbp (will be overwritten) */
	pushq	$0			/* pt_regs->r12 = 0 */
	pushq	$0			/* pt_regs->r13 = 0 */
	pushq	$0			/* pt_regs->r14 = 0 */
	pushq	$0			/* pt_regs->r15 = 0 */
	cld				/* C code assumes DF clear */

	/*
	 * SYSENTER doesn't filter flags, so we need to clear NT and AC
	 * ourselves.  To save a few cycles, we can check whether
	 * either was set instead of doing an unconditional popfq.
	 * This needs to happen before enabling interrupts so that
	 * we don't get preempted with NT set.
	 *
	 * If TF is set, we will single-step all the way to here -- do_debug
	 * will ignore all the traps.  (Yes, this is slow, but so is
	 * single-stepping in general.  This allows us to avoid having
	 * a more complicated code to handle the case where a user program
	 * forces us to single-step through the SYSENTER entry code.)
	 *
	 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
	 * out-of-line as an optimization: NT is unlikely to be set in the
	 * majority of the cases and instead of polluting the I$ unnecessarily,
	 * we're keeping that code behind a branch which will predict as
	 * not-taken and therefore its instructions won't be fetched.
	 */
	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, EFLAGS(%rsp)
	jnz	.Lsysenter_fix_flags
.Lsysenter_flags_fixed:

	/*
	 * User mode is traced as though IRQs are on, and SYSENTER
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movq	%rsp, %rdi		/* arg: struct pt_regs * */
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
	jmp	sysret32_from_system_call

	/* Out-of-line slow path: sanitize EFLAGS, then rejoin the fast path. */
.Lsysenter_fix_flags:
	pushq	$X86_EFLAGS_FIXED
	popfq
	jmp	.Lsysenter_flags_fixed
GLOBAL(__end_entry_SYSENTER_compat)
ENDPROC(entry_SYSENTER_compat)
1da177e4 LT |
141 | |
142 | /* | |
fda57b22 AL |
143 | * 32-bit SYSCALL entry. |
144 | * | |
145 | * 32-bit system calls through the vDSO's __kernel_vsyscall enter here | |
146 | * on 64-bit kernels running on AMD CPUs. | |
147 | * | |
148 | * The SYSCALL instruction, in principle, should *only* occur in the | |
149 | * vDSO. In practice, it appears that this really is the case. | |
150 | * As evidence: | |
151 | * | |
152 | * - The calling convention for SYSCALL has changed several times without | |
153 | * anyone noticing. | |
154 | * | |
155 | * - Prior to the in-kernel X86_BUG_SYSRET_SS_ATTRS fixup, anything | |
156 | * user task that did SYSCALL without immediately reloading SS | |
157 | * would randomly crash. | |
1da177e4 | 158 | * |
fda57b22 AL |
159 | * - Most programmers do not directly target AMD CPUs, and the 32-bit |
160 | * SYSCALL instruction does not exist on Intel CPUs. Even on AMD | |
161 | * CPUs, Linux disables the SYSCALL instruction on 32-bit kernels | |
162 | * because the SYSCALL instruction in legacy/native 32-bit mode (as | |
163 | * opposed to compat mode) is sufficiently poorly designed as to be | |
164 | * essentially unusable. | |
b87cf63e | 165 | * |
fda57b22 AL |
166 | * 32-bit SYSCALL saves RIP to RCX, clears RFLAGS.RF, then saves |
167 | * RFLAGS to R11, then loads new SS, CS, and RIP from previously | |
168 | * programmed MSRs. RFLAGS gets masked by a value from another MSR | |
169 | * (so CLD and CLAC are not needed). SYSCALL does not save anything on | |
170 | * the stack and does not change RSP. | |
171 | * | |
172 | * Note: RFLAGS saving+masking-with-MSR happens only in Long mode | |
54ad726c | 173 | * (in legacy 32-bit mode, IF, RF and VM bits are cleared and that's it). |
fda57b22 | 174 | * Don't get confused: RFLAGS saving+masking depends on Long Mode Active bit |
b87cf63e DV |
175 | * (EFER.LMA=1), NOT on bitness of userspace where SYSCALL executes |
176 | * or target CS descriptor's L bit (SYSCALL does not read segment descriptors). | |
177 | * | |
1da177e4 | 178 | * Arguments: |
b87cf63e DV |
179 | * eax system call number |
180 | * ecx return address | |
181 | * ebx arg1 | |
182 | * ebp arg2 (note: not saved in the stack frame, should not be touched) | |
183 | * edx arg3 | |
184 | * esi arg4 | |
185 | * edi arg5 | |
186 | * esp user stack | |
187 | * 0(%esp) arg6 | |
b87cf63e | 188 | */ |
ENTRY(entry_SYSCALL_compat)
	/* Interrupts are off on entry (RFLAGS was masked by the SYSCALL MSR). */
	swapgs

	/* Stash user ESP and switch to the kernel stack. */
	movl	%esp, %r8d
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/* Construct struct pt_regs on stack */
	pushq	$__USER32_DS		/* pt_regs->ss */
	pushq	%r8			/* pt_regs->sp */
	pushq	%r11			/* pt_regs->flags (SYSCALL saved RFLAGS in r11) */
	pushq	$__USER32_CS		/* pt_regs->cs */
	pushq	%rcx			/* pt_regs->ip (SYSCALL saved RIP in rcx) */
GLOBAL(entry_SYSCALL_compat_after_hwframe)
	movl	%eax, %eax		/* discard orig_ax high bits */
	pushq	%rax			/* pt_regs->orig_ax */
	pushq	%rdi			/* pt_regs->di */
	pushq	%rsi			/* pt_regs->si */
	pushq	%rdx			/* pt_regs->dx */
	pushq	%rbp			/* pt_regs->cx (stashed in bp) */
	pushq	$-ENOSYS		/* pt_regs->ax */
	pushq	$0			/* pt_regs->r8  = 0 */
	pushq	$0			/* pt_regs->r9  = 0 */
	pushq	$0			/* pt_regs->r10 = 0 */
	pushq	$0			/* pt_regs->r11 = 0 */
	pushq	%rbx			/* pt_regs->rbx */
	pushq	%rbp			/* pt_regs->rbp (will be overwritten) */
	pushq	$0			/* pt_regs->r12 = 0 */
	pushq	$0			/* pt_regs->r13 = 0 */
	pushq	$0			/* pt_regs->r14 = 0 */
	pushq	$0			/* pt_regs->r15 = 0 */

	/*
	 * We just saved %rdi so it is safe to clobber.  It is not
	 * preserved during the C calls inside TRACE_IRQS_OFF anyway.
	 */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi

	/*
	 * User mode is traced as though IRQs are on, and SYSENTER
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movq	%rsp, %rdi		/* arg: struct pt_regs * */
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV

	/* Opportunistic SYSRET (also reached from the SYSENTER fast path) */
sysret32_from_system_call:
	TRACE_IRQS_ON			/* User mode traces as IRQs on. */
	movq	RBX(%rsp), %rbx		/* pt_regs->rbx */
	movq	RBP(%rsp), %rbp		/* pt_regs->rbp */
	movq	EFLAGS(%rsp), %r11	/* pt_regs->flags (in r11) */
	movq	RIP(%rsp), %rcx		/* pt_regs->ip (in rcx) */
	addq	$RAX, %rsp		/* Skip r8-r15 */
	popq	%rax			/* pt_regs->rax */
	popq	%rdx			/* Skip pt_regs->cx */
	popq	%rdx			/* pt_regs->dx */
	popq	%rsi			/* pt_regs->si */
	popq	%rdi			/* pt_regs->di */

	/*
	 * USERGS_SYSRET32 does:
	 *  GSBASE = user's GS base
	 *  EIP = ECX
	 *  RFLAGS = R11
	 *  CS = __USER32_CS
	 *  SS = __USER_DS
	 *
	 * ECX will not match pt_regs->cx, but we're returning to a vDSO
	 * trampoline that will fix up RCX, so this is okay.
	 *
	 * R12-R15 are callee-saved, so they contain whatever was in them
	 * when the system call started, which is already known to user
	 * code.  We zero R8-R10 to avoid info leaks.
	 */
	movq	RSP-ORIG_RAX(%rsp), %rsp

	/*
	 * The original userspace %rsp (RSP-ORIG_RAX(%rsp)) is stored
	 * on the process stack which is not mapped to userspace and
	 * not readable after we SWITCH_TO_USER_CR3.  Delay the CR3
	 * switch until after the last reference to the process
	 * stack.
	 *
	 * %r8/%r9 are zeroed before the sysret, thus safe to clobber.
	 */
	SWITCH_TO_USER_CR3_NOSTACK scratch_reg=%r8 scratch_reg2=%r9

	xorq	%r8, %r8		/* zero caller-clobbered regs: no info leak */
	xorq	%r9, %r9
	xorq	%r10, %r10
	swapgs
	sysretl
END(entry_SYSCALL_compat)
54ad726c | 288 | |
b87cf63e | 289 | /* |
fda57b22 AL |
290 | * 32-bit legacy system call entry. |
291 | * | |
292 | * 32-bit x86 Linux system calls traditionally used the INT $0x80 | |
293 | * instruction. INT $0x80 lands here. | |
294 | * | |
295 | * This entry point can be used by 32-bit and 64-bit programs to perform | |
296 | * 32-bit system calls. Instances of INT $0x80 can be found inline in | |
297 | * various programs and libraries. It is also used by the vDSO's | |
298 | * __kernel_vsyscall fallback for hardware that doesn't support a faster | |
299 | * entry method. Restarted 32-bit system calls also fall back to INT | |
300 | * $0x80 regardless of what instruction was originally used to do the | |
301 | * system call. | |
302 | * | |
303 | * This is considered a slow path. It is not used by most libc | |
304 | * implementations on modern hardware except during process startup. | |
1da177e4 | 305 | * |
b87cf63e DV |
306 | * Arguments: |
307 | * eax system call number | |
308 | * ebx arg1 | |
309 | * ecx arg2 | |
310 | * edx arg3 | |
311 | * esi arg4 | |
312 | * edi arg5 | |
fda57b22 | 313 | * ebp arg6 |
b87cf63e | 314 | */ |
ENTRY(entry_INT80_compat)
	/*
	 * Interrupts are off on entry (interrupt gate).
	 */
	ASM_CLAC			/* Do this early to minimize exposure */
	SWAPGS

	/*
	 * User tracing code (ptrace or signal handlers) might assume that
	 * the saved RAX contains a 32-bit number when we're invoking a 32-bit
	 * syscall.  Just in case the high bits are nonzero, zero-extend
	 * the syscall number.  (This could almost certainly be deleted
	 * with no ill effects.)
	 */
	movl	%eax, %eax

	pushq	%rax			/* pt_regs->orig_ax */

	/* switch to thread stack expects orig_ax to be pushed */
	call	switch_to_thread_stack

	/* Continue building struct pt_regs; the hardware frame + orig_ax
	 * are already in place above these pushes. */
	pushq	%rdi			/* pt_regs->di */
	pushq	%rsi			/* pt_regs->si */
	pushq	%rdx			/* pt_regs->dx */
	pushq	%rcx			/* pt_regs->cx */
	pushq	$-ENOSYS		/* pt_regs->ax */
	pushq	$0			/* pt_regs->r8  = 0 */
	pushq	$0			/* pt_regs->r9  = 0 */
	pushq	$0			/* pt_regs->r10 = 0 */
	pushq	$0			/* pt_regs->r11 = 0 */
	pushq	%rbx			/* pt_regs->rbx */
	pushq	%rbp			/* pt_regs->rbp */
	pushq	%r12			/* pt_regs->r12 */
	pushq	%r13			/* pt_regs->r13 */
	pushq	%r14			/* pt_regs->r14 */
	pushq	%r15			/* pt_regs->r15 */
	cld				/* C code assumes DF clear */

	/*
	 * User mode is traced as though IRQs are on, and the interrupt
	 * gate turned them off.
	 */
	TRACE_IRQS_OFF

	movq	%rsp, %rdi		/* arg: struct pt_regs * */
	call	do_int80_syscall_32
.Lsyscall_32_done:

	/* Go back to user mode. */
	TRACE_IRQS_ON
	jmp	swapgs_restore_regs_and_return_to_usermode
END(entry_INT80_compat)
1da177e4 | 367 | |
ENTRY(stub32_clone)
	/*
	 * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr).
	 * The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val).
	 *
	 * The native 64-bit kernel's sys_clone() implements the latter,
	 * so we need to swap arguments here before calling it:
	 * (%rcx and %r8 carry the 4th and 5th C arguments at this point.)
	 */
	xchg	%r8, %rcx
	jmp	sys_clone		/* tail call; sys_clone returns to caller */
ENDPROC(stub32_clone)