/*
 * AT_SYSINFO entry point
 *
 * This is the 32-bit vDSO fast-system-call entry point.  Userspace finds
 * its address via the AT_SYSINFO auxv entry and calls it indirectly; the
 * function selects the fastest available kernel-entry instruction at boot
 * time via the alternatives mechanism.
 */

#include <asm/dwarf2.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>

/*
 * First get the common code for the sigreturn entry points.
 * This must come first.
 */
#include "sigreturn.S"

	.text
	.globl __kernel_vsyscall
	.type __kernel_vsyscall,@function
	ALIGN
__kernel_vsyscall:
	CFI_STARTPROC
	/*
	 * Reshuffle regs so that all of any of the entry instructions
	 * will preserve enough state.
	 *
	 * A really nice entry sequence would be:
	 * pushl %edx
	 * pushl %ecx
	 * movl %esp, %ecx
	 *
	 * Unfortunately, naughty Android versions between July and December
	 * 2015 actually hardcode the traditional Linux SYSENTER entry
	 * sequence.  That is severely broken for a number of reasons (ask
	 * anyone with an AMD CPU, for example).  Nonetheless, we try to keep
	 * it working approximately as well as it ever worked.
	 *
	 * This link may elucidate some of the history:
	 *   https://android-review.googlesource.com/#/q/Iac3295376d61ef83e713ac9b528f3b50aa780cd7
	 * personally, I find it hard to understand what's going on there.
	 *
	 * Note to future user developers: DO NOT USE SYSENTER IN YOUR CODE.
	 * Execute an indirect call to the address in the AT_SYSINFO auxv
	 * entry.  That is the ONLY correct way to make a fast 32-bit system
	 * call on Linux.  (Open-coding int $0x80 is also fine, but it's
	 * slow.)
	 */

	/*
	 * Save the registers that the SYSENTER/SYSCALL sequences below may
	 * clobber (ECX, EDX) and EBP, which the patched-in sequences reuse
	 * as a scratch/argument register.  Each push carries matching DWARF
	 * CFI annotations so unwinders can walk through a system call.
	 */
	pushl	%ecx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	pushl	%edx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	pushl	%ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0

	/* Candidate fast-entry sequences, patched in by the alternatives code. */
#define SYSENTER_SEQUENCE	"movl %esp, %ebp; sysenter"
#define SYSCALL_SEQUENCE	"movl %ecx, %ebp; syscall"

#ifdef CONFIG_X86_64
	/* If SYSENTER (Intel) or SYSCALL32 (AMD) is available, use it. */
	ALTERNATIVE_2 "", SYSENTER_SEQUENCE, X86_FEATURE_SYSENTER32, \
		      SYSCALL_SEQUENCE, X86_FEATURE_SYSCALL32
#else
	/* Native 32-bit kernel: SYSENTER is the only fast entry (SEP feature). */
	ALTERNATIVE "", SYSENTER_SEQUENCE, X86_FEATURE_SEP
#endif

	/* Enter using int $0x80 */
	int	$0x80
GLOBAL(int80_landing_pad)

	/*
	 * Restore EDX and ECX in case they were clobbered.  EBP is not
	 * clobbered (the kernel restores it), but it's cleaner and
	 * probably faster to pop it than to adjust ESP using addl.
	 */
	popl	%ebp
	CFI_RESTORE ebp
	CFI_ADJUST_CFA_OFFSET -4
	popl	%edx
	CFI_RESTORE edx
	CFI_ADJUST_CFA_OFFSET -4
	popl	%ecx
	CFI_RESTORE ecx
	CFI_ADJUST_CFA_OFFSET -4
	ret
	CFI_ENDPROC

	.size __kernel_vsyscall,.-__kernel_vsyscall
	.previous