/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SPECIAL_INSNS_H
#define _ASM_X86_SPECIAL_INSNS_H


#ifdef __KERNEL__

#include <asm/nops.h>
#include <asm/processor-flags.h>
#include <linux/jump_label.h>

/*
 * The compiler should not reorder volatile asm statements with respect to each
 * other: they should execute in program order. However GCC 4.9.x and 5.x have
 * a bug (which was fixed in 8.1, 7.3 and 6.5) where they might reorder
 * volatile asm. The write functions are not affected since they have memory
 * clobbers preventing reordering. To prevent reads from being reordered with
 * respect to writes, use a dummy memory operand.
 */

#define __FORCE_ORDER "m"(*(unsigned int *)0x1000UL)
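
/*
 * For example, a sketch of the hazard this guards against: with an
 * affected compiler, a control-register read could be hoisted above a
 * preceding write:
 *
 *	native_write_cr3(val);		// ordered by its "memory" clobber
 *	cr3 = __native_read_cr3();	// must not move above the write
 *
 * The dummy "m" input makes each read appear to depend on memory, so it
 * cannot be reordered across the write's memory clobber. The operand is
 * never actually accessed; 0x1000 is just an arbitrary address.
 */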

void native_write_cr0(unsigned long val);

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0\n\t" : "=r" (val) : __FORCE_ORDER);
	return val;
}

static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("mov %%cr2,%0\n\t" : "=r" (val) : __FORCE_ORDER);
	return val;
}

static inline void native_write_cr2(unsigned long val)
{
	asm volatile("mov %0,%%cr2": : "r" (val) : "memory");
}

static inline unsigned long __native_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0\n\t" : "=r" (val) : __FORCE_ORDER);
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("mov %0,%%cr3": : "r" (val) : "memory");
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
#ifdef CONFIG_X86_32
	/*
	 * This could fault if CR4 does not exist. Non-existent CR4
	 * is functionally equivalent to CR4 == 0. Keep it simple and pretend
	 * that CR4 == 0 on CPUs that don't have CR4.
	 */
	asm volatile("1: mov %%cr4, %0\n"
		     "2:\n"
		     _ASM_EXTABLE(1b, 2b)
		     : "=r" (val) : "0" (0), __FORCE_ORDER);
#else
	/* CR4 always exists on x86_64. */
	asm volatile("mov %%cr4,%0\n\t" : "=r" (val) : __FORCE_ORDER);
#endif
	return val;
}

void native_write_cr4(unsigned long val);
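
/*
 * For example (a sketch; kernel code normally goes through
 * cr4_set_bits()/cr4_clear_bits(), which also maintain a per-CPU
 * shadow copy of CR4):
 *
 *	unsigned long cr4 = native_read_cr4();
 *	native_write_cr4(cr4 | X86_CR4_PGE);
 */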

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline u32 rdpkru(void)
{
	u32 ecx = 0;
	u32 edx, pkru;

	/*
	 * "rdpkru" instruction. Places PKRU contents into EAX,
	 * clears EDX and requires that ecx = 0.
	 */
	asm volatile(".byte 0x0f,0x01,0xee\n\t"
		     : "=a" (pkru), "=d" (edx)
		     : "c" (ecx));
	return pkru;
}

static inline void wrpkru(u32 pkru)
{
	u32 ecx = 0, edx = 0;

	/*
	 * "wrpkru" instruction. Loads the contents of EAX into PKRU
	 * and requires that ecx = edx = 0.
	 */
	asm volatile(".byte 0x0f,0x01,0xef\n\t"
		     : : "a" (pkru), "c" (ecx), "d" (edx));
}

static inline void __write_pkru(u32 pkru)
{
	/*
	 * WRPKRU is relatively expensive compared to RDPKRU.
	 * Avoid WRPKRU when it would not change the value.
	 */
	if (pkru == rdpkru())
		return;

	wrpkru(pkru);
}
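
/*
 * For example (a sketch): temporarily granting full access to one
 * protection key. Each pkey owns two PKRU bits, access-disable at
 * 2*pkey and write-disable at 2*pkey + 1; clearing both enables
 * reads and writes:
 *
 *	u32 old_pkru = rdpkru();
 *
 *	__write_pkru(old_pkru & ~(3u << (2 * pkey)));
 *	... access the pkey-protected mapping ...
 *	__write_pkru(old_pkru);
 */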

#else
static inline u32 rdpkru(void)
{
	return 0;
}

static inline void __write_pkru(u32 pkru)
{
}
#endif

static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}

extern asmlinkage void native_load_gs_index(unsigned);

static inline unsigned long __read_cr4(void)
{
	return native_read_cr4();
}

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else

static inline unsigned long read_cr0(void)
{
	return native_read_cr0();
}

static inline void write_cr0(unsigned long x)
{
	native_write_cr0(x);
}

static inline unsigned long read_cr2(void)
{
	return native_read_cr2();
}

static inline void write_cr2(unsigned long x)
{
	native_write_cr2(x);
}

/*
 * Careful! CR3 contains more than just an address. You probably want
 * read_cr3_pa() instead.
 */
static inline unsigned long __read_cr3(void)
{
	return __native_read_cr3();
}
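
/*
 * For example (a sketch of what read_cr3_pa() in <asm/processor.h>
 * does): mask off the PCID and flag bits to get the physical address
 * of the page table root:
 *
 *	unsigned long pgd_pa = __read_cr3() & CR3_ADDR_MASK;
 */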

static inline void write_cr3(unsigned long x)
{
	native_write_cr3(x);
}

static inline void __write_cr4(unsigned long x)
{
	native_write_cr4(x);
}

static inline void wbinvd(void)
{
	native_wbinvd();
}

#ifdef CONFIG_X86_64

static inline void load_gs_index(unsigned selector)
{
	native_load_gs_index(selector);
}

#endif

#endif /* CONFIG_PARAVIRT_XXL */

static inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}

static inline void clflushopt(volatile void *__p)
{
	alternative_io(".byte " __stringify(NOP_DS_PREFIX) "; clflush %P0",
		       ".byte 0x66; clflush %P0",
		       X86_FEATURE_CLFLUSHOPT,
		       "+m" (*(volatile char __force *)__p));
}
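
/*
 * alternative_io() patches the code at boot: CPUs without
 * X86_FEATURE_CLFLUSHOPT run the first sequence (CLFLUSH with a
 * harmless DS prefix so both sequences are the same length), CPUs
 * with the feature get the 0x66-prefixed encoding, which is
 * CLFLUSHOPT.
 */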

static inline void clwb(volatile void *__p)
{
	volatile struct { char x[64]; } *p = __p;

	asm volatile(ALTERNATIVE_2(
		".byte " __stringify(NOP_DS_PREFIX) "; clflush (%[pax])",
		".byte 0x66; clflush (%[pax])", /* clflushopt (%%rax) */
		X86_FEATURE_CLFLUSHOPT,
		".byte 0x66, 0x0f, 0xae, 0x30", /* clwb (%%rax) */
		X86_FEATURE_CLWB)
		: [p] "+m" (*p)
		: [pax] "a" (p));
}
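
/*
 * These primitives act on a single cache line. A sketch of flushing a
 * whole buffer, similar in spirit to the kernel's clflush_cache_range():
 *
 *	for (p = start; p < start + size;
 *	     p += boot_cpu_data.x86_clflush_size)
 *		clwb(p);
 */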

#define nop() asm volatile ("nop")


#endif /* __KERNEL__ */

#endif /* _ASM_X86_SPECIAL_INSNS_H */