/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_FPU_INTERNAL_H
#define _ASM_X86_FPU_INTERNAL_H

#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/user.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>
#include <asm/cpufeature.h>
#include <asm/trace/fpu.h>

/*
 * High level FPU state handling functions:
 */
extern void fpu__activate_curr(struct fpu *fpu);
extern void fpu__activate_fpstate_read(struct fpu *fpu);
extern void fpu__activate_fpstate_write(struct fpu *fpu);
extern void fpu__current_fpstate_write_begin(void);
extern void fpu__current_fpstate_write_end(void);
extern void fpu__save(struct fpu *fpu);
extern void fpu__restore(struct fpu *fpu);
extern int  fpu__restore_sig(void __user *buf, int ia32_frame);
extern void fpu__drop(struct fpu *fpu);
extern int  fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
extern void fpu__clear(struct fpu *fpu);
extern int  fpu__exception_code(struct fpu *fpu, int trap_nr);
extern int  dump_fpu(struct pt_regs *ptregs, struct user_i387_struct *fpstate);

/*
 * Boot time FPU initialization functions:
 */
extern void fpu__init_cpu(void);
extern void fpu__init_system_xstate(void);
extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);
extern u64  fpu__get_supported_xfeatures_mask(void);

/*
 * Debugging facility:
 */
#ifdef CONFIG_X86_DEBUG_FPU
# define WARN_ON_FPU(x) WARN_ON_ONCE(x)
#else
# define WARN_ON_FPU(x) ({ (void)(x); 0; })
#endif

/*
 * FPU related CPU feature flag helper routines:
 */
static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}

/*
 * fpstate handling functions:
 */

extern union fpregs_state init_fpstate;

extern void fpstate_init(union fpregs_state *state);
#ifdef CONFIG_MATH_EMULATION
extern void fpstate_init_soft(struct swregs_state *soft);
#else
static inline void fpstate_init_soft(struct swregs_state *soft) {}
#endif

static inline void fpstate_init_xstate(struct xregs_state *xsave)
{
	/*
	 * XRSTORS requires these bits set in xcomp_bv, or it will
	 * trigger #GP:
	 */
	xsave->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xfeatures_mask;
}

static inline void fpstate_init_fxstate(struct fxregs_state *fx)
{
	fx->cwd = 0x37f;
	fx->mxcsr = MXCSR_DEFAULT;
}
extern void fpstate_sanitize_xstate(struct fpu *fpu);

#define user_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile(ASM_STAC "\n"					\
		     "1:" #insn "\n\t"					\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3: movl $-1,%[err]\n"				\
		     "   jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

#define check_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: movl $-1,%[err]\n"				\
		     "   jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

static inline int copy_fregs_to_user(struct fregs_state __user *fx)
{
	return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
}

static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));

	/* See comment in copy_fxregs_to_kernel() below. */
	return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
}

static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
{
	int err;

	if (IS_ENABLED(CONFIG_X86_32)) {
		err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	} else {
		if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) {
			err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
		} else {
			/* See comment in copy_fxregs_to_kernel() below. */
			err = check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
		}
	}
	/* Copying from a kernel buffer to FPU registers should never fail: */
	WARN_ON_FPU(err);
}

static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in copy_fxregs_to_kernel() below. */
	return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			 "m" (*fx));
}

static inline void copy_kernel_to_fregs(struct fregs_state *fx)
{
	int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));

	WARN_ON_FPU(err);
}

static inline int copy_user_to_fregs(struct fregs_state __user *fx)
{
	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void copy_fxregs_to_kernel(struct fpu *fpu)
{
	if (IS_ENABLED(CONFIG_X86_32))
		asm volatile("fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
	else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
	else {
		/* Using "rex64; fxsave %0" is broken because, if the memory
		 * operand uses any extended registers for addressing, a second
		 * REX prefix will be generated (to the assembler, rex64
		 * followed by semicolon is a separate instruction), and hence
		 * the 64-bitness is lost.
		 *
		 * Using "fxsaveq %0" would be the ideal choice, but is only
		 * supported starting with gas 2.16.
		 *
		 * Using, as a workaround, the properly prefixed form below
		 * isn't accepted by any binutils version so far released,
		 * complaining that the same type of prefix is used twice if
		 * an extended register is needed for addressing (fix submitted
		 * to mainline 2005-11-21).
		 *
		 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state.fxsave));
		 *
		 * This, however, we can work around by forcing the compiler to
		 * select an addressing mode that doesn't require extended
		 * registers.
		 */
		asm volatile("rex64/fxsave (%[fx])"
			     : "=m" (fpu->state.fxsave)
			     : [fx] "R" (&fpu->state.fxsave));
	}
}

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"

#define XSTATE_OP(op, st, lmask, hmask, err)				\
	asm volatile("1:" op "\n\t"					\
		     "xor %[err], %[err]\n"				\
		     "2:\n\t"						\
		     ".pushsection .fixup,\"ax\"\n\t"			\
		     "3: movl $-2,%[err]\n\t"				\
		     "jmp 2b\n\t"					\
		     ".popsection\n\t"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact
 * format and supervisor states in addition to modified optimization in
 * XSAVEOPT.
 *
 * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
 * supports modified optimization which is not supported by XSAVE.
 *
 * We use XSAVE as a fallback.
 *
 * The 661 label is defined in the ALTERNATIVE* macros as the address of the
 * original instruction which gets replaced. We need to use it here as the
 * address of the instruction where we might get an exception at.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE_2(XSAVE,				\
				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
				   XSAVES,   X86_FEATURE_XSAVES)	\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "4: movl $-2, %[err]\n"				\
		     "jmp 3b\n"						\
		     ".popsection\n"					\
		     _ASM_EXTABLE(661b, 4b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
 * XSAVE area format.
 */
#define XSTATE_XRESTORE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE(XRSTOR,				\
				 XRSTORS, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "4: movl $-2, %[err]\n"				\
		     "jmp 3b\n"						\
		     ".popsection\n"					\
		     _ASM_EXTABLE(661b, 4b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * This function is called only during boot time when x86 caps are not set
 * up and alternative can not be used yet.
 */
static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (static_cpu_has(X86_FEATURE_XSAVES))
		XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XSAVE, xstate, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * This function is called only during boot time when x86 caps are not set
 * up and alternative can not be used yet.
 */
static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (static_cpu_has(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	/* We should never fault when copying from a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * Save processor xstate to xsave area.
 */
static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON(!alternatives_patched);

	XSTATE_XSAVE(xstate, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * Restore processor xstate from xsave area.
 */
static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	XSTATE_XRESTORE(xstate, lmask, hmask, err);

	/* We should never fault when copying from a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * Save xstate to user space xsave area.
 *
 * We don't use modified optimization because xrstor/xrstors might track
 * a different application.
 *
 * We don't use compacted format xsave area for
 * backward compatibility for old applications which don't understand
 * compacted format of xsave area.
 */
static inline int copy_xregs_to_user(struct xregs_state __user *buf)
{
	int err;

	/*
	 * Clear the xsave header first, so that reserved fields are
	 * initialized to zero.
	 */
	err = __clear_user(&buf->header, sizeof(buf->header));
	if (unlikely(err))
		return -EFAULT;

	stac();
	XSTATE_OP(XSAVE, buf, -1, -1, err);
	clac();

	return err;
}

/*
 * Restore xstate from user space xsave area.
 */
static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
{
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	stac();
	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
	clac();

	return err;
}
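
/*
 * Illustrative sketch, not part of the original header: a minimal
 * signal-frame style save of the user-visible xstate, loosely in the
 * spirit of copy_fpstate_to_sigframe() in arch/x86/kernel/fpu/signal.c.
 * The function name is hypothetical and the caller is assumed to have
 * validated the user buffer; real code also falls back to fxsave/fnsave
 * when XSAVE is not available.
 */
static inline int example_save_xstate_to_sigframe(struct xregs_state __user *buf)
{
	if (!use_xsave())
		return -ENODEV;

	/* copy_xregs_to_user() clears the header and returns non-zero on fault: */
	return copy_xregs_to_user(buf) ? -EFAULT : 0;
}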

/*
 * These must be called with preempt disabled. Returns
 * 'true' if the FPU state is still intact and we can
 * keep registers active.
 *
 * The legacy FNSAVE instruction cleared all FPU state
 * unconditionally, so registers are essentially destroyed.
 * Modern FPU state can be kept in registers, if there are
 * no pending FP exceptions.
 */
static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
{
	if (likely(use_xsave())) {
		copy_xregs_to_kernel(&fpu->state.xsave);
		return 1;
	}

	if (likely(use_fxsr())) {
		copy_fxregs_to_kernel(fpu);
		return 1;
	}

	/*
	 * Legacy FPU register saving, FNSAVE always clears FPU registers,
	 * so we have to mark them inactive:
	 */
	asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));

	return 0;
}

static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
{
	if (use_xsave()) {
		copy_kernel_to_xregs(&fpstate->xsave, mask);
	} else {
		if (use_fxsr())
			copy_kernel_to_fxregs(&fpstate->fxsave);
		else
			copy_kernel_to_fregs(&fpstate->fsave);
	}
}

static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
	 * pending. Clear the x87 state here by setting it to fixed values.
	 * "m" is a random variable that should be in L1.
	 */
	if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %P[addr]"	/* set F?P to defined value */
			: : [addr] "m" (fpstate));
	}

	__copy_kernel_to_fpregs(fpstate, -1);
}

extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);

/*
 * FPU context switch related helper methods:
 */

DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

/*
 * The in-register FPU state for an FPU context on a CPU is assumed to be
 * valid if the fpu->last_cpu matches the CPU, and the fpu_fpregs_owner_ctx
 * matches the FPU.
 *
 * If the FPU register state is valid, the kernel can skip restoring the
 * FPU state from memory.
 *
 * Any code that clobbers the FPU registers or updates the in-memory
 * FPU state for a task MUST let the rest of the kernel know that the
 * FPU registers are no longer valid for this task.
 *
 * Either one of these invalidation functions is enough. Invalidate
 * a resource you control: CPU if using the CPU for something else
 * (with preemption disabled), FPU for the current task, or a task that
 * is prevented from running by the current task.
 */
static inline void __cpu_invalidate_fpregs_state(void)
{
	__this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}

static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu)
{
	fpu->last_cpu = -1;
}

static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
{
	return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
}
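
/*
 * Illustrative sketch, not part of the original header: how a kernel-mode
 * FPU user might combine these helpers, modelled loosely on
 * __kernel_fpu_begin() in arch/x86/kernel/fpu/core.c.  The function name
 * is hypothetical; preemption is assumed to be disabled by the caller.
 */
static inline void example_kernel_fpu_claim(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (fpu->fpregs_active) {
		/* Preserve the task's register state before clobbering it: */
		copy_fpregs_to_fpstate(fpu);
	} else {
		/* Nothing to save, but the registers are about to change: */
		__cpu_invalidate_fpregs_state();
	}
}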

/*
 * These generally need preemption protection to work,
 * do try to avoid using these on their own:
 */
static inline void fpregs_deactivate(struct fpu *fpu)
{
	WARN_ON_FPU(!fpu->fpregs_active);

	fpu->fpregs_active = 0;
	this_cpu_write(fpu_fpregs_owner_ctx, NULL);
	trace_x86_fpu_regs_deactivated(fpu);
}

static inline void fpregs_activate(struct fpu *fpu)
{
	WARN_ON_FPU(fpu->fpregs_active);

	fpu->fpregs_active = 1;
	this_cpu_write(fpu_fpregs_owner_ctx, fpu);
	trace_x86_fpu_regs_activated(fpu);
}

/*
 * The question "does this thread have fpu access?"
 * is slightly racy, since preemption could come in
 * and revoke it immediately after the test.
 *
 * However, even in that very unlikely scenario,
 * we can just assume we have FPU access - typically
 * to save the FP state - we'll just take a #NM
 * fault and get the FPU access back.
 */
static inline int fpregs_active(void)
{
	return current->thread.fpu.fpregs_active;
}

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state.
 *    This is done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 */
static inline void
switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
	if (old_fpu->fpregs_active) {
		if (!copy_fpregs_to_fpstate(old_fpu))
			old_fpu->last_cpu = -1;
		else
			old_fpu->last_cpu = cpu;

		/* But leave fpu_fpregs_owner_ctx! */
		old_fpu->fpregs_active = 0;
		trace_x86_fpu_regs_deactivated(old_fpu);
	} else
		old_fpu->last_cpu = -1;
}

/*
 * Misc helper functions:
 */

/*
 * Set up the userspace FPU context for the new task, if the task
 * has used the FPU.
 */
static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
{
	bool preload = static_cpu_has(X86_FEATURE_FPU) &&
		       new_fpu->fpstate_active;

	if (preload) {
		if (!fpregs_state_valid(new_fpu, cpu))
			copy_kernel_to_fpregs(&new_fpu->state);
		fpregs_activate(new_fpu);
	}
}
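
/*
 * Illustrative sketch, not part of the original header: the intended
 * calling pattern of the two context switch stages, loosely following
 * __switch_to() in arch/x86/kernel/process_64.c.  Only the FPU-relevant
 * steps are shown and the function name is hypothetical.
 */
static inline void example_switch_fpu(struct fpu *prev_fpu, struct fpu *next_fpu)
{
	int cpu = smp_processor_id();

	/* Stage 1: save the old task's registers, while still on the old task: */
	switch_fpu_prepare(prev_fpu, cpu);

	/* ... the actual stack/register switch would happen here ... */

	/* Stage 2: load the new task's state, unless it is still live in registers: */
	switch_fpu_finish(next_fpu, cpu);
}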

/*
 * Needs to be preemption-safe.
 *
 * NOTE! user_fpu_begin() must be used only immediately before restoring
 * the save state. It does not do any saving/restoring on its own. In
 * lazy FPU mode, it is just an optimization to avoid a #NM exception,
 * the task can lose the FPU right after preempt_enable().
 */
static inline void user_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	preempt_disable();
	if (!fpregs_active())
		fpregs_activate(fpu);
	preempt_enable();
}

/*
 * MXCSR and XCR definitions:
 */

extern unsigned int mxcsr_feature_mask;

#define XCR_XFEATURE_ENABLED_MASK	0x00000000

static inline u64 xgetbv(u32 index)
{
	u32 eax, edx;

	asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
		     : "=a" (eax), "=d" (edx)
		     : "c" (index));
	return eax + ((u64)edx << 32);
}

static inline void xsetbv(u32 index, u64 value)
{
	u32 eax = value;
	u32 edx = value >> 32;

	asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
		     : : "a" (eax), "d" (edx), "c" (index));
}
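
/*
 * Illustrative sketch, not part of the original header: reading XCR0 via
 * xgetbv() to check whether the OS has enabled AVX (YMM) state management.
 * XFEATURE_MASK_YMM is assumed to be visible via <asm/fpu/xstate.h>; the
 * function name is hypothetical.
 */
static inline bool example_xcr0_has_ymm(void)
{
	u64 xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);	/* XCR0 */

	return (xcr0 & XFEATURE_MASK_YMM) != 0;
}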

#endif /* _ASM_X86_FPU_INTERNAL_H */