/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_FPU_INTERNAL_H
#define _ASM_X86_FPU_INTERNAL_H

#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/user.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>

/*
 * High level FPU state handling functions:
 */
extern void fpu__activate_curr(struct fpu *fpu);
extern void fpu__activate_fpstate_read(struct fpu *fpu);
extern void fpu__activate_fpstate_write(struct fpu *fpu);
extern void fpu__save(struct fpu *fpu);
extern void fpu__restore(struct fpu *fpu);
extern int fpu__restore_sig(void __user *buf, int ia32_frame);
extern void fpu__drop(struct fpu *fpu);
extern int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
extern void fpu__clear(struct fpu *fpu);
extern int fpu__exception_code(struct fpu *fpu, int trap_nr);
extern int dump_fpu(struct pt_regs *ptregs, struct user_i387_struct *fpstate);

/*
 * Boot time FPU initialization functions:
 */
extern void fpu__init_cpu(void);
extern void fpu__init_system_xstate(void);
extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);

/*
 * Debugging facility:
 */
#ifdef CONFIG_X86_DEBUG_FPU
# define WARN_ON_FPU(x) WARN_ON_ONCE(x)
#else
# define WARN_ON_FPU(x) ({ (void)(x); 0; })
#endif

/*
 * FPU related CPU feature flag helper routines:
 */
static __always_inline __pure bool use_eager_fpu(void)
{
	return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
}

static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has_safe(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has_safe(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has_safe(X86_FEATURE_FXSR);
}

/*
 * fpstate handling functions:
 */

extern union fpregs_state init_fpstate;

extern void fpstate_init(union fpregs_state *state);
#ifdef CONFIG_MATH_EMULATION
extern void fpstate_init_soft(struct swregs_state *soft);
#else
static inline void fpstate_init_soft(struct swregs_state *soft) {}
#endif
static inline void fpstate_init_fxstate(struct fxregs_state *fx)
{
	fx->cwd = 0x37f;
	fx->mxcsr = MXCSR_DEFAULT;
}
extern void fpstate_sanitize_xstate(struct fpu *fpu);

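/*
 * user_insn() and check_insn() wrap a single FPU instruction in an
 * exception-table fixup: if the instruction faults, the fixup at label
 * "3:" stores -1 into 'err' and resumes at label "2:", so each macro
 * evaluates to 0 on success and -1 on a fault. user_insn() additionally
 * brackets the instruction with STAC/CLAC so that it may legitimately
 * access user-space memory under SMAP.
 */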
#define user_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile(ASM_STAC "\n"					\
		     "1:" #insn "\n\t"					\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3:  movl $-1,%[err]\n"				\
		     "    jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

#define check_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:  movl $-1,%[err]\n"				\
		     "    jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

static inline int copy_fregs_to_user(struct fregs_state __user *fx)
{
	return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
}

static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));

	/* See comment in copy_fxregs_to_kernel() below. */
	return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
}

static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
{
	int err;

	if (config_enabled(CONFIG_X86_32)) {
		err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	} else {
		if (config_enabled(CONFIG_AS_FXSAVEQ)) {
			err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
		} else {
			/* See comment in copy_fxregs_to_kernel() below. */
			err = check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
		}
	}
	/* Copying from a kernel buffer to FPU registers should never fail: */
	WARN_ON_FPU(err);
}

static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in copy_fxregs_to_kernel() below. */
	return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			 "m" (*fx));
}

static inline void copy_kernel_to_fregs(struct fregs_state *fx)
{
	int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));

	WARN_ON_FPU(err);
}

static inline int copy_user_to_fregs(struct fregs_state __user *fx)
{
	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void copy_fxregs_to_kernel(struct fpu *fpu)
{
	if (config_enabled(CONFIG_X86_32))
		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
	else {
		/* Using "rex64; fxsave %0" is broken because, if the memory
		 * operand uses any extended registers for addressing, a second
		 * REX prefix will be generated (to the assembler, rex64
		 * followed by semicolon is a separate instruction), and hence
		 * the 64-bitness is lost.
		 *
		 * Using "fxsaveq %0" would be the ideal choice, but is only
		 * supported starting with gas 2.16.
		 *
		 * Using, as a workaround, the properly prefixed form below
		 * isn't accepted by any binutils version so far released,
		 * complaining that the same type of prefix is used twice if
		 * an extended register is needed for addressing (fix submitted
		 * to mainline 2005-11-21).
		 *
		 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state.fxsave));
		 *
		 * This, however, we can work around by forcing the compiler to
		 * select an addressing mode that doesn't require extended
		 * registers.
		 */
		asm volatile( "rex64/fxsave (%[fx])"
			     : "=m" (fpu->state.fxsave)
			     : [fx] "R" (&fpu->state.fxsave));
	}
}

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"

/* xstate instruction fault handler: */
#define xstate_fault(__err)		\
					\
	".section .fixup,\"ax\"\n"	\
					\
	"3:  movl $-2,%[_err]\n"	\
	"    jmp  2b\n"			\
					\
	".previous\n"			\
					\
	_ASM_EXTABLE(1b, 3b)		\
	: [_err] "=r" (__err)

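/*
 * Usage sketch: xstate_fault() is appended to an asm statement whose
 * instruction carries the local label "1:" and whose fall-through path
 * carries "2:". If the instruction faults, the fixup at "3:" stores -2
 * into the error operand and jumps back to "2:", e.g.:
 *
 *	asm volatile("1:" XSAVE "\n\t"
 *		     "2:\n\t"
 *		     xstate_fault(err)
 *		     : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
 *		     : "memory");
 */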
/*
 * This function is called only during boot time when the x86 caps are not
 * set up and alternative instructions can not be used yet.
 */
static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err = 0;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		asm volatile("1:"XSAVES"\n\t"
			     "2:\n\t"
			     xstate_fault(err)
			     : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
			     : "memory");
	else
		asm volatile("1:"XSAVE"\n\t"
			     "2:\n\t"
			     xstate_fault(err)
			     : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
			     : "memory");

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * This function is called only during boot time when the x86 caps are not
 * set up and alternative instructions can not be used yet.
 */
static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err = 0;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		asm volatile("1:"XRSTORS"\n\t"
			     "2:\n\t"
			     xstate_fault(err)
			     : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
			     : "memory");
	else
		asm volatile("1:"XRSTOR"\n\t"
			     "2:\n\t"
			     xstate_fault(err)
			     : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
			     : "memory");

	/* We should never fault when copying from a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * Save processor xstate to the xsave area.
 */
static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err = 0;

	WARN_ON(!alternatives_patched);

	/*
	 * If XSAVES is enabled, it replaces XSAVEOPT because it supports
	 * the compacted format and supervisor states, in addition to the
	 * modified optimization that XSAVEOPT provides.
	 *
	 * Otherwise, if XSAVEOPT is enabled, it replaces XSAVE because
	 * XSAVEOPT supports the modified optimization, which XSAVE does not.
	 *
	 * If neither XSAVES nor XSAVEOPT is enabled, use plain XSAVE.
	 */
	alternative_input_2(
		"1:"XSAVE,
		XSAVEOPT,
		X86_FEATURE_XSAVEOPT,
		XSAVES,
		X86_FEATURE_XSAVES,
		[xstate] "D" (xstate), "a" (lmask), "d" (hmask) :
		"memory");
	asm volatile("2:\n\t"
		     xstate_fault(err)
		     : "0" (err)
		     : "memory");

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * Restore processor xstate from the xsave area.
 */
static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err = 0;

	/*
	 * Use XRSTORS to restore context if it is enabled. XRSTORS supports
	 * the compacted format of the xsave area, which XRSTOR does not.
	 */
	alternative_input(
		"1: " XRSTOR,
		XRSTORS,
		X86_FEATURE_XSAVES,
		"D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask)
		: "memory");

	asm volatile("2:\n"
		     xstate_fault(err)
		     : "0" (err)
		     : "memory");

	/* We should never fault when copying from a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * Save xstate to the user space xsave area.
 *
 * We don't use the modified optimization (XSAVEOPT/XSAVES), because the
 * xrstor/xrstors that the hardware's modified tracking pairs with might
 * have been executed for a different application.
 *
 * We don't use the compacted format either, for backward compatibility
 * with old applications which don't understand the compacted format of
 * the xsave area.
 */
static inline int copy_xregs_to_user(struct xregs_state __user *buf)
{
	int err;

	/*
	 * Clear the xsave header first, so that reserved fields are
	 * initialized to zero.
	 */
	err = __clear_user(&buf->header, sizeof(buf->header));
	if (unlikely(err))
		return -EFAULT;

	__asm__ __volatile__(ASM_STAC "\n"
			     "1:"XSAVE"\n"
			     "2: " ASM_CLAC "\n"
			     xstate_fault(err)
			     : "D" (buf), "a" (-1), "d" (-1), "0" (err)
			     : "memory");
	return err;
}

/*
 * Restore xstate from the user space xsave area.
 */
static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
{
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err = 0;

	__asm__ __volatile__(ASM_STAC "\n"
			     "1:"XRSTOR"\n"
			     "2: " ASM_CLAC "\n"
			     xstate_fault(err)
			     : "D" (xstate), "a" (lmask), "d" (hmask), "0" (err)
			     : "memory");	/* memory required? */
	return err;
}

/*
 * These must be called with preempt disabled. Returns
 * 'true' if the FPU state is still intact and we can
 * keep registers active.
 *
 * The legacy FNSAVE instruction clears all FPU state
 * unconditionally, so registers are essentially destroyed.
 * Modern FPU state can be kept in registers, if there are
 * no pending FP exceptions.
 */
static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
{
	if (likely(use_xsave())) {
		copy_xregs_to_kernel(&fpu->state.xsave);
		return 1;
	}

	if (likely(use_fxsr())) {
		copy_fxregs_to_kernel(fpu);
		return 1;
	}

	/*
	 * Legacy FPU register saving: FNSAVE always clears FPU registers,
	 * so we have to mark them inactive:
	 */
	asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));

	return 0;
}

static inline void __copy_kernel_to_fpregs(struct fpu *fpu)
{
	if (use_xsave()) {
		copy_kernel_to_xregs(&fpu->state.xsave, -1);
	} else {
		if (use_fxsr())
			copy_kernel_to_fxregs(&fpu->state.fxsave);
		else
			copy_kernel_to_fregs(&fpu->state.fsave);
	}
}

static inline void copy_kernel_to_fpregs(struct fpu *fpu)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
	 * pending. Clear the x87 state here by setting it to fixed values.
	 * The memory operand of the 'fildl' below is just a dummy variable
	 * that is likely to be resident in the L1 cache.
	 */
	if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %P[addr]"	/* set F?P to defined value */
			: : [addr] "m" (fpu->fpregs_active));
	}

	__copy_kernel_to_fpregs(fpu);
}

extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);

/*
 * FPU context switch related helper methods:
 */

DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

/*
 * Must be run with preemption disabled: this clears the fpu_fpregs_owner_ctx
 * on this CPU.
 *
 * This will disable any lazy FPU state restore of the current FPU state,
 * but if the current thread owns the FPU, it will still be saved back
 * at the next context switch.
 */
static inline void __cpu_disable_lazy_restore(unsigned int cpu)
{
	per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
}

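/*
 * Returns true if this CPU's registers still hold 'fpu's most recent
 * state: the fpu must be the last owner of this CPU's registers, and it
 * must not have run on any other CPU since its state was loaded here.
 */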
static inline int fpu_want_lazy_restore(struct fpu *fpu, unsigned int cpu)
{
	return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
}

/*
 * Wrap lazy FPU TS handling in a 'hw fpregs activation/deactivation'
 * idiom, which is then paired with the sw-flag (fpregs_active) later on:
 */

static inline void __fpregs_activate_hw(void)
{
	if (!use_eager_fpu())
		clts();
}

static inline void __fpregs_deactivate_hw(void)
{
	if (!use_eager_fpu())
		stts();
}

/* Must be paired with an 'stts' (__fpregs_deactivate_hw()) after! */
static inline void __fpregs_deactivate(struct fpu *fpu)
{
	WARN_ON_FPU(!fpu->fpregs_active);

	fpu->fpregs_active = 0;
	this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}

/* Must be paired with a 'clts' (__fpregs_activate_hw()) before! */
static inline void __fpregs_activate(struct fpu *fpu)
{
	WARN_ON_FPU(fpu->fpregs_active);

	fpu->fpregs_active = 1;
	this_cpu_write(fpu_fpregs_owner_ctx, fpu);
}

/*
 * The question "does this thread have fpu access?"
 * is slightly racy, since preemption could come in
 * and revoke it immediately after the test.
 *
 * However, even in that very unlikely scenario,
 * we can just assume we have FPU access - typically
 * to save the FP state - and we'll just take a #NM
 * fault and get the FPU access back.
 */
static inline int fpregs_active(void)
{
	return current->thread.fpu.fpregs_active;
}

/*
 * Encapsulate the CR0.TS handling together with the
 * software flag.
 *
 * These generally need preemption protection to work,
 * so do try to avoid using them on their own.
 */
static inline void fpregs_activate(struct fpu *fpu)
{
	__fpregs_activate_hw();
	__fpregs_activate(fpu);
}

static inline void fpregs_deactivate(struct fpu *fpu)
{
	__fpregs_deactivate(fpu);
	__fpregs_deactivate_hw();
}

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state and
 *    sets the new state of the CR0.TS bit. This is
 *    done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 */
typedef struct { int preload; } fpu_switch_t;

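/*
 * Usage sketch (illustrative, simplified): the context switch code pairs
 * the two stages around the rest of the task switch, roughly like:
 *
 *	fpu_switch_t fpu_switch;
 *
 *	fpu_switch = switch_fpu_prepare(&prev->thread.fpu,
 *					&next->thread.fpu, cpu);
 *	... switch stacks, segments, etc. ...
 *	switch_fpu_finish(&next->thread.fpu, fpu_switch);
 */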
static inline fpu_switch_t
switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
{
	fpu_switch_t fpu;

	/*
	 * If the task has used the FPU, pre-load its state: always on
	 * eager-FPU (xsave) processors, otherwise only if more than 5
	 * consecutive context switches used math.
	 */
	fpu.preload = new_fpu->fpstate_active &&
		      (use_eager_fpu() || new_fpu->counter > 5);

	if (old_fpu->fpregs_active) {
		if (!copy_fpregs_to_fpstate(old_fpu))
			old_fpu->last_cpu = -1;
		else
			old_fpu->last_cpu = cpu;

		/* But leave fpu_fpregs_owner_ctx! */
		old_fpu->fpregs_active = 0;

		/* Don't change CR0.TS if we just switch! */
		if (fpu.preload) {
			new_fpu->counter++;
			__fpregs_activate(new_fpu);
			prefetch(&new_fpu->state);
		} else {
			__fpregs_deactivate_hw();
		}
	} else {
		old_fpu->counter = 0;
		old_fpu->last_cpu = -1;
		if (fpu.preload) {
			new_fpu->counter++;
			if (fpu_want_lazy_restore(new_fpu, cpu))
				fpu.preload = 0;
			else
				prefetch(&new_fpu->state);
			fpregs_activate(new_fpu);
		}
	}
	return fpu;
}

/*
 * Misc helper functions:
 */

/*
 * By the time this gets called, we've already cleared CR0.TS and
 * given the process the FPU if we are going to preload the FPU
 * state - all we need to do is to conditionally restore the register
 * state itself.
 */
static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
{
	if (fpu_switch.preload)
		copy_kernel_to_fpregs(new_fpu);
}

/*
 * Needs to be preemption-safe.
 *
 * NOTE! user_fpu_begin() must be used only immediately before restoring
 * the save state. It does not do any saving/restoring on its own. In
 * lazy FPU mode, it is just an optimization to avoid a #NM exception;
 * the task can lose the FPU right after preempt_enable().
 */
static inline void user_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	preempt_disable();
	if (!fpregs_active())
		fpregs_activate(fpu);
	preempt_enable();
}

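/*
 * Usage sketch (illustrative): a caller that restores registers straight
 * from a user-space buffer pairs it like this, so the FPU is active when
 * the restore instruction runs:
 *
 *	user_fpu_begin();
 *	if (copy_user_to_fxregs(buf))
 *		fpu__clear(fpu);
 */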
/*
 * MXCSR and XCR definitions:
 */

extern unsigned int mxcsr_feature_mask;

#define XCR_XFEATURE_ENABLED_MASK	0x00000000

static inline u64 xgetbv(u32 index)
{
	u32 eax, edx;

	asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
		     : "=a" (eax), "=d" (edx)
		     : "c" (index));
	return eax + ((u64)edx << 32);
}

static inline void xsetbv(u32 index, u64 value)
{
	u32 eax = value;
	u32 edx = value >> 32;

	asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
		     : : "a" (eax), "d" (edx), "c" (index));
}

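/*
 * Usage sketch (illustrative): XCR0 holds the set of enabled xfeatures,
 * so reading or writing it goes through the index defined above:
 *
 *	u64 xfeatures = xgetbv(XCR_XFEATURE_ENABLED_MASK);
 *	xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures);
 */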
#endif /* _ASM_X86_FPU_INTERNAL_H */