/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

#include <asm/fpu/internal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/types.h>
#include <asm/traps.h>

#include <linux/hardirq.h>
#include <linux/pkeys.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/fpu.h>

/*
 * Represents the initial FPU state. It's mostly (but not completely) zeroes,
 * depending on the FPU hardware format:
 */
union fpregs_state init_fpstate __read_mostly;

/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

static void kernel_fpu_disable(void)
{
	WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, true);
}

static void kernel_fpu_enable(void)
{
	WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, false);
}

static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}

static bool interrupted_kernel_fpu_idle(void)
{
	return !kernel_fpu_disabled();
}

/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();

	return regs && user_mode(regs);
}

/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);

void __kernel_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	WARN_ON_FPU(!irq_fpu_usable());

	kernel_fpu_disable();

	if (fpu->fpregs_active) {
		/*
		 * Ignore return value -- we don't care if reg state
		 * is clobbered.
		 */
		copy_fpregs_to_fpstate(fpu);
	} else {
		__cpu_invalidate_fpregs_state();
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);

void __kernel_fpu_end(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (fpu->fpregs_active)
		copy_kernel_to_fpregs(&fpu->state);

	kernel_fpu_enable();
}
EXPORT_SYMBOL(__kernel_fpu_end);

void kernel_fpu_begin(void)
{
	preempt_disable();
	__kernel_fpu_begin();
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);

void kernel_fpu_end(void)
{
	__kernel_fpu_end();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);

/*
 * CR0::TS save/restore functions:
 */
int irq_ts_save(void)
{
	/*
	 * If in process context and not atomic, we can take a spurious DNA fault.
	 * Otherwise, doing clts() in process context requires disabling preemption
	 * or some heavy lifting like kernel_fpu_begin()
	 */
	if (!in_atomic())
		return 0;

	if (read_cr0() & X86_CR0_TS) {
		clts();
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_ts_save);

void irq_ts_restore(int TS_state)
{
	if (TS_state)
		stts();
}
EXPORT_SYMBOL_GPL(irq_ts_restore);

/*
 * Save the FPU state (mark it for reload if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	preempt_disable();
	trace_x86_fpu_before_save(fpu);
	if (fpu->fpregs_active) {
		if (!copy_fpregs_to_fpstate(fpu)) {
			copy_kernel_to_fpregs(&fpu->state);
		}
	}
	trace_x86_fpu_after_save(fpu);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);

/*
 * Legacy x87 fpstate state init:
 */
static inline void fpstate_init_fstate(struct fregs_state *fp)
{
	fp->cwd = 0xffff037fu;
	fp->swd = 0xffff0000u;
	fp->twd = 0xffffffffu;
	fp->fos = 0xffff0000u;
}

void fpstate_init(union fpregs_state *state)
{
	if (!static_cpu_has(X86_FEATURE_FPU)) {
		fpstate_init_soft(&state->soft);
		return;
	}

	memset(state, 0, fpu_kernel_xstate_size);

	/*
	 * XRSTORS requires that this bit is set in xcomp_bv, or
	 * it will #GP. Make sure it is replaced after the memset().
	 */
	if (static_cpu_has(X86_FEATURE_XSAVES))
		state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT;

	if (static_cpu_has(X86_FEATURE_FXSR))
		fpstate_init_fxstate(&state->fxsave);
	else
		fpstate_init_fstate(&state->fsave);
}
EXPORT_SYMBOL_GPL(fpstate_init);

int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	dst_fpu->fpregs_active = 0;
	dst_fpu->last_cpu = -1;

	if (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU))
		return 0;

	WARN_ON_FPU(src_fpu != &current->thread.fpu);

	/*
	 * Don't let 'init optimized' areas of the XSAVE area
	 * leak into the child task:
	 */
	memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);

	/*
	 * Save current FPU registers directly into the child
	 * FPU context, without any memory-to-memory copying.
	 * In lazy mode, if the FPU context isn't loaded into
	 * fpregs, CR0.TS will be set and do_device_not_available
	 * will load the FPU context.
	 *
	 * We have to do all this with preemption disabled,
	 * mostly because of the FNSAVE case, because in that
	 * case we must not allow preemption in the window
	 * between the FNSAVE and us marking the context lazy.
	 *
	 * It shouldn't be an issue as even FNSAVE is plenty
	 * fast in terms of critical section length.
	 */
	preempt_disable();
	if (!copy_fpregs_to_fpstate(dst_fpu)) {
		memcpy(&src_fpu->state, &dst_fpu->state,
		       fpu_kernel_xstate_size);

		copy_kernel_to_fpregs(&src_fpu->state);
	}
	preempt_enable();

	trace_x86_fpu_copy_src(src_fpu);
	trace_x86_fpu_copy_dst(dst_fpu);

	return 0;
}

/*
 * Activate the current task's in-memory FPU context,
 * if it has not been used before:
 */
void fpu__activate_curr(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	if (!fpu->fpstate_active) {
		fpstate_init(&fpu->state);
		trace_x86_fpu_init_state(fpu);

		trace_x86_fpu_activate_state(fpu);
		/* Safe to do for the current task: */
		fpu->fpstate_active = 1;
	}
}
EXPORT_SYMBOL_GPL(fpu__activate_curr);

/*
 * This function must be called before we read a task's fpstate.
 *
 * If the task has not used the FPU before then initialize its
 * fpstate.
 *
 * If the task has used the FPU before then save it.
 */
void fpu__activate_fpstate_read(struct fpu *fpu)
{
	/*
	 * If fpregs are active (in the current CPU), then
	 * copy them to the fpstate:
	 */
	if (fpu->fpregs_active) {
		fpu__save(fpu);
	} else {
		if (!fpu->fpstate_active) {
			fpstate_init(&fpu->state);
			trace_x86_fpu_init_state(fpu);

			trace_x86_fpu_activate_state(fpu);
			/* Safe to do for current and for stopped child tasks: */
			fpu->fpstate_active = 1;
		}
	}
}

/*
 * This function must be called before we write a task's fpstate.
 *
 * If the task has used the FPU before then unlazy it.
 * If the task has not used the FPU before then initialize its fpstate.
 *
 * After this function call, after registers in the fpstate are
 * modified and the child task has woken up, the child task will
 * restore the modified FPU state from the modified context. If we
 * didn't clear its lazy status here then the lazy in-registers
 * state pending on its former CPU could be restored, corrupting
 * the modifications.
 */
void fpu__activate_fpstate_write(struct fpu *fpu)
{
	/*
	 * Only stopped child tasks can be used to modify the FPU
	 * state in the fpstate buffer:
	 */
	WARN_ON_FPU(fpu == &current->thread.fpu);

	if (fpu->fpstate_active) {
		/* Invalidate any lazy state: */
		__fpu_invalidate_fpregs_state(fpu);
	} else {
		fpstate_init(&fpu->state);
		trace_x86_fpu_init_state(fpu);

		trace_x86_fpu_activate_state(fpu);
		/* Safe to do for stopped child tasks: */
		fpu->fpstate_active = 1;
	}
}

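/*
 * Example (illustrative sketch, not part of the original file): a ptrace
 * regset ->set() handler would call
 * fpu__activate_fpstate_write(&target->thread.fpu) on the stopped child
 * before copying new register contents into target->thread.fpu.state, so
 * that stale lazy fpregs cannot overwrite the edit later.
 */
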
/*
 * This function must be called before we write the current
 * task's fpstate.
 *
 * This call gets the current FPU register state and moves
 * it in to the 'fpstate'.  Preemption is disabled so that
 * no writes to the 'fpstate' can occur from context
 * switches.
 *
 * Must be followed by a fpu__current_fpstate_write_end().
 */
void fpu__current_fpstate_write_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	/*
	 * Ensure that the context-switching code does not write
	 * over the fpstate while we are doing our update.
	 */
	preempt_disable();

	/*
	 * Move the fpregs in to the fpu's 'fpstate'.
	 */
	fpu__activate_fpstate_read(fpu);

	/*
	 * The caller is about to write to 'fpu'.  Ensure that no
	 * CPU thinks that its fpregs match the fpstate.  This
	 * ensures we will not be lazy and skip a XRSTOR in the
	 * future.
	 */
	__fpu_invalidate_fpregs_state(fpu);
}

/*
 * This function must be paired with fpu__current_fpstate_write_begin()
 *
 * This will ensure that the modified fpstate gets placed back in
 * the fpregs if necessary.
 *
 * Note: This function may be called whether or not an _actual_
 * write to the fpstate occurred.
 */
void fpu__current_fpstate_write_end(void)
{
	struct fpu *fpu = &current->thread.fpu;

	/*
	 * 'fpu' now has an updated copy of the state, but the
	 * registers may still be out of date.  Update them with
	 * an XRSTOR if they are active.
	 */
	if (fpregs_active())
		copy_kernel_to_fpregs(&fpu->state);

	/*
	 * Our update is done and the fpregs/fpstate are in sync
	 * if necessary.  Context switches can happen again.
	 */
	preempt_enable();
}

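/*
 * Example (illustrative sketch, not part of the original file): an in-kernel
 * update of the current task's saved FPU state brackets the modification with
 * the begin/end pair:
 *
 *	fpu__current_fpstate_write_begin();
 *	... modify current->thread.fpu.state ...
 *	fpu__current_fpstate_write_end();
 */
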
/*
 * 'fpu__restore()' is called to copy FPU registers from
 * the FPU fpstate to the live hw registers and to activate
 * access to the hardware registers, so that FPU instructions
 * can be used afterwards.
 *
 * Must be called with kernel preemption disabled (for example
 * with local interrupts disabled, as it is in the case of
 * do_device_not_available()).
 */
void fpu__restore(struct fpu *fpu)
{
	fpu__activate_curr(fpu);

	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
	kernel_fpu_disable();
	trace_x86_fpu_before_restore(fpu);
	fpregs_activate(fpu);
	copy_kernel_to_fpregs(&fpu->state);
	trace_x86_fpu_after_restore(fpu);
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);

/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct fpu *fpu)
{
	preempt_disable();

	if (fpu->fpregs_active) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		fpregs_deactivate(fpu);
	}

	fpu->fpstate_active = 0;

	trace_x86_fpu_dropped(fpu);

	preempt_enable();
}

/*
 * Clear FPU registers by setting them up from
 * the init fpstate:
 */
static inline void copy_init_fpstate_to_fpregs(void)
{
	if (use_xsave())
		copy_kernel_to_xregs(&init_fpstate.xsave, -1);
	else if (static_cpu_has(X86_FEATURE_FXSR))
		copy_kernel_to_fxregs(&init_fpstate.fxsave);
	else
		copy_kernel_to_fregs(&init_fpstate.fsave);

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		copy_init_pkru_to_fpregs();
}

/*
 * Clear the FPU state back to init state.
 *
 * Called by sys_execve(), by the signal handler code and by various
 * error paths.
 */
void fpu__clear(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */

	if (!static_cpu_has(X86_FEATURE_FPU)) {
		/* FPU state will be reallocated lazily at the first use. */
		fpu__drop(fpu);
	} else {
		if (!fpu->fpstate_active) {
			fpu__activate_curr(fpu);
			user_fpu_begin();
		}
		copy_init_fpstate_to_fpregs();
	}
}

/*
 * x87 math exception handling:
 */
int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
	int err;

	if (trap_nr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status.  0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit.  We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception.
		 */
		if (boot_cpu_has(X86_FEATURE_FXSR)) {
			cwd = fpu->state.fxsave.cwd;
			swd = fpu->state.fxsave.swd;
		} else {
			cwd = (unsigned short)fpu->state.fsave.cwd;
			swd = (unsigned short)fpu->state.fsave.swd;
		}

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register.  Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = MXCSR_DEFAULT;

		if (boot_cpu_has(X86_FEATURE_XMM))
			mxcsr = fpu->state.fxsave.mxcsr;

		err = ~(mxcsr >> 7) & mxcsr;
	}

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		return FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		return FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		return FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		return FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		return FPE_FLTRES;
	}

	/*
	 * If we're using IRQ 13, or supposedly even some trap
	 * X86_TRAP_MF implementations, it's possible
	 * we get a spurious trap, which is not an error.
	 */
	return 0;
}

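/*
 * Worked example (illustrative, not part of the original file): with the
 * default x87 control word 0x037f every exception is masked, so swd & ~cwd
 * is 0 and no signal code is produced.  If user space unmasks the zero-divide
 * exception (clears cwd bit 2) and then divides by zero, swd bit 2 gets set,
 * swd & ~cwd == 0x004 and the function returns FPE_FLTDIV.  The SIMD path is
 * analogous: mxcsr >> 7 aligns the mask bits (bits 7-12) with the status bits
 * (bits 0-5), so ~(mxcsr >> 7) & mxcsr keeps only unmasked exceptions that
 * actually fired.
 */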