/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/internal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/types.h>
#include <asm/traps.h>

#include <linux/hardirq.h>
#include <linux/pkeys.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/fpu.h>

/*
 * Represents the initial FPU state. It's mostly (but not completely) zeroes,
 * depending on the FPU hardware format:
 */
union fpregs_state init_fpstate __read_mostly;

/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

static void kernel_fpu_disable(void)
{
	WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, true);
}

static void kernel_fpu_enable(void)
{
	WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, false);
}

static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}

static bool interrupted_kernel_fpu_idle(void)
{
	return !kernel_fpu_disabled();
}

/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();
	return regs && user_mode(regs);
}

/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);

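/*
 * Illustrative usage sketch (not part of the original file): code that
 * wants to use the FPU from IRQ/softirq context is expected to guard the
 * kernel_fpu_begin()/end() pair with this check:
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		...SIMD/FPU instructions...
 *		kernel_fpu_end();
 *	}
 *
 * The per-CPU 'in_kernel_fpu' flag above is what lets
 * interrupted_kernel_fpu_idle() reject nested use.
 */
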
void __kernel_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	WARN_ON_FPU(!irq_fpu_usable());

	kernel_fpu_disable();

	if (fpu->fpstate_active) {
		/*
		 * Ignore return value -- we don't care if reg state
		 * is clobbered.
		 */
		copy_fpregs_to_fpstate(fpu);
	} else {
		__cpu_invalidate_fpregs_state();
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);

void __kernel_fpu_end(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (fpu->fpstate_active)
		copy_kernel_to_fpregs(&fpu->state);

	kernel_fpu_enable();
}
EXPORT_SYMBOL(__kernel_fpu_end);

void kernel_fpu_begin(void)
{
	preempt_disable();
	__kernel_fpu_begin();
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);

void kernel_fpu_end(void)
{
	__kernel_fpu_end();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);

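/*
 * Usage sketch (illustrative, assumed typical caller, not part of the
 * original file): in process context the preempt-safe wrappers are used
 * directly, with no irq_fpu_usable() check needed:
 *
 *	kernel_fpu_begin();
 *	...FPU/SIMD work, e.g. a RAID or crypto inner loop...
 *	kernel_fpu_end();
 *
 * Preemption is disabled across the whole region, so the enclosed code
 * must not sleep.
 */
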
/*
 * Save the FPU state (mark it for reload if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	preempt_disable();
	WARN_ON_FPU(fpu->fpstate_active != fpu->fpregs_active);

	trace_x86_fpu_before_save(fpu);
	if (fpu->fpstate_active) {
		if (!copy_fpregs_to_fpstate(fpu)) {
			copy_kernel_to_fpregs(&fpu->state);
		}
	}
	trace_x86_fpu_after_save(fpu);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);

/*
 * Legacy x87 fpstate state init:
 */
static inline void fpstate_init_fstate(struct fregs_state *fp)
{
	fp->cwd = 0xffff037fu;
	fp->swd = 0xffff0000u;
	fp->twd = 0xffffffffu;
	fp->fos = 0xffff0000u;
}

void fpstate_init(union fpregs_state *state)
{
	if (!static_cpu_has(X86_FEATURE_FPU)) {
		fpstate_init_soft(&state->soft);
		return;
	}

	memset(state, 0, fpu_kernel_xstate_size);

	if (static_cpu_has(X86_FEATURE_XSAVES))
		fpstate_init_xstate(&state->xsave);
	if (static_cpu_has(X86_FEATURE_FXSR))
		fpstate_init_fxstate(&state->fxsave);
	else
		fpstate_init_fstate(&state->fsave);
}
EXPORT_SYMBOL_GPL(fpstate_init);

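/*
 * Illustrative note (not part of the original file): this is the routine
 * that gives a task's in-memory context its starting contents, e.g. in
 * fpu__activate_curr() below:
 *
 *	fpstate_init(&fpu->state);
 *
 * The XSAVES/FXSR/legacy branches only fill in the non-zero header and
 * default control-word fields; everything else stays zeroed by the
 * memset() above.
 */
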
int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	dst_fpu->fpregs_active = 0;
	dst_fpu->last_cpu = -1;

	if (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU))
		return 0;

	WARN_ON_FPU(src_fpu != &current->thread.fpu);

	/*
	 * Don't let 'init optimized' areas of the XSAVE area
	 * leak into the child task:
	 */
	memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);

	/*
	 * Save current FPU registers directly into the child
	 * FPU context, without any memory-to-memory copying.
	 * In lazy mode, if the FPU context isn't loaded into
	 * fpregs, CR0.TS will be set and do_device_not_available
	 * will load the FPU context.
	 *
	 * We have to do all this with preemption disabled,
	 * mostly because of the FNSAVE case, because in that
	 * case we must not allow preemption in the window
	 * between the FNSAVE and us marking the context lazy.
	 *
	 * It shouldn't be an issue as even FNSAVE is plenty
	 * fast in terms of critical section length.
	 */
	preempt_disable();
	if (!copy_fpregs_to_fpstate(dst_fpu)) {
		memcpy(&src_fpu->state, &dst_fpu->state,
		       fpu_kernel_xstate_size);

		copy_kernel_to_fpregs(&src_fpu->state);
	}
	preempt_enable();

	trace_x86_fpu_copy_src(src_fpu);
	trace_x86_fpu_copy_dst(dst_fpu);

	return 0;
}

/*
 * Activate the current task's in-memory FPU context,
 * if it has not been used before:
 */
void fpu__activate_curr(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	if (!fpu->fpstate_active) {
		fpstate_init(&fpu->state);
		trace_x86_fpu_init_state(fpu);

		trace_x86_fpu_activate_state(fpu);
		/* Safe to do for the current task: */
		fpu->fpstate_active = 1;
	}
}
EXPORT_SYMBOL_GPL(fpu__activate_curr);

/*
 * This function must be called before we read a task's fpstate.
 *
 * If the task has not used the FPU before then initialize its
 * fpstate.
 *
 * If the task has used the FPU before then save it.
 */
void fpu__activate_fpstate_read(struct fpu *fpu)
{
	WARN_ON_FPU(fpu->fpstate_active != fpu->fpregs_active);
	/*
	 * If fpregs are active (in the current CPU), then
	 * copy them to the fpstate:
	 */
	if (fpu->fpstate_active) {
		fpu__save(fpu);
	} else {
		if (!fpu->fpstate_active) {
			fpstate_init(&fpu->state);
			trace_x86_fpu_init_state(fpu);

			trace_x86_fpu_activate_state(fpu);
			/* Safe to do for current and for stopped child tasks: */
			fpu->fpstate_active = 1;
		}
	}
}

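/*
 * Illustrative sketch (hypothetical regset-style caller, not part of the
 * original file): a reader of a stopped child's FPU state would do:
 *
 *	fpu__activate_fpstate_read(&child->thread.fpu);
 *	memcpy(buf, &child->thread.fpu.state.fxsave, size);
 *
 * which guarantees 'state' holds valid contents before the copy-out.
 */
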
/*
 * This function must be called before we write a task's fpstate.
 *
 * If the task has used the FPU before then unlazy it.
 * If the task has not used the FPU before then initialize its fpstate.
 *
 * After this function call, after registers in the fpstate are
 * modified and the child task has woken up, the child task will
 * restore the modified FPU state from the modified context. If we
 * didn't clear its lazy status here then the lazy in-registers
 * state pending on its former CPU could be restored, corrupting
 * the modifications.
 */
void fpu__activate_fpstate_write(struct fpu *fpu)
{
	/*
	 * Only stopped child tasks can be used to modify the FPU
	 * state in the fpstate buffer:
	 */
	WARN_ON_FPU(fpu == &current->thread.fpu);

	if (fpu->fpstate_active) {
		/* Invalidate any lazy state: */
		__fpu_invalidate_fpregs_state(fpu);
	} else {
		fpstate_init(&fpu->state);
		trace_x86_fpu_init_state(fpu);

		trace_x86_fpu_activate_state(fpu);
		/* Safe to do for stopped child tasks: */
		fpu->fpstate_active = 1;
	}
}

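/*
 * Illustrative counterpart to the read side above (hypothetical
 * ptrace-style caller, not part of the original file):
 *
 *	fpu__activate_fpstate_write(&child->thread.fpu);
 *	memcpy(&child->thread.fpu.state.fxsave, buf, size);
 *
 * After the invalidation above, the stale in-register copy on the
 * child's former CPU can no longer overwrite the modification when the
 * child is rescheduled.
 */
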
/*
 * This function must be called before we write the current
 * task's fpstate.
 *
 * This call gets the current FPU register state and moves
 * it in to the 'fpstate'.  Preemption is disabled so that
 * no writes to the 'fpstate' can occur from context
 * switches.
 *
 * Must be followed by a fpu__current_fpstate_write_end().
 */
void fpu__current_fpstate_write_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	/*
	 * Ensure that the context-switching code does not write
	 * over the fpstate while we are doing our update.
	 */
	preempt_disable();

	/*
	 * Move the fpregs in to the fpu's 'fpstate'.
	 */
	fpu__activate_fpstate_read(fpu);

	/*
	 * The caller is about to write to 'fpu'.  Ensure that no
	 * CPU thinks that its fpregs match the fpstate.  This
	 * ensures we will not be lazy and skip a XRSTOR in the
	 * future.
	 */
	__fpu_invalidate_fpregs_state(fpu);
}

/*
 * This function must be paired with fpu__current_fpstate_write_begin().
 *
 * This will ensure that the modified fpstate gets placed back in
 * the fpregs if necessary.
 *
 * Note: This function may be called whether or not an _actual_
 * write to the fpstate occurred.
 */
void fpu__current_fpstate_write_end(void)
{
	struct fpu *fpu = &current->thread.fpu;

	WARN_ON_FPU(fpu->fpstate_active != fpu->fpregs_active);

	/*
	 * 'fpu' now has an updated copy of the state, but the
	 * registers may still be out of date.  Update them with
	 * an XRSTOR if they are active.
	 */
	if (fpu->fpstate_active)
		copy_kernel_to_fpregs(&fpu->state);

	/*
	 * Our update is done and the fpregs/fpstate are in sync
	 * if necessary.  Context switches can happen again.
	 */
	preempt_enable();
}

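/*
 * Pairing sketch (illustrative; the modified field is only an example,
 * not part of the original file): updating the current task's fpstate
 * in place looks like:
 *
 *	fpu__current_fpstate_write_begin();
 *	current->thread.fpu.state.fxsave.mxcsr = new_mxcsr;
 *	fpu__current_fpstate_write_end();
 *
 * write_end() re-loads the registers from the fpstate if they are
 * active, so the modification becomes visible to the hardware too.
 */
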
/*
 * 'fpu__restore()' is called to copy FPU registers from
 * the FPU fpstate to the live hw registers and to activate
 * access to the hardware registers, so that FPU instructions
 * can be used afterwards.
 *
 * Must be called with kernel preemption disabled (for example
 * with local interrupts disabled, as it is in the case of
 * do_device_not_available()).
 */
void fpu__restore(struct fpu *fpu)
{
	fpu__activate_curr(fpu);

	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
	kernel_fpu_disable();
	trace_x86_fpu_before_restore(fpu);
	fpregs_activate(fpu);
	copy_kernel_to_fpregs(&fpu->state);
	trace_x86_fpu_after_restore(fpu);
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);

/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct fpu *fpu)
{
	preempt_disable();

	if (fpu == &current->thread.fpu) {
		WARN_ON_FPU(fpu->fpstate_active != fpu->fpregs_active);

		if (fpu->fpstate_active) {
			/* Ignore delayed exceptions from user space */
			asm volatile("1: fwait\n"
				     "2:\n"
				     _ASM_EXTABLE(1b, 2b));
			if (fpu->fpregs_active)
				fpregs_deactivate(fpu);
		}
	} else {
		WARN_ON_FPU(fpu->fpregs_active);
	}

	fpu->fpstate_active = 0;

	trace_x86_fpu_dropped(fpu);

	preempt_enable();
}

/*
 * Clear FPU registers by setting them up from
 * the init fpstate:
 */
static inline void copy_init_fpstate_to_fpregs(void)
{
	if (use_xsave())
		copy_kernel_to_xregs(&init_fpstate.xsave, -1);
	else if (static_cpu_has(X86_FEATURE_FXSR))
		copy_kernel_to_fxregs(&init_fpstate.fxsave);
	else
		copy_kernel_to_fregs(&init_fpstate.fsave);

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		copy_init_pkru_to_fpregs();
}

/*
 * Clear the FPU state back to init state.
 *
 * Called by sys_execve(), by the signal handler code and by various
 * error paths.
 */
void fpu__clear(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */

	fpu__drop(fpu);

	/*
	 * Make sure fpstate is cleared and initialized.
	 */
	if (static_cpu_has(X86_FEATURE_FPU)) {
		preempt_disable();
		fpu__activate_curr(fpu);
		user_fpu_begin();
		copy_init_fpstate_to_fpregs();
		preempt_enable();
	}
}

/*
 * x87 math exception handling:
 */
int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
	int err;

	if (trap_nr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status.  0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit.  We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception.
		 */
		if (boot_cpu_has(X86_FEATURE_FXSR)) {
			cwd = fpu->state.fxsave.cwd;
			swd = fpu->state.fxsave.swd;
		} else {
			cwd = (unsigned short)fpu->state.fsave.cwd;
			swd = (unsigned short)fpu->state.fsave.swd;
		}

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register.  Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = MXCSR_DEFAULT;

		if (boot_cpu_has(X86_FEATURE_XMM))
			mxcsr = fpu->state.fxsave.mxcsr;

		err = ~(mxcsr >> 7) & mxcsr;
	}

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		return FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		return FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		return FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		return FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		return FPE_FLTRES;
	}

	/*
	 * If we're using IRQ 13, or supposedly even some trap
	 * X86_TRAP_MF implementations, it's possible
	 * we get a spurious trap, which is not an error.
	 */
	return 0;
}

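/*
 * Worked example (illustrative, not part of the original file): with
 * divide-by-zero unmasked in the x87 control word (cwd == 0x037b, i.e.
 * ZM bit 0x004 clear) and the ZE status bit set (swd == 0x0004):
 *
 *	err = swd & ~cwd == 0x0004  ->  FPE_FLTDIV
 *
 * The SIMD case is analogous: with mxcsr == 0x1d84 (ZM mask bit 0x0200
 * clear, ZE flag bit 0x0004 set), the '>> 7' shift aligns the mask bits
 * with the flag bits, so ~(mxcsr >> 7) & mxcsr again has 0x0004 set and
 * FPE_FLTDIV is returned.
 */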