// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

#include <asm/fpu/internal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/types.h>
#include <asm/traps.h>
#include <asm/irq_regs.h>

#include <linux/hardirq.h>
#include <linux/pkeys.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/fpu.h>

/*
 * Represents the initial FPU state. It's mostly (but not completely) zeroes,
 * depending on the FPU hardware format:
 */
union fpregs_state init_fpstate __read_mostly;

/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}

static bool interrupted_kernel_fpu_idle(void)
{
	return !kernel_fpu_disabled();
}

/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();

	return regs && user_mode(regs);
}

/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);
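
/*
 * Illustrative sketch (not part of this file): a caller that wants to use
 * SIMD opportunistically from a context that might be an interrupt would
 * pair this check with kernel_fpu_begin()/end(). do_simd_work() and
 * do_scalar_fallback() are hypothetical helpers.
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		do_simd_work();
 *		kernel_fpu_end();
 *	} else {
 *		do_scalar_fallback();
 *	}
 */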

/*
 * These must be called with preempt disabled. Returns
 * 'true' if the FPU state is still intact and we can
 * keep registers active.
 *
 * The legacy FNSAVE instruction cleared all FPU state
 * unconditionally, so registers are essentially destroyed.
 * Modern FPU state can be kept in registers, if there are
 * no pending FP exceptions.
 */
int copy_fpregs_to_fpstate(struct fpu *fpu)
{
	if (likely(use_xsave())) {
		copy_xregs_to_kernel(&fpu->state.xsave);

		/*
		 * AVX512 state is tracked here because its use is
		 * known to slow the max clock speed of the core.
		 */
		if (fpu->state.xsave.header.xfeatures & XFEATURE_MASK_AVX512)
			fpu->avx512_timestamp = jiffies;

		return 1;
	}

	if (likely(use_fxsr())) {
		copy_fxregs_to_kernel(fpu);
		return 1;
	}

	/*
	 * Legacy FPU register saving, FNSAVE always clears FPU registers,
	 * so we have to mark them inactive:
	 */
	asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));

	return 0;
}
EXPORT_SYMBOL(copy_fpregs_to_fpstate);
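
/*
 * Caller-side sketch of the return-value contract described above: on
 * FNSAVE-only hardware the save clobbers the registers, so a caller that
 * still needs live register contents must reload them from the fpstate,
 * exactly as fpu__save() below does:
 *
 *	if (!copy_fpregs_to_fpstate(fpu))
 *		copy_kernel_to_fpregs(&fpu->state);
 */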

void kernel_fpu_begin_mask(unsigned int kfpu_mask)
{
	preempt_disable();

	WARN_ON_FPU(!irq_fpu_usable());
	WARN_ON_FPU(this_cpu_read(in_kernel_fpu));

	this_cpu_write(in_kernel_fpu, true);

	if (!(current->flags & PF_KTHREAD) &&
	    !test_thread_flag(TIF_NEED_FPU_LOAD)) {
		set_thread_flag(TIF_NEED_FPU_LOAD);
		/*
		 * Ignore return value -- we don't care if reg state
		 * is clobbered.
		 */
		copy_fpregs_to_fpstate(&current->thread.fpu);
	}
	__cpu_invalidate_fpregs_state();

	/* Put sane initial values into the control registers. */
	if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM))
		ldmxcsr(MXCSR_DEFAULT);

	if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
		asm volatile ("fninit");
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);

void kernel_fpu_end(void)
{
	WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));

	this_cpu_write(in_kernel_fpu, false);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
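
/*
 * Usage sketch (assumed wrapper, for illustration): kernel_fpu_begin() is
 * implemented elsewhere as kernel_fpu_begin_mask() with a default mask, so
 * the usual pattern around SIMD code is simply:
 *
 *	kernel_fpu_begin();
 *	... SSE/AVX code ...
 *	kernel_fpu_end();
 *
 * A caller that touches only SSE state could pass KFPU_MXCSR alone to
 * kernel_fpu_begin_mask() and skip the slower "fninit".
 */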

/*
 * Save the FPU state (mark it for reload if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	fpregs_lock();
	trace_x86_fpu_before_save(fpu);

	if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
		if (!copy_fpregs_to_fpstate(fpu)) {
			copy_kernel_to_fpregs(&fpu->state);
		}
	}

	trace_x86_fpu_after_save(fpu);
	fpregs_unlock();
}

/*
 * Legacy x87 fpstate state init:
 */
static inline void fpstate_init_fstate(struct fregs_state *fp)
{
	fp->cwd = 0xffff037fu;	/* control word: all exceptions masked */
	fp->swd = 0xffff0000u;	/* status word: cleared */
	fp->twd = 0xffffffffu;	/* tag word: all registers empty */
	fp->fos = 0xffff0000u;
}

void fpstate_init(union fpregs_state *state)
{
	if (!static_cpu_has(X86_FEATURE_FPU)) {
		fpstate_init_soft(&state->soft);
		return;
	}

	memset(state, 0, fpu_kernel_xstate_size);

	if (static_cpu_has(X86_FEATURE_XSAVES))
		fpstate_init_xstate(&state->xsave);
	if (static_cpu_has(X86_FEATURE_FXSR))
		fpstate_init_fxstate(&state->fxsave);
	else
		fpstate_init_fstate(&state->fsave);
}
EXPORT_SYMBOL_GPL(fpstate_init);
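
/*
 * Boot-time usage sketch (assumed caller; the actual code lives in the FPU
 * init path, not in this file): the init_fpstate template declared at the
 * top of this file is filled in once with a call of this form:
 *
 *	fpstate_init(&init_fpstate);
 */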

int fpu__copy(struct task_struct *dst, struct task_struct *src)
{
	struct fpu *dst_fpu = &dst->thread.fpu;
	struct fpu *src_fpu = &src->thread.fpu;

	dst_fpu->last_cpu = -1;

	if (!static_cpu_has(X86_FEATURE_FPU))
		return 0;

	WARN_ON_FPU(src_fpu != &current->thread.fpu);

	/*
	 * Don't let 'init optimized' areas of the XSAVE area
	 * leak into the child task:
	 */
	memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);

	/*
	 * If the FPU registers are not current just memcpy() the state.
	 * Otherwise save current FPU registers directly into the child's FPU
	 * context, without any memory-to-memory copying.
	 *
	 * ( The function 'fails' in the FNSAVE case, which destroys
	 *   register contents so we have to load them back. )
	 */
	fpregs_lock();
	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		memcpy(&dst_fpu->state, &src_fpu->state, fpu_kernel_xstate_size);

	else if (!copy_fpregs_to_fpstate(dst_fpu))
		copy_kernel_to_fpregs(&dst_fpu->state);

	fpregs_unlock();

	set_tsk_thread_flag(dst, TIF_NEED_FPU_LOAD);

	trace_x86_fpu_copy_src(src_fpu);
	trace_x86_fpu_copy_dst(dst_fpu);

	return 0;
}
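
/*
 * Usage sketch (assumed caller, for illustration): the fork path reaches
 * this function through arch_dup_task_struct(), roughly:
 *
 *	int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 *	{
 *		memcpy(dst, src, arch_task_struct_size);
 *		return fpu__copy(dst, src);
 *	}
 */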

/*
 * Activate the current task's in-memory FPU context,
 * if it has not been used before:
 */
static void fpu__initialize(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	set_thread_flag(TIF_NEED_FPU_LOAD);
	fpstate_init(&fpu->state);
	trace_x86_fpu_init_state(fpu);
}

/*
 * This function must be called before we read a task's fpstate.
 *
 * There are two cases where this gets called:
 *
 * - for the current task (when coredumping), in which case we have
 *   to save the latest FPU registers into the fpstate,
 *
 * - or it's called for stopped tasks (ptrace), in which case the
 *   registers were already saved by the context-switch code when
 *   the task scheduled out.
 *
 * If the task has used the FPU before then save it.
 */
void fpu__prepare_read(struct fpu *fpu)
{
	if (fpu == &current->thread.fpu)
		fpu__save(fpu);
}

/*
 * This function must be called before we write a task's fpstate.
 *
 * Invalidate any cached FPU registers.
 *
 * After this function call, after registers in the fpstate are
 * modified and the child task has woken up, the child task will
 * restore the modified FPU state from the modified context. If we
 * didn't clear its cached status here then the cached in-registers
 * state pending on its former CPU could be restored, corrupting
 * the modifications.
 */
void fpu__prepare_write(struct fpu *fpu)
{
	/*
	 * Only stopped child tasks can be used to modify the FPU
	 * state in the fpstate buffer:
	 */
	WARN_ON_FPU(fpu == &current->thread.fpu);

	/* Invalidate any cached state: */
	__fpu_invalidate_fpregs_state(fpu);
}

/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct fpu *fpu)
{
	preempt_disable();

	if (fpu == &current->thread.fpu) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		fpregs_deactivate(fpu);
	}

	trace_x86_fpu_dropped(fpu);

	preempt_enable();
}

/*
 * Clear FPU registers by setting them up from the init fpstate.
 * Caller must do fpregs_[un]lock() around it.
 */
static inline void copy_init_fpstate_to_fpregs(u64 features_mask)
{
	if (use_xsave())
		copy_kernel_to_xregs(&init_fpstate.xsave, features_mask);
	else if (static_cpu_has(X86_FEATURE_FXSR))
		copy_kernel_to_fxregs(&init_fpstate.fxsave);
	else
		copy_kernel_to_fregs(&init_fpstate.fsave);

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		copy_init_pkru_to_fpregs();
}

/*
 * Clear the FPU state back to init state.
 *
 * Called by sys_execve(), by the signal handler code and by various
 * error paths.
 */
static void fpu__clear(struct fpu *fpu, bool user_only)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	if (!static_cpu_has(X86_FEATURE_FPU)) {
		fpu__drop(fpu);
		fpu__initialize(fpu);
		return;
	}

	fpregs_lock();

	if (user_only) {
		if (!fpregs_state_valid(fpu, smp_processor_id()) &&
		    xfeatures_mask_supervisor())
			copy_kernel_to_xregs(&fpu->state.xsave,
					     xfeatures_mask_supervisor());
		copy_init_fpstate_to_fpregs(xfeatures_mask_user());
	} else {
		copy_init_fpstate_to_fpregs(xfeatures_mask_all);
	}

	fpregs_mark_activate();
	fpregs_unlock();
}

void fpu__clear_user_states(struct fpu *fpu)
{
	fpu__clear(fpu, true);
}

void fpu__clear_all(struct fpu *fpu)
{
	fpu__clear(fpu, false);
}
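
/*
 * Usage sketch (assumed callers, for illustration): execve()'s
 * flush_thread() wants a full reset, while the signal code only resets
 * the user states:
 *
 *	fpu__clear_all(&tsk->thread.fpu);
 *	fpu__clear_user_states(&current->thread.fpu);
 */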

/*
 * Load FPU context before returning to userspace.
 */
void switch_fpu_return(void)
{
	if (!static_cpu_has(X86_FEATURE_FPU))
		return;

	__fpregs_load_activate();
}
EXPORT_SYMBOL_GPL(switch_fpu_return);

#ifdef CONFIG_X86_DEBUG_FPU
/*
 * If current FPU state according to its tracking (loaded FPU context on this
 * CPU) is not valid then we must have TIF_NEED_FPU_LOAD set so the context is
 * loaded on return to userland.
 */
void fpregs_assert_state_consistent(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		return;

	WARN_ON_FPU(!fpregs_state_valid(fpu, smp_processor_id()));
}
EXPORT_SYMBOL_GPL(fpregs_assert_state_consistent);
#endif

void fpregs_mark_activate(void)
{
	struct fpu *fpu = &current->thread.fpu;

	fpregs_activate(fpu);
	fpu->last_cpu = smp_processor_id();
	clear_thread_flag(TIF_NEED_FPU_LOAD);
}
EXPORT_SYMBOL_GPL(fpregs_mark_activate);

/*
 * x87 math exception handling:
 */

int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
	int err;

	if (trap_nr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status. 0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit. We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception.
		 */
		if (boot_cpu_has(X86_FEATURE_FXSR)) {
			cwd = fpu->state.fxsave.cwd;
			swd = fpu->state.fxsave.swd;
		} else {
			cwd = (unsigned short)fpu->state.fsave.cwd;
			swd = (unsigned short)fpu->state.fsave.swd;
		}

		err = swd & ~cwd;
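
		/*
		 * Worked example (illustrative values, not from this file):
		 * the default control word 0x037f masks all exceptions. If a
		 * program unmasks ZM (cwd = 0x037b) and divides by zero, the
		 * ZE status bit is set and
		 *
		 *	err = swd & ~cwd = 0x0004 & ~0x037b = 0x0004
		 *
		 * which the if-chain below maps to FPE_FLTDIV.
		 */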
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register. Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = MXCSR_DEFAULT;

		if (boot_cpu_has(X86_FEATURE_XMM))
			mxcsr = fpu->state.fxsave.mxcsr;

		err = ~(mxcsr >> 7) & mxcsr;
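
		/*
		 * Worked example (illustrative values, not from this file):
		 * MXCSR_DEFAULT is 0x1f80, i.e. all mask bits set. If ZM is
		 * unmasked and a divide by zero sets ZE, mxcsr = 0x1d84;
		 * shifting right by 7 aligns the mask bits with the status
		 * bits, so
		 *
		 *	err = ~(mxcsr >> 7) & mxcsr
		 *
		 * leaves the 0x004 status bit set and the if-chain below
		 * returns FPE_FLTDIV.
		 */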
	}

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		return FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		return FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		return FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		return FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		return FPE_FLTRES;
	}

	/*
	 * If we're using IRQ 13, or supposedly even some trap
	 * X86_TRAP_MF implementations, it's possible
	 * we get a spurious trap, which is not an error.
	 */
	return 0;
}