#ifndef _FPU_INTERNAL_H
#define _FPU_INTERNAL_H
-#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/compat.h>
#include <linux/slab.h>
-#include <asm/asm.h>
-#include <asm/cpufeature.h>
-#include <asm/processor.h>
-#include <asm/sigcontext.h>
+
#include <asm/user.h>
-#include <asm/uaccess.h>
-#include <asm/xsave.h>
-#include <asm/smap.h>
+#include <asm/fpu/api.h>
+#include <asm/fpu/xsave.h>
#ifdef CONFIG_X86_64
# include <asm/sigcontext32.h>
#endif
extern unsigned int mxcsr_feature_mask;
-extern void fpu_init(void);
+extern void fpu__cpu_init(void);
extern void eager_fpu_init(void);
-DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);
+DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
struct task_struct *tsk);
#endif
/*
- * Must be run with preemption disabled: this clears the fpu_owner_task,
+ * Must be run with preemption disabled: this clears the fpu_fpregs_owner_ctx
* on this CPU.
*
* This will disable any lazy FPU state restore of the current FPU state.
*/
static inline void __cpu_disable_lazy_restore(unsigned int cpu)
{
- per_cpu(fpu_owner_task, cpu) = NULL;
+ per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
}
-/*
- * Used to indicate that the FPU state in memory is newer than the FPU
- * state in registers, and the FPU state should be reloaded next time the
- * task is run. Only safe on the current task, or non-running tasks.
- */
-static inline void task_disable_lazy_fpu_restore(struct task_struct *tsk)
+static inline int fpu_want_lazy_restore(struct fpu *fpu, unsigned int cpu)
{
- tsk->thread.fpu.last_cpu = ~0;
-}
-
-static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
-{
- return new == this_cpu_read_stable(fpu_owner_task) &&
- cpu == new->thread.fpu.last_cpu;
+ return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
}
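
Note (sketch, not part of the patch): the two-part check above is the
whole lazy-restore invariant, and is used from switch_fpu_prepare()
further down like this:

	if (fpu_want_lazy_restore(new_fpu, cpu)) {
		/* Nobody clobbered this CPU's registers and the task
		   did not migrate: the fpregs still hold new_fpu's
		   state, so skip the copy-in and just flip ownership. */
	} else {
		/* Stale or foreign registers: a full restore is needed. */
	}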
static inline int is_ia32_compat_frame(void)
static inline int fpu_save_init(struct fpu *fpu)
{
if (use_xsave()) {
- fpu_xsave(fpu);
+ xsave_state(&fpu->state->xsave);
/*
* xsave header may indicate the init state of the FP.
return 1;
}
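
Note: the return value encodes whether the save left the register
contents intact; a sketch of the contract, inferred from the callers
visible in this patch:

	if (fpu_save_init(fpu)) {
		/* Registers still match the saved image: a lazy
		   restore on this CPU stays valid, so remember it. */
		fpu->last_cpu = cpu;
	} else {
		/* Destructive save (e.g. FNSAVE clears the FPU):
		   invalidate the lazy-restore shortcut. */
		fpu->last_cpu = -1;
	}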
-static inline int __save_init_fpu(struct task_struct *tsk)
-{
- return fpu_save_init(&tsk->thread.fpu);
-}
-
static inline int fpu_restore_checking(struct fpu *fpu)
{
if (use_xsave())
return frstor_checking(&fpu->state->fsave);
}
-static inline int restore_fpu_checking(struct task_struct *tsk)
+static inline int restore_fpu_checking(struct fpu *fpu)
{
/*
* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
"fnclex\n\t"
"emms\n\t"
"fildl %P[addr]" /* set F?P to defined value */
- : : [addr] "m" (tsk->thread.fpu.has_fpu));
+ : : [addr] "m" (fpu->has_fpu));
}
- return fpu_restore_checking(&tsk->thread.fpu);
-}
-
-/*
- * Software FPU state helpers. Careful: these need to
- * be preemption protection *and* they need to be
- * properly paired with the CR0.TS changes!
- */
-static inline int __thread_has_fpu(struct task_struct *tsk)
-{
- return tsk->thread.fpu.has_fpu;
+ return fpu_restore_checking(fpu);
}
/* Must be paired with an 'stts' after! */
-static inline void __thread_clear_has_fpu(struct task_struct *tsk)
+static inline void __thread_clear_has_fpu(struct fpu *fpu)
{
- tsk->thread.fpu.has_fpu = 0;
- this_cpu_write(fpu_owner_task, NULL);
+ fpu->has_fpu = 0;
+ this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}
/* Must be paired with a 'clts' before! */
-static inline void __thread_set_has_fpu(struct task_struct *tsk)
+static inline void __thread_set_has_fpu(struct fpu *fpu)
{
- tsk->thread.fpu.has_fpu = 1;
- this_cpu_write(fpu_owner_task, tsk);
+ fpu->has_fpu = 1;
+ this_cpu_write(fpu_fpregs_owner_ctx, fpu);
}
/*
* These generally need preemption protection to work,
* do try to avoid using these on their own.
*/
-static inline void __thread_fpu_end(struct task_struct *tsk)
+static inline void __thread_fpu_end(struct fpu *fpu)
{
- __thread_clear_has_fpu(tsk);
+ __thread_clear_has_fpu(fpu);
if (!use_eager_fpu())
stts();
}
-static inline void __thread_fpu_begin(struct task_struct *tsk)
+static inline void __thread_fpu_begin(struct fpu *fpu)
{
if (!use_eager_fpu())
clts();
- __thread_set_has_fpu(tsk);
+ __thread_set_has_fpu(fpu);
}
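
Note: a sketch of the required pairing for the helpers above
(illustrative only; the CR0.TS flip and the per-CPU ownership write
must not be separated by preemption):

	preempt_disable();
	__thread_fpu_begin(fpu);	/* clts() in lazy mode, then take ownership */
	/* ... load or modify the FPU registers ... */
	__thread_fpu_end(fpu);		/* drop ownership, then stts() in lazy mode */
	preempt_enable();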
-static inline void drop_fpu(struct task_struct *tsk)
+static inline void drop_fpu(struct fpu *fpu)
{
/*
* Forget coprocessor state..
*/
preempt_disable();
- tsk->thread.fpu_counter = 0;
+ fpu->counter = 0;
- if (__thread_has_fpu(tsk)) {
+ if (fpu->has_fpu) {
/* Ignore delayed exceptions from user space */
asm volatile("1: fwait\n"
"2:\n"
_ASM_EXTABLE(1b, 2b));
- __thread_fpu_end(tsk);
+ __thread_fpu_end(fpu);
}
- clear_stopped_child_used_math(tsk);
+ fpu->fpstate_active = 0;
+
preempt_enable();
}
* Reset the FPU state in the eager case and drop it in the lazy case (later use
* will reinit it).
*/
-static inline void fpu_reset_state(struct task_struct *tsk)
+static inline void fpu_reset_state(struct fpu *fpu)
{
if (!use_eager_fpu())
- drop_fpu(tsk);
+ drop_fpu(fpu);
else
restore_init_xstate();
}
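
Note: the two 'throw it away' primitives compose as follows; a sketch
of an exec-style caller ('tsk' is an assumed local):

	struct fpu *fpu = &tsk->thread.fpu;

	/* Discard any FPU state inherited across exec: */
	fpu_reset_state(fpu);	/* lazy: drop_fpu(); eager: reload init xstate */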
*/
typedef struct { int preload; } fpu_switch_t;
-static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
+static inline fpu_switch_t
+switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
{
fpu_switch_t fpu;
* If the task has used math, pre-load the FPU on eager-FPU (xsave)
* processors, or when the past 5 consecutive context switches used math.
*/
- fpu.preload = tsk_used_math(new) &&
- (use_eager_fpu() || new->thread.fpu_counter > 5);
+ fpu.preload = new_fpu->fpstate_active &&
+ (use_eager_fpu() || new_fpu->counter > 5);
- if (__thread_has_fpu(old)) {
- if (!__save_init_fpu(old))
- task_disable_lazy_fpu_restore(old);
+ if (old_fpu->has_fpu) {
+ if (!fpu_save_init(old_fpu))
+ old_fpu->last_cpu = -1;
else
- old->thread.fpu.last_cpu = cpu;
+ old_fpu->last_cpu = cpu;
- /* But leave fpu_owner_task! */
- old->thread.fpu.has_fpu = 0;
+ /* But leave fpu_fpregs_owner_ctx! */
+ old_fpu->has_fpu = 0;
/* Don't change CR0.TS if we just switch! */
if (fpu.preload) {
- new->thread.fpu_counter++;
- __thread_set_has_fpu(new);
- prefetch(new->thread.fpu.state);
+ new_fpu->counter++;
+ __thread_set_has_fpu(new_fpu);
+ prefetch(new_fpu->state);
} else if (!use_eager_fpu())
stts();
} else {
- old->thread.fpu_counter = 0;
- task_disable_lazy_fpu_restore(old);
+ old_fpu->counter = 0;
+ old_fpu->last_cpu = -1;
if (fpu.preload) {
- new->thread.fpu_counter++;
- if (fpu_lazy_restore(new, cpu))
+ new_fpu->counter++;
+ if (fpu_want_lazy_restore(new_fpu, cpu))
fpu.preload = 0;
else
- prefetch(new->thread.fpu.state);
- __thread_fpu_begin(new);
+ prefetch(new_fpu->state);
+ __thread_fpu_begin(new_fpu);
}
}
return fpu;
* state - all we need to do is to conditionally restore the register
* state itself.
*/
-static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
+static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
{
- if (fpu.preload) {
- if (unlikely(restore_fpu_checking(new)))
- fpu_reset_state(new);
+ if (fpu_switch.preload) {
+ if (unlikely(restore_fpu_checking(new_fpu)))
+ fpu_reset_state(new_fpu);
}
}
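
Note: a sketch of how the two halves bracket the actual task switch,
modeled on __switch_to() (simplified; 'prev_p', 'next_p' and 'cpu' are
assumed locals):

	struct fpu *old_fpu = &prev_p->thread.fpu;
	struct fpu *new_fpu = &next_p->thread.fpu;
	fpu_switch_t fpu_switch;

	fpu_switch = switch_fpu_prepare(old_fpu, new_fpu, cpu);
	/* ... switch stacks, segment and thread state ... */
	switch_fpu_finish(new_fpu, fpu_switch);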
*/
static inline void user_fpu_begin(void)
{
+ struct fpu *fpu = &current->thread.fpu;
+
preempt_disable();
if (!user_has_fpu())
- __thread_fpu_begin(current);
+ __thread_fpu_begin(fpu);
preempt_enable();
}
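
Note: user_fpu_begin() only makes the registers 'live'; a sketch of
the intended call pattern (illustrative, and a real caller has to
keep preemption disabled across the restore):

	user_fpu_begin();		/* own the fpregs: no CR0.TS fault later */
	if (restore_fpu_checking(fpu))	/* copy the saved context back in */
		fpu_reset_state(fpu);	/* invalid state: fall back to init */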
-static inline void __save_fpu(struct task_struct *tsk)
-{
- if (use_xsave()) {
- if (unlikely(system_state == SYSTEM_BOOTING))
- xsave_state_booting(&tsk->thread.fpu.state->xsave, -1);
- else
- xsave_state(&tsk->thread.fpu.state->xsave, -1);
- } else
- fpu_fxsave(&tsk->thread.fpu);
-}
-
/*
* i387 state interaction
*/
}
}
-static bool fpu_allocated(struct fpu *fpu)
-{
- return fpu->state != NULL;
-}
-
-static inline int fpu_alloc(struct fpu *fpu)
-{
- if (fpu_allocated(fpu))
- return 0;
- fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
- if (!fpu->state)
- return -ENOMEM;
- WARN_ON((unsigned long)fpu->state & 15);
- return 0;
-}
-
-static inline void fpu_free(struct fpu *fpu)
-{
- if (fpu->state) {
- kmem_cache_free(task_xstate_cachep, fpu->state);
- fpu->state = NULL;
- }
-}
-
-static inline void fpu_copy(struct task_struct *dst, struct task_struct *src)
-{
- if (use_eager_fpu()) {
- memset(&dst->thread.fpu.state->xsave, 0, xstate_size);
- __save_fpu(dst);
- } else {
- struct fpu *dfpu = &dst->thread.fpu;
- struct fpu *sfpu = &src->thread.fpu;
+extern void fpstate_cache_init(void);
- unlazy_fpu(src);
- memcpy(dfpu->state, sfpu->state, xstate_size);
- }
-}
+extern int fpstate_alloc(struct fpu *fpu);
+extern void fpstate_free(struct fpu *fpu);
+extern int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
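
Note: the open-coded allocation/copy helpers above move behind these
externs; a sketch of a fork-time caller, modeled on
arch_dup_task_struct() (assuming fpu__copy() handles the fpstate
allocation, which its 'int' return suggests):

	int err = 0;

	dst_fpu->counter = 0;
	dst_fpu->has_fpu = 0;
	dst_fpu->state   = NULL;
	if (src_fpu->fpstate_active)
		err = fpu__copy(dst_fpu, src_fpu);	/* -ENOMEM on failure */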
static inline unsigned long
alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx,