/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_I387_H
#define _ASM_X86_I387_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/hardirq.h>

struct pt_regs;
struct user_i387_struct;

extern int fpstate_alloc_init(struct task_struct *curr);
extern void fpstate_init(struct fpu *fpu);
extern void fpu__flush_thread(struct task_struct *tsk);

extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
extern void fpu__restore(void);
extern void fpu__init_check_bugs(void);

extern bool irq_fpu_usable(void);

/*
 * Careful: __kernel_fpu_begin/end() must be called with preempt disabled;
 * they do not touch the preempt state on their own.
 * If you enable preemption after __kernel_fpu_begin(), a preempt notifier
 * should call __kernel_fpu_end() to prevent the kernel/user FPU state
 * from getting corrupted.  KVM, for example, uses this model.
 *
 * All other cases use kernel_fpu_begin/end(), which disable preemption
 * for the duration of the kernel FPU usage.
 */
extern void __kernel_fpu_begin(void);
extern void __kernel_fpu_end(void);

static inline void kernel_fpu_begin(void)
{
	preempt_disable();
	WARN_ON_ONCE(!irq_fpu_usable());
	__kernel_fpu_begin();
}

static inline void kernel_fpu_end(void)
{
	__kernel_fpu_end();
	preempt_enable();
}
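
/*
 * Illustrative sketch (not part of this header): a typical caller
 * brackets its SIMD work with kernel_fpu_begin()/kernel_fpu_end() and
 * must not sleep in between, since preemption is disabled throughout.
 * The xor_blocks_sse() helper below is hypothetical, shown only to
 * make the calling convention concrete:
 *
 *	static void xor_pages(void *dst, const void *src)
 *	{
 *		kernel_fpu_begin();
 *		xor_blocks_sse(dst, src, PAGE_SIZE);
 *		kernel_fpu_end();
 *	}
 */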

/* Must be called with preempt disabled */
extern void kernel_fpu_disable(void);
extern void kernel_fpu_enable(void);

/*
 * Some instructions, such as VIA's PadLock instructions, generate a
 * spurious DNA fault but do not modify the SSE registers.  These
 * instructions also get used from interrupt context.  To keep them
 * from interacting badly with other user/kernel FPU usage when run in
 * interrupt context, they should only be used bracketed by
 * irq_ts_save()/irq_ts_restore().
 */
static inline int irq_ts_save(void)
{
	/*
	 * If we are in process context (not atomic), we can simply take
	 * the spurious DNA fault; avoiding it with clts() in process
	 * context would require disabling preemption or some heavier
	 * lifting like kernel_fpu_begin().
	 */
	if (!in_atomic())
		return 0;

	if (read_cr0() & X86_CR0_TS) {
		clts();
		return 1;
	}

	return 0;
}

static inline void irq_ts_restore(int TS_state)
{
	if (TS_state)
		stts();
}
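
/*
 * Illustrative sketch (not part of this header): how a PadLock-style
 * instruction sequence would be bracketed.  padlock_xcrypt() stands in
 * for a hypothetical routine that can raise a spurious DNA fault:
 *
 *	int ts_state;
 *
 *	ts_state = irq_ts_save();
 *	padlock_xcrypt(input, output, key, control_word);
 *	irq_ts_restore(ts_state);
 */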

/*
 * The question "does this thread have fpu access?"
 * is slightly racy, since preemption could come in
 * and revoke it immediately after the test.
 *
 * However, even in that very unlikely scenario,
 * we can just assume we have FPU access - typically
 * to save the FP state - we'll just take a #NM
 * fault and get the FPU access back.
 */
static inline int user_has_fpu(void)
{
	return current->thread.fpu.has_fpu;
}

extern void fpu__save(struct task_struct *tsk);
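
/*
 * Illustrative sketch (not part of this header): the save-the-state
 * pattern the comment above describes.  A caller that wants the
 * current task's FP state written out can do, roughly:
 *
 *	if (user_has_fpu())
 *		fpu__save(current);
 *
 * If preemption revokes FPU ownership between the check and the save,
 * the resulting #NM fault restores access, so the race is benign.
 */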

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_I387_H */