/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_I387_H
#define _ASM_X86_I387_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/hardirq.h>
#include <linux/slab.h>
#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>

extern unsigned int sig_xstate_size;
extern void fpu_init(void);
extern void mxcsr_feature_mask_init(void);
extern int init_fpu(struct task_struct *child);
extern asmlinkage void math_state_restore(void);
extern void __math_state_restore(void);
extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);

extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
				xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
				 xstateregs_set;

/*
 * xstateregs_active == fpregs_active. Please refer to the comment
 * at the definition of fpregs_active.
 */
#define xstateregs_active	fpregs_active

extern struct _fpx_sw_bytes fx_sw_reserved;
#ifdef CONFIG_IA32_EMULATION
extern unsigned int sig_xstate_ia32_size;
extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
struct _fpstate_ia32;
struct _xstate_ia32;
extern int save_i387_xstate_ia32(void __user *buf);
extern int restore_i387_xstate_ia32(void __user *buf);
#endif

#ifdef CONFIG_MATH_EMULATION
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif

#define X87_FSW_ES		(1 << 7)	/* Exception Summary */

static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}

extern void __sanitize_i387_state(struct task_struct *);

static inline void sanitize_i387_state(struct task_struct *tsk)
{
	if (!use_xsaveopt())
		return;
	__sanitize_i387_state(tsk);
}

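/*
 * Note: static_cpu_has() resolves the feature tests above at alternatives
 * patching time, so the use_*() helpers compile down to a statically
 * patched branch rather than a per-call flag lookup on the save/restore
 * fast paths below.
 */
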
#ifdef CONFIG_X86_64
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	int err;

	/* See comment in fxsave() below. */
	asm volatile("1:  rex64/fxrstor (%[fx])\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:  movl $-1,%[err]\n"
		     "    jmp  2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
		     : [fx] "R" (fx), "m" (*fx), "0" (0));
	return err;
}

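/*
 * How the error path above works: _ASM_EXTABLE(1b, 3b) records an
 * exception-table entry, so if the fxrstor faults (e.g. on a corrupt
 * state image) the fault handler resumes execution at label 3:, which
 * sets err to -1 and rejoins at 2: instead of oopsing the kernel.
 */
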
static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
	int err;

	/*
	 * Clear the bytes not touched by the fxsave and reserved
	 * for the SW usage.
	 */
	err = __clear_user(&fx->sw_reserved,
			   sizeof(struct _fpx_sw_bytes));
	if (unlikely(err))
		return -EFAULT;

	/* See comment in fxsave() below. */
	asm volatile("1:  rex64/fxsave (%[fx])\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:  movl $-1,%[err]\n"
		     "    jmp  2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err), "=m" (*fx)
		     : [fx] "R" (fx), "0" (0));
	if (unlikely(err) &&
	    __clear_user(fx, sizeof(struct i387_fxsave_struct)))
		err = -EFAULT;
	/* No need to clear here because the caller clears USED_MATH */
	return err;
}

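/*
 * fxsave_user() writes the FXSAVE image straight into a user-supplied
 * buffer (typically the signal frame, via the save_i387_xstate() path
 * declared below). If the save faults partway, the whole user buffer is
 * cleared so a partially written frame is never left behind.
 */
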
static inline void fpu_fxsave(struct fpu *fpu)
{
	/* Using "rex64; fxsave %0" is broken because, if the memory operand
	   uses any extended registers for addressing, a second REX prefix
	   will be generated (to the assembler, rex64 followed by semicolon
	   is a separate instruction), and hence the 64-bitness is lost. */

#ifdef CONFIG_AS_FXSAVEQ
	/* Using "fxsaveq %0" would be the ideal choice, but is only supported
	   starting with gas 2.16. */
	__asm__ __volatile__("fxsaveq %0"
			     : "=m" (fpu->state->fxsave));
#else
	/* Using, as a workaround, the properly prefixed form below isn't
	   accepted by any binutils version so far released, complaining that
	   the same type of prefix is used twice if an extended register is
	   needed for addressing (fix submitted to mainline 2005-11-21).
		asm volatile("rex64/fxsave %0"
			     : "=m" (fpu->state->fxsave));
	   This, however, we can work around by forcing the compiler to select
	   an addressing mode that doesn't require extended registers. */
	asm volatile("rex64/fxsave (%[fx])"
		     : "=m" (fpu->state->fxsave)
		     : [fx] "R" (&fpu->state->fxsave));
#endif
}

#else /* CONFIG_X86_32 */

/* perform fxrstor iff the processor has extended states, otherwise frstor */
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	/*
	 * The "nop" is needed to make the instructions the same
	 * length.
	 */
	alternative_input(
		"nop ; frstor %1",
		"fxrstor %1",
		X86_FEATURE_FXSR,
		"m" (*fx));

	return 0;
}

static inline void fpu_fxsave(struct fpu *fpu)
{
	asm volatile("fxsave %[fx]"
		     : [fx] "=m" (fpu->state->fxsave));
}

#endif	/* CONFIG_X86_64 */

/* We need a safe address that is cheap to find and that is already
   in L1 during context switch. The best choices are unfortunately
   different for UP and SMP */
#ifdef CONFIG_SMP
#define safe_address (__per_cpu_offset[0])
#else
#define safe_address (kstat_cpu(0).cpustat.user)
#endif

/*
 * These must be called with preempt disabled
 */
static inline void fpu_save_init(struct fpu *fpu)
{
	if (use_xsave()) {
		fpu_xsave(fpu);

		/*
		 * xsave header may indicate the init state of the FP.
		 */
		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
			return;
	} else if (use_fxsr()) {
		fpu_fxsave(fpu);
	} else {
		asm volatile("fsave %[fx]; fwait"
			     : [fx] "=m" (fpu->state->fsave));
		return;
	}

	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
		asm volatile("fnclex");

	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
	   is pending. Clear the x87 state here by setting it to fixed
	   values. safe_address is a random variable that should be in L1 */
	alternative_input(
		ASM_NOP8 ASM_NOP2,
		"emms\n\t"		/* clear stack tags */
		"fildl %P[addr]",	/* set F?P to defined value */
		X86_FEATURE_FXSAVE_LEAK,
		[addr] "m" (safe_address));
}

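/*
 * On the FXSAVE leak workaround above: alternative_input() leaves the
 * ten bytes of nops in place on unaffected CPUs and patches in the
 * "emms; fildl" pair only when X86_FEATURE_FXSAVE_LEAK is set, so the
 * workaround costs nothing where the erratum does not apply.
 */
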
static inline void __save_init_fpu(struct task_struct *tsk)
{
	fpu_save_init(&tsk->thread.fpu);
	task_thread_info(tsk)->status &= ~TS_USEDFPU;
}

static inline int fpu_fxrstor_checking(struct fpu *fpu)
{
	return fxrstor_checking(&fpu->state->fxsave);
}

static inline int fpu_restore_checking(struct fpu *fpu)
{
	if (use_xsave())
		return fpu_xrstor_checking(fpu);
	else
		return fpu_fxrstor_checking(fpu);
}

static inline int restore_fpu_checking(struct task_struct *tsk)
{
	return fpu_restore_checking(&tsk->thread.fpu);
}

/*
 * Signal frame handlers...
 */
extern int save_i387_xstate(void __user *buf);
extern int restore_i387_xstate(void __user *buf);

static inline void __unlazy_fpu(struct task_struct *tsk)
{
	if (task_thread_info(tsk)->status & TS_USEDFPU) {
		__save_init_fpu(tsk);
		stts();
	} else
		tsk->fpu_counter = 0;
}

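/*
 * Note on fpu_counter: it counts consecutive context switches in which
 * the task kept using the FPU; the context-switch path uses it to decide
 * whether an eager restore of the state is worthwhile. A task caught
 * here without live FPU state has its streak reset to zero.
 */
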
static inline void __clear_fpu(struct task_struct *tsk)
{
	if (task_thread_info(tsk)->status & TS_USEDFPU) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		task_thread_info(tsk)->status &= ~TS_USEDFPU;
		stts();
	}
}

static inline void kernel_fpu_begin(void)
{
	struct thread_info *me = current_thread_info();
	preempt_disable();
	if (me->status & TS_USEDFPU)
		__save_init_fpu(me->task);
	else
		clts();
}

static inline void kernel_fpu_end(void)
{
	stts();
	preempt_enable();
}

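/*
 * Sketch of the intended calling pattern (my_sse_copy() is a stand-in
 * name, not a real kernel function): bracket any in-kernel FPU/SSE use
 * with the pair above, which saves the current task's state if needed
 * and keeps preemption disabled for the duration.
 *
 *	kernel_fpu_begin();
 *	my_sse_copy(dst, src, len);
 *	kernel_fpu_end();
 */
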
static inline bool irq_fpu_usable(void)
{
	struct pt_regs *regs;

	return !in_interrupt() || !(regs = get_irq_regs()) || \
		user_mode(regs) || (read_cr0() & X86_CR0_TS);
}

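/*
 * Reading the test above: interrupt-context FPU use is safe when nothing
 * was interrupted, when only user mode was interrupted, or when CR0.TS
 * is set (i.e. the interrupted kernel code owned no live FPU state that
 * kernel_fpu_begin() could clobber).
 */
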
/*
 * Some instructions like VIA's padlock instructions generate a spurious
 * DNA fault but don't modify SSE registers. And these instructions get
 * used from interrupt context as well. To prevent these kernel
 * instructions in interrupt context from interacting wrongly with other
 * user/kernel FPU usage, they should only be used bracketed by
 * irq_ts_save()/irq_ts_restore().
 */
static inline int irq_ts_save(void)
{
	/*
	 * If in process context and not atomic, we can simply take the
	 * spurious DNA fault and let the trap handler deal with it.
	 * Doing clts() here instead would require disabling preemption
	 * or some heavy lifting like kernel_fpu_begin().
	 */
	if (!in_atomic())
		return 0;

	if (read_cr0() & X86_CR0_TS) {
		clts();
		return 1;
	}

	return 0;
}

static inline void irq_ts_restore(int TS_state)
{
	if (TS_state)
		stts();
}

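/*
 * Sketch of the save/restore pattern, modeled on the VIA padlock crypto
 * drivers (padlock_xcrypt() is a stand-in name for the TS-sensitive
 * instruction sequence):
 *
 *	int ts_state;
 *
 *	ts_state = irq_ts_save();
 *	padlock_xcrypt(input, output, key, count);
 *	irq_ts_restore(ts_state);
 */
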
/*
 * These disable preemption on their own and are safe
 */
static inline void save_init_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__save_init_fpu(tsk);
	stts();
	preempt_enable();
}

static inline void unlazy_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__unlazy_fpu(tsk);
	preempt_enable();
}

static inline void clear_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__clear_fpu(tsk);
	preempt_enable();
}

/*
 * i387 state interaction
 */
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.cwd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
	}
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.swd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.swd;
	}
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
	if (cpu_has_xmm) {
		return tsk->thread.fpu.state->fxsave.mxcsr;
	} else {
		return MXCSR_DEFAULT;
	}
}

static inline bool fpu_allocated(struct fpu *fpu)
{
	return fpu->state != NULL;
}

static inline int fpu_alloc(struct fpu *fpu)
{
	if (fpu_allocated(fpu))
		return 0;
	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!fpu->state)
		return -ENOMEM;
	WARN_ON((unsigned long)fpu->state & 15);
	return 0;
}

static inline void fpu_free(struct fpu *fpu)
{
	if (fpu->state) {
		kmem_cache_free(task_xstate_cachep, fpu->state);
		fpu->state = NULL;
	}
}

static inline void fpu_copy(struct fpu *dst, struct fpu *src)
{
	memcpy(dst->state, src->state, xstate_size);
}

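/*
 * Sketch of the intended allocation pattern, modeled on the fork path
 * (arch_dup_task_struct()): allocate the child's state buffer first,
 * then duplicate the parent's saved state into it.
 *
 *	if (fpu_allocated(&src->thread.fpu)) {
 *		int err = fpu_alloc(&dst->thread.fpu);
 *		if (err)
 *			return err;
 *		fpu_copy(&dst->thread.fpu, &src->thread.fpu);
 *	}
 */
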
extern void fpu_finit(struct fpu *fpu);

#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_I387_H */