/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

#include <linux/sched.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/math_emu.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>

#ifdef CONFIG_MATH_EMULATION
#define HAVE_HWFP (boot_cpu_data.hard_math)
#else
#define HAVE_HWFP 1
#endif

static unsigned long mxcsr_feature_mask __read_mostly = 0xffffffff;

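/*
 * Probe which MXCSR bits this CPU actually implements: FXSAVE reports the
 * supported-bit mask in the mxcsr_mask field of a zeroed save area.  A
 * reported mask of 0 means the CPU predates that field, so the historical
 * default mask 0x0000ffbf is assumed instead.
 */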
void mxcsr_feature_mask_init(void)
{
        unsigned long mask = 0;
        clts();
        if (cpu_has_fxsr) {
                memset(&current->thread.i387.fxsave, 0,
                       sizeof(struct i387_fxsave_struct));
                asm volatile("fxsave %0" : : "m" (current->thread.i387.fxsave));
                mask = current->thread.i387.fxsave.mxcsr_mask;
                if (mask == 0)
                        mask = 0x0000ffbf;
        }
        mxcsr_feature_mask &= mask;
        stts();
}

/*
 * The _current_ task is using the FPU for the first time
 * so initialize it and set the mxcsr to its default
 * value at reset if we support XMM instructions, and then
 * remember that the current task has used the FPU.
 */
void init_fpu(struct task_struct *tsk)
{
        if (cpu_has_fxsr) {
                memset(&tsk->thread.i387.fxsave, 0,
                       sizeof(struct i387_fxsave_struct));
                tsk->thread.i387.fxsave.cwd = 0x37f;
                if (cpu_has_xmm)
                        tsk->thread.i387.fxsave.mxcsr = 0x1f80;
        } else {
                memset(&tsk->thread.i387.fsave, 0,
                       sizeof(struct i387_fsave_struct));
                tsk->thread.i387.fsave.cwd = 0xffff037fu;
                tsk->thread.i387.fsave.swd = 0xffff0000u;
                tsk->thread.i387.fsave.twd = 0xffffffffu;
                tsk->thread.i387.fsave.fos = 0xffff0000u;
        }
        /*
         * Only the device-not-available exception or ptrace can call
         * init_fpu().
         */
        set_stopped_child_used_math(tsk);
}

/*
 * FPU lazy state save handling.
 */

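/*
 * kernel_fpu_begin() lets kernel code use the FPU/SSE registers.  If the
 * current thread owns live FPU state (TS_USEDFPU), that state is saved
 * first; otherwise CR0.TS is cleared so FPU instructions do not fault.
 * Preemption stays disabled until the matching kernel_fpu_end().
 */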
void kernel_fpu_begin(void)
{
        struct thread_info *thread = current_thread_info();

        preempt_disable();
        if (thread->status & TS_USEDFPU) {
                __save_init_fpu(thread->task);
                return;
        }
        clts();
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);

/*
 * FPU tag word conversions.
 */

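/*
 * The legacy FSAVE image keeps a 2-bit tag per register (valid, zero,
 * special, empty), while FXSAVE compresses this to a single used/empty
 * bit per register.  The helpers below convert between the two encodings;
 * expanding back to 2 bits requires inspecting the register contents.
 */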
static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
        unsigned int tmp; /* to avoid 16 bit prefixes in the code */

        /* Transform each pair of bits into 01 (valid) or 00 (empty) */
        tmp = ~twd;
        tmp = (tmp | (tmp >> 1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
        /* and move the valid bits to the lower byte. */
        tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
        tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
        tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

        return tmp;
}

static inline unsigned long twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
{
        struct _fpxreg *st = NULL;
        unsigned long tos = (fxsave->swd >> 11) & 7;
        unsigned long twd = (unsigned long) fxsave->twd;
        unsigned long tag;
        unsigned long ret = 0xffff0000u;
        int i;

#define FPREG_ADDR(f, n)        ((void *)&(f)->st_space + (n) * 16);

        for (i = 0; i < 8; i++) {
                if (twd & 0x1) {
                        st = FPREG_ADDR(fxsave, (i - tos) & 7);

                        switch (st->exponent & 0x7fff) {
                        case 0x7fff:
                                tag = 2;                /* Special */
                                break;
                        case 0x0000:
                                if (!st->significand[0] &&
                                    !st->significand[1] &&
                                    !st->significand[2] &&
                                    !st->significand[3]) {
                                        tag = 1;        /* Zero */
                                } else {
                                        tag = 2;        /* Special */
                                }
                                break;
                        default:
                                if (st->significand[3] & 0x8000) {
                                        tag = 0;        /* Valid */
                                } else {
                                        tag = 2;        /* Special */
                                }
                                break;
                        }
                } else {
                        tag = 3;                        /* Empty */
                }
                ret |= (tag << (2 * i));
                twd = twd >> 1;
        }
        return ret;
}

/*
 * FPU state interaction.
 */

unsigned short get_fpu_cwd(struct task_struct *tsk)
{
        if (cpu_has_fxsr) {
                return tsk->thread.i387.fxsave.cwd;
        } else {
                return (unsigned short)tsk->thread.i387.fsave.cwd;
        }
}

unsigned short get_fpu_swd(struct task_struct *tsk)
{
        if (cpu_has_fxsr) {
                return tsk->thread.i387.fxsave.swd;
        } else {
                return (unsigned short)tsk->thread.i387.fsave.swd;
        }
}

#if 0
unsigned short get_fpu_twd(struct task_struct *tsk)
{
        if (cpu_has_fxsr) {
                return tsk->thread.i387.fxsave.twd;
        } else {
                return (unsigned short)tsk->thread.i387.fsave.twd;
        }
}
#endif /* 0 */

unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
        if (cpu_has_xmm) {
                return tsk->thread.i387.fxsave.mxcsr;
        } else {
                return 0x1f80;
        }
}

#if 0

void set_fpu_cwd(struct task_struct *tsk, unsigned short cwd)
{
        if (cpu_has_fxsr) {
                tsk->thread.i387.fxsave.cwd = cwd;
        } else {
                tsk->thread.i387.fsave.cwd = ((long)cwd | 0xffff0000u);
        }
}

void set_fpu_swd(struct task_struct *tsk, unsigned short swd)
{
        if (cpu_has_fxsr) {
                tsk->thread.i387.fxsave.swd = swd;
        } else {
                tsk->thread.i387.fsave.swd = ((long)swd | 0xffff0000u);
        }
}

void set_fpu_twd(struct task_struct *tsk, unsigned short twd)
{
        if (cpu_has_fxsr) {
                tsk->thread.i387.fxsave.twd = twd_i387_to_fxsr(twd);
        } else {
                tsk->thread.i387.fsave.twd = ((long)twd | 0xffff0000u);
        }
}

#endif /* 0 */

/*
 * FXSR floating point environment conversions.
 */

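/*
 * User space expects the legacy i387 frame layout: a 7-longword
 * environment followed by eight packed 10-byte ST registers.  The fxsave
 * image spaces each register 16 bytes apart, so the helpers below copy
 * the environment words and repack the register file in both directions.
 */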
static int convert_fxsr_to_user(struct _fpstate __user *buf,
                                struct i387_fxsave_struct *fxsave)
{
        unsigned long env[7];
        struct _fpreg __user *to;
        struct _fpxreg *from;
        int i;

        env[0] = (unsigned long)fxsave->cwd | 0xffff0000ul;
        env[1] = (unsigned long)fxsave->swd | 0xffff0000ul;
        env[2] = twd_fxsr_to_i387(fxsave);
        env[3] = fxsave->fip;
        env[4] = fxsave->fcs | ((unsigned long)fxsave->fop << 16);
        env[5] = fxsave->foo;
        env[6] = fxsave->fos;

        if (__copy_to_user(buf, env, 7 * sizeof(unsigned long)))
                return 1;

        to = &buf->_st[0];
        from = (struct _fpxreg *) &fxsave->st_space[0];
        for (i = 0; i < 8; i++, to++, from++) {
                unsigned long __user *t = (unsigned long __user *)to;
                unsigned long *f = (unsigned long *)from;

                if (__put_user(*f, t) ||
                    __put_user(*(f + 1), t + 1) ||
                    __put_user(from->exponent, &to->exponent))
                        return 1;
        }
        return 0;
}

static int convert_fxsr_from_user(struct i387_fxsave_struct *fxsave,
                                  struct _fpstate __user *buf)
{
        unsigned long env[7];
        struct _fpxreg *to;
        struct _fpreg __user *from;
        int i;

        if (__copy_from_user(env, buf, 7 * sizeof(long)))
                return 1;

        fxsave->cwd = (unsigned short)(env[0] & 0xffff);
        fxsave->swd = (unsigned short)(env[1] & 0xffff);
        fxsave->twd = twd_i387_to_fxsr((unsigned short)(env[2] & 0xffff));
        fxsave->fip = env[3];
        fxsave->fop = (unsigned short)((env[4] & 0xffff0000ul) >> 16);
        fxsave->fcs = (env[4] & 0xffff);
        fxsave->foo = env[5];
        fxsave->fos = env[6];

        to = (struct _fpxreg *) &fxsave->st_space[0];
        from = &buf->_st[0];
        for (i = 0; i < 8; i++, to++, from++) {
                unsigned long *t = (unsigned long *)to;
                unsigned long __user *f = (unsigned long __user *)from;

                if (__get_user(*t, f) ||
                    __get_user(*(t + 1), f + 1) ||
                    __get_user(to->exponent, &from->exponent))
                        return 1;
        }
        return 0;
}

/*
 * Signal frame handlers.
 */

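/*
 * These run on the signal setup/return paths.  save_i387() returns 1 when
 * state was written to the frame, 0 when the task never used the FPU, and
 * -1 on a fault while copying to user space.
 */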
static inline int save_i387_fsave(struct _fpstate __user *buf)
{
        struct task_struct *tsk = current;

        unlazy_fpu(tsk);
        tsk->thread.i387.fsave.status = tsk->thread.i387.fsave.swd;
        if (__copy_to_user(buf, &tsk->thread.i387.fsave,
                           sizeof(struct i387_fsave_struct)))
                return -1;
        return 1;
}

static int save_i387_fxsave(struct _fpstate __user *buf)
{
        struct task_struct *tsk = current;
        int err = 0;

        unlazy_fpu(tsk);

        if (convert_fxsr_to_user(buf, &tsk->thread.i387.fxsave))
                return -1;

        err |= __put_user(tsk->thread.i387.fxsave.swd, &buf->status);
        err |= __put_user(X86_FXSR_MAGIC, &buf->magic);
        if (err)
                return -1;

        if (__copy_to_user(&buf->_fxsr_env[0], &tsk->thread.i387.fxsave,
                           sizeof(struct i387_fxsave_struct)))
                return -1;
        return 1;
}

int save_i387(struct _fpstate __user *buf)
{
        if (!used_math())
                return 0;

        /* This will cause a "finit" to be triggered by the next
         * attempted FPU operation by the 'current' process.
         */
        clear_used_math();

        if (HAVE_HWFP) {
                if (cpu_has_fxsr) {
                        return save_i387_fxsave(buf);
                } else {
                        return save_i387_fsave(buf);
                }
        } else {
                return save_i387_soft(&current->thread.i387.soft, buf);
        }
}

static inline int restore_i387_fsave(struct _fpstate __user *buf)
{
        struct task_struct *tsk = current;
        clear_fpu(tsk);
        return __copy_from_user(&tsk->thread.i387.fsave, buf,
                                sizeof(struct i387_fsave_struct));
}

static int restore_i387_fxsave(struct _fpstate __user *buf)
{
        int err;
        struct task_struct *tsk = current;
        clear_fpu(tsk);
        err = __copy_from_user(&tsk->thread.i387.fxsave, &buf->_fxsr_env[0],
                               sizeof(struct i387_fxsave_struct));
        /* mxcsr reserved bits must be masked to zero for security reasons */
        tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
        return err ? 1 : convert_fxsr_from_user(&tsk->thread.i387.fxsave, buf);
}

int restore_i387(struct _fpstate __user *buf)
{
        int err;

        if (HAVE_HWFP) {
                if (cpu_has_fxsr) {
                        err = restore_i387_fxsave(buf);
                } else {
                        err = restore_i387_fsave(buf);
                }
        } else {
                err = restore_i387_soft(&current->thread.i387.soft, buf);
        }
        set_used_math();
        return err;
}

/*
 * ptrace request handlers.
 */

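/*
 * These service the PTRACE_{GET,SET}FPREGS and PTRACE_{GET,SET}FPXREGS
 * requests (presumably dispatched from the arch ptrace code): the FPREGS
 * variants use the legacy user_i387_struct layout and reuse the
 * signal-frame conversion helpers on FXSR hardware, while the FPXREGS
 * variants copy the raw fxsave image.
 */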
static inline int get_fpregs_fsave(struct user_i387_struct __user *buf,
                                   struct task_struct *tsk)
{
        return __copy_to_user(buf, &tsk->thread.i387.fsave,
                              sizeof(struct user_i387_struct));
}

static inline int get_fpregs_fxsave(struct user_i387_struct __user *buf,
                                    struct task_struct *tsk)
{
        return convert_fxsr_to_user((struct _fpstate __user *)buf,
                                    &tsk->thread.i387.fxsave);
}

int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *tsk)
{
        if (HAVE_HWFP) {
                if (cpu_has_fxsr) {
                        return get_fpregs_fxsave(buf, tsk);
                } else {
                        return get_fpregs_fsave(buf, tsk);
                }
        } else {
                return save_i387_soft(&tsk->thread.i387.soft,
                                      (struct _fpstate __user *)buf);
        }
}

static inline int set_fpregs_fsave(struct task_struct *tsk,
                                   struct user_i387_struct __user *buf)
{
        return __copy_from_user(&tsk->thread.i387.fsave, buf,
                                sizeof(struct user_i387_struct));
}

static inline int set_fpregs_fxsave(struct task_struct *tsk,
                                    struct user_i387_struct __user *buf)
{
        return convert_fxsr_from_user(&tsk->thread.i387.fxsave,
                                      (struct _fpstate __user *)buf);
}

int set_fpregs(struct task_struct *tsk, struct user_i387_struct __user *buf)
{
        if (HAVE_HWFP) {
                if (cpu_has_fxsr) {
                        return set_fpregs_fxsave(tsk, buf);
                } else {
                        return set_fpregs_fsave(tsk, buf);
                }
        } else {
                return restore_i387_soft(&tsk->thread.i387.soft,
                                         (struct _fpstate __user *)buf);
        }
}

int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *tsk)
{
        if (cpu_has_fxsr) {
                if (__copy_to_user(buf, &tsk->thread.i387.fxsave,
                                   sizeof(struct user_fxsr_struct)))
                        return -EFAULT;
                return 0;
        } else {
                return -EIO;
        }
}

int set_fpxregs(struct task_struct *tsk, struct user_fxsr_struct __user *buf)
{
        int ret = 0;

        if (cpu_has_fxsr) {
                if (__copy_from_user(&tsk->thread.i387.fxsave, buf,
                                     sizeof(struct user_fxsr_struct)))
                        ret = -EFAULT;
                /* mxcsr reserved bits must be masked to zero
                 * for security reasons */
                tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
        } else {
                ret = -EIO;
        }
        return ret;
}

/*
 * FPU state for core dumps.
 */

static inline void copy_fpu_fsave(struct task_struct *tsk,
                                  struct user_i387_struct *fpu)
{
        memcpy(fpu, &tsk->thread.i387.fsave,
               sizeof(struct user_i387_struct));
}

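/*
 * Squeeze the fxsave image into the user_i387_struct layout: the first
 * seven longwords of the header are copied as-is, then each 16-byte-spaced
 * ST register is repacked into the 10-byte (5 x 16-bit) format user space
 * expects.
 */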
static inline void copy_fpu_fxsave(struct task_struct *tsk,
                                   struct user_i387_struct *fpu)
{
        unsigned short *to;
        unsigned short *from;
        int i;

        memcpy(fpu, &tsk->thread.i387.fxsave, 7 * sizeof(long));

        to = (unsigned short *)&fpu->st_space[0];
        from = (unsigned short *)&tsk->thread.i387.fxsave.st_space[0];
        for (i = 0; i < 8; i++, to += 5, from += 8)
                memcpy(to, from, 5 * sizeof(unsigned short));
}

int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
{
        int fpvalid;
        struct task_struct *tsk = current;

        fpvalid = !!used_math();
        if (fpvalid) {
                unlazy_fpu(tsk);
                if (cpu_has_fxsr) {
                        copy_fpu_fxsave(tsk, fpu);
                } else {
                        copy_fpu_fsave(tsk, fpu);
                }
        }

        return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);

int dump_task_fpu(struct task_struct *tsk, struct user_i387_struct *fpu)
{
        int fpvalid = !!tsk_used_math(tsk);

        if (fpvalid) {
                if (tsk == current)
                        unlazy_fpu(tsk);
                if (cpu_has_fxsr)
                        copy_fpu_fxsave(tsk, fpu);
                else
                        copy_fpu_fsave(tsk, fpu);
        }
        return fpvalid;
}

int dump_task_extended_fpu(struct task_struct *tsk,
                           struct user_fxsr_struct *fpu)
{
        int fpvalid = tsk_used_math(tsk) && cpu_has_fxsr;

        if (fpvalid) {
                if (tsk == current)
                        unlazy_fpu(tsk);
                memcpy(fpu, &tsk->thread.i387.fxsave, sizeof(*fpu));
        }
        return fpvalid;
}