Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
1da177e4 LT | 2 | * Copyright (C) 1994 Linus Torvalds |
3 | * | |
4 | * Pentium III FXSR, SSE support | |
5 | * General FPU state handling cleanups | |
6 | * Gareth Hughes <gareth@valinux.com>, May 2000 | |
7 | */ | |
78f7f1e5 | 8 | #include <asm/fpu/internal.h> |
59a36d16 | 9 | #include <asm/fpu/regset.h> |
fcbc99c4 | 10 | #include <asm/fpu/signal.h> |
e1cebad4 | 11 | #include <asm/traps.h> |
fcbc99c4 | 12 | |
91066588 | 13 | #include <linux/hardirq.h> |
1da177e4 | 14 | |
6f575023 IM | 15 | /*
16 | * Represents the initial FPU state. It's mostly (but not completely) zeroes, | |
17 | * depending on the FPU hardware format: | |
18 | */ | |
19 | union thread_xstate init_fpstate __read_mostly; | |
20 | ||
085cc281 IM | 21 | /*
22 | * Track whether the kernel is using the FPU state | |
23 | * currently. | |
24 | * | |
25 | * This flag is used: | |
26 | * | |
27 | * - by IRQ context code to potentially use the FPU | |
28 | * if it's unused. | |
29 | * | |
30 | * - to debug kernel_fpu_begin()/end() correctness | |
31 | */ | |
14e153ef ON | 32 | static DEFINE_PER_CPU(bool, in_kernel_fpu);
33 | ||
b0c050c5 | 34 | /* |
36b544dc | 35 | * Track which context is using the FPU on the CPU: |
b0c050c5 | 36 | */ |
36b544dc | 37 | DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx); |
b0c050c5 | 38 | |
416d49ac | 39 | static void kernel_fpu_disable(void) |
7575637a ON | 40 | {
41 | WARN_ON(this_cpu_read(in_kernel_fpu)); | |
42 | this_cpu_write(in_kernel_fpu, true); | |
43 | } | |
44 | ||
416d49ac | 45 | static void kernel_fpu_enable(void) |
7575637a | 46 | { |
3103ae3a | 47 | WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu)); |
7575637a ON | 48 | this_cpu_write(in_kernel_fpu, false);
49 | } | |
50 | ||
085cc281 IM | 51 | static bool kernel_fpu_disabled(void)
52 | { | |
53 | return this_cpu_read(in_kernel_fpu); | |
54 | } | |
55 | ||
8546c008 LT | 56 | /*
57 | * Were we in an interrupt that interrupted kernel mode? |
58 | * |
304bceda | 59 | * In the lazy-FPU case we can do a kernel_fpu_begin/end() pair *ONLY* if |
8546c008 LT | 60 | * that pair does nothing at all: the thread must not own the FPU (so |
61 | * that we don't try to save the FPU state), and TS must |
62 | * be set (so that the clts/stts pair does nothing that is |
63 | * visible in the interrupted kernel thread). |
5187b28f | 64 | * |
4b2e762e ON | 65 | * The exception is the eagerfpu case, where we return true: the thread |
66 | * most likely owns the FPU, but we never set/clear TS there anyway. |
8546c008 | 67 | */ |
416d49ac | 68 | static bool interrupted_kernel_fpu_idle(void) |
8546c008 | 69 | { |
085cc281 | 70 | if (kernel_fpu_disabled()) |
14e153ef ON | 71 | return false;
72 | ||
5d2bd700 | 73 | if (use_eager_fpu()) |
4b2e762e | 74 | return true; |
304bceda | 75 | |
d5cea9b0 | 76 | return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS); |
8546c008 LT | 77 | }
78 | ||
79 | /* | |
80 | * Were we in user mode (or vm86 mode) when we were | |
81 | * interrupted? | |
82 | * | |
83 | * Doing kernel_fpu_begin/end() is ok if we are running | |
84 | * in an interrupt context from user mode - we'll just | |
85 | * save the FPU state as required. | |
86 | */ | |
416d49ac | 87 | static bool interrupted_user_mode(void) |
8546c008 LT | 88 | {
89 | struct pt_regs *regs = get_irq_regs(); | |
f39b6f0e | 90 | return regs && user_mode(regs); |
8546c008 LT | 91 | }
92 | ||
93 | /* | |
94 | * Can we use the FPU in kernel mode with the | |
95 | * whole "kernel_fpu_begin/end()" sequence? | |
96 | * | |
97 | * It's always ok in process context (ie "not interrupt") | |
98 | * but it is sometimes ok even from an irq. | |
99 | */ | |
100 | bool irq_fpu_usable(void) | |
101 | { | |
102 | return !in_interrupt() || | |
103 | interrupted_user_mode() || | |
104 | interrupted_kernel_fpu_idle(); | |
105 | } | |
106 | EXPORT_SYMBOL(irq_fpu_usable); | |
107 | ||
b1a74bf8 | 108 | void __kernel_fpu_begin(void) |
8546c008 | 109 | { |
36b544dc | 110 | struct fpu *fpu = ¤t->thread.fpu; |
8546c008 | 111 | |
3103ae3a | 112 | kernel_fpu_disable(); |
14e153ef | 113 | |
d5cea9b0 | 114 | if (fpu->fpregs_active) { |
4f836347 | 115 | copy_fpregs_to_fpstate(fpu); |
7aeccb83 | 116 | } else { |
36b544dc | 117 | this_cpu_write(fpu_fpregs_owner_ctx, NULL); |
32b49b3c | 118 | __fpregs_activate_hw(); |
8546c008 LT | 119 | }
120 | } | |
b1a74bf8 | 121 | EXPORT_SYMBOL(__kernel_fpu_begin); |
8546c008 | 122 | |
b1a74bf8 | 123 | void __kernel_fpu_end(void) |
8546c008 | 124 | { |
af2d94fd | 125 | struct fpu *fpu = ¤t->thread.fpu; |
33a3ebdc | 126 | |
d5cea9b0 | 127 | if (fpu->fpregs_active) { |
0e75c54f | 128 | if (WARN_ON(copy_fpstate_to_fpregs(fpu))) |
fbce7782 | 129 | fpu__clear(fpu); |
32b49b3c IM | 130 | } else {
131 | __fpregs_deactivate_hw(); | |
731bd6a9 | 132 | } |
14e153ef | 133 | |
3103ae3a | 134 | kernel_fpu_enable(); |
8546c008 | 135 | } |
b1a74bf8 | 136 | EXPORT_SYMBOL(__kernel_fpu_end); |
8546c008 | 137 | |
d63e79b1 IM | 138 | void kernel_fpu_begin(void)
139 | { | |
140 | preempt_disable(); | |
141 | WARN_ON_ONCE(!irq_fpu_usable()); | |
142 | __kernel_fpu_begin(); | |
143 | } | |
144 | EXPORT_SYMBOL_GPL(kernel_fpu_begin); | |
145 | ||
146 | void kernel_fpu_end(void) | |
147 | { | |
148 | __kernel_fpu_end(); | |
149 | preempt_enable(); | |
150 | } | |
151 | EXPORT_SYMBOL_GPL(kernel_fpu_end); | |
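
A typical caller pattern, shown as a hedged sketch: `checksum_block_sse()` and its fallback path are hypothetical, but `irq_fpu_usable()`, `kernel_fpu_begin()` and `kernel_fpu_end()` are the exported API defined above (declared in `<asm/fpu/api.h>` in this tree).

```c
#include <linux/types.h>
#include <asm/fpu/api.h>	/* kernel_fpu_begin/end(), irq_fpu_usable() */

/* Hypothetical SSE-accelerated helper: the begin/end pair disables
 * preemption and saves the current FPU context if needed, so SSE/AVX
 * registers may be clobbered in between, but the code must not sleep
 * or fault while the pair is open. */
static void checksum_block_sse(const void *buf, size_t len)
{
	if (!irq_fpu_usable()) {
		/* e.g. hard IRQ interrupting kernel FPU use:
		 * take a non-SSE fallback path instead */
		return;
	}

	kernel_fpu_begin();
	/* ... SSE/AVX instructions operating on buf[0..len) ... */
	kernel_fpu_end();
}
```
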
152 | ||
91066588 IM | 153 | /*
154 | * CR0::TS save/restore functions: | |
155 | */ | |
156 | int irq_ts_save(void) | |
157 | { | |
158 | /* | |
159 | * If in process context and not atomic, we can simply take a spurious DNA |
160 | * fault, so there is no need to touch TS. Otherwise, clearing TS here via |
161 | * clts() would require disabling preemption or heavy lifting like kernel_fpu_begin(). |
162 | */ | |
163 | if (!in_atomic()) | |
164 | return 0; | |
165 | ||
166 | if (read_cr0() & X86_CR0_TS) { | |
167 | clts(); | |
168 | return 1; | |
169 | } | |
170 | ||
171 | return 0; | |
172 | } | |
173 | EXPORT_SYMBOL_GPL(irq_ts_save); | |
174 | ||
175 | void irq_ts_restore(int TS_state) | |
176 | { | |
177 | if (TS_state) | |
178 | stts(); | |
179 | } | |
180 | EXPORT_SYMBOL_GPL(irq_ts_restore); | |
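
The main in-tree user of this pair is the VIA PadLock crypto driver, which executes FPU-state-touching instructions from atomic context. A minimal sketch of the pattern (the helper name and the instruction placeholder are hypothetical):

```c
#include <asm/fpu/api.h>	/* irq_ts_save(), irq_ts_restore() */

/* Hypothetical PadLock-style helper: clear CR0.TS around an instruction
 * that would otherwise raise a device-not-available (#NM) fault when
 * run in atomic context. */
static void padlock_style_op(void *out, const void *in)
{
	int ts_state;

	ts_state = irq_ts_save();	/* clts() if TS was set */
	/* ... TS-sensitive instruction(s), e.g. REP XCRYPT ... */
	irq_ts_restore(ts_state);	/* stts() only if we cleared it */
}
```
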
181 | ||
4af08f2f | 182 | /* |
48c4717f | 183 | * Save the FPU state (mark it for reload if necessary): |
87cdb98a IM | 184 | *
185 | * This only ever gets called for the current task. | |
4af08f2f | 186 | */ |
0c070595 | 187 | void fpu__save(struct fpu *fpu) |
8546c008 | 188 | { |
0c070595 | 189 | WARN_ON(fpu != ¤t->thread.fpu); |
87cdb98a | 190 | |
8546c008 | 191 | preempt_disable(); |
d5cea9b0 | 192 | if (fpu->fpregs_active) { |
48c4717f | 193 | if (!copy_fpregs_to_fpstate(fpu)) |
66af8e27 | 194 | fpregs_deactivate(fpu); |
a9241ea5 | 195 | } |
8546c008 LT | 196 | preempt_enable();
197 | } | |
4af08f2f | 198 | EXPORT_SYMBOL_GPL(fpu__save); |
8546c008 | 199 | |
0aba6978 IM | 200 | /*
201 | * Legacy x87 fpstate state init: | |
202 | */ | |
203 | static inline void fpstate_init_fstate(struct i387_fsave_struct *fp) | |
204 | { | |
205 | fp->cwd = 0xffff037fu; | |
206 | fp->swd = 0xffff0000u; | |
207 | fp->twd = 0xffffffffu; | |
208 | fp->fos = 0xffff0000u; | |
209 | } | |
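
These magic numbers mirror the hardware's FNINIT state: CWD 0x037f (all exceptions masked, 64-bit precision, round to nearest) and TWD 0xffff (all eight registers tagged empty), each stored with 0xffff in the otherwise-unused high half of its 32-bit fsave slot. A small user-space sketch (not kernel code; plain GCC inline asm) can confirm the values:

```c
#include <stdio.h>
#include <stdint.h>

/* 28-byte protected-mode FPU environment, as stored by FNSTENV. */
struct fenv { uint32_t cwd, swd, twd, fip, fcs, foo, fos; };

int main(void)
{
	struct fenv env;

	/* Reset the FPU, then dump its environment. */
	asm volatile("fninit; fnstenv %0" : "=m" (env));

	/* Expect cwd=0x037f swd=0x0000 twd=0xffff (high halves undefined). */
	printf("cwd=%#06x swd=%#06x twd=%#06x\n",
	       env.cwd & 0xffff, env.swd & 0xffff, env.twd & 0xffff);
	return 0;
}
```
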
210 | ||
bf935b0b | 211 | void fpstate_init(union thread_xstate *state) |
1da177e4 | 212 | { |
60e019eb | 213 | if (!cpu_has_fpu) { |
bf935b0b | 214 | fpstate_init_soft(&state->soft); |
86603283 | 215 | return; |
e8a496ac | 216 | } |
e8a496ac | 217 | |
bf935b0b | 218 | memset(state, 0, xstate_size); |
1d23c451 | 219 | |
0aba6978 | 220 | if (cpu_has_fxsr) |
bf935b0b | 221 | fpstate_init_fxstate(&state->fxsave); |
0aba6978 | 222 | else |
bf935b0b | 223 | fpstate_init_fstate(&state->fsave); |
86603283 | 224 | } |
c0ee2cf6 | 225 | EXPORT_SYMBOL_GPL(fpstate_init); |
86603283 | 226 | |
bfd6fc05 IM | 227 | /*
228 | * Copy the current task's FPU state to a new task's FPU context. | |
229 | * | |
230 | * In the 'eager' case we just save to the destination context. | |
231 | * | |
232 | * In the 'lazy' case we save to the source context, mark the FPU lazy | |
233 | * via stts() and copy the source context into the destination context. | |
234 | */ | |
f9bc977f | 235 | static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu) |
e102f30f | 236 | { |
f9bc977f | 237 | WARN_ON(src_fpu != ¤t->thread.fpu); |
bfd6fc05 | 238 | |
b1652900 IM | 239 | /*
240 | * Don't let 'init optimized' areas of the XSAVE area | |
241 | * leak into the child task: | |
242 | */ | |
243 | if (use_eager_fpu()) | |
7366ed77 | 244 | memset(&dst_fpu->state.xsave, 0, xstate_size); |
b1652900 IM | 245 | ||
246 | /* | |
247 | * Save current FPU registers directly into the child | |
248 | * FPU context, without any memory-to-memory copying. | |
249 | * | |
250 | * If the FPU context got destroyed in the process (FNSAVE | |
251 | * done on old CPUs) then copy it back into the source | |
252 | * context and mark the current task for lazy restore. | |
253 | * | |
254 | * We have to do all this with preemption disabled, | |
255 | * mostly because of the FNSAVE case, because in that | |
256 | * case we must not allow preemption in the window | |
257 | * between the FNSAVE and us marking the context lazy. | |
258 | * | |
259 | * It shouldn't be an issue as even FNSAVE is plenty | |
260 | * fast in terms of critical section length. | |
261 | */ | |
262 | preempt_disable(); | |
263 | if (!copy_fpregs_to_fpstate(dst_fpu)) { | |
264 | memcpy(&src_fpu->state, &dst_fpu->state, xstate_size); | |
265 | fpregs_deactivate(src_fpu); | |
e102f30f | 266 | } |
b1652900 | 267 | preempt_enable(); |
e102f30f IM | 268 | }
269 | ||
c69e098b | 270 | int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu) |
a752b53d | 271 | { |
c69e098b | 272 | dst_fpu->counter = 0; |
d5cea9b0 | 273 | dst_fpu->fpregs_active = 0; |
c69e098b | 274 | dst_fpu->last_cpu = -1; |
a752b53d | 275 | |
c4d6ee6e | 276 | if (src_fpu->fpstate_active) |
f9bc977f | 277 | fpu_copy(dst_fpu, src_fpu); |
c4d6ee6e | 278 | |
a752b53d IM | 279 | return 0;
280 | } | |
281 | ||
97185c95 | 282 | /* |
c4d72e2d IM | 283 | * Activate the current task's in-memory FPU context,
284 | * if it has not been used before: | |
97185c95 | 285 | */ |
c4d72e2d | 286 | void fpu__activate_curr(struct fpu *fpu) |
97185c95 | 287 | { |
91d93d0e | 288 | WARN_ON_ONCE(fpu != ¤t->thread.fpu); |
97185c95 | 289 | |
c4d72e2d | 290 | if (!fpu->fpstate_active) { |
bf935b0b | 291 | fpstate_init(&fpu->state); |
97185c95 | 292 | |
c4d72e2d IM | 293 | /* Safe to do for the current task: */
294 | fpu->fpstate_active = 1; | |
295 | } | |
97185c95 | 296 | } |
c4d72e2d | 297 | EXPORT_SYMBOL_GPL(fpu__activate_curr); |
97185c95 | 298 | |
86603283 | 299 | /* |
67ee658e IM | 300 | * This function must be called before we modify a stopped child's
301 | * fpstate. | |
af7f8721 IM | 302 | *
303 | * If the child has not used the FPU before then initialize its | |
67ee658e | 304 | * fpstate. |
af7f8721 IM | 305 | *
306 | * If the child has used the FPU before then unlazy it. | |
307 | * | |
67ee658e IM | 308 | * [ After this function call, after registers in the fpstate are
309 | * modified and the child task has woken up, the child task will | |
310 | * restore the modified FPU state from the modified context. If we | |
af7f8721 | 311 | * didn't clear its lazy status here then the lazy in-registers |
67ee658e | 312 | * state pending on its former CPU could be restored, corrupting |
af7f8721 IM | 313 | * the modifications. ]
314 | * | |
315 | * This function is also called before we read a stopped child's | |
67ee658e IM | 316 | * FPU state - to make sure it's initialized if the child has
317 | * no active FPU state. | |
af7f8721 IM | 318 | *
319 | * TODO: A future optimization would be to skip the unlazying in | |
320 | * the read-only case, it's not strictly necessary for | |
321 | * read-only access to the context. | |
86603283 | 322 | */ |
67ee658e | 323 | static void fpu__activate_stopped(struct fpu *child_fpu) |
86603283 | 324 | { |
2fb29fc7 | 325 | WARN_ON_ONCE(child_fpu == ¤t->thread.fpu); |
67e97fc2 | 326 | |
c5bedc68 | 327 | if (child_fpu->fpstate_active) { |
cc08d545 | 328 | child_fpu->last_cpu = -1; |
2fb29fc7 | 329 | } else { |
bf935b0b | 330 | fpstate_init(&child_fpu->state); |
071ae621 | 331 | |
2fb29fc7 IM | 332 | /* Safe to do for stopped child tasks: */
333 | child_fpu->fpstate_active = 1; | |
334 | } | |
1da177e4 LT | 335 | }
336 | ||
93b90712 | 337 | /* |
be7436d5 IM | 338 | * 'fpu__restore()' is called to copy FPU registers from
339 | * the FPU fpstate to the live hw registers and to activate | |
340 | * access to the hardware registers, so that FPU instructions | |
341 | * can be used afterwards. | |
93b90712 | 342 | * |
be7436d5 IM | 343 | * Must be called with kernel preemption disabled (for example
344 | * with local interrupts disabled, as it is in the case of | |
345 | * do_device_not_available()). | |
93b90712 | 346 | */ |
3a0aee48 | 347 | void fpu__restore(void) |
93b90712 IM | 348 | {
349 | struct task_struct *tsk = current; | |
4540d3fa | 350 | struct fpu *fpu = &tsk->thread.fpu; |
93b90712 | 351 | |
c4d72e2d | 352 | fpu__activate_curr(fpu); |
93b90712 | 353 | |
232f62cd | 354 | /* Avoid __kernel_fpu_begin() right after fpregs_activate() */ |
93b90712 | 355 | kernel_fpu_disable(); |
232f62cd | 356 | fpregs_activate(fpu); |
0e75c54f | 357 | if (unlikely(copy_fpstate_to_fpregs(fpu))) { |
fbce7782 | 358 | fpu__clear(fpu); |
93b90712 IM | 359 | force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
360 | } else { | |
361 | tsk->thread.fpu.counter++; | |
362 | } | |
363 | kernel_fpu_enable(); | |
364 | } | |
3a0aee48 | 365 | EXPORT_SYMBOL_GPL(fpu__restore); |
93b90712 | 366 | |
6ffc152e IM | 367 | /*
368 | * Drops current FPU state: deactivates the fpregs and | |
369 | * the fpstate. NOTE: it still leaves previous contents | |
370 | * in the fpregs in the eager-FPU case. | |
371 | * | |
372 | * This function can be used in cases where we know that | |
373 | * a state-restore is coming: either an explicit one, | |
374 | * or a reschedule. | |
375 | */ | |
376 | void fpu__drop(struct fpu *fpu) | |
377 | { | |
378 | preempt_disable(); | |
379 | fpu->counter = 0; | |
380 | ||
381 | if (fpu->fpregs_active) { | |
382 | /* Ignore delayed exceptions from user space */ | |
383 | asm volatile("1: fwait\n" | |
384 | "2:\n" | |
385 | _ASM_EXTABLE(1b, 2b)); | |
386 | fpregs_deactivate(fpu); | |
387 | } | |
388 | ||
389 | fpu->fpstate_active = 0; | |
390 | ||
391 | preempt_enable(); | |
392 | } | |
393 | ||
81541889 IM | 394 | /*
395 | * Clear FPU registers by setting them up from | |
396 | * the init fpstate: | |
397 | */ | |
398 | static inline void copy_init_fpstate_to_fpregs(void) | |
399 | { | |
400 | if (use_xsave()) | |
401 | xrstor_state(&init_fpstate.xsave, -1); | |
402 | else | |
403 | fxrstor_checking(&init_fpstate.fxsave); | |
404 | } | |
405 | ||
6ffc152e | 406 | /* |
fbce7782 IM | 407 | * Clear the FPU state back to init state.
408 | * | |
409 | * Called by sys_execve(), by the signal handler code and by various | |
410 | * error paths. | |
2e85591a | 411 | */ |
04c8e01d | 412 | void fpu__clear(struct fpu *fpu) |
81683cc8 | 413 | { |
04c8e01d | 414 | WARN_ON_ONCE(fpu != ¤t->thread.fpu); /* Almost certainly an anomaly */ |
4c138410 | 415 | |
81683cc8 IM | 416 | if (!use_eager_fpu()) {
417 | /* FPU state will be reallocated lazily at the first use. */ | |
50338615 | 418 | fpu__drop(fpu); |
81683cc8 | 419 | } else { |
c5bedc68 | 420 | if (!fpu->fpstate_active) { |
c4d72e2d | 421 | fpu__activate_curr(fpu); |
81683cc8 IM | 422 | user_fpu_begin();
423 | } | |
81541889 | 424 | copy_init_fpstate_to_fpregs(); |
81683cc8 IM | 425 | }
426 | } | |
427 | ||
5b3efd50 | 428 | /* |
678eaf60 | 429 | * The xstateregs_active() routine is the same as the regset_fpregs_active() routine, |
5b3efd50 SS | 430 | * as the "regset->n" for the xstate regset will be updated based on the feature |
431 | * capabilities supported by the xsave. |
432 | */ | |
678eaf60 | 433 | int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset) |
44210111 | 434 | { |
c5bedc68 IM | 435 | struct fpu *target_fpu = &target->thread.fpu;
436 | ||
437 | return target_fpu->fpstate_active ? regset->n : 0; | |
44210111 | 438 | } |
1da177e4 | 439 | |
678eaf60 | 440 | int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset) |
1da177e4 | 441 | { |
c5bedc68 IM | 442 | struct fpu *target_fpu = &target->thread.fpu;
443 | ||
444 | return (cpu_has_fxsr && target_fpu->fpstate_active) ? regset->n : 0; | |
44210111 | 445 | } |
1da177e4 | 446 | |
44210111 RM | 447 | int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
448 | unsigned int pos, unsigned int count, | |
449 | void *kbuf, void __user *ubuf) | |
450 | { | |
cc08d545 | 451 | struct fpu *fpu = &target->thread.fpu; |
aa283f49 | 452 | |
44210111 RM | 453 | if (!cpu_has_fxsr)
454 | return -ENODEV; | |
455 | ||
67ee658e | 456 | fpu__activate_stopped(fpu); |
36e49e7f | 457 | fpstate_sanitize_xstate(fpu); |
29104e10 | 458 | |
44210111 | 459 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, |
7366ed77 | 460 | &fpu->state.fxsave, 0, -1); |
1da177e4 | 461 | } |
44210111 RM | 462 | ||
463 | int xfpregs_set(struct task_struct *target, const struct user_regset *regset, | |
464 | unsigned int pos, unsigned int count, | |
465 | const void *kbuf, const void __user *ubuf) | |
466 | { | |
cc08d545 | 467 | struct fpu *fpu = &target->thread.fpu; |
44210111 RM | 468 | int ret;
469 | ||
470 | if (!cpu_has_fxsr) | |
471 | return -ENODEV; | |
472 | ||
67ee658e | 473 | fpu__activate_stopped(fpu); |
36e49e7f | 474 | fpstate_sanitize_xstate(fpu); |
29104e10 | 475 | |
44210111 | 476 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, |
7366ed77 | 477 | &fpu->state.fxsave, 0, -1); |
44210111 RM | 478 | ||
479 | /* | |
480 | * mxcsr reserved bits must be masked to zero for security reasons. | |
481 | */ | |
7366ed77 | 482 | fpu->state.fxsave.mxcsr &= mxcsr_feature_mask; |
44210111 | 483 | |
42deec6f SS | 484 | /*
485 | * update the header bits in the xsave header, indicating the | |
486 | * presence of FP and SSE state. | |
487 | */ | |
488 | if (cpu_has_xsave) | |
7366ed77 | 489 | fpu->state.xsave.header.xfeatures |= XSTATE_FPSSE; |
42deec6f | 490 | |
44210111 RM | 491 | return ret;
492 | } | |
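
These handlers are reached via the ptrace regset machinery: on x86-64, PTRACE_GETFPREGS lands in xfpregs_get() above and returns the stopped child's FXSAVE image. A user-space sketch of the read side (standard ptrace API; the program layout is illustrative):

```c
#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/wait.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {		/* child: stop so the parent can inspect us */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		_exit(0);
	}

	waitpid(pid, NULL, 0);	/* wait for the SIGSTOP */

	struct user_fpregs_struct fpregs;	/* x86-64 FXSAVE layout */
	if (ptrace(PTRACE_GETFPREGS, pid, NULL, &fpregs) == 0)
		printf("cwd=%#06x mxcsr=%#010x\n",
		       (unsigned int)fpregs.cwd, (unsigned int)fpregs.mxcsr);

	kill(pid, SIGKILL);
	return 0;
}
```
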
493 | ||
5b3efd50 SS | 494 | int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
495 | unsigned int pos, unsigned int count, | |
496 | void *kbuf, void __user *ubuf) | |
497 | { | |
cc08d545 | 498 | struct fpu *fpu = &target->thread.fpu; |
18ecb3bf | 499 | struct xsave_struct *xsave; |
5b3efd50 SS | 500 | int ret;
501 | ||
502 | if (!cpu_has_xsave) | |
503 | return -ENODEV; | |
504 | ||
67ee658e | 505 | fpu__activate_stopped(fpu); |
5b3efd50 | 506 | |
7366ed77 | 507 | xsave = &fpu->state.xsave; |
18ecb3bf | 508 | |
5b3efd50 | 509 | /* |
ff7fbc72 SS | 510 | * Copy the 48 bytes defined by the software first into the xstate
511 | * memory layout in the thread struct, so that we can copy the entire | |
512 | * xstateregs to the user using one user_regset_copyout(). | |
5b3efd50 | 513 | */ |
e7f180dc ON | 514 | memcpy(&xsave->i387.sw_reserved,
515 | xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes)); | |
5b3efd50 | 516 | /* |
ff7fbc72 | 517 | * Copy the xstate memory layout. |
5b3efd50 | 518 | */ |
e7f180dc | 519 | ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1); |
5b3efd50 SS | 520 | return ret;
521 | } | |
522 | ||
523 | int xstateregs_set(struct task_struct *target, const struct user_regset *regset, | |
524 | unsigned int pos, unsigned int count, | |
525 | const void *kbuf, const void __user *ubuf) | |
526 | { | |
cc08d545 | 527 | struct fpu *fpu = &target->thread.fpu; |
18ecb3bf | 528 | struct xsave_struct *xsave; |
5b3efd50 | 529 | int ret; |
5b3efd50 SS | 530 | ||
531 | if (!cpu_has_xsave) | |
532 | return -ENODEV; | |
533 | ||
67ee658e | 534 | fpu__activate_stopped(fpu); |
5b3efd50 | 535 | |
7366ed77 | 536 | xsave = &fpu->state.xsave; |
18ecb3bf | 537 | |
e7f180dc | 538 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1); |
5b3efd50 SS | 539 | /*
540 | * mxcsr reserved bits must be masked to zero for security reasons. | |
541 | */ | |
e7f180dc | 542 | xsave->i387.mxcsr &= mxcsr_feature_mask; |
400e4b20 | 543 | xsave->header.xfeatures &= xfeatures_mask; |
5b3efd50 SS | 544 | /*
545 | * These bits must be zero. | |
546 | */ | |
3a54450b | 547 | memset(&xsave->header.reserved, 0, 48); |
8dcea8db | 548 | |
5b3efd50 SS | 549 | return ret;
550 | } | |
551 | ||
44210111 | 552 | #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION |
1da177e4 | 553 | |
1da177e4 LT | 554 | /*
555 | * FPU tag word conversions. | |
556 | */ | |
557 | ||
3b095a04 | 558 | static inline unsigned short twd_i387_to_fxsr(unsigned short twd) |
1da177e4 LT | 559 | {
560 | unsigned int tmp; /* to avoid 16 bit prefixes in the code */ | |
3b095a04 | 561 | |
1da177e4 | 562 | /* Transform each pair of bits into 01 (valid) or 00 (empty) */ |
3b095a04 | 563 | tmp = ~twd; |
44210111 | 564 | tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */ |
3b095a04 CG | 565 | /* and move the valid bits to the lower byte. */
566 | tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */ | |
567 | tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */ | |
568 | tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */ | |
f668964e | 569 | |
3b095a04 | 570 | return tmp; |
1da177e4 LT | 571 | }
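
A user-space harness (the test values are worked examples chosen here, not taken from any kernel test suite) checking the compression: each 2-bit i387 tag (00 valid, 01 zero, 10 special, 11 empty) collapses into one FXSR bit, 1 meaning "in use":

```c
#include <assert.h>

/* Same bit-twiddling as twd_i387_to_fxsr() above. */
static unsigned short tag_compress(unsigned short twd)
{
	unsigned int tmp = ~twd;

	tmp = (tmp | (tmp >> 1)) & 0x5555;	/* 0V0V0V0V0V0V0V0V */
	tmp = (tmp | (tmp >> 1)) & 0x3333;	/* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f;	/* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff;	/* 00000000VVVVVVVV */
	return tmp;
}

int main(void)
{
	assert(tag_compress(0xffff) == 0x00);	/* all registers empty */
	assert(tag_compress(0x0000) == 0xff);	/* all registers valid */
	assert(tag_compress(0xfffc) == 0x01);	/* only st(0) in use   */
	return 0;
}
```
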
572 | ||
497888cf | 573 | #define FPREG_ADDR(f, n) ((void *)&(f)->st_space + (n) * 16) |
44210111 RM | 574 | #define FP_EXP_TAG_VALID 0
575 | #define FP_EXP_TAG_ZERO 1 | |
576 | #define FP_EXP_TAG_SPECIAL 2 | |
577 | #define FP_EXP_TAG_EMPTY 3 | |
578 | ||
579 | static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave) | |
580 | { | |
581 | struct _fpxreg *st; | |
582 | u32 tos = (fxsave->swd >> 11) & 7; | |
583 | u32 twd = (unsigned long) fxsave->twd; | |
584 | u32 tag; | |
585 | u32 ret = 0xffff0000u; | |
586 | int i; | |
1da177e4 | 587 | |
44210111 | 588 | for (i = 0; i < 8; i++, twd >>= 1) { |
3b095a04 CG | 589 | if (twd & 0x1) {
590 | st = FPREG_ADDR(fxsave, (i - tos) & 7); | |
1da177e4 | 591 | |
3b095a04 | 592 | switch (st->exponent & 0x7fff) { |
1da177e4 | 593 | case 0x7fff: |
44210111 | 594 | tag = FP_EXP_TAG_SPECIAL; |
1da177e4 LT | 595 | break;
596 | case 0x0000: | |
3b095a04 CG | 597 | if (!st->significand[0] &&
598 | !st->significand[1] && | |
599 | !st->significand[2] && | |
44210111 RM | 600 | !st->significand[3])
601 | tag = FP_EXP_TAG_ZERO; | |
602 | else | |
603 | tag = FP_EXP_TAG_SPECIAL; | |
1da177e4 LT | 604 | break;
605 | default: | |
44210111 RM | 606 | if (st->significand[3] & 0x8000)
607 | tag = FP_EXP_TAG_VALID; | |
608 | else | |
609 | tag = FP_EXP_TAG_SPECIAL; | |
1da177e4 LT | 610 | break;
611 | } | |
612 | } else { | |
44210111 | 613 | tag = FP_EXP_TAG_EMPTY; |
1da177e4 | 614 | } |
44210111 | 615 | ret |= tag << (2 * i); |
1da177e4 LT | 616 | }
617 | return ret; | |
618 | } | |
619 | ||
620 | /* | |
44210111 | 621 | * FXSR floating point environment conversions. |
1da177e4 LT | 622 | */
623 | ||
72a671ce | 624 | void |
f668964e | 625 | convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk) |
1da177e4 | 626 | { |
7366ed77 | 627 | struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state.fxsave; |
44210111 RM | 628 | struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
629 | struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0]; | |
630 | int i; | |
1da177e4 | 631 | |
44210111 RM | 632 | env->cwd = fxsave->cwd | 0xffff0000u;
633 | env->swd = fxsave->swd | 0xffff0000u; | |
634 | env->twd = twd_fxsr_to_i387(fxsave); | |
635 | ||
636 | #ifdef CONFIG_X86_64 | |
637 | env->fip = fxsave->rip; | |
638 | env->foo = fxsave->rdp; | |
10c11f30 BG | 639 | /*
640 | * This should actually be ds/cs at the time of the FPU exception, but |
641 | * that information is not available in 64-bit mode. |
642 | */ |
643 | env->fcs = task_pt_regs(tsk)->cs; | |
44210111 | 644 | if (tsk == current) { |
10c11f30 | 645 | savesegment(ds, env->fos); |
1da177e4 | 646 | } else { |
10c11f30 | 647 | env->fos = tsk->thread.ds; |
1da177e4 | 648 | } |
10c11f30 | 649 | env->fos |= 0xffff0000; |
44210111 RM | 650 | #else
651 | env->fip = fxsave->fip; | |
609b5297 | 652 | env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16); |
44210111 RM | 653 | env->foo = fxsave->foo;
654 | env->fos = fxsave->fos; | |
655 | #endif | |
1da177e4 | 656 | |
44210111 RM | 657 | for (i = 0; i < 8; ++i)
658 | memcpy(&to[i], &from[i], sizeof(to[0])); | |
1da177e4 LT | 659 | }
660 | ||
72a671ce SS | 661 | void convert_to_fxsr(struct task_struct *tsk,
662 | const struct user_i387_ia32_struct *env) | |
1da177e4 | 663 | |
1da177e4 | 664 | { |
7366ed77 | 665 | struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state.fxsave; |
44210111 RM | 666 | struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
667 | struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0]; | |
668 | int i; | |
1da177e4 | 669 | |
44210111 RM | 670 | fxsave->cwd = env->cwd;
671 | fxsave->swd = env->swd; | |
672 | fxsave->twd = twd_i387_to_fxsr(env->twd); | |
673 | fxsave->fop = (u16) ((u32) env->fcs >> 16); | |
674 | #ifdef CONFIG_X86_64 | |
675 | fxsave->rip = env->fip; | |
676 | fxsave->rdp = env->foo; | |
677 | /* cs and ds ignored */ | |
678 | #else | |
679 | fxsave->fip = env->fip; | |
680 | fxsave->fcs = (env->fcs & 0xffff); | |
681 | fxsave->foo = env->foo; | |
682 | fxsave->fos = env->fos; | |
683 | #endif | |
1da177e4 | 684 | |
44210111 RM | 685 | for (i = 0; i < 8; ++i)
686 | memcpy(&to[i], &from[i], sizeof(from[0])); | |
1da177e4 LT | 687 | }
688 | ||
44210111 RM | 689 | int fpregs_get(struct task_struct *target, const struct user_regset *regset,
690 | unsigned int pos, unsigned int count, | |
691 | void *kbuf, void __user *ubuf) | |
1da177e4 | 692 | { |
cc08d545 | 693 | struct fpu *fpu = &target->thread.fpu; |
44210111 | 694 | struct user_i387_ia32_struct env; |
1da177e4 | 695 | |
67ee658e | 696 | fpu__activate_stopped(fpu); |
1da177e4 | 697 | |
60e019eb | 698 | if (!static_cpu_has(X86_FEATURE_FPU)) |
e8a496ac SS | 699 | return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
700 | ||
60e019eb | 701 | if (!cpu_has_fxsr) |
44210111 | 702 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, |
7366ed77 | 703 | &fpu->state.fsave, 0, |
61c4628b | 704 | -1); |
1da177e4 | 705 | |
36e49e7f | 706 | fpstate_sanitize_xstate(fpu); |
29104e10 | 707 | |
44210111 RM | 708 | if (kbuf && pos == 0 && count == sizeof(env)) {
709 | convert_from_fxsr(kbuf, target); | |
710 | return 0; | |
1da177e4 | 711 | } |
44210111 RM | 712 | ||
713 | convert_from_fxsr(&env, target); | |
f668964e | 714 | |
44210111 | 715 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1); |
1da177e4 LT | 716 | }
717 | ||
44210111 RM | 718 | int fpregs_set(struct task_struct *target, const struct user_regset *regset,
719 | unsigned int pos, unsigned int count, | |
720 | const void *kbuf, const void __user *ubuf) | |
1da177e4 | 721 | { |
cc08d545 | 722 | struct fpu *fpu = &target->thread.fpu; |
44210111 RM | 723 | struct user_i387_ia32_struct env;
724 | int ret; | |
1da177e4 | 725 | |
67ee658e | 726 | fpu__activate_stopped(fpu); |
36e49e7f | 727 | fpstate_sanitize_xstate(fpu); |
29104e10 | 728 | |
60e019eb | 729 | if (!static_cpu_has(X86_FEATURE_FPU)) |
e8a496ac SS | 730 | return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
731 | ||
60e019eb | 732 | if (!cpu_has_fxsr) |
44210111 | 733 | return user_regset_copyin(&pos, &count, &kbuf, &ubuf, |
7366ed77 | 734 | &fpu->state.fsave, 0, |
60e019eb | 735 | -1); |
44210111 RM | 736 | ||
737 | if (pos > 0 || count < sizeof(env)) | |
738 | convert_from_fxsr(&env, target); | |
739 | ||
740 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1); | |
741 | if (!ret) | |
742 | convert_to_fxsr(target, &env); | |
743 | ||
42deec6f SS | 744 | /*
745 | * update the header bit in the xsave header, indicating the | |
746 | * presence of FP. | |
747 | */ | |
748 | if (cpu_has_xsave) | |
7366ed77 | 749 | fpu->state.xsave.header.xfeatures |= XSTATE_FP; |
44210111 | 750 | return ret; |
1da177e4 LT | 751 | }
752 | ||
1da177e4 LT | 753 | /*
754 | * FPU state for core dumps. | |
60b3b9af RM | 755 | * This is only used for a.out dumps now.
756 | * It is declared generically using elf_fpregset_t (which is | |
757 | * struct user_i387_struct) but is in fact only used for 32-bit | |
758 | * dumps, so on 64-bit it is really struct user_i387_ia32_struct. | |
1da177e4 | 759 | */ |
c5bedc68 | 760 | int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu) |
1da177e4 | 761 | { |
1da177e4 | 762 | struct task_struct *tsk = current; |
c5bedc68 | 763 | struct fpu *fpu = &tsk->thread.fpu; |
f668964e | 764 | int fpvalid; |
1da177e4 | 765 | |
c5bedc68 | 766 | fpvalid = fpu->fpstate_active; |
60b3b9af RM | 767 | if (fpvalid)
768 | fpvalid = !fpregs_get(tsk, NULL, | |
769 | 0, sizeof(struct user_i387_ia32_struct), | |
c5bedc68 | 770 | ufpu, NULL); |
1da177e4 LT | 771 | ||
772 | return fpvalid; | |
773 | } | |
129f6946 | 774 | EXPORT_SYMBOL(dump_fpu); |
1da177e4 | 775 | |
60b3b9af | 776 | #endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */ |
e1cebad4 IM | 777 | ||
778 | /* | |
779 | * x87 math exception handling: | |
780 | */ | |
781 | ||
782 | static inline unsigned short get_fpu_cwd(struct fpu *fpu) | |
783 | { | |
784 | if (cpu_has_fxsr) { | |
785 | return fpu->state.fxsave.cwd; | |
786 | } else { | |
787 | return (unsigned short)fpu->state.fsave.cwd; | |
788 | } | |
789 | } | |
790 | ||
791 | static inline unsigned short get_fpu_swd(struct fpu *fpu) | |
792 | { | |
793 | if (cpu_has_fxsr) { | |
794 | return fpu->state.fxsave.swd; | |
795 | } else { | |
796 | return (unsigned short)fpu->state.fsave.swd; | |
797 | } | |
798 | } | |
799 | ||
800 | static inline unsigned short get_fpu_mxcsr(struct fpu *fpu) | |
801 | { | |
802 | if (cpu_has_xmm) { | |
803 | return fpu->state.fxsave.mxcsr; | |
804 | } else { | |
805 | return MXCSR_DEFAULT; | |
806 | } | |
807 | } | |
808 | ||
809 | int fpu__exception_code(struct fpu *fpu, int trap_nr) | |
810 | { | |
811 | int err; | |
812 | ||
813 | if (trap_nr == X86_TRAP_MF) { | |
814 | unsigned short cwd, swd; | |
815 | /* | |
816 | * (~cwd & swd) masks out the exceptions that are masked in the control |
817 | * word, keeping only the unmasked ones. 0x3f covers the exception bits in |
818 | * these regs, 0x200 is the C1 reg needed in case of a stack fault, and |
819 | * 0x040 is the stack fault bit. We should only be taking one exception |
820 | * at a time, so if this combination doesn't produce any single exception, |
821 | * then we have a bad program that isn't synchronizing its FPU usage, and |
822 | * it will suffer the consequences, since we won't be able to fully |
823 | * reproduce the context of the exception. |
824 | */ | |
825 | cwd = get_fpu_cwd(fpu); | |
826 | swd = get_fpu_swd(fpu); | |
827 | ||
828 | err = swd & ~cwd; | |
829 | } else { | |
830 | /* | |
831 | * The SIMD FPU exceptions are handled a little differently, as there | |
832 | * is only a single status/control register. Thus, to determine which | |
833 | * unmasked exception was caught we must mask the exception mask bits | |
834 | * at 0x1f80, and then use these to mask the exception bits at 0x3f. | |
835 | */ | |
836 | unsigned short mxcsr = get_fpu_mxcsr(fpu); | |
837 | err = ~(mxcsr >> 7) & mxcsr; | |
838 | } | |
839 | ||
840 | if (err & 0x001) { /* Invalid op */ | |
841 | /* | |
842 | * swd & 0x240 == 0x040: Stack Underflow | |
843 | * swd & 0x240 == 0x240: Stack Overflow | |
844 | * User must clear the SF bit (0x40) if set | |
845 | */ | |
846 | return FPE_FLTINV; | |
847 | } else if (err & 0x004) { /* Divide by Zero */ | |
848 | return FPE_FLTDIV; | |
849 | } else if (err & 0x008) { /* Overflow */ | |
850 | return FPE_FLTOVF; | |
851 | } else if (err & 0x012) { /* Denormal, Underflow */ | |
852 | return FPE_FLTUND; | |
853 | } else if (err & 0x020) { /* Precision */ | |
854 | return FPE_FLTRES; | |
855 | } | |
856 | ||
857 | /* | |
858 | * If we're using IRQ 13, or supposedly even some trap | |
859 | * X86_TRAP_MF implementations, it's possible | |
860 | * we get a spurious trap, which is not an error. | |
861 | */ | |
862 | return 0; | |
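
A worked instance of the SIMD branch, with hypothetical register values: start from the default MXCSR of 0x1f80, clear the divide-by-zero mask bit (ZM, bit 9) and set the divide-by-zero flag (ZE, bit 2). Only the low six bits of err are consulted by the if-chain above.

```c
#include <stdio.h>

int main(void)
{
	unsigned short mxcsr = 0x1d84;	/* 0x1f80 with ZM cleared, ZE set */
	unsigned short err = ~(mxcsr >> 7) & mxcsr;

	/* High mxcsr bits leak into err, but only 0x3f is ever tested. */
	printf("err & 0x3f = %#x\n", err & 0x3f);	/* 0x4 -> FPE_FLTDIV */
	return 0;
}
```
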
863 | } |