/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _FPU_INTERNAL_H
#define _FPU_INTERNAL_H

#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>

#ifdef CONFIG_X86_64
# include <asm/sigcontext32.h>
# include <asm/user32.h>
int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			compat_sigset_t *set, struct pt_regs *regs);
int ia32_setup_frame(int sig, struct k_sigaction *ka,
		     compat_sigset_t *set, struct pt_regs *regs);
#else
# define user_i387_ia32_struct	user_i387_struct
# define user32_fxsr_struct	user_fxsr_struct
# define ia32_setup_frame	__setup_frame
# define ia32_setup_rt_frame	__setup_rt_frame
#endif

extern unsigned int mxcsr_feature_mask;
extern void fpu_init(void);

DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);

extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
			      struct task_struct *tsk);
extern void convert_to_fxsr(struct task_struct *tsk,
			    const struct user_i387_ia32_struct *env);

extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
				xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
				 xstateregs_set;

/*
 * xstateregs_active == fpregs_active. Please refer to the comment
 * at the definition of fpregs_active.
 */
#define xstateregs_active	fpregs_active

#ifdef CONFIG_MATH_EMULATION
# define HAVE_HWFP		(boot_cpu_data.hard_math)
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
# define HAVE_HWFP		1
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif

static inline int is_ia32_compat_frame(void)
{
	return config_enabled(CONFIG_IA32_EMULATION) &&
	       test_thread_flag(TIF_IA32);
}

static inline int is_ia32_frame(void)
{
	return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
}

static inline int is_x32_frame(void)
{
	return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
}

#define X87_FSW_ES (1 << 7)	/* Exception Summary */

static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}

extern void __sanitize_i387_state(struct task_struct *);

static inline void sanitize_i387_state(struct task_struct *tsk)
{
	if (!use_xsaveopt())
		return;
	__sanitize_i387_state(tsk);
}

#define check_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: movl $-1,%[err]\n"				\
		     "   jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})
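
/*
 * Illustrative use of the macro above (this mirrors the wrappers that
 * follow; it is not a separate API): a call such as
 *
 *	err = check_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
 *
 * yields err == 0 when the instruction executed normally, and -1 when
 * it faulted and the exception table redirected control to the .fixup
 * branch.
 */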

static inline int fsave_user(struct i387_fsave_struct __user *fx)
{
	return check_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
}

static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return check_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return check_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return check_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
}

static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in fpu_fxsave() below. */
	return check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			  "m" (*fx));
}

static inline int frstor_checking(struct i387_fsave_struct *fx)
{
	return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void fpu_fxsave(struct fpu *fpu)
{
	if (config_enabled(CONFIG_X86_32))
		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		asm volatile("fxsaveq %0" : "=m" (fpu->state->fxsave));
	else {
		/* Using "rex64; fxsave %0" is broken because, if the memory
		 * operand uses any extended registers for addressing, a second
		 * REX prefix will be generated (to the assembler, rex64
		 * followed by semicolon is a separate instruction), and hence
		 * the 64-bitness is lost.
		 *
		 * Using "fxsaveq %0" would be the ideal choice, but is only
		 * supported starting with gas 2.16.
		 *
		 * Using, as a workaround, the properly prefixed form below
		 * isn't accepted by any binutils version so far released,
		 * complaining that the same type of prefix is used twice if
		 * an extended register is needed for addressing (fix submitted
		 * to mainline 2005-11-21).
		 *
		 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave));
		 *
		 * This, however, we can work around by forcing the compiler to
		 * select an addressing mode that doesn't require extended
		 * registers.
		 */
		asm volatile( "rex64/fxsave (%[fx])"
			     : "=m" (fpu->state->fxsave)
			     : [fx] "R" (&fpu->state->fxsave));
	}
}

/*
 * Must be called with preempt disabled. Returns 'true' if the
 * FPU state is still intact.
 */
static inline int fpu_save_init(struct fpu *fpu)
{
	if (use_xsave()) {
		fpu_xsave(fpu);

		/*
		 * The xsave header may indicate the init state of the FP.
		 */
		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
			return 1;
	} else if (use_fxsr()) {
		fpu_fxsave(fpu);
	} else {
		asm volatile("fnsave %[fx]; fwait"
			     : [fx] "=m" (fpu->state->fsave));
		return 0;
	}

	/*
	 * If exceptions are pending, we need to clear them so
	 * that we don't randomly get exceptions later.
	 *
	 * FIXME! Is this perhaps only true for the old-style
	 * irq13 case? Maybe we could leave the x87 state
	 * intact otherwise?
	 */
	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
		asm volatile("fnclex");
		return 0;
	}
	return 1;
}

static inline int __save_init_fpu(struct task_struct *tsk)
{
	return fpu_save_init(&tsk->thread.fpu);
}

static inline int fpu_restore_checking(struct fpu *fpu)
{
	if (use_xsave())
		return fpu_xrstor_checking(&fpu->state->xsave);
	else if (use_fxsr())
		return fxrstor_checking(&fpu->state->fxsave);
	else
		return frstor_checking(&fpu->state->fsave);
}

static inline int restore_fpu_checking(struct task_struct *tsk)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an
	 * exception is pending. Clear the x87 state here by setting
	 * it to fixed values. The "m" operand is a dummy memory
	 * reference to a variable that should already be in L1 cache.
	 */
	alternative_input(
		ASM_NOP8 ASM_NOP2,
		"emms\n\t"		/* clear stack tags */
		"fildl %P[addr]",	/* set F?P to defined value */
		X86_FEATURE_FXSAVE_LEAK,
		[addr] "m" (tsk->thread.fpu.has_fpu));

	return fpu_restore_checking(&tsk->thread.fpu);
}

/*
 * Software FPU state helpers. Careful: these need preemption
 * protection *and* they need to be properly paired with the
 * CR0.TS changes!
 */
static inline int __thread_has_fpu(struct task_struct *tsk)
{
	return tsk->thread.fpu.has_fpu;
}

/* Must be paired with an 'stts' after! */
static inline void __thread_clear_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 0;
	this_cpu_write(fpu_owner_task, NULL);
}

/* Must be paired with a 'clts' before! */
static inline void __thread_set_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 1;
	this_cpu_write(fpu_owner_task, tsk);
}

/*
 * Encapsulate the CR0.TS handling together with the
 * software flag.
 *
 * These generally need preemption protection to work,
 * so try to avoid using them on their own.
 */
static inline void __thread_fpu_end(struct task_struct *tsk)
{
	__thread_clear_has_fpu(tsk);
	if (!use_xsave())
		stts();
}

static inline void __thread_fpu_begin(struct task_struct *tsk)
{
	if (!use_xsave())
		clts();
	__thread_set_has_fpu(tsk);
}

static inline void __drop_fpu(struct task_struct *tsk)
{
	if (__thread_has_fpu(tsk)) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		__thread_fpu_end(tsk);
	}
}

static inline void drop_fpu(struct task_struct *tsk)
{
	/*
	 * Forget the coprocessor state.
	 */
	preempt_disable();
	tsk->fpu_counter = 0;
	__drop_fpu(tsk);
	clear_used_math();
	preempt_enable();
}

static inline void drop_init_fpu(struct task_struct *tsk)
{
	if (!use_xsave())
		drop_fpu(tsk);
	else
		/* Reload the init state for all xstate components. */
		xrstor_state(init_xstate_buf, -1);
}

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state and
 *    sets the new state of the CR0.TS bit. This is
 *    done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 */
typedef struct { int preload; } fpu_switch_t;

/*
 * Lazy restore: the actual register restore can be skipped when the
 * task's FPU state is still live in this CPU's registers. That is the
 * case when this CPU's "fpu_owner_task" still points at the task *and*
 * the task last touched the FPU on this very CPU.
 */
static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
{
	return new == this_cpu_read_stable(fpu_owner_task) &&
	       cpu == new->thread.fpu.last_cpu;
}

static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
{
	fpu_switch_t fpu;

	/*
	 * If the task has used math, preload the FPU: always on xsave
	 * processors, otherwise only if math was used in more than 5
	 * consecutive context switches.
	 */
	fpu.preload = tsk_used_math(new) && (use_xsave() ||
					     new->fpu_counter > 5);
	if (__thread_has_fpu(old)) {
		if (!__save_init_fpu(old))
			cpu = ~0;
		old->thread.fpu.last_cpu = cpu;
		old->thread.fpu.has_fpu = 0;	/* But leave fpu_owner_task! */

		/* Don't change CR0.TS if we just switch! */
		if (fpu.preload) {
			new->fpu_counter++;
			__thread_set_has_fpu(new);
			prefetch(new->thread.fpu.state);
		} else if (!use_xsave())
			stts();
	} else {
		old->fpu_counter = 0;
		old->thread.fpu.last_cpu = ~0;
		if (fpu.preload) {
			new->fpu_counter++;
			if (!use_xsave() && fpu_lazy_restore(new, cpu))
				fpu.preload = 0;
			else
				prefetch(new->thread.fpu.state);
			__thread_fpu_begin(new);
		}
	}
	return fpu;
}

/*
 * By the time this gets called, we've already cleared CR0.TS and
 * given the process the FPU if we are going to preload the FPU
 * state - all we need to do is to conditionally restore the register
 * state itself.
 */
static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
{
	if (fpu.preload) {
		if (unlikely(restore_fpu_checking(new)))
			drop_init_fpu(new);
	}
}
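
/*
 * Usage sketch, for illustration only: the real call sites are the
 * __switch_to() implementations in arch/x86/kernel/process_32.c and
 * process_64.c, which bracket the rest of the context switch roughly
 * like this:
 *
 *	fpu_switch_t fpu;
 *
 *	fpu = switch_fpu_prepare(prev_p, next_p, cpu);
 *	...	switch stacks, segments, thread-local state ...
 *	switch_fpu_finish(next_p, fpu);
 */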

/*
 * Signal frame handlers...
 */
extern int save_xstate_sig(void __user *buf, void __user *fx, int size);
extern int __restore_xstate_sig(void __user *buf, void __user *fx, int size);

static inline int xstate_sigframe_size(void)
{
	return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size;
}

static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
{
	void __user *buf_fx = buf;
	int size = xstate_sigframe_size();

	if (ia32_frame && use_fxsr()) {
		buf_fx = buf + sizeof(struct i387_fsave_struct);
		size += sizeof(struct i387_fsave_struct);
	}

	return __restore_xstate_sig(buf, buf_fx, size);
}
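
/*
 * Note on the frame layout implied above: an ia32 frame on an
 * FXSR-capable CPU carries the legacy i387_fsave_struct header in
 * front of the fxsave/xsave image, so the extended state is restored
 * from buf + sizeof(struct i387_fsave_struct) and the total size
 * grows by that header.
 */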

/*
 * Needs to be preemption-safe.
 *
 * NOTE! user_fpu_begin() must be used only immediately before restoring
 * the FPU state. This function does not do any save/restore on its own.
 */
static inline void user_fpu_begin(void)
{
	preempt_disable();
	if (!user_has_fpu())
		__thread_fpu_begin(current);
	preempt_enable();
}

/*
 * This one disables preemption on its own and is safe.
 */
static inline void save_init_fpu(struct task_struct *tsk)
{
	WARN_ON_ONCE(!__thread_has_fpu(tsk));

	if (use_xsave()) {
		xsave_state(&tsk->thread.fpu.state->xsave, -1);
		return;
	}

	preempt_disable();
	__save_init_fpu(tsk);
	__thread_fpu_end(tsk);
	preempt_enable();
}

/*
 * i387 state interaction
 */
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.cwd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
	}
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.swd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.swd;
	}
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
	if (cpu_has_xmm) {
		return tsk->thread.fpu.state->fxsave.mxcsr;
	} else {
		return MXCSR_DEFAULT;
	}
}

static inline bool fpu_allocated(struct fpu *fpu)
{
	return fpu->state != NULL;
}

static inline int fpu_alloc(struct fpu *fpu)
{
	if (fpu_allocated(fpu))
		return 0;
	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!fpu->state)
		return -ENOMEM;
	/* fxsave/fxrstor require 16-byte alignment of the state image. */
	WARN_ON((unsigned long)fpu->state & 15);
	return 0;
}

static inline void fpu_free(struct fpu *fpu)
{
	if (fpu->state) {
		kmem_cache_free(task_xstate_cachep, fpu->state);
		fpu->state = NULL;
	}
}

static inline void fpu_copy(struct task_struct *dst, struct task_struct *src)
{
	if (use_xsave()) {
		struct xsave_struct *xsave = &dst->thread.fpu.state->xsave;

		memset(&xsave->xsave_hdr, 0, sizeof(struct xsave_hdr_struct));
		xsave_state(xsave, -1);
	} else {
		struct fpu *dfpu = &dst->thread.fpu;
		struct fpu *sfpu = &src->thread.fpu;

		unlazy_fpu(src);
		memcpy(dfpu->state, sfpu->state, xstate_size);
	}
}

static inline unsigned long
alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx,
		unsigned long *size)
{
	unsigned long frame_size = xstate_sigframe_size();

	*buf_fx = sp = round_down(sp - frame_size, 64);
	if (ia32_frame && use_fxsr()) {
		frame_size += sizeof(struct i387_fsave_struct);
		sp -= sizeof(struct i387_fsave_struct);
	}

	*size = frame_size;
	return sp;
}
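
/*
 * Resulting math frame layout, sketched from the code above (lower
 * addresses first):
 *
 *	sp     -> [ i387_fsave_struct  ]  ia32 frames on FXSR CPUs only
 *	buf_fx -> [ fxsave/xsave image ]  64-byte aligned
 *	          [ FP_XSTATE_MAGIC2   ]  trailing sentinel, xsave only
 */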

#endif