/* MN10300 FPU management
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/uaccess.h>
#include <linux/sched/signal.h>

#include <asm/fpu.h>
#include <asm/elf.h>
#include <asm/exceptions.h>

#ifdef CONFIG_LAZY_SAVE_FPU
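/* the task whose FPU state currently occupies the FPU hardware under lazy FPU
 * switching; NULL when no task's state is loaded
 */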
struct task_struct *fpu_state_owner;
#endif

/*
 * error functions in FPU disabled exception
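 * - kernel code is not expected to use the FPU, so an FPU Disabled exception
 *   taken in kernel mode is fatal unless an exception table fixup applies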
 */
asmlinkage void fpu_disabled_in_kernel(struct pt_regs *regs)
{
	die_if_no_fixup("An FPU Disabled exception happened in kernel space\n",
			regs, EXCEP_FPU_DISABLED);
}

/*
 * handle an FPU operational exception
 * - there's a possibility that if the FPU is asynchronous, the signal might
 *   be meant for a process other than the current one
 */
asmlinkage void fpu_exception(struct pt_regs *regs, enum exception_code code)
{
	struct task_struct *tsk = current;
	siginfo_t info;
	u32 fpcr;

	if (!user_mode(regs))
		die_if_no_fixup("An FPU Operation exception happened in"
				" kernel space\n",
				regs, code);

	if (!is_using_fpu(tsk))
		die_if_no_fixup("An FPU Operation exception happened,"
				" but the FPU is not in use",
				regs, code);

	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void *) tsk->thread.uregs->pc;
	info.si_code = FPE_FLTINV;

	unlazy_fpu(tsk);

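	/* unlazy_fpu() above flushed the live state back to thread.fpu_state,
	 * so the exception cause bits in FPCR can be examined to pick the most
	 * specific si_code
	 */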
	fpcr = tsk->thread.fpu_state.fpcr;

	if (fpcr & FPCR_EC_Z)
		info.si_code = FPE_FLTDIV;
	else if (fpcr & FPCR_EC_O)
		info.si_code = FPE_FLTOVF;
	else if (fpcr & FPCR_EC_U)
		info.si_code = FPE_FLTUND;
	else if (fpcr & FPCR_EC_I)
		info.si_code = FPE_FLTRES;

	force_sig_info(SIGFPE, &info, tsk);
}

/*
 * save the FPU state to a signal context
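 * - returns 0 if the task has no FPU state, 1 if the state was copied out to
 *   userspace, or -1 if the copy faulted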
 */
int fpu_setup_sigcontext(struct fpucontext *fpucontext)
{
	struct task_struct *tsk = current;

	if (!is_using_fpu(tsk))
		return 0;

	/* transfer the current FPU state to memory and cause fpu_init() to be
	 * triggered by the next attempted FPU operation by the current
	 * process.
	 */
	preempt_disable();

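	/* with eager FPU switching (!CONFIG_LAZY_SAVE_FPU) the THREAD_HAS_FPU
	 * flag marks state live in the hardware; with lazy switching ownership
	 * is tracked globally through fpu_state_owner
	 */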
#ifndef CONFIG_LAZY_SAVE_FPU
	if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
		fpu_save(&tsk->thread.fpu_state);
		tsk->thread.uregs->epsw &= ~EPSW_FE;
		tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
	}
#else /* !CONFIG_LAZY_SAVE_FPU */
	if (fpu_state_owner == tsk) {
		fpu_save(&tsk->thread.fpu_state);
		fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
		fpu_state_owner = NULL;
	}
#endif /* !CONFIG_LAZY_SAVE_FPU */

	preempt_enable();

	/* we no longer have a valid current FPU state */
	clear_using_fpu(tsk);

	/* transfer the saved FPU state onto the userspace stack */
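	/* only the smaller of the two structures is copied so that neither the
	 * kernel copy nor the userspace buffer can be overrun
	 */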
	if (copy_to_user(fpucontext,
			 &tsk->thread.fpu_state,
			 min(sizeof(struct fpu_state_struct),
			     sizeof(struct fpucontext))))
		return -1;

	return 1;
}

/*
 * kill a process's FPU state during restoration after signal handling
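 * - anything still held in the FPU hardware is discarded rather than saved,
 *   since the state restored from the signal frame will replace it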
 */
void fpu_kill_state(struct task_struct *tsk)
{
	/* disown anything left in the FPU */
	preempt_disable();

#ifndef CONFIG_LAZY_SAVE_FPU
	if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
		tsk->thread.uregs->epsw &= ~EPSW_FE;
		tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
	}
#else /* !CONFIG_LAZY_SAVE_FPU */
	if (fpu_state_owner == tsk) {
		fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
		fpu_state_owner = NULL;
	}
#endif /* !CONFIG_LAZY_SAVE_FPU */

	preempt_enable();

	/* we no longer have a valid current FPU state */
	clear_using_fpu(tsk);
}

/*
 * restore the FPU state from a signal context
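 * - returns 0 on success, or the number of bytes that could not be copied in
 *   from the userspace buffer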
 */
int fpu_restore_sigcontext(struct fpucontext *fpucontext)
{
	struct task_struct *tsk = current;
	int ret;

	/* load up the old FPU state */
	ret = copy_from_user(&tsk->thread.fpu_state, fpucontext,
			     min(sizeof(struct fpu_state_struct),
				 sizeof(struct fpucontext)));
	if (!ret)
		set_using_fpu(tsk);

	return ret;
}

/*
 * fill in the FPU structure for a core dump
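 * - returns 1 if the FPU registers were filled in, or 0 if the task has not
 *   used the FPU (in which case the dump omits them)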
 */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpreg)
{
	struct task_struct *tsk = current;
	int fpvalid;

	fpvalid = is_using_fpu(tsk);
	if (fpvalid) {
		unlazy_fpu(tsk);
		memcpy(fpreg, &tsk->thread.fpu_state, sizeof(*fpreg));
	}

	return fpvalid;
}