/*
 * arch/mips/kernel/r4k_switch.S
 * (from the mirror_ubuntu-zesty-kernel tree, git.proxmox.com mirror)
 */
1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle
7 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
8 * Copyright (C) 1994, 1995, 1996, by Andreas Busse
9 * Copyright (C) 1999 Silicon Graphics, Inc.
10 * Copyright (C) 2000 MIPS Technologies, Inc.
11 * written by Carsten Langgaard, carstenl@mips.com
12 */
13 #include <asm/asm.h>
14 #include <asm/cachectl.h>
15 #include <asm/fpregdef.h>
16 #include <asm/mipsregs.h>
17 #include <asm/asm-offsets.h>
18 #include <asm/pgtable-bits.h>
19 #include <asm/regdef.h>
20 #include <asm/stackframe.h>
21 #include <asm/thread_info.h>
22
23 #include <asm/asmmacro.h>
24
25 /* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
26 #undef fp
27
28 /*
29 * Offset to the current process status flags, the first 32 bytes of the
30 * stack are not used.
31 */
32 #define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
33
34 #ifndef USE_ALTERNATE_RESUME_IMPL
35 /*
36 * task_struct *resume(task_struct *prev, task_struct *next,
37 * struct thread_info *next_ti, s32 fp_save)
38 */
39 .align 5
40 LEAF(resume)
	/*
	 * Arguments (MIPS o32/n64 kernel ABI):
	 *   a0 = prev task_struct, a1 = next task_struct,
	 *   a2 = next's thread_info, a3 = fp_save (FP_SAVE_ enum value).
	 * Returns prev in v0.
	 */
	/* Save prev's CP0 Status, callee-saved registers and ra. */
41 mfc0 t1, CP0_STATUS
42 LONG_S t1, THREAD_STATUS(a0)
43 cpu_save_nonscratch a0
44 LONG_S ra, THREAD_REG31(a0)
45
46 /*
47 * Check whether we need to save any FP context. FP context is saved
48 * iff the process has used the context with the scalar FPU or the MSA
49 * ASE in the current time slice, as indicated by _TIF_USEDFPU and
50 * _TIF_USEDMSA respectively. switch_to will have set fp_save
51 * accordingly to an FP_SAVE_ enum value.
52 */
	/* a3 == 0 (FP_SAVE_NONE): nothing to save, go straight to restore. */
53 beqz a3, 2f
54
55 /*
56 * We do. Clear the saved CU1 bit for prev, such that next time it is
57 * scheduled it will start in userland with the FPU disabled. If the
58 * task uses the FPU then it will be enabled again via the do_cpu trap.
59 * This allows us to lazily restore the FP context.
60 */
	/* ST_OFF addresses the saved user pt_regs status on prev's kernel stack. */
61 PTR_L t3, TASK_THREAD_INFO(a0)
62 LONG_L t0, ST_OFF(t3)
63 li t1, ~ST0_CU1
64 and t0, t0, t1
65 LONG_S t0, ST_OFF(t3)
66
67 /* Check whether we're saving scalar or vector context. */
	/* a3 > 0: scalar FP only; fall through (a3 < 0): full MSA context. */
68 bgtz a3, 1f
69
70 /* Save 128b MSA vector context + scalar FP control & status. */
71 .set push
72 SET_HARDFLOAT
73 cfc1 t1, fcr31
74 msa_save_all a0
75 .set pop /* SET_HARDFLOAT */
76
77 sw t1, THREAD_FCR31(a0)
78 b 2f
79
80 1: /* Save 32b/64b scalar FP context. */
81 fpu_save_double a0 t0 t1 # c0_status passed in t0
82 # clobbers t1
83 2:
84
85 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	/* UP only: publish next's stack canary in the global guard word. */
86 PTR_LA t8, __stack_chk_guard
87 LONG_L t9, TASK_STACK_CANARY(a1)
88 LONG_S t9, 0(t8)
89 #endif
90
91 /*
92 * The order of restoring the registers takes care of the race
93 * updating $28, $29 and kernelsp without disabling ints.
94 */
	/* $28 = gp = next's thread_info; must be set before restoring sp. */
95 move $28, a2
96 cpu_restore_nonscratch a1
97
	/* Record the top of next's kernel stack for exception entry. */
98 PTR_ADDU t0, $28, _THREAD_SIZE - 32
99 set_saved_sp t0, t1, t2
	/*
	 * Build next's new c0_status: keep the live interrupt-enable and
	 * interrupt-mask bits (mask 0xff01: bit 0 = IE, bits 15:8 = IM)
	 * from the current Status, take everything else from next's
	 * saved Status.
	 */
100 mfc0 t1, CP0_STATUS /* Do we really need this? */
101 li a3, 0xff01
102 and t1, a3
103 LONG_L a2, THREAD_STATUS(a1)
104 nor a3, $0, a3
105 and a2, a3
106 or a2, t1
107 mtc0 a2, CP0_STATUS
	/* resume() returns prev. */
108 move v0, a0
109 jr ra
110 END(resume)
111
112 #endif /* USE_ALTERNATE_RESUME_IMPL */
113
114 /*
115 * Save a thread's fp context.
116 */
117 LEAF(_save_fp)
	/*
	 * Save the scalar FP register file (and FCSR, via fpu_save_double)
	 * of the task whose thread struct is in a0.
	 * On 64-bit or MIPS32r2 CPUs the live c0_status is passed in t0 —
	 * presumably so fpu_save_double can test Status.FR for the
	 * 16- vs 32-register layout (cf. the t0 use in resume above).
	 * Clobbers t0 and t1.
	 */
118 #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
119 mfc0 t0, CP0_STATUS
120 #endif
121 fpu_save_double a0 t0 t1 # clobbers t1
122 jr ra
123 END(_save_fp)
124
125 /*
126 * Restore a thread's fp context.
127 */
128 LEAF(_restore_fp)
	/*
	 * Restore the scalar FP register file of the task whose thread
	 * struct is in a0 — mirror image of _save_fp.
	 * On 64-bit or MIPS32r2 CPUs the live c0_status is passed in t0
	 * (assumed for the Status.FR register-layout check — see _save_fp).
	 * Clobbers t0 and t1.
	 */
129 #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
130 mfc0 t0, CP0_STATUS
131 #endif
132 fpu_restore_double a0 t0 t1 # clobbers t1
133 jr ra
134 END(_restore_fp)
135
136 #ifdef CONFIG_CPU_HAS_MSA
137
138 /*
139 * Save a thread's MSA vector context.
140 */
141 LEAF(_save_msa)
	/* Save the full 128-bit MSA vector register set of the task
	 * whose thread struct is in a0 (msa_save_all macro from asmmacro.h). */
142 msa_save_all a0
143 jr ra
144 END(_save_msa)
145
146 /*
147 * Restore a thread's MSA vector context.
148 */
149 LEAF(_restore_msa)
	/* Restore the full 128-bit MSA vector register set of the task
	 * whose thread struct is in a0 — mirror image of _save_msa. */
150 msa_restore_all a0
151 jr ra
152 END(_restore_msa)
153
154 LEAF(_init_msa_upper)
	/* Initialise the upper halves of the MSA vector registers
	 * (the bits not shared with the scalar FP registers);
	 * implementation is the msa_init_all_upper macro in asmmacro.h. */
155 msa_init_all_upper
156 jr ra
157 END(_init_msa_upper)
158
159 #endif
160
161 /*
162 * Load the FPU with signalling NANS. This bit pattern we're using has
163 * the property that no matter whether considered as single or as double
164 * precision represents signaling NANS.
165 *
166 * We initialize fcr31 to rounding to nearest, no exceptions.
167 */
168
169 #define FPU_DEFAULT 0x00000000
170
171 .set push
172 SET_HARDFLOAT
173
174 LEAF(_init_fpu)
	/*
	 * Enable the FPU on the current CPU and fill every FP register
	 * with an all-ones pattern (signalling NaN as both single and
	 * double — see the comment block above); set FCSR to FPU_DEFAULT
	 * (0: round-to-nearest, all exceptions masked).
	 * Clobbers t0, t1.
	 */
	/* Set Status.CU1 so coprocessor-1 instructions are usable. */
175 mfc0 t0, CP0_STATUS
176 li t1, ST0_CU1
177 or t0, t1
178 mtc0 t0, CP0_STATUS
	/* Wait for the CU1 write to take effect before touching the FPU. */
179 enable_fpu_hazard
180
181 li t1, FPU_DEFAULT
182 ctc1 t1, fcr31
183
184 li t1, -1 # SNaN
185
186 #ifdef CONFIG_64BIT
	/*
	 * Shift Status.FR into the sign bit: if FR is clear the CPU is in
	 * 16-register (even-only) FP mode and the odd registers are
	 * skipped; the even registers are written further below.
	 */
187 sll t0, t0, 5
188 bgez t0, 1f # 16 / 32 register mode?
189
190 dmtc1 t1, $f1
191 dmtc1 t1, $f3
192 dmtc1 t1, $f5
193 dmtc1 t1, $f7
194 dmtc1 t1, $f9
195 dmtc1 t1, $f11
196 dmtc1 t1, $f13
197 dmtc1 t1, $f15
198 dmtc1 t1, $f17
199 dmtc1 t1, $f19
200 dmtc1 t1, $f21
201 dmtc1 t1, $f23
202 dmtc1 t1, $f25
203 dmtc1 t1, $f27
204 dmtc1 t1, $f29
205 dmtc1 t1, $f31
206 1:
207 #endif
208
209 #ifdef CONFIG_CPU_MIPS32
	/* MIPS32: write the low 32 bits of all 32 FP registers. */
210 mtc1 t1, $f0
211 mtc1 t1, $f1
212 mtc1 t1, $f2
213 mtc1 t1, $f3
214 mtc1 t1, $f4
215 mtc1 t1, $f5
216 mtc1 t1, $f6
217 mtc1 t1, $f7
218 mtc1 t1, $f8
219 mtc1 t1, $f9
220 mtc1 t1, $f10
221 mtc1 t1, $f11
222 mtc1 t1, $f12
223 mtc1 t1, $f13
224 mtc1 t1, $f14
225 mtc1 t1, $f15
226 mtc1 t1, $f16
227 mtc1 t1, $f17
228 mtc1 t1, $f18
229 mtc1 t1, $f19
230 mtc1 t1, $f20
231 mtc1 t1, $f21
232 mtc1 t1, $f22
233 mtc1 t1, $f23
234 mtc1 t1, $f24
235 mtc1 t1, $f25
236 mtc1 t1, $f26
237 mtc1 t1, $f27
238 mtc1 t1, $f28
239 mtc1 t1, $f29
240 mtc1 t1, $f30
241 mtc1 t1, $f31
242
243 #ifdef CONFIG_CPU_MIPS32_R2
	/*
	 * MIPS32r2 with Status.FR set: the FP registers are 64 bits wide,
	 * so also fill the upper 32 bits of each via mthc1 (requires
	 * fp=64 assembler mode; the "#undef fp" at the top of the file
	 * keeps "fp" usable here).
	 */
244 .set push
245 .set mips32r2
246 .set fp=64
247 sll t0, t0, 5 # is Status.FR set?
248 bgez t0, 1f # no: skip setting upper 32b
249
250 mthc1 t1, $f0
251 mthc1 t1, $f1
252 mthc1 t1, $f2
253 mthc1 t1, $f3
254 mthc1 t1, $f4
255 mthc1 t1, $f5
256 mthc1 t1, $f6
257 mthc1 t1, $f7
258 mthc1 t1, $f8
259 mthc1 t1, $f9
260 mthc1 t1, $f10
261 mthc1 t1, $f11
262 mthc1 t1, $f12
263 mthc1 t1, $f13
264 mthc1 t1, $f14
265 mthc1 t1, $f15
266 mthc1 t1, $f16
267 mthc1 t1, $f17
268 mthc1 t1, $f18
269 mthc1 t1, $f19
270 mthc1 t1, $f20
271 mthc1 t1, $f21
272 mthc1 t1, $f22
273 mthc1 t1, $f23
274 mthc1 t1, $f24
275 mthc1 t1, $f25
276 mthc1 t1, $f26
277 mthc1 t1, $f27
278 mthc1 t1, $f28
279 mthc1 t1, $f29
280 mthc1 t1, $f30
281 mthc1 t1, $f31
282 1: .set pop
283 #endif /* CONFIG_CPU_MIPS32_R2 */
284 #else
	/*
	 * Not MIPS32 (i.e. a 64-bit build here): write the 16
	 * even-numbered registers as doubles; the odd set was handled
	 * above in the CONFIG_64BIT FR-mode block.
	 */
285 .set arch=r4000
286 dmtc1 t1, $f0
287 dmtc1 t1, $f2
288 dmtc1 t1, $f4
289 dmtc1 t1, $f6
290 dmtc1 t1, $f8
291 dmtc1 t1, $f10
292 dmtc1 t1, $f12
293 dmtc1 t1, $f14
294 dmtc1 t1, $f16
295 dmtc1 t1, $f18
296 dmtc1 t1, $f20
297 dmtc1 t1, $f22
298 dmtc1 t1, $f24
299 dmtc1 t1, $f26
300 dmtc1 t1, $f28
301 dmtc1 t1, $f30
302 #endif
303 jr ra
304 END(_init_fpu)
305
306 .set pop /* SET_HARDFLOAT */