/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1994, 1995, 1996, by Andreas Busse
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000 MIPS Technologies, Inc.
 *    written by Carsten Langgaard, carstenl@mips.com
 */
#include <asm/asm.h>
#include <asm/cachectl.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable-bits.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/thread_info.h>

#include <asm/asmmacro.h>

/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
#undef fp

/*
 * Offset to the current process status flags, the first 32 bytes of the
 * stack are not used.
 */
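/*
 * In other words, ST_OFF locates the cp0_status slot of the struct pt_regs
 * sitting just below the 32 reserved bytes at the top of the kernel stack,
 * expressed as an offset from the thread_info at the base of the stack.
 */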
#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)

#ifndef USE_ALTERNATE_RESUME_IMPL
/*
 * task_struct *resume(task_struct *prev, task_struct *next,
 *		       struct thread_info *next_ti, s32 fp_save)
 */
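/*
 * Per the MIPS calling convention the arguments arrive in a0 (prev),
 * a1 (next), a2 (next_ti) and a3 (fp_save).
 */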
	.align	5
	LEAF(resume)
	mfc0	t1, CP0_STATUS
	LONG_S	t1, THREAD_STATUS(a0)
	cpu_save_nonscratch a0
	LONG_S	ra, THREAD_REG31(a0)

	/*
	 * Check whether we need to save any FP context. FP context is saved
	 * iff the process has used the context with the scalar FPU or the MSA
	 * ASE in the current time slice, as indicated by _TIF_USEDFPU and
	 * _TIF_USEDMSA respectively. switch_to will have set fp_save to the
	 * appropriate FP_SAVE_ enum value.
	 */
	beqz	a3, 2f

	/*
	 * We do. Clear the saved CU1 bit for prev, such that next time it is
	 * scheduled it will start in userland with the FPU disabled. If the
	 * task uses the FPU then it will be enabled again via the do_cpu trap.
	 * This allows us to lazily restore the FP context.
	 */
	PTR_L	t3, TASK_THREAD_INFO(a0)
	LONG_L	t0, ST_OFF(t3)
	li	t1, ~ST0_CU1
	and	t0, t0, t1
	LONG_S	t0, ST_OFF(t3)

	/* Check whether we're saving scalar or vector context. */
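	/*
	 * fp_save is positive when only scalar FP state needs saving and
	 * negative when MSA vector state does (zero was handled by the
	 * beqz above), so bgtz selects the scalar-only path at 1f.
	 */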
	bgtz	a3, 1f

	/* Save 128b MSA vector context + scalar FP control & status. */
	.set push
	SET_HARDFLOAT
	cfc1	t1, fcr31
	msa_save_all	a0
	.set pop	/* SET_HARDFLOAT */

	sw	t1, THREAD_FCR31(a0)
	b	2f

1:	/* Save 32b/64b scalar FP context. */
	fpu_save_double a0 t0 t1		# c0_status passed in t0
						# clobbers t1
2:

#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
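	/*
	 * Without SMP there is a single global canary, so propagate the
	 * incoming task's stack canary into __stack_chk_guard.
	 */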
	PTR_LA	t8, __stack_chk_guard
	LONG_L	t9, TASK_STACK_CANARY(a1)
	LONG_S	t9, 0(t8)
#endif

	/*
	 * The order in which the registers are restored takes care of the
	 * race when updating $28, $29 and kernelsp, without having to
	 * disable interrupts.
	 */
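	/*
	 * $28 (gp) holds the kernel-mode thread_info pointer; the stack
	 * pointer is reloaded by cpu_restore_nonscratch, and set_saved_sp
	 * below records the new kernel stack for exception entry from
	 * user mode.
	 */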
	move	$28, a2
	cpu_restore_nonscratch a1

	PTR_ADDU	t0, $28, _THREAD_SIZE - 32
	set_saved_sp	t0, t1, t2
	mfc0	t1, CP0_STATUS		/* Do we really need this? */
	li	a3, 0xff01
	and	t1, a3
	LONG_L	a2, THREAD_STATUS(a1)
	nor	a3, $0, a3
	and	a2, a3
	or	a2, t1
	mtc0	a2, CP0_STATUS
	move	v0, a0
	jr	ra
	END(resume)

#endif /* USE_ALTERNATE_RESUME_IMPL */

/*
 * Save a thread's fp context.
 */
LEAF(_save_fp)
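	/*
	 * On 64-bit and MIPS32r2/r6 kernels fpu_save_double consults
	 * Status.FR (passed in t0) to decide whether the odd-numbered FP
	 * registers must be saved as well, hence the mfc0 below.
	 */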
#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
		defined(CONFIG_CPU_MIPS32_R6)
	mfc0	t0, CP0_STATUS
#endif
	fpu_save_double a0 t0 t1		# clobbers t1
	jr	ra
	END(_save_fp)

/*
 * Restore a thread's fp context.
 */
LEAF(_restore_fp)
#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
		defined(CONFIG_CPU_MIPS32_R6)
	mfc0	t0, CP0_STATUS
#endif
	fpu_restore_double a0 t0 t1		# clobbers t1
	jr	ra
	END(_restore_fp)

#ifdef CONFIG_CPU_HAS_MSA

/*
 * Save a thread's MSA vector context.
 */
LEAF(_save_msa)
	msa_save_all	a0
	jr	ra
	END(_save_msa)

/*
 * Restore a thread's MSA vector context.
 */
LEAF(_restore_msa)
	msa_restore_all	a0
	jr	ra
	END(_restore_msa)

LEAF(_init_msa_upper)
	msa_init_all_upper
	jr	ra
	END(_init_msa_upper)

#endif

/*
 * Load the FPU with signalling NaNs. The bit pattern we use represents a
 * signalling NaN regardless of whether it is interpreted as single or as
 * double precision.
 *
 * We initialize fcr31 to rounding to nearest, no exceptions.
 */

#define FPU_DEFAULT  0x00000000

	.set push
	SET_HARDFLOAT

LEAF(_init_fpu)
	mfc0	t0, CP0_STATUS
	li	t1, ST0_CU1
	or	t0, t1
	mtc0	t0, CP0_STATUS
	enable_fpu_hazard

	li	t1, FPU_DEFAULT
	ctc1	t1, fcr31

	li	t1, -1				# SNaN

#ifdef CONFIG_64BIT
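	/*
	 * Status.FR is bit 26; shifting it left by 5 moves it into the
	 * sign bit, so bgez skips the odd-numbered registers when the FPU
	 * is in the 16-register (FR=0) mode.
	 */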
	sll	t0, t0, 5
	bgez	t0, 1f				# 16 / 32 register mode?

	dmtc1	t1, $f1
	dmtc1	t1, $f3
	dmtc1	t1, $f5
	dmtc1	t1, $f7
	dmtc1	t1, $f9
	dmtc1	t1, $f11
	dmtc1	t1, $f13
	dmtc1	t1, $f15
	dmtc1	t1, $f17
	dmtc1	t1, $f19
	dmtc1	t1, $f21
	dmtc1	t1, $f23
	dmtc1	t1, $f25
	dmtc1	t1, $f27
	dmtc1	t1, $f29
	dmtc1	t1, $f31
1:
#endif

#ifdef CONFIG_CPU_MIPS32
	mtc1	t1, $f0
	mtc1	t1, $f1
	mtc1	t1, $f2
	mtc1	t1, $f3
	mtc1	t1, $f4
	mtc1	t1, $f5
	mtc1	t1, $f6
	mtc1	t1, $f7
	mtc1	t1, $f8
	mtc1	t1, $f9
	mtc1	t1, $f10
	mtc1	t1, $f11
	mtc1	t1, $f12
	mtc1	t1, $f13
	mtc1	t1, $f14
	mtc1	t1, $f15
	mtc1	t1, $f16
	mtc1	t1, $f17
	mtc1	t1, $f18
	mtc1	t1, $f19
	mtc1	t1, $f20
	mtc1	t1, $f21
	mtc1	t1, $f22
	mtc1	t1, $f23
	mtc1	t1, $f24
	mtc1	t1, $f25
	mtc1	t1, $f26
	mtc1	t1, $f27
	mtc1	t1, $f28
	mtc1	t1, $f29
	mtc1	t1, $f30
	mtc1	t1, $f31

#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6)
	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	fp=64
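	/*
	 * With Status.FR set the FP registers are 64 bits wide and the
	 * mtc1 sequence above only wrote their lower halves; mthc1 fills
	 * in the upper 32 bits.
	 */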
	sll	t0, t0, 5			# is Status.FR set?
	bgez	t0, 1f				# no: skip setting upper 32b

	mthc1	t1, $f0
	mthc1	t1, $f1
	mthc1	t1, $f2
	mthc1	t1, $f3
	mthc1	t1, $f4
	mthc1	t1, $f5
	mthc1	t1, $f6
	mthc1	t1, $f7
	mthc1	t1, $f8
	mthc1	t1, $f9
	mthc1	t1, $f10
	mthc1	t1, $f11
	mthc1	t1, $f12
	mthc1	t1, $f13
	mthc1	t1, $f14
	mthc1	t1, $f15
	mthc1	t1, $f16
	mthc1	t1, $f17
	mthc1	t1, $f18
	mthc1	t1, $f19
	mthc1	t1, $f20
	mthc1	t1, $f21
	mthc1	t1, $f22
	mthc1	t1, $f23
	mthc1	t1, $f24
	mthc1	t1, $f25
	mthc1	t1, $f26
	mthc1	t1, $f27
	mthc1	t1, $f28
	mthc1	t1, $f29
	mthc1	t1, $f30
	mthc1	t1, $f31
1:	.set	pop
#endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */
#else
	.set	MIPS_ISA_ARCH_LEVEL_RAW
	dmtc1	t1, $f0
	dmtc1	t1, $f2
	dmtc1	t1, $f4
	dmtc1	t1, $f6
	dmtc1	t1, $f8
	dmtc1	t1, $f10
	dmtc1	t1, $f12
	dmtc1	t1, $f14
	dmtc1	t1, $f16
	dmtc1	t1, $f18
	dmtc1	t1, $f20
	dmtc1	t1, $f22
	dmtc1	t1, $f24
	dmtc1	t1, $f26
	dmtc1	t1, $f28
	dmtc1	t1, $f30
#endif
	jr	ra
	END(_init_fpu)

	.set pop	/* SET_HARDFLOAT */