git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - arch/xtensa/kernel/coprocessor.S
Merge tag 'powerpc-4.15-6' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc...
[mirror_ubuntu-bionic-kernel.git] / arch / xtensa / kernel / coprocessor.S
1 /*
2 * arch/xtensa/kernel/coprocessor.S
3 *
4 * Xtensa processor configuration-specific table of coprocessor and
5 * other custom register layout information.
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 *
11 * Copyright (C) 2003 - 2007 Tensilica Inc.
12 */
13
14
15 #include <linux/linkage.h>
16 #include <asm/asm-offsets.h>
17 #include <asm/processor.h>
18 #include <asm/coprocessor.h>
19 #include <asm/thread_info.h>
20 #include <asm/asm-uaccess.h>
21 #include <asm/unistd.h>
22 #include <asm/ptrace.h>
23 #include <asm/current.h>
24 #include <asm/pgtable.h>
25 #include <asm/page.h>
26 #include <asm/signal.h>
27 #include <asm/tlbflush.h>
28
29 #if XTENSA_HAVE_COPROCESSORS
30
31 /*
32 * Macros for lazy context switch.
33 */
34
/*
 * SAVE_CP_REGS(x): emit the register-save routine for coprocessor x.
 * The routine is reached indirectly (callx0/jx) with a2 = save-area
 * address and a0 = return address; a4..a7 are scratch for the xchal
 * store sequence.  If coprocessor x is not configured, the routine
 * degenerates to a plain 'jx a0'.
 */
#define SAVE_CP_REGS(x)						\
	.align 4;						\
	.Lsave_cp_regs_cp##x:					\
	.if XTENSA_HAVE_COPROCESSOR(x);				\
		xchal_cp##x##_store a2 a4 a5 a6 a7;		\
	.endif;							\
	jx	a0
42
/*
 * SAVE_CP_REGS_TAB(x): emit one 8-byte jump-table entry for coprocessor x:
 * word 0 = offset of the save routine from the table base (0 when CP x is
 * not configured), word 1 = offset of CP x's register save area within
 * the thread structure (THREAD_XTREGS_CP##x from asm-offsets).
 * NOTE: entries are 8 bytes, so lookups must index with addx8.
 */
#define SAVE_CP_REGS_TAB(x)						\
	.if XTENSA_HAVE_COPROCESSOR(x);					\
		.long .Lsave_cp_regs_cp##x - .Lsave_cp_regs_jump_table;	\
	.else;								\
		.long 0;						\
	.endif;								\
	.long THREAD_XTREGS_CP##x
50
51
/*
 * LOAD_CP_REGS(x): emit the register-load routine for coprocessor x.
 * Mirror image of SAVE_CP_REGS: called indirectly with a2 = save-area
 * address and a0 = return address; a4..a7 are scratch for the xchal
 * load sequence.  A plain 'jx a0' when CP x is not configured.
 */
#define LOAD_CP_REGS(x)						\
	.align 4;						\
	.Lload_cp_regs_cp##x:					\
	.if XTENSA_HAVE_COPROCESSOR(x);				\
		xchal_cp##x##_load a2 a4 a5 a6 a7;		\
	.endif;							\
	jx	a0
59
/*
 * LOAD_CP_REGS_TAB(x): emit one 8-byte jump-table entry for coprocessor x:
 * word 0 = offset of the load routine from the table base (0 when CP x is
 * not configured), word 1 = offset of CP x's register save area within
 * the thread structure.
 * NOTE: entries are 8 bytes, so lookups must index with addx8.
 */
#define LOAD_CP_REGS_TAB(x)						\
	.if XTENSA_HAVE_COPROCESSOR(x);					\
		.long .Lload_cp_regs_cp##x - .Lload_cp_regs_jump_table;	\
	.else;								\
		.long 0;						\
	.endif;								\
	.long THREAD_XTREGS_CP##x
67
/* Instantiate one save and one load routine per potential coprocessor. */

SAVE_CP_REGS(0)
SAVE_CP_REGS(1)
SAVE_CP_REGS(2)
SAVE_CP_REGS(3)
SAVE_CP_REGS(4)
SAVE_CP_REGS(5)
SAVE_CP_REGS(6)
SAVE_CP_REGS(7)

LOAD_CP_REGS(0)
LOAD_CP_REGS(1)
LOAD_CP_REGS(2)
LOAD_CP_REGS(3)
LOAD_CP_REGS(4)
LOAD_CP_REGS(5)
LOAD_CP_REGS(6)
LOAD_CP_REGS(7)

/*
 * Jump tables: one 8-byte entry per coprocessor (routine offset relative
 * to the table base, then the xtregs save-area offset).  All users index
 * them with addx8.
 */

	.align 4
.Lsave_cp_regs_jump_table:
	SAVE_CP_REGS_TAB(0)
	SAVE_CP_REGS_TAB(1)
	SAVE_CP_REGS_TAB(2)
	SAVE_CP_REGS_TAB(3)
	SAVE_CP_REGS_TAB(4)
	SAVE_CP_REGS_TAB(5)
	SAVE_CP_REGS_TAB(6)
	SAVE_CP_REGS_TAB(7)

.Lload_cp_regs_jump_table:
	LOAD_CP_REGS_TAB(0)
	LOAD_CP_REGS_TAB(1)
	LOAD_CP_REGS_TAB(2)
	LOAD_CP_REGS_TAB(3)
	LOAD_CP_REGS_TAB(4)
	LOAD_CP_REGS_TAB(5)
	LOAD_CP_REGS_TAB(6)
	LOAD_CP_REGS_TAB(7)
106
107 /*
108 * coprocessor_save(buffer, index)
109 * a2 a3
110 * coprocessor_load(buffer, index)
111 * a2 a3
112 *
113 * Save or load coprocessor registers for coprocessor 'index'.
 * The register values are saved to or loaded from the 'buffer' address.
115 *
116 * Note that these functions don't update the coprocessor_owner information!
117 *
118 */
119
/* coprocessor_save: see block comment above — a2 = buffer, a3 = CP index. */
ENTRY(coprocessor_save)

	entry	a1, 32
	s32i	a0, a1, 0		# preserve return address across callx0
	movi	a0, .Lsave_cp_regs_jump_table
	addx8	a3, a3, a0		# 8-byte entries: a3 = &table[index]
	l32i	a3, a3, 0		# routine offset (0 if CP not configured)
	beqz	a3, 1f			# nothing to save for an absent CP
	add	a0, a0, a3		# absolute address of save routine
	callx0	a0			# routine returns via 'jx a0'
1:	l32i	a0, a1, 0
	retw

ENDPROC(coprocessor_save)
134
/*
 * coprocessor_load(buffer, index): load coprocessor 'index' registers
 * from 'buffer' (a2 = buffer, a3 = index).  Does not update
 * coprocessor_owner.
 */
ENTRY(coprocessor_load)

	entry	a1, 32
	s32i	a0, a1, 0		# preserve return address across callx0
	movi	a0, .Lload_cp_regs_jump_table
	/*
	 * Jump-table entries are 8 bytes (routine offset + xtregs offset),
	 * so index with addx8 — addx4 would read the wrong word for any
	 * index > 0.  Matches coprocessor_save/flush and fast_coprocessor.
	 */
	addx8	a3, a3, a0		# a3 = &table[index]
	l32i	a3, a3, 0		# routine offset (0 if CP not configured)
	beqz	a3, 1f			# nothing to load for an absent CP
	add	a0, a0, a3		# absolute address of load routine
	callx0	a0			# routine returns via 'jx a0'
1:	l32i	a0, a1, 0
	retw

ENDPROC(coprocessor_load)
149
/*
 * coprocessor_flush(struct thread_info*, index)
 *                             a2          a3
 * coprocessor_restore(struct thread_info*, index)
 *                              a2          a3
 *
 * Save or load coprocessor registers for coprocessor 'index'.
 * The register values are saved to or loaded from the coprocessor area
 * inside the thread_info structure.
 *
 * Note that these functions don't update the coprocessor_owner information!
 *
 */
163
164
/* coprocessor_flush: a2 = thread pointer, a3 = CP index (see comment above). */
ENTRY(coprocessor_flush)

	entry	a1, 32
	s32i	a0, a1, 0		# preserve return address across callx0
	movi	a0, .Lsave_cp_regs_jump_table
	addx8	a3, a3, a0		# 8-byte entries: a3 = &table[index]
	l32i	a4, a3, 4		# a4 = xtregs offset within the thread
	l32i	a3, a3, 0		# routine offset (0 if CP not configured)
	add	a2, a2, a4		# a2 = address of CP save area
	beqz	a3, 1f			# nothing to save for an absent CP
	add	a0, a0, a3		# absolute address of save routine
	callx0	a0			# routine returns via 'jx a0'
1:	l32i	a0, a1, 0
	retw

ENDPROC(coprocessor_flush)
181
/*
 * coprocessor_restore: a2 = thread pointer, a3 = CP index.  Load the
 * CP's registers from the save area inside the thread structure.
 * Does not update coprocessor_owner.
 */
ENTRY(coprocessor_restore)
	entry	a1, 32
	s32i	a0, a1, 0		# preserve return address across callx0
	movi	a0, .Lload_cp_regs_jump_table
	/*
	 * Jump-table entries are 8 bytes (routine offset + xtregs offset),
	 * so index with addx8 — addx4 would read the wrong word for any
	 * index > 0.  Matches coprocessor_save/flush and fast_coprocessor.
	 */
	addx8	a3, a3, a0		# a3 = &table[index]
	l32i	a4, a3, 4		# a4 = xtregs offset within the thread
	l32i	a3, a3, 0		# routine offset (0 if CP not configured)
	add	a2, a2, a4		# a2 = address of CP save area
	beqz	a3, 1f			# nothing to load for an absent CP
	add	a0, a0, a3		# absolute address of load routine
	callx0	a0			# routine returns via 'jx a0'
1:	l32i	a0, a1, 0
	retw

ENDPROC(coprocessor_restore)
197
198 /*
199 * Entry condition:
200 *
201 * a0: trashed, original value saved on stack (PT_AREG0)
202 * a1: a1
203 * a2: new stack pointer, original in DEPC
204 * a3: a3
205 * depc: a2, original value saved on stack (PT_DEPC)
206 * excsave_1: dispatch table
207 *
208 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
209 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
210 */
211
/*
 * Double exception raised while the coprocessor-disabled handler was
 * running: the state cannot be recovered.  Park a0 in EXCSAVE_1 and
 * call unrecoverable_exception (does not return here).
 */
ENTRY(fast_coprocessor_double)

	wsr	a0, excsave1
	movi	a0, unrecoverable_exception
	callx0	a0

ENDPROC(fast_coprocessor_double)
219
/*
 * fast_coprocessor: first-level handler for the coprocessor-disabled
 * exceptions.  Implements the lazy context switch: enable the faulting
 * coprocessor, save its registers into the previous owner's context (if
 * there is one), load the current thread's saved registers, record the
 * new owner in coprocessor_owner[], and retry the faulting instruction
 * via rfe.  Entry conditions are described in the comment above.
 */
ENTRY(fast_coprocessor)

	/* Save remaining registers a1-a3 and SAR */

	s32i	a3, a2, PT_AREG3
	rsr	a3, sar
	s32i	a1, a2, PT_AREG1
	s32i	a3, a2, PT_SAR
	mov	a1, a2			# a1 = exception frame from here on
	rsr	a2, depc		# original a2 was parked in DEPC
	s32i	a2, a1, PT_AREG2

	/*
	 * The hal macros require up to 4 temporary registers. We use a3..a6.
	 */

	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6

	/* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */

	rsr	a3, exccause
	addi	a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED

	/* Set corresponding CPENABLE bit -> (sar:cp-index, a3: 1<<cp-index)*/

	ssl	a3			# SAR: 32 - coprocessor_number
	movi	a2, 1
	rsr	a0, cpenable
	sll	a2, a2			# a2 = 1 << cp-index (via SAR)
	or	a0, a0, a2
	wsr	a0, cpenable
	rsync				# make CPENABLE update take effect

	/* Retrieve previous owner. (a3 still holds CP number) */

	movi	a0, coprocessor_owner	# list of owners
	addx4	a0, a3, a0		# entry for CP (4-byte pointers here)
	l32i	a4, a0, 0

	beqz	a4, 1f			# skip 'save' if no previous owner

	/* Disable coprocessor for previous owner. (a2 = 1 << CP number) */

	l32i	a5, a4, THREAD_CPENABLE
	xor	a5, a5, a2		# clear the bit; (1 << cp-id) still in a2
	s32i	a5, a4, THREAD_CPENABLE

	/*
	 * Get context save area and 'call' save routine.
	 * (a4 still holds previous owner (thread_info), a3 CP number)
	 */

	movi	a5, .Lsave_cp_regs_jump_table
	movi	a0, 2f			# a0: 'return' address for the routine
	addx8	a3, a3, a5		# a3: &save table[cp] (8-byte entries)
	l32i	a2, a3, 4		# a2: xtregs offset
	l32i	a3, a3, 0		# a3: jump offset
	add	a2, a2, a4		# a2: save area inside previous owner
	add	a4, a3, a5		# a4: address of save routine
	jx	a4

	/* Note that only a0 and a1 were preserved. */

2:	rsr	a3, exccause		# recompute CP number ...
	addi	a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
	movi	a0, coprocessor_owner	# ... and its owner slot
	addx4	a0, a3, a0

	/* Set new 'owner' (a0 points to the CP owner, a3 contains the CP nr) */

1:	GET_THREAD_INFO (a4, a1)
	s32i	a4, a0, 0

	/* Get context save area and 'call' load routine. */

	movi	a5, .Lload_cp_regs_jump_table
	movi	a0, 1f			# a0: 'return' address for the routine
	addx8	a3, a3, a5		# a3: &load table[cp] (8-byte entries)
	l32i	a2, a3, 4		# a2: xtregs offset
	l32i	a3, a3, 0		# a3: jump offset
	add	a2, a2, a4		# a2: save area inside new owner
	add	a4, a3, a5		# a4: address of load routine
	jx	a4

	/* Restore all registers and return from exception handler. */

1:	l32i	a6, a1, PT_AREG6
	l32i	a5, a1, PT_AREG5
	l32i	a4, a1, PT_AREG4

	l32i	a0, a1, PT_SAR
	l32i	a3, a1, PT_AREG3
	l32i	a2, a1, PT_AREG2
	wsr	a0, sar
	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1

	rfe

ENDPROC(fast_coprocessor)
322
	.data

/*
 * coprocessor_owner[XCHAL_CP_MAX]: per-coprocessor pointer (32-bit word)
 * to the thread whose context currently lives in that coprocessor's
 * registers; 0 means no owner.  Written by fast_coprocessor above.
 */
ENTRY(coprocessor_owner)

	.fill XCHAL_CP_MAX, 4, 0

END(coprocessor_owner)
330
331 #endif /* XTENSA_HAVE_COPROCESSORS */