Commit | Line | Data |
---|---|---|
5a0015d6 CZ |
1 | /* |
2 | * arch/xtensa/kernel/entry.S | |
3 | * | |
4 | * Low-level exception handling | |
5 | * | |
6 | * This file is subject to the terms and conditions of the GNU General Public | |
7 | * License. See the file "COPYING" in the main directory of this archive | |
8 | * for more details. | |
9 | * | |
2d1c645c | 10 | * Copyright (C) 2004 - 2008 by Tensilica Inc. |
5a0015d6 CZ |
11 | * |
12 | * Chris Zankel <chris@zankel.net> | |
13 | * | |
14 | */ | |
15 | ||
16 | #include <linux/linkage.h> | |
0013a854 | 17 | #include <asm/asm-offsets.h> |
5a0015d6 | 18 | #include <asm/processor.h> |
4573e398 | 19 | #include <asm/coprocessor.h> |
5a0015d6 CZ |
20 | #include <asm/thread_info.h> |
21 | #include <asm/uaccess.h> | |
22 | #include <asm/unistd.h> | |
23 | #include <asm/ptrace.h> | |
24 | #include <asm/current.h> | |
25 | #include <asm/pgtable.h> | |
26 | #include <asm/page.h> | |
27 | #include <asm/signal.h> | |
173d6681 | 28 | #include <asm/tlbflush.h> |
367b8112 | 29 | #include <variant/tie-asm.h> |
5a0015d6 CZ |
30 | |
31 | /* Unimplemented features. */ | |
32 | ||
5a0015d6 | 33 | #undef KERNEL_STACK_OVERFLOW_CHECK |
5a0015d6 CZ |
34 | |
35 | /* Not well tested. | |
36 | * | |
37 | * - fast_coprocessor | |
38 | */ | |
39 | ||
40 | /* | |
41 | * Macro to find first bit set in WINDOWBASE from the left + 1 | |
42 | * | |
43 | * 100....0 -> 1 | |
44 | * 010....0 -> 2 | |
45 | * 000....1 -> WSBITS | |
46 | */ | |
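/*
 * Worked example (assuming WSBITS = 8 and XCHAL_HAVE_NSA): for
 * \mask = 0b00100000 the nsau instruction returns 26 leading zeros,
 * so \bit = 26 + 8 - 32 + 1 = 3, i.e. the third window-start bit
 * counted from the left, matching the table above.
 */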
47 | ||
48 | .macro ffs_ws bit mask | |
49 | ||
50 | #if XCHAL_HAVE_NSA | |
51 | nsau \bit, \mask # 32-WSBITS ... 31 (32 iff 0) | |
52 | addi \bit, \bit, WSBITS - 32 + 1 # uppermost bit set -> return 1
53 | #else | |
54 | movi \bit, WSBITS | |
55 | #if WSBITS > 16 | |
56 | _bltui \mask, 0x10000, 99f | |
57 | addi \bit, \bit, -16 | |
58 | extui \mask, \mask, 16, 16 | |
59 | #endif | |
60 | #if WSBITS > 8 | |
61 | 99: _bltui \mask, 0x100, 99f | |
62 | addi \bit, \bit, -8 | |
63 | srli \mask, \mask, 8 | |
64 | #endif | |
65 | 99: _bltui \mask, 0x10, 99f | |
66 | addi \bit, \bit, -4 | |
67 | srli \mask, \mask, 4 | |
68 | 99: _bltui \mask, 0x4, 99f | |
69 | addi \bit, \bit, -2 | |
70 | srli \mask, \mask, 2 | |
71 | 99: _bltui \mask, 0x2, 99f | |
72 | addi \bit, \bit, -1 | |
73 | 99: | |
74 | ||
75 | #endif | |
76 | .endm | |
77 | ||
78 | /* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */ | |
79 | ||
80 | /* | |
81 | * First-level exception handler for user exceptions. | |
82 | * Save some special registers, extra states and all registers in the AR | |
83 | * register file that were in use in the user task, and jump to the common | |
84 | * exception code. | |
85 | * We save SAR (used to calculate WMASK), and WB and WS (we don't have to | |
86 | * save them for kernel exceptions). | |
87 | * | |
88 | * Entry condition for user_exception: | |
89 | * | |
90 | * a0: trashed, original value saved on stack (PT_AREG0) | |
91 | * a1: a1 | |
92 | * a2: new stack pointer, original value in depc | |
99d5040e | 93 | * a3: a3 |
5a0015d6 | 94 | * depc: a2, original value saved on stack (PT_DEPC) |
99d5040e | 95 | * excsave1: dispatch table |
5a0015d6 CZ |
96 | * |
97 | * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC | |
98 | * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception | |
99 | * | |
100 | * Entry condition for _user_exception: | |
101 | * | |
102 | * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC | |
103 | * excsave has been restored, and | |
104 | * stack pointer (a1) has been set. | |
105 | * | |
3ad2f3fb | 106 | * Note: _user_exception might be at an odd address. Don't use call0..call12 |
5a0015d6 CZ |
107 | */ |
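/*
 * Rough C sketch of the PT_DEPC convention above (illustrative names,
 * not a real kernel API):
 *
 *	if (regs->depc >= VALID_DOUBLE_EXCEPTION_ADDRESS)
 *		exit_with_rfde();	// double exception path
 *	else
 *		exit_with_rfe();	// regular exception path
 */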
108 | ||
109 | ENTRY(user_exception) | |
110 | ||
99d5040e | 111 | /* Save a1, a2, a3, and set SP. */ |
5a0015d6 | 112 | |
bc5378fc | 113 | rsr a0, depc |
5a0015d6 CZ |
114 | s32i a1, a2, PT_AREG1 |
115 | s32i a0, a2, PT_AREG2 | |
116 | s32i a3, a2, PT_AREG3 | |
117 | mov a1, a2 | |
118 | ||
119 | .globl _user_exception | |
120 | _user_exception: | |
121 | ||
122 | /* Save SAR and turn off single stepping */ | |
123 | ||
124 | movi a2, 0 | |
bc5378fc MF |
125 | rsr a3, sar |
126 | xsr a2, icountlevel | |
5a0015d6 | 127 | s32i a3, a1, PT_SAR |
29c4dfd9 | 128 | s32i a2, a1, PT_ICOUNTLEVEL |
5a0015d6 | 129 | |
c50842df CZ |
130 | #if XCHAL_HAVE_THREADPTR |
131 | rur a2, threadptr | |
132 | s32i a2, a1, PT_THREADPTR | |
133 | #endif | |
134 | ||
5a0015d6 CZ |
135 | /* Rotate ws so that the current windowbase is at bit0. */ |
136 | /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */ | |
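/* Concrete example (assuming WSBITS = 8): with WINDOWBASE = 3 and a    */
/* WINDOWSTART of 0b00101000, rotating right by 3 yields 0b00000101:    */
/* bit 0 is the current frame, bit 2 the next live frame above it.      */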
137 | ||
bc5378fc MF |
138 | rsr a2, windowbase |
139 | rsr a3, windowstart | |
5a0015d6 CZ |
140 | ssr a2 |
141 | s32i a2, a1, PT_WINDOWBASE | |
142 | s32i a3, a1, PT_WINDOWSTART | |
143 | slli a2, a3, 32-WSBITS | |
144 | src a2, a3, a2 | |
145 | srli a2, a2, 32-WSBITS | |
146 | s32i a2, a1, PT_WMASK # needed for restoring registers | |
147 | ||
148 | /* Save only live registers. */ | |
149 | ||
150 | _bbsi.l a2, 1, 1f | |
151 | s32i a4, a1, PT_AREG4 | |
152 | s32i a5, a1, PT_AREG5 | |
153 | s32i a6, a1, PT_AREG6 | |
154 | s32i a7, a1, PT_AREG7 | |
155 | _bbsi.l a2, 2, 1f | |
156 | s32i a8, a1, PT_AREG8 | |
157 | s32i a9, a1, PT_AREG9 | |
158 | s32i a10, a1, PT_AREG10 | |
159 | s32i a11, a1, PT_AREG11 | |
160 | _bbsi.l a2, 3, 1f | |
161 | s32i a12, a1, PT_AREG12 | |
162 | s32i a13, a1, PT_AREG13 | |
163 | s32i a14, a1, PT_AREG14 | |
164 | s32i a15, a1, PT_AREG15 | |
165 | _bnei a2, 1, 1f # only one valid frame? | |
166 | ||
167 | /* Only one valid frame, skip saving regs. */ | |
168 | ||
169 | j 2f | |
170 | ||
171 | /* Save the remaining registers. | |
172 | * We have to save all registers up to the first '1' from | |
173 | * the right, except the current frame (bit 0). | |
174 | * Assume a2 is: 001001000110001 | |
6656920b | 175 | * All register frames starting from the top field to the marked '1' |
5a0015d6 CZ |
176 | * must be saved. |
177 | */ | |
178 | ||
179 | 1: addi a3, a2, -1 # eliminate '1' in bit 0: yyyyxxww0 | |
180 | neg a3, a3 # yyyyxxww0 -> YYYYXXWW1+1 | |
181 | and a3, a3, a2 # max. only one bit is set | |
182 | ||
183 | /* Find number of frames to save */ | |
184 | ||
185 | ffs_ws a0, a3 # number of frames to the '1' from left | |
186 | ||
187 | /* Store information into WMASK: | |
188 | * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart, | |
189 | * bits 4...: number of valid 4-register frames | |
190 | */ | |
191 | ||
192 | slli a3, a0, 4 # number of frames to save in bits 8..4 | |
193 | extui a2, a2, 0, 4 # mask for the first 16 registers | |
194 | or a2, a3, a2 | |
195 | s32i a2, a1, PT_WMASK # needed when we restore the reg-file | |
196 | ||
197 | /* Save 4 registers at a time */ | |
198 | ||
199 | 1: rotw -1 | |
200 | s32i a0, a5, PT_AREG_END - 16 | |
201 | s32i a1, a5, PT_AREG_END - 12 | |
202 | s32i a2, a5, PT_AREG_END - 8 | |
203 | s32i a3, a5, PT_AREG_END - 4 | |
204 | addi a0, a4, -1 | |
205 | addi a1, a5, -16 | |
206 | _bnez a0, 1b | |
207 | ||
208 | /* WINDOWBASE still in SAR! */ | |
209 | ||
bc5378fc | 210 | rsr a2, sar # original WINDOWBASE |
5a0015d6 CZ |
211 | movi a3, 1 |
212 | ssl a2 | |
213 | sll a3, a3 | |
bc5378fc MF |
214 | wsr a3, windowstart # set corresponding WINDOWSTART bit |
215 | wsr a2, windowbase # and WINDOWBASE
5a0015d6 CZ |
216 | rsync |
217 | ||
218 | /* We are back to the original stack pointer (a1) */ | |
219 | ||
c658eac6 | 220 | 2: /* Now, jump to the common exception handler. */ |
5a0015d6 CZ |
221 | |
222 | j common_exception | |
223 | ||
d1538c46 | 224 | ENDPROC(user_exception) |
5a0015d6 CZ |
225 | |
226 | /* | |
227 | * First-level exception handler for kernel exceptions
228 | * Save special registers and the live window frame. | |
229 | * Note: Even though we change the stack pointer, we don't have to do a
230 | * MOVSP here, as we do that when we return from the exception. | |
231 | * (See comment in the kernel exception exit code) | |
232 | * | |
233 | * Entry condition for kernel_exception: | |
234 | * | |
235 | * a0: trashed, original value saved on stack (PT_AREG0) | |
236 | * a1: a1 | |
237 | * a2: new stack pointer, original in DEPC | |
99d5040e | 238 | * a3: a3 |
5a0015d6 | 239 | * depc: a2, original value saved on stack (PT_DEPC) |
99d5040e | 240 | * excsave_1: dispatch table |
5a0015d6 CZ |
241 | * |
242 | * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC | |
243 | * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception | |
244 | * | |
245 | * Entry condition for _kernel_exception: | |
246 | * | |
247 | * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC | |
248 | * excsave has been restored, and | |
249 | * stack pointer (a1) has been set. | |
250 | * | |
3ad2f3fb | 251 | * Note: _kernel_exception might be at an odd address. Don't use call0..call12 |
5a0015d6 CZ |
252 | */ |
253 | ||
254 | ENTRY(kernel_exception) | |
255 | ||
99d5040e | 256 | /* Save a1, a2, a3, and set SP. */ |
5a0015d6 | 257 | |
bc5378fc | 258 | rsr a0, depc # get a2 |
5a0015d6 CZ |
259 | s32i a1, a2, PT_AREG1 |
260 | s32i a0, a2, PT_AREG2 | |
261 | s32i a3, a2, PT_AREG3 | |
262 | mov a1, a2 | |
263 | ||
264 | .globl _kernel_exception | |
265 | _kernel_exception: | |
266 | ||
267 | /* Save SAR and turn off single stepping */ | |
268 | ||
269 | movi a2, 0 | |
bc5378fc MF |
270 | rsr a3, sar |
271 | xsr a2, icountlevel | |
5a0015d6 | 272 | s32i a3, a1, PT_SAR |
29c4dfd9 | 273 | s32i a2, a1, PT_ICOUNTLEVEL |
5a0015d6 CZ |
274 | |
275 | /* Rotate ws so that the current windowbase is at bit0. */ | |
276 | /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */ | |
277 | ||
bc5378fc MF |
278 | rsr a2, windowbase # don't need to save these, we only |
279 | rsr a3, windowstart # need shifted windowstart: windowmask | |
5a0015d6 CZ |
280 | ssr a2 |
281 | slli a2, a3, 32-WSBITS | |
282 | src a2, a3, a2 | |
283 | srli a2, a2, 32-WSBITS | |
284 | s32i a2, a1, PT_WMASK # needed for kernel_exception_exit | |
285 | ||
286 | /* Save only the live window-frame */ | |
287 | ||
288 | _bbsi.l a2, 1, 1f | |
289 | s32i a4, a1, PT_AREG4 | |
290 | s32i a5, a1, PT_AREG5 | |
291 | s32i a6, a1, PT_AREG6 | |
292 | s32i a7, a1, PT_AREG7 | |
293 | _bbsi.l a2, 2, 1f | |
294 | s32i a8, a1, PT_AREG8 | |
295 | s32i a9, a1, PT_AREG9 | |
296 | s32i a10, a1, PT_AREG10 | |
297 | s32i a11, a1, PT_AREG11 | |
298 | _bbsi.l a2, 3, 1f | |
299 | s32i a12, a1, PT_AREG12 | |
300 | s32i a13, a1, PT_AREG13 | |
301 | s32i a14, a1, PT_AREG14 | |
302 | s32i a15, a1, PT_AREG15 | |
303 | ||
304 | 1: | |
305 | ||
306 | #ifdef KERNEL_STACK_OVERFLOW_CHECK | |
307 | ||
308 | /* Stack overflow check, for debugging */ | |
309 | extui a2, a1, TASK_SIZE_BITS,XX | |
310 | movi a3, SIZE?? | |
311 | _bge a2, a3, out_of_stack_panic | |
312 | ||
313 | #endif | |
314 | ||
315 | /* | |
316 | * This is the common exception handler. | |
317 | * We get here from the user exception handler or simply by falling through | |
318 | * from the kernel exception handler. | |
319 | * Save the remaining special registers, switch to kernel mode, and jump | |
320 | * to the second-level exception handler. | |
321 | * | |
322 | */ | |
323 | ||
324 | common_exception: | |
325 | ||
29c4dfd9 | 326 | /* Save some registers, disable loops and clear the syscall flag. */ |
5a0015d6 | 327 | |
bc5378fc MF |
328 | rsr a2, debugcause |
329 | rsr a3, epc1 | |
5a0015d6 CZ |
330 | s32i a2, a1, PT_DEBUGCAUSE |
331 | s32i a3, a1, PT_PC | |
332 | ||
29c4dfd9 | 333 | movi a2, -1 |
bc5378fc | 334 | rsr a3, excvaddr |
29c4dfd9 | 335 | s32i a2, a1, PT_SYSCALL |
5a0015d6 CZ |
336 | movi a2, 0 |
337 | s32i a3, a1, PT_EXCVADDR | |
bc5378fc | 338 | xsr a2, lcount |
5a0015d6 CZ |
339 | s32i a2, a1, PT_LCOUNT |
340 | ||
341 | /* It is now safe to restore the EXC_TABLE_FIXUP variable. */
342 | ||
bc5378fc | 343 | rsr a0, exccause |
5a0015d6 | 344 | movi a3, 0 |
bc5378fc | 345 | rsr a2, excsave1 |
5a0015d6 CZ |
346 | s32i a0, a1, PT_EXCCAUSE |
347 | s32i a3, a2, EXC_TABLE_FIXUP | |
348 | ||
349 | /* All unrecoverable states are saved on stack, now, and a1 is valid, | |
350 | * so we can allow exceptions and interrupts (*) again. | |
351 | * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X) | |
352 | * | |
895666a9 MF |
353 | * (*) We only allow interrupts if they were previously enabled and |
354 | * we're not handling an IRQ | |
5a0015d6 CZ |
355 | */ |
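/* Rough C sketch of the PS value computed below (field names illustrative):
 *
 *	intlevel = (exccause == EXCCAUSE_LEVEL1_INTERRUPT) ? LOCKLEVEL
 *	                                                   : old_ps.intlevel;
 *	new_ps   = (1 << PS_WOE_BIT) | intlevel;   // EXCM, UM, RING, OWB = 0
 */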
356 | ||
bc5378fc | 357 | rsr a3, ps |
895666a9 MF |
358 | addi a0, a0, -EXCCAUSE_LEVEL1_INTERRUPT |
359 | movi a2, LOCKLEVEL | |
2d1c645c MG |
360 | extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH |
361 | # a3 = PS.INTLEVEL | |
895666a9 | 362 | moveqz a3, a2, a0 # a3 = LOCKLEVEL iff interrupt |
173d6681 | 363 | movi a2, 1 << PS_WOE_BIT |
5a0015d6 | 364 | or a3, a3, a2 |
bc5378fc MF |
365 | rsr a0, exccause |
366 | xsr a3, ps | |
5a0015d6 CZ |
367 | |
368 | s32i a3, a1, PT_PS # save ps | |
369 | ||
bc5378fc | 370 | /* Save lbeg, lend */ |
5a0015d6 | 371 | |
bc5378fc MF |
372 | rsr a2, lbeg |
373 | rsr a3, lend | |
5a0015d6 CZ |
374 | s32i a2, a1, PT_LBEG |
375 | s32i a3, a1, PT_LEND | |
376 | ||
733536b8 MF |
377 | /* Save SCOMPARE1 */ |
378 | ||
379 | #if XCHAL_HAVE_S32C1I | |
380 | rsr a2, scompare1 | |
381 | s32i a2, a1, PT_SCOMPARE1 | |
382 | #endif | |
383 | ||
c658eac6 CZ |
384 | /* Save optional registers. */ |
385 | ||
386 | save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT | |
387 | ||
c92931b2 MF |
388 | #ifdef CONFIG_TRACE_IRQFLAGS |
389 | l32i a4, a1, PT_DEPC | |
390 | /* Double exception means we came here with an exception | |
391 | * while PS.EXCM was set, i.e. interrupts disabled. | |
392 | */ | |
393 | bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f | |
394 | l32i a4, a1, PT_EXCCAUSE | |
395 | bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f | |
396 | /* Getting here because of an interrupt means interrupts were enabled
397 | * and we've just disabled them.
398 | */ | |
399 | movi a4, trace_hardirqs_off | |
400 | callx4 a4 | |
401 | 1: | |
402 | #endif | |
403 | ||
5a0015d6 CZ |
404 | /* Go to second-level dispatcher. Set up parameters to pass to the |
405 | * exception handler and call the exception handler. | |
406 | */ | |
407 | ||
99d5040e | 408 | rsr a4, excsave1 |
5a0015d6 CZ |
409 | mov a6, a1 # pass stack frame |
410 | mov a7, a0 # pass EXCCAUSE | |
411 | addx4 a4, a0, a4 | |
412 | l32i a4, a4, EXC_TABLE_DEFAULT # load handler | |
413 | ||
414 | /* Call the second-level handler */ | |
415 | ||
416 | callx4 a4 | |
417 | ||
418 | /* Jump here for exception exit */ | |
e6ffe17e | 419 | .global common_exception_return |
5a0015d6 CZ |
420 | common_exception_return: |
421 | ||
c92931b2 | 422 | 1: |
aea8e7c8 | 423 | rsil a2, LOCKLEVEL |
c92931b2 | 424 | |
5a0015d6 CZ |
425 | /* Jump if we are returning from kernel exceptions. */ |
426 | ||
aea8e7c8 | 427 | l32i a3, a1, PT_PS |
16c5becf MF |
428 | GET_THREAD_INFO(a2, a1) |
429 | l32i a4, a2, TI_FLAGS | |
430 | _bbci.l a3, PS_UM_BIT, 6f | |
5a0015d6 CZ |
431 | |
432 | /* Specific to a user exception exit: | |
433 | * We need to check some flags for signal handling and rescheduling, | |
434 | * and have to restore WB and WS, extra states, and all registers | |
435 | * in the register file that were in use in the user task. | |
e1088430 | 436 | * Note that we don't disable interrupts here. |
5a0015d6 CZ |
437 | */ |
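/* The flag tests below correspond roughly to (illustrative C, the real
 * tests use the TIF_* bit numbers):
 *
 *	if (ti->flags & _TIF_NEED_RESCHED)
 *		schedule();
 *	else if (ti->flags & (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING))
 *		do_notify_resume(regs);
 */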
438 | ||
5a0015d6 | 439 | _bbsi.l a4, TIF_NEED_RESCHED, 3f |
a53bb24e | 440 | _bbsi.l a4, TIF_NOTIFY_RESUME, 2f |
a99e07ee | 441 | _bbci.l a4, TIF_SIGPENDING, 5f |
5a0015d6 | 442 | |
a53bb24e | 443 | 2: l32i a4, a1, PT_DEPC |
5a0015d6 | 444 | bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f |
5a0015d6 | 445 | |
e1088430 CZ |
446 | /* Call do_signal() */ |
447 | ||
aea8e7c8 | 448 | rsil a2, 0 |
a53bb24e | 449 | movi a4, do_notify_resume # int do_notify_resume(struct pt_regs*) |
5a0015d6 | 450 | mov a6, a1 |
5a0015d6 CZ |
451 | callx4 a4 |
452 | j 1b | |
453 | ||
e1088430 | 454 | 3: /* Reschedule */ |
5a0015d6 | 455 | |
aea8e7c8 | 456 | rsil a2, 0 |
5a0015d6 CZ |
457 | movi a4, schedule # void schedule (void) |
458 | callx4 a4 | |
459 | j 1b | |
460 | ||
16c5becf MF |
461 | #ifdef CONFIG_PREEMPT |
462 | 6: | |
463 | _bbci.l a4, TIF_NEED_RESCHED, 4f | |
464 | ||
465 | /* Check current_thread_info->preempt_count */ | |
466 | ||
467 | l32i a4, a2, TI_PRE_COUNT | |
468 | bnez a4, 4f | |
469 | movi a4, preempt_schedule_irq | |
470 | callx4 a4 | |
471 | j 1b | |
472 | #endif | |
473 | ||
a99e07ee MF |
474 | 5: |
475 | #ifdef CONFIG_DEBUG_TLB_SANITY | |
476 | l32i a4, a1, PT_DEPC | |
477 | bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f | |
478 | movi a4, check_tlb_sanity | |
479 | callx4 a4 | |
480 | #endif | |
16c5becf | 481 | 6: |
aea8e7c8 MF |
482 | 4: |
483 | #ifdef CONFIG_TRACE_IRQFLAGS | |
484 | l32i a4, a1, PT_DEPC | |
485 | /* Double exception means we came here with an exception | |
486 | * while PS.EXCM was set, i.e. interrupts disabled. | |
487 | */ | |
488 | bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f | |
489 | l32i a4, a1, PT_EXCCAUSE | |
490 | bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f | |
491 | /* Getting here because of an interrupt means interrupts were enabled
492 | * and we'll re-enable them on return.
493 | */ | |
494 | movi a4, trace_hardirqs_on | |
495 | callx4 a4 | |
496 | 1: | |
497 | #endif | |
498 | /* Restore optional registers. */ | |
e1088430 CZ |
499 | |
500 | load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT | |
5a0015d6 | 501 | |
733536b8 MF |
502 | /* Restore SCOMPARE1 */ |
503 | ||
504 | #if XCHAL_HAVE_S32C1I | |
505 | l32i a2, a1, PT_SCOMPARE1 | |
506 | wsr a2, scompare1 | |
507 | #endif | |
bc5378fc | 508 | wsr a3, ps /* disable interrupts */ |
e1088430 CZ |
509 | |
510 | _bbci.l a3, PS_UM_BIT, kernel_exception_exit | |
511 | ||
512 | user_exception_exit: | |
513 | ||
514 | /* Restore the state of the task and return from the exception. */ | |
5a0015d6 | 515 | |
5a0015d6 CZ |
516 | /* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */ |
517 | ||
518 | l32i a2, a1, PT_WINDOWBASE | |
519 | l32i a3, a1, PT_WINDOWSTART | |
bc5378fc MF |
520 | wsr a1, depc # use DEPC as temp storage |
521 | wsr a3, windowstart # restore WINDOWSTART | |
5a0015d6 | 522 | ssr a2 # preserve user's WB in the SAR |
bc5378fc | 523 | wsr a2, windowbase # switch to user's saved WB |
5a0015d6 | 524 | rsync |
bc5378fc | 525 | rsr a1, depc # restore stack pointer |
5a0015d6 CZ |
526 | l32i a2, a1, PT_WMASK # register frames saved (in bits 4...9) |
527 | rotw -1 # we restore a4..a7 | |
528 | _bltui a6, 16, 1f # only have to restore current window? | |
529 | ||
530 | /* The working registers are a0 and a3. We are restoring to | |
531 | * a4..a7. Be careful not to destroy what we have just restored. | |
532 | * Note: wmask has the format YYYYM: | |
533 | * Y: number of registers saved in groups of 4 | |
534 | * M: 4 bit mask of first 16 registers | |
535 | */ | |
536 | ||
537 | mov a2, a6 | |
538 | mov a3, a5 | |
539 | ||
540 | 2: rotw -1 # a0..a3 become a4..a7 | |
541 | addi a3, a7, -4*4 # next iteration | |
542 | addi a2, a6, -16 # decrementing Y in WMASK | |
543 | l32i a4, a3, PT_AREG_END + 0 | |
544 | l32i a5, a3, PT_AREG_END + 4 | |
545 | l32i a6, a3, PT_AREG_END + 8 | |
546 | l32i a7, a3, PT_AREG_END + 12 | |
547 | _bgeui a2, 16, 2b | |
548 | ||
549 | /* Clear unrestored registers (don't leak anything to user-land) */
550 | ||
bc5378fc MF |
551 | 1: rsr a0, windowbase |
552 | rsr a3, sar | |
5a0015d6 CZ |
553 | sub a3, a0, a3 |
554 | beqz a3, 2f | |
555 | extui a3, a3, 0, WBBITS | |
556 | ||
557 | 1: rotw -1 | |
558 | addi a3, a7, -1 | |
559 | movi a4, 0 | |
560 | movi a5, 0 | |
561 | movi a6, 0 | |
562 | movi a7, 0 | |
563 | bgei a3, 1, 1b | |
564 | ||
565 | /* We are back where we were when we started.
566 | * Note: a2 still contains WMASK (if we've returned to the original | |
567 | * frame where we had loaded a2), or at least the lower 4 bits | |
568 | * (if we have restored WSBITS-1 frames). | |
569 | */ | |
570 | ||
c50842df CZ |
571 | #if XCHAL_HAVE_THREADPTR |
572 | l32i a3, a1, PT_THREADPTR | |
573 | wur a3, threadptr | |
574 | #endif | |
575 | ||
5a0015d6 CZ |
576 | 2: j common_exception_exit |
577 | ||
578 | /* This is the kernel exception exit. | |
579 | * We avoided doing a MOVSP when we entered the exception, but we
580 | * have to do it here. | |
581 | */ | |
582 | ||
583 | kernel_exception_exit: | |
584 | ||
5a0015d6 CZ |
585 | /* Check if we have to do a movsp. |
586 | * | |
587 | * We only have to do a movsp if the previous window-frame has | |
588 | * been spilled to the *temporary* exception stack instead of the | |
589 | * task's stack. This is the case if the corresponding bit in | |
590 | * WINDOWSTART for the previous window-frame was set before | |
591 | * (not spilled) but is zero now (spilled). | |
592 | * If this bit is zero, all other bits except the one for the | |
593 | * current window frame are also zero. So, we can use a simple test: | |
594 | * 'and' WINDOWSTART and WINDOWSTART-1: | |
595 | * | |
596 | * (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]* | |
597 | * | |
598 | * The result is zero only if one bit was set. | |
599 | * | |
600 | * (Note: We might have gone through several task switches before | |
601 | * we come back to the current task, so WINDOWBASE might be | |
602 | * different from the time the exception occurred.) | |
603 | */ | |
604 | ||
605 | /* Test WINDOWSTART before and after the exception. | |
606 | * We actually have WMASK, so we only have to test if it is 1 or not. | |
607 | */ | |
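/* In C terms: a movsp is needed only if (WINDOWSTART & (WINDOWSTART - 1)) == 0,
 * the usual "at most one bit set" test described above.
 */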
608 | ||
609 | l32i a2, a1, PT_WMASK | |
610 | _beqi a2, 1, common_exception_exit # Spilled before exception, jump
611 | ||
612 | /* Test WINDOWSTART now. If spilled, do the movsp */ | |
613 | ||
bc5378fc | 614 | rsr a3, windowstart |
5a0015d6 CZ |
615 | addi a0, a3, -1 |
616 | and a3, a3, a0 | |
617 | _bnez a3, common_exception_exit | |
618 | ||
619 | /* Do a movsp (we returned from a call4, so we have at least a0..a7) */ | |
620 | ||
621 | addi a0, a1, -16 | |
622 | l32i a3, a0, 0 | |
623 | l32i a4, a0, 4 | |
624 | s32i a3, a1, PT_SIZE+0 | |
625 | s32i a4, a1, PT_SIZE+4 | |
626 | l32i a3, a0, 8 | |
627 | l32i a4, a0, 12 | |
628 | s32i a3, a1, PT_SIZE+8 | |
629 | s32i a4, a1, PT_SIZE+12 | |
630 | ||
631 | /* Common exception exit. | |
632 | * We restore the special registers and the current window frame, and
633 | * return from the exception. | |
634 | * | |
635 | * Note: We expect a2 to hold PT_WMASK | |
636 | */ | |
637 | ||
638 | common_exception_exit: | |
639 | ||
c658eac6 CZ |
640 | /* Restore address registers. */ |
641 | ||
5a0015d6 CZ |
642 | _bbsi.l a2, 1, 1f |
643 | l32i a4, a1, PT_AREG4 | |
644 | l32i a5, a1, PT_AREG5 | |
645 | l32i a6, a1, PT_AREG6 | |
646 | l32i a7, a1, PT_AREG7 | |
647 | _bbsi.l a2, 2, 1f | |
648 | l32i a8, a1, PT_AREG8 | |
649 | l32i a9, a1, PT_AREG9 | |
650 | l32i a10, a1, PT_AREG10 | |
651 | l32i a11, a1, PT_AREG11 | |
652 | _bbsi.l a2, 3, 1f | |
653 | l32i a12, a1, PT_AREG12 | |
654 | l32i a13, a1, PT_AREG13 | |
655 | l32i a14, a1, PT_AREG14 | |
656 | l32i a15, a1, PT_AREG15 | |
657 | ||
658 | /* Restore PC, SAR */ | |
659 | ||
660 | 1: l32i a2, a1, PT_PC | |
661 | l32i a3, a1, PT_SAR | |
bc5378fc MF |
662 | wsr a2, epc1 |
663 | wsr a3, sar | |
5a0015d6 CZ |
664 | |
665 | /* Restore LBEG, LEND, LCOUNT */ | |
666 | ||
667 | l32i a2, a1, PT_LBEG | |
668 | l32i a3, a1, PT_LEND | |
bc5378fc | 669 | wsr a2, lbeg |
5a0015d6 | 670 | l32i a2, a1, PT_LCOUNT |
bc5378fc MF |
671 | wsr a3, lend |
672 | wsr a2, lcount | |
5a0015d6 | 673 | |
29c4dfd9 CZ |
674 | /* We control single stepping through the ICOUNTLEVEL register. */ |
675 | ||
676 | l32i a2, a1, PT_ICOUNTLEVEL | |
677 | movi a3, -2 | |
bc5378fc MF |
678 | wsr a2, icountlevel |
679 | wsr a3, icount | |
29c4dfd9 | 680 | |
5a0015d6 CZ |
681 | /* Check if it was a double exception. */
682 | ||
683 | l32i a0, a1, PT_DEPC | |
684 | l32i a3, a1, PT_AREG3 | |
685 | l32i a2, a1, PT_AREG2 | |
895666a9 | 686 | _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f |
5a0015d6 CZ |
687 | |
688 | /* Restore a0...a3 and return */ | |
689 | ||
690 | l32i a0, a1, PT_AREG0 | |
691 | l32i a1, a1, PT_AREG1 | |
895666a9 | 692 | rfe |
5a0015d6 | 693 | |
895666a9 | 694 | 1: wsr a0, depc |
5a0015d6 CZ |
695 | l32i a0, a1, PT_AREG0 |
696 | l32i a1, a1, PT_AREG1 | |
895666a9 | 697 | rfde |
5a0015d6 | 698 | |
d1538c46 CZ |
699 | ENDPROC(kernel_exception) |
700 | ||
5a0015d6 CZ |
701 | /* |
702 | * Debug exception handler. | |
703 | * | |
704 | * Currently, we don't support KGDB, so only user applications can be debugged.
705 | * | |
706 | * When we get here, a0 is trashed and saved to excsave[debuglevel] | |
707 | */ | |
708 | ||
709 | ENTRY(debug_exception) | |
710 | ||
bc5378fc | 711 | rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL |
173d6681 | 712 | bbsi.l a0, PS_EXCM_BIT, 1f # exception mode |
5a0015d6 | 713 | |
bc5378fc | 714 | /* Set EPC1 and EXCCAUSE */ |
5a0015d6 | 715 | |
bc5378fc MF |
716 | wsr a2, depc # save a2 temporarily |
717 | rsr a2, SREG_EPC + XCHAL_DEBUGLEVEL | |
718 | wsr a2, epc1 | |
5a0015d6 CZ |
719 | |
720 | movi a2, EXCCAUSE_MAPPED_DEBUG | |
bc5378fc | 721 | wsr a2, exccause |
5a0015d6 CZ |
722 | |
723 | /* Restore PS to the value before the debug exc but with PS.EXCM set.*/ | |
724 | ||
173d6681 | 725 | movi a2, 1 << PS_EXCM_BIT |
5a0015d6 CZ |
726 | or a2, a0, a2 |
727 | movi a0, debug_exception # restore a3, debug jump vector | |
bc5378fc MF |
728 | wsr a2, ps |
729 | xsr a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL | |
5a0015d6 CZ |
730 | |
731 | /* Switch to kernel/user stack, restore jump vector, and save a0 */ | |
732 | ||
173d6681 | 733 | bbsi.l a2, PS_UM_BIT, 2f # jump if user mode |
5a0015d6 CZ |
734 | |
735 | addi a2, a1, -16-PT_SIZE # assume kernel stack | |
736 | s32i a0, a2, PT_AREG0 | |
737 | movi a0, 0 | |
738 | s32i a1, a2, PT_AREG1 | |
739 | s32i a0, a2, PT_DEPC # mark it as a regular exception | |
bc5378fc | 740 | xsr a0, depc |
5a0015d6 CZ |
741 | s32i a3, a2, PT_AREG3 |
742 | s32i a0, a2, PT_AREG2 | |
743 | mov a1, a2 | |
744 | j _kernel_exception | |
745 | ||
bc5378fc | 746 | 2: rsr a2, excsave1 |
5a0015d6 CZ |
747 | l32i a2, a2, EXC_TABLE_KSTK # load kernel stack pointer |
748 | s32i a0, a2, PT_AREG0 | |
749 | movi a0, 0 | |
750 | s32i a1, a2, PT_AREG1 | |
751 | s32i a0, a2, PT_DEPC | |
bc5378fc | 752 | xsr a0, depc |
5a0015d6 CZ |
753 | s32i a3, a2, PT_AREG3 |
754 | s32i a0, a2, PT_AREG2 | |
755 | mov a1, a2 | |
756 | j _user_exception | |
757 | ||
758 | /* Debug exception while in exception mode. */ | |
759 | 1: j 1b // FIXME!! | |
760 | ||
d1538c46 | 761 | ENDPROC(debug_exception) |
5a0015d6 CZ |
762 | |
763 | /* | |
764 | * We get here in case of an unrecoverable exception. | |
765 | * The only thing we can do is to be nice and print a panic message. | |
766 | * We only produce a single stack frame for panic, so ??? | |
767 | * | |
768 | * | |
769 | * Entry conditions: | |
770 | * | |
771 | * - a0 contains the caller address; original value saved in excsave1. | |
772 | * - the original a0 contains a valid return address (backtrace) or 0. | |
773 | * - a2 contains a valid stack pointer
774 | * | |
775 | * Notes: | |
776 | * | |
777 | * - If the stack pointer could be invalid, the caller has to set up a
778 | * dummy stack pointer (e.g. the stack of the init_task) | |
779 | * | |
780 | * - If the return address could be invalid, the caller has to set it | |
781 | * to 0, so the backtrace would stop. | |
782 | * | |
783 | */ | |
784 | .align 4 | |
785 | unrecoverable_text: | |
786 | .ascii "Unrecoverable error in exception handler\0" | |
787 | ||
788 | ENTRY(unrecoverable_exception) | |
789 | ||
790 | movi a0, 1 | |
791 | movi a1, 0 | |
792 | ||
bc5378fc MF |
793 | wsr a0, windowstart |
794 | wsr a1, windowbase | |
5a0015d6 CZ |
795 | rsync |
796 | ||
2d1c645c | 797 | movi a1, (1 << PS_WOE_BIT) | LOCKLEVEL |
bc5378fc | 798 | wsr a1, ps |
5a0015d6 CZ |
799 | rsync |
800 | ||
801 | movi a1, init_task | |
802 | movi a0, 0 | |
803 | addi a1, a1, PT_REGS_OFFSET | |
804 | ||
805 | movi a4, panic | |
806 | movi a6, unrecoverable_text | |
807 | ||
808 | callx4 a4 | |
809 | ||
810 | 1: j 1b | |
811 | ||
d1538c46 | 812 | ENDPROC(unrecoverable_exception) |
5a0015d6 CZ |
813 | |
814 | /* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */ | |
815 | ||
816 | /* | |
817 | * Fast-handler for alloca exceptions | |
818 | * | |
819 | * The ALLOCA handler is entered when user code executes the MOVSP | |
820 | * instruction and the caller's frame is not in the register file. | |
5a0015d6 | 821 | * |
fff96d69 MF |
822 | * This algorithm was taken from Ross Morley's RTOS Porting Layer:
823 | *
824 | * /home/ross/rtos/porting/XtensaRTOS-PortingLayer-20090507/xtensa_vectors.S
825 | *
826 | * It leverages the existing window spill/fill routines and their support for
827 | * double exceptions. The 'movsp' instruction will only cause an exception if
828 | * the next window needs to be loaded. In fact this ALLOCA exception may be
829 | * replaced at some point by changing the hardware to do an underflow exception
830 | * of the proper size instead.
831 | *
832 | * This algorithm simply backs out the register changes started by the user
833 | * exception handler, makes it appear that we have started a window underflow
834 | * by rotating the window back and then setting the old window base (OWB) in
835 | * the 'ps' register with the rolled-back window base. The 'movsp' instruction
836 | * will be re-executed, and this time, since the next window frame is in the
837 | * active AR registers, it won't cause an exception.
838 | *
839 | * If the WindowUnderflow code gets a TLB miss, the page will get mapped
840 | * and the partial WindowUnderflow will be handled in the double exception
841 | * handler.
5a0015d6 CZ |
842 | * |
843 | * Entry condition: | |
844 | * | |
845 | * a0: trashed, original value saved on stack (PT_AREG0) | |
846 | * a1: a1 | |
847 | * a2: new stack pointer, original in DEPC | |
99d5040e | 848 | * a3: a3 |
5a0015d6 | 849 | * depc: a2, original value saved on stack (PT_DEPC) |
99d5040e | 850 | * excsave_1: dispatch table |
5a0015d6 CZ |
851 | * |
852 | * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC | |
853 | * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception | |
854 | */ | |
855 | ||
5a0015d6 | 856 | ENTRY(fast_alloca) |
fff96d69 MF |
857 | rsr a0, windowbase |
858 | rotw -1 | |
859 | rsr a2, ps | |
860 | extui a3, a2, PS_OWB_SHIFT, PS_OWB_WIDTH | |
861 | xor a3, a3, a4 | |
862 | l32i a4, a6, PT_AREG0 | |
863 | l32i a1, a6, PT_DEPC | |
864 | rsr a6, depc | |
865 | wsr a1, depc | |
866 | slli a3, a3, PS_OWB_SHIFT | |
867 | xor a2, a2, a3 | |
868 | wsr a2, ps | |
869 | rsync | |
5a0015d6 | 870 | |
fff96d69 MF |
871 | _bbci.l a4, 31, 4f |
872 | rotw -1 | |
873 | _bbci.l a8, 30, 8f | |
874 | rotw -1 | |
875 | j _WindowUnderflow12 | |
876 | 8: j _WindowUnderflow8 | |
877 | 4: j _WindowUnderflow4 | |
d1538c46 | 878 | ENDPROC(fast_alloca) |
5a0015d6 CZ |
879 | |
880 | /* | |
881 | * fast system calls. | |
882 | * | |
883 | * WARNING: The kernel doesn't save the entire user context before | |
884 | * handling a fast system call. These functions are small and short, | |
885 | * usually offering some functionality not available to user tasks. | |
886 | * | |
887 | * BE CAREFUL TO PRESERVE THE USER'S CONTEXT. | |
888 | * | |
889 | * Entry condition: | |
890 | * | |
891 | * a0: trashed, original value saved on stack (PT_AREG0) | |
892 | * a1: a1 | |
893 | * a2: new stack pointer, original in DEPC | |
99d5040e | 894 | * a3: a3 |
5a0015d6 | 895 | * depc: a2, original value saved on stack (PT_DEPC) |
99d5040e | 896 | * excsave_1: dispatch table |
5a0015d6 CZ |
897 | */ |
898 | ||
899 | ENTRY(fast_syscall_kernel) | |
900 | ||
901 | /* Skip syscall. */ | |
902 | ||
bc5378fc | 903 | rsr a0, epc1 |
5a0015d6 | 904 | addi a0, a0, 3 |
bc5378fc | 905 | wsr a0, epc1 |
5a0015d6 CZ |
906 | |
907 | l32i a0, a2, PT_DEPC | |
908 | bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable | |
909 | ||
bc5378fc | 910 | rsr a0, depc # get syscall-nr |
5a0015d6 | 911 | _beqz a0, fast_syscall_spill_registers |
fc4fb2ad | 912 | _beqi a0, __NR_xtensa, fast_syscall_xtensa |
5a0015d6 CZ |
913 | |
914 | j kernel_exception | |
915 | ||
d1538c46 CZ |
916 | ENDPROC(fast_syscall_kernel) |
917 | ||
5a0015d6 CZ |
918 | ENTRY(fast_syscall_user) |
919 | ||
920 | /* Skip syscall. */ | |
921 | ||
bc5378fc | 922 | rsr a0, epc1 |
5a0015d6 | 923 | addi a0, a0, 3 |
bc5378fc | 924 | wsr a0, epc1 |
5a0015d6 CZ |
925 | |
926 | l32i a0, a2, PT_DEPC | |
927 | bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable | |
928 | ||
bc5378fc | 929 | rsr a0, depc # get syscall-nr |
5a0015d6 | 930 | _beqz a0, fast_syscall_spill_registers |
fc4fb2ad | 931 | _beqi a0, __NR_xtensa, fast_syscall_xtensa |
5a0015d6 CZ |
932 | |
933 | j user_exception | |
934 | ||
d1538c46 CZ |
935 | ENDPROC(fast_syscall_user) |
936 | ||
5a0015d6 CZ |
937 | ENTRY(fast_syscall_unrecoverable) |
938 | ||
c4c4594b | 939 | /* Restore all states. */ |
5a0015d6 | 940 | |
c4c4594b CZ |
941 | l32i a0, a2, PT_AREG0 # restore a0 |
942 | xsr a2, depc # restore a2, depc | |
5a0015d6 | 943 | |
c4c4594b CZ |
944 | wsr a0, excsave1 |
945 | movi a0, unrecoverable_exception | |
946 | callx0 a0 | |
5a0015d6 | 947 | |
d1538c46 | 948 | ENDPROC(fast_syscall_unrecoverable) |
5a0015d6 CZ |
949 | |
950 | /* | |
951 | * sysxtensa syscall handler | |
952 | * | |
fc4fb2ad CZ |
953 | * int sysxtensa (SYS_XTENSA_ATOMIC_SET, ptr, val, unused); |
954 | * int sysxtensa (SYS_XTENSA_ATOMIC_ADD, ptr, val, unused); | |
955 | * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val, unused); | |
956 | * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval); | |
957 | * a2 a6 a3 a4 a5 | |
5a0015d6 CZ |
958 | * |
959 | * Entry condition: | |
960 | * | |
fc4fb2ad | 961 | * a0: a2 (syscall-nr), original value saved on stack (PT_AREG0) |
5a0015d6 | 962 | * a1: a1 |
fc4fb2ad | 963 | * a2: new stack pointer, original in a0 and DEPC |
99d5040e | 964 | * a3: a3 |
fc4fb2ad | 965 | * a4..a15: unchanged |
5a0015d6 | 966 | * depc: a2, original value saved on stack (PT_DEPC) |
99d5040e | 967 | * excsave_1: dispatch table |
5a0015d6 CZ |
968 | * |
969 | * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC | |
970 | * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception | |
971 | * | |
972 | * Note: we don't have to save a2; a2 holds the return value | |
973 | * | |
974 | * We use the two macros TRY and CATCH: | |
975 | * | |
976 | * TRY adds an entry to the __ex_table fixup table for the immediately | |
977 | * following instruction. | |
978 | * | |
25985edc | 979 | * CATCH catches any exception that occurred at one of the preceding TRY |
5a0015d6 CZ |
980 | * statements and continues from there |
981 | * | |
982 | * Usage TRY l32i a0, a1, 0 | |
983 | * <other code> | |
984 | * done: rfe | |
985 | * CATCH <set return code> | |
986 | * j done | |
987 | */ | |
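/* Rough C sketch of what the handler below implements (illustrative only):
 *
 *	ATOMIC_SET:     old = *ptr; *ptr = val;       return old;
 *	ATOMIC_ADD,
 *	ATOMIC_EXG_ADD: old = *ptr; *ptr = old + val; return old;
 *	ATOMIC_CMP_SWP: if (*ptr == oldval) { *ptr = newval; return 1; }
 *	                else return 0;
 *
 * A fault on *ptr returns -EFAULT via the TRY/CATCH fixup; an unknown
 * operation returns -EINVAL.
 */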
988 | ||
989 | #define TRY \ | |
990 | .section __ex_table, "a"; \ | |
991 | .word 66f, 67f; \ | |
992 | .text; \ | |
993 | 66: | |
994 | ||
995 | #define CATCH \ | |
996 | 67: | |
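/* Conceptually, each TRY emits a { faulting_insn, fixup } pair (66f, 67f)
 * into the __ex_table section, so a fault at the tagged load/store resumes
 * at the matching CATCH label (67:) instead of raising a signal.
 */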
997 | ||
fc4fb2ad | 998 | ENTRY(fast_syscall_xtensa) |
5a0015d6 | 999 | |
fc4fb2ad | 1000 | s32i a7, a2, PT_AREG7 # we need an additional register |
5a0015d6 | 1001 | movi a7, 4 # sizeof(unsigned int) |
fc4fb2ad | 1002 | access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp |
5a0015d6 | 1003 | |
fc4fb2ad CZ |
1004 | addi a6, a6, -1 # assuming SYS_XTENSA_ATOMIC_SET = 1 |
1005 | _bgeui a6, SYS_XTENSA_COUNT - 1, .Lill | |
1006 | _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp | |
5a0015d6 | 1007 | |
fc4fb2ad | 1008 | /* Fall through for ATOMIC_CMP_SWP. */ |
5a0015d6 CZ |
1009 | |
1010 | .Lswp: /* Atomic compare and swap */ | |
1011 | ||
fc4fb2ad CZ |
1012 | TRY l32i a0, a3, 0 # read old value |
1013 | bne a0, a4, 1f # same as old value? jump | |
1014 | TRY s32i a5, a3, 0 # different, modify value | |
1015 | l32i a7, a2, PT_AREG7 # restore a7 | |
1016 | l32i a0, a2, PT_AREG0 # restore a0 | |
1017 | movi a2, 1 # and return 1 | |
1018 | addi a6, a6, 1 # restore a6 (really necessary?) | |
1019 | rfe | |
5a0015d6 | 1020 | |
fc4fb2ad CZ |
1021 | 1: l32i a7, a2, PT_AREG7 # restore a7 |
1022 | l32i a0, a2, PT_AREG0 # restore a0 | |
1023 | movi a2, 0 # return 0 (note that we cannot set | |
1024 | addi a6, a6, 1 # restore a6 (really necessary?) | |
1025 | rfe | |
5a0015d6 | 1026 | |
fc4fb2ad | 1027 | .Lnswp: /* Atomic set, add, and exg_add. */ |
5a0015d6 | 1028 | |
fc4fb2ad CZ |
1029 | TRY l32i a7, a3, 0 # orig |
1030 | add a0, a4, a7 # + arg | |
1031 | moveqz a0, a4, a6 # set | |
1032 | TRY s32i a0, a3, 0 # write new value | |
5a0015d6 | 1033 | |
fc4fb2ad | 1034 | mov a0, a2 |
5a0015d6 | 1035 | mov a2, a7 |
fc4fb2ad CZ |
1036 | l32i a7, a0, PT_AREG7 # restore a7 |
1037 | l32i a0, a0, PT_AREG0 # restore a0 | |
1038 | addi a6, a6, 1 # restore a6 (really necessary?) | |
5a0015d6 CZ |
1039 | rfe |
1040 | ||
1041 | CATCH | |
fc4fb2ad CZ |
1042 | .Leac: l32i a7, a2, PT_AREG7 # restore a7 |
1043 | l32i a0, a2, PT_AREG0 # restore a0 | |
1044 | movi a2, -EFAULT | |
1045 | rfe | |
1046 | ||
1047 | .Lill: l32i a7, a2, PT_AREG7 # restore a7
1048 | l32i a0, a2, PT_AREG0 # restore a0 | |
1049 | movi a2, -EINVAL | |
1050 | rfe | |
1051 | ||
d1538c46 | 1052 | ENDPROC(fast_syscall_xtensa) |
5a0015d6 CZ |
1053 | |
1054 | ||
1055 | /* fast_syscall_spill_registers. | |
1056 | * | |
1057 | * Entry condition: | |
1058 | * | |
1059 | * a0: trashed, original value saved on stack (PT_AREG0) | |
1060 | * a1: a1 | |
1061 | * a2: new stack pointer, original in DEPC | |
99d5040e | 1062 | * a3: a3 |
5a0015d6 | 1063 | * depc: a2, original value saved on stack (PT_DEPC) |
99d5040e | 1064 | * excsave_1: dispatch table |
5a0015d6 CZ |
1065 | * |
1066 | * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler. | |
5a0015d6 CZ |
1067 | */ |
1068 | ||
1069 | ENTRY(fast_syscall_spill_registers) | |
1070 | ||
1071 | /* Register a FIXUP handler (pass current wb as a parameter) */ | |
1072 | ||
99d5040e | 1073 | xsr a3, excsave1 |
5a0015d6 CZ |
1074 | movi a0, fast_syscall_spill_registers_fixup |
1075 | s32i a0, a3, EXC_TABLE_FIXUP | |
bc5378fc | 1076 | rsr a0, windowbase |
5a0015d6 | 1077 | s32i a0, a3, EXC_TABLE_PARAM |
99d5040e | 1078 | xsr a3, excsave1 # restore a3 and excsave_1 |
5a0015d6 | 1079 | |
99d5040e | 1080 | /* Save a3, a4 and SAR on stack. */ |
5a0015d6 | 1081 | |
bc5378fc | 1082 | rsr a0, sar |
5a0015d6 | 1083 | s32i a3, a2, PT_AREG3 |
c658eac6 CZ |
1084 | s32i a4, a2, PT_AREG4 |
1085 | s32i a0, a2, PT_AREG5 # store SAR to PT_AREG5 | |
5a0015d6 CZ |
1086 | |
1087 | /* The spill routine might clobber a7, a11, and a15. */ | |
1088 | ||
c658eac6 CZ |
1089 | s32i a7, a2, PT_AREG7 |
1090 | s32i a11, a2, PT_AREG11 | |
1091 | s32i a15, a2, PT_AREG15 | |
5a0015d6 | 1092 | |
c658eac6 | 1093 | call0 _spill_registers # destroys a3, a4, and SAR |
5a0015d6 CZ |
1094 | |
1095 | /* Advance PC, restore registers and SAR, and return from exception. */ | |
1096 | ||
c658eac6 CZ |
1097 | l32i a3, a2, PT_AREG5 |
1098 | l32i a4, a2, PT_AREG4 | |
5a0015d6 | 1099 | l32i a0, a2, PT_AREG0 |
bc5378fc | 1100 | wsr a3, sar |
5a0015d6 CZ |
1101 | l32i a3, a2, PT_AREG3 |
1102 | ||
1103 | /* Restore clobbered registers. */ | |
1104 | ||
c658eac6 CZ |
1105 | l32i a7, a2, PT_AREG7 |
1106 | l32i a11, a2, PT_AREG11 | |
1107 | l32i a15, a2, PT_AREG15 | |
5a0015d6 CZ |
1108 | |
1109 | movi a2, 0 | |
1110 | rfe | |
1111 | ||
d1538c46 CZ |
1112 | ENDPROC(fast_syscall_spill_registers) |
1113 | ||
5a0015d6 CZ |
1114 | /* Fixup handler. |
1115 | * | |
1116 | * We get here if the spill routine causes an exception, e.g. tlb miss. | |
1117 | * We basically restore WINDOWBASE and WINDOWSTART to the condition when | |
1118 | * we entered the spill routine and jump to the user exception handler. | |
1119 | * | |
1120 | * a0: value of depc, original value in depc | |
1121 | * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE | |
1122 | * a3: exctable, original value in excsave1 | |
1123 | */ | |
1124 | ||
244066f4 | 1125 | ENTRY(fast_syscall_spill_registers_fixup) |
5a0015d6 | 1126 | |
bc5378fc MF |
1127 | rsr a2, windowbase # get current windowbase (a2 is saved) |
1128 | xsr a0, depc # restore depc and a0 | |
5a0015d6 CZ |
1129 | ssl a2 # set shift (32 - WB) |
1130 | ||
1131 | /* We need to make sure the current registers (a0-a3) are preserved. | |
1132 | * To do this, we simply set the bit for the current window frame | |
1133 | * in WS, so that the exception handlers save them to the task stack. | |
1134 | */ | |
1135 | ||
99d5040e | 1136 | xsr a3, excsave1 # get spill-mask |
244066f4 | 1137 | slli a3, a3, 1 # shift left by one |
5a0015d6 | 1138 | |
244066f4 MF |
1139 | slli a2, a3, 32-WSBITS |
1140 | src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy...... | |
bc5378fc | 1141 | wsr a2, windowstart # set corrected windowstart |
5a0015d6 | 1142 | |
244066f4 MF |
1143 | srli a3, a3, 1 |
1144 | rsr a2, excsave1 | |
1145 | l32i a2, a2, EXC_TABLE_DOUBLE_SAVE # restore a2 | |
1146 | xsr a2, excsave1 | |
1147 | s32i a3, a2, EXC_TABLE_DOUBLE_SAVE # save a3 | |
1148 | l32i a3, a2, EXC_TABLE_PARAM # original WB (in user task) | |
1149 | xsr a2, excsave1 | |
5a0015d6 CZ |
1150 | |
1151 | /* Return to the original (user task) WINDOWBASE. | |
1152 | * We leave the following frame behind: | |
1153 | * a0, a1, a2 same | |
244066f4 | 1154 | * a3: trashed (saved in EXC_TABLE_DOUBLE_SAVE) |
5a0015d6 | 1155 | * depc: depc (we have to return to that address) |
244066f4 | 1156 | * excsave_1: exctable |
5a0015d6 CZ |
1157 | */ |
1158 | ||
bc5378fc | 1159 | wsr a3, windowbase |
5a0015d6 CZ |
1160 | rsync |
1161 | ||
1162 | /* We are now in the original frame when we entered _spill_registers: | |
1163 | * a0: return address | |
1164 | * a1: used, stack pointer | |
1165 | * a2: kernel stack pointer | |
244066f4 | 1166 | * a3: available |
5a0015d6 | 1167 | * depc: exception address |
244066f4 | 1168 | * excsave: exctable |
5a0015d6 CZ |
1169 | * Note: This frame might be the same as above. |
1170 | */ | |
1171 | ||
5a0015d6 CZ |
1172 | /* Setup stack pointer. */ |
1173 | ||
1174 | addi a2, a2, -PT_USER_SIZE | |
1175 | s32i a0, a2, PT_AREG0 | |
1176 | ||
1177 | /* Make sure we return to this fixup handler. */ | |
1178 | ||
1179 | movi a3, fast_syscall_spill_registers_fixup_return | |
1180 | s32i a3, a2, PT_DEPC # setup depc | |
1181 | ||
1182 | /* Jump to the exception handler. */ | |
1183 | ||
99d5040e | 1184 | rsr a3, excsave1 |
bc5378fc | 1185 | rsr a0, exccause |
c4c4594b CZ |
1186 | addx4 a0, a0, a3 # find entry in table |
1187 | l32i a0, a0, EXC_TABLE_FAST_USER # load handler | |
244066f4 | 1188 | l32i a3, a3, EXC_TABLE_DOUBLE_SAVE |
c4c4594b | 1189 | jx a0 |
5a0015d6 | 1190 | |
244066f4 MF |
1191 | ENDPROC(fast_syscall_spill_registers_fixup) |
1192 | ||
1193 | ENTRY(fast_syscall_spill_registers_fixup_return) | |
5a0015d6 CZ |
1194 | |
1195 | /* When we return here, all registers have been restored (a2: DEPC) */ | |
1196 | ||
bc5378fc | 1197 | wsr a2, depc # exception address |
5a0015d6 CZ |
1198 | |
1199 | /* Restore fixup handler. */ | |
1200 | ||
244066f4 MF |
1201 | rsr a2, excsave1 |
1202 | s32i a3, a2, EXC_TABLE_DOUBLE_SAVE | |
1203 | movi a3, fast_syscall_spill_registers_fixup | |
1204 | s32i a3, a2, EXC_TABLE_FIXUP | |
1205 | rsr a3, windowbase | |
1206 | s32i a3, a2, EXC_TABLE_PARAM | |
1207 | l32i a2, a2, EXC_TABLE_KSTK | |
5a0015d6 | 1208 | |
5a0015d6 CZ |
1209 | /* Load WB at the time the exception occurred. */ |
1210 | ||
bc5378fc | 1211 | rsr a3, sar # WB is still in SAR |
5a0015d6 | 1212 | neg a3, a3 |
bc5378fc | 1213 | wsr a3, windowbase |
5a0015d6 CZ |
1214 | rsync |
1215 | ||
244066f4 MF |
1216 | rsr a3, excsave1 |
1217 | l32i a3, a3, EXC_TABLE_DOUBLE_SAVE | |
1218 | ||
5a0015d6 CZ |
1219 | rfde |
1220 | ||
244066f4 | 1221 | ENDPROC(fast_syscall_spill_registers_fixup_return) |
5a0015d6 CZ |
1222 | |
1223 | /* | |
1224 | * spill all registers. | |
1225 | * | |
1226 | * This is not a real function. The following conditions must be met: | |
1227 | * | |
1228 | * - must be called with call0. | |
c658eac6 | 1229 | * - uses a3, a4 and SAR. |
5a0015d6 CZ |
1230 | * - the last 'valid' register of each frame is clobbered.
1231 | * - the caller must have registered a fixup handler | |
1232 | * (or be inside a critical section) | |
1233 | * - PS_EXCM must be set (PS_WOE cleared?) | |
1234 | */ | |
1235 | ||
1236 | ENTRY(_spill_registers) | |
1237 | ||
1238 | /* | |
1239 | * Rotate ws so that the current windowbase is at bit 0. | |
1240 | * Assume ws = xxxwww1yy (www1 current window frame). | |
c658eac6 | 1241 | * Rotate ws right so that a4 = yyxxxwww1. |
5a0015d6 CZ |
1242 | */ |
1243 | ||
bc5378fc MF |
1244 | rsr a4, windowbase |
1245 | rsr a3, windowstart # a3 = xxxwww1yy | |
c658eac6 CZ |
1246 | ssr a4 # holds WB |
1247 | slli a4, a3, WSBITS | |
1248 | or a3, a3, a4 # a3 = xxxwww1yyxxxwww1yy | |
ea0b6b06 | 1249 | srl a3, a3 # a3 = 00xxxwww1yyxxxwww1 |
5a0015d6 CZ |
1250 | |
1251 | /* We are done if there is no frame other than the current one. */
1252 | ||
50c0716a | 1253 | extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww |
c658eac6 | 1254 | movi a4, (1 << (WSBITS-1)) |
5a0015d6 CZ |
1255 | _beqz a3, .Lnospill # only one active frame? jump |
1256 | ||
1257 | /* We want 1 at the top, so that we return to the current windowbase */ | |
1258 | ||
c658eac6 | 1259 | or a3, a3, a4 # 1yyxxxwww |
5a0015d6 CZ |
1260 | |
1261 | /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */ | |
1262 | ||
bc5378fc | 1263 | wsr a3, windowstart # save shifted windowstart |
c658eac6 CZ |
1264 | neg a4, a3 |
1265 | and a3, a4, a3 # first bit set from right: 000010000 | |
5a0015d6 | 1266 | |
c658eac6 | 1267 | ffs_ws a4, a3 # a4: shifts to skip empty frames |
5a0015d6 | 1268 | movi a3, WSBITS |
c658eac6 CZ |
1269 | sub a4, a3, a4 # WSBITS-a4:number of 0-bits from right |
1270 | ssr a4 # save in SAR for later. | |
5a0015d6 | 1271 | |
bc5378fc | 1272 | rsr a3, windowbase |
c658eac6 | 1273 | add a3, a3, a4 |
bc5378fc | 1274 | wsr a3, windowbase |
5a0015d6 CZ |
1275 | rsync |
1276 | ||
bc5378fc | 1277 | rsr a3, windowstart |
5a0015d6 CZ |
1278 | srl a3, a3 # shift windowstart |
1279 | ||
1280 | /* WB is now just one frame below the oldest frame in the register | |
1281 | window. WS is shifted so the oldest frame is in bit 0, thus, WB | |
1282 | and WS differ by one 4-register frame. */ | |
1283 | ||
1284 | /* Save frames. Depending on what call was used (call4, call8, or call12),
1285 | * we have to save 4, 8, or 12 registers.
1286 | */ | |
1287 | ||
1288 | _bbsi.l a3, 1, .Lc4 | |
1289 | _bbsi.l a3, 2, .Lc8 | |
1290 | ||
1291 | /* Special case: we have a call12-frame starting at a4. */ | |
1292 | ||
1293 | _bbci.l a3, 3, .Lc12 # bit 3 shouldn't be zero! (Jump to Lc12 first) | |
1294 | ||
1295 | s32e a4, a1, -16 # a1 is valid with an empty spill area | |
1296 | l32e a4, a5, -12 | |
1297 | s32e a8, a4, -48 | |
1298 | mov a8, a4 | |
1299 | l32e a4, a1, -16 | |
1300 | j .Lc12c | |
1301 | ||
50c0716a | 1302 | .Lnospill: |
ea0b6b06 | 1303 | ret |
50c0716a | 1304 | |
5a0015d6 CZ |
1305 | .Lloop: _bbsi.l a3, 1, .Lc4 |
1306 | _bbci.l a3, 2, .Lc12 | |
1307 | ||
1308 | .Lc8: s32e a4, a13, -16 | |
1309 | l32e a4, a5, -12 | |
1310 | s32e a8, a4, -32 | |
1311 | s32e a5, a13, -12 | |
1312 | s32e a6, a13, -8 | |
1313 | s32e a7, a13, -4 | |
1314 | s32e a9, a4, -28 | |
1315 | s32e a10, a4, -24 | |
1316 | s32e a11, a4, -20 | |
1317 | ||
1318 | srli a11, a3, 2 # shift windowbase by 2 | |
1319 | rotw 2 | |
1320 | _bnei a3, 1, .Lloop | |
1321 | ||
1322 | .Lexit: /* Done. Do the final rotation, set WS, and return. */ | |
1323 | ||
1324 | rotw 1 | |
bc5378fc | 1325 | rsr a3, windowbase |
5a0015d6 CZ |
1326 | ssl a3 |
1327 | movi a3, 1 | |
1328 | sll a3, a3 | |
bc5378fc | 1329 | wsr a3, windowstart |
ea0b6b06 | 1330 | ret |
5a0015d6 CZ |
1331 | |
1332 | .Lc4: s32e a4, a9, -16 | |
1333 | s32e a5, a9, -12 | |
1334 | s32e a6, a9, -8 | |
1335 | s32e a7, a9, -4 | |
1336 | ||
1337 | srli a7, a3, 1 | |
1338 | rotw 1 | |
1339 | _bnei a3, 1, .Lloop | |
1340 | j .Lexit | |
1341 | ||
1342 | .Lc12: _bbci.l a3, 3, .Linvalid_mask # bit 3 shouldn't be zero!
1343 | ||
1344 | /* 12-register frame (call12) */ | |
1345 | ||
1346 | l32e a2, a5, -12 | |
1347 | s32e a8, a2, -48 | |
1348 | mov a8, a2 | |
1349 | ||
1350 | .Lc12c: s32e a9, a8, -44 | |
1351 | s32e a10, a8, -40 | |
1352 | s32e a11, a8, -36 | |
1353 | s32e a12, a8, -32 | |
1354 | s32e a13, a8, -28 | |
1355 | s32e a14, a8, -24 | |
1356 | s32e a15, a8, -20 | |
1357 | srli a15, a3, 3 | |
1358 | ||
1359 | /* The stack pointer for a4..a7 is out of reach, so we rotate the | |
1360 | * window, grab the stack pointer, and rotate back.
1361 | * Alternatively, we could also use the following approach, but that | |
1362 | * makes the fixup routine much more complicated: | |
1363 | * rotw 1 | |
1364 | * s32e a0, a13, -16 | |
1365 | * ... | |
1366 | * rotw 2 | |
1367 | */ | |
1368 | ||
1369 | rotw 1 | |
1370 | mov a5, a13 | |
1371 | rotw -1 | |
1372 | ||
1373 | s32e a4, a9, -16 | |
1374 | s32e a5, a9, -12 | |
1375 | s32e a6, a9, -8 | |
1376 | s32e a7, a9, -4 | |
1377 | ||
1378 | rotw 3 | |
1379 | ||
1380 | _beqi a3, 1, .Lexit | |
1381 | j .Lloop | |
1382 | ||
1383 | .Linvalid_mask: | |
1384 | ||
1385 | /* We get here because of an unrecoverable error in the window | |
1386 | * registers. If we are in user space, we kill the application;
1387 | * in kernel space, however, this condition is unrecoverable.
1388 | */ | |
1389 | ||
bc5378fc | 1390 | rsr a0, ps |
173d6681 | 1391 | _bbci.l a0, PS_UM_BIT, 1f |
5a0015d6 | 1392 | |
c4c4594b | 1393 | /* User space: Setup a dummy frame and kill application. |
5a0015d6 CZ |
1394 | * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer. |
1395 | */ | |
1396 | ||
1397 | movi a0, 1 | |
1398 | movi a1, 0 | |
1399 | ||
bc5378fc MF |
1400 | wsr a0, windowstart |
1401 | wsr a1, windowbase | |
5a0015d6 CZ |
1402 | rsync |
1403 | ||
1404 | movi a0, 0 | |
1405 | ||
99d5040e | 1406 | rsr a3, excsave1 |
5a0015d6 | 1407 | l32i a1, a3, EXC_TABLE_KSTK |
5a0015d6 | 1408 | |
2d1c645c | 1409 | movi a4, (1 << PS_WOE_BIT) | LOCKLEVEL |
bc5378fc | 1410 | wsr a4, ps |
5a0015d6 CZ |
1411 | rsync |
1412 | ||
1413 | movi a6, SIGSEGV | |
1414 | movi a4, do_exit | |
1415 | callx4 a4 | |
1416 | ||
1417 | 1: /* Kernel space: PANIC! */ | |
1418 | ||
bc5378fc | 1419 | wsr a0, excsave1 |
5a0015d6 CZ |
1420 | movi a0, unrecoverable_exception |
1421 | callx0 a0 # should not return | |
1422 | 1: j 1b | |
1423 | ||
d1538c46 CZ |
1424 | ENDPROC(_spill_registers) |
1425 | ||
e5083a63 | 1426 | #ifdef CONFIG_MMU |
5a0015d6 CZ |
1427 | /* |
1428 | * We should never get here. Bail out! | |
1429 | */ | |
1430 | ||
1431 | ENTRY(fast_second_level_miss_double_kernel) | |
1432 | ||
1433 | 1: movi a0, unrecoverable_exception | |
1434 | callx0 a0 # should not return | |
1435 | 1: j 1b | |
1436 | ||
d1538c46 CZ |
1437 | ENDPROC(fast_second_level_miss_double_kernel) |
1438 | ||
5a0015d6 CZ |
1439 | /* First-level entry handler for user, kernel, and double 2nd-level |
1440 | * TLB miss exceptions. Note that for now, user and kernel miss | |
1441 | * exceptions share the same entry point and are handled identically. | |
1442 | * | |
1443 | * An old, less-efficient C version of this function used to exist. | |
1444 | * We include it below, interleaved as comments, for reference. | |
1445 | * | |
1446 | * Entry condition: | |
1447 | * | |
1448 | * a0: trashed, original value saved on stack (PT_AREG0) | |
1449 | * a1: a1 | |
1450 | * a2: new stack pointer, original in DEPC | |
99d5040e | 1451 | * a3: a3 |
5a0015d6 | 1452 | * depc: a2, original value saved on stack (PT_DEPC) |
99d5040e | 1453 | * excsave_1: dispatch table |
5a0015d6 CZ |
1454 | * |
1455 | * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC | |
1456 | * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception | |
1457 | */ | |
1458 | ||
1459 | ENTRY(fast_second_level_miss) | |
1460 | ||
99d5040e | 1461 | /* Save a1 and a3. Note: we don't expect a double exception. */ |
5a0015d6 CZ |
1462 | |
1463 | s32i a1, a2, PT_AREG1 | |
99d5040e | 1464 | s32i a3, a2, PT_AREG3 |
5a0015d6 CZ |
1465 | |
1466 | /* We need to map the page of PTEs for the user task. Find | |
1467 | * the pointer to that page. Also, it's possible for tsk->mm | |
1468 | * to be NULL while tsk->active_mm is nonzero if we faulted on | |
1469 | * a vmalloc address. In that rare case, we must use | |
1470 | * active_mm instead to avoid a fault in this handler. See | |
1471 | * | |
1472 | * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html | |
1473 | * (or search Internet on "mm vs. active_mm") | |
1474 | * | |
1475 | * if (!mm) | |
1476 | * mm = tsk->active_mm; | |
1477 | * pgd = pgd_offset (mm, regs->excvaddr); | |
1478 | * pmd = pmd_offset (pgd, regs->excvaddr); | |
1479 | * pmdval = *pmd; | |
1480 | */ | |
1481 | ||
1482 | GET_CURRENT(a1,a2) | |
1483 | l32i a0, a1, TASK_MM # tsk->mm | |
1484 | beqz a0, 9f | |
1485 | ||
bc5378fc | 1486 | 8: rsr a3, excvaddr # fault address |
01858d1b | 1487 | _PGD_OFFSET(a0, a3, a1) |
5a0015d6 | 1488 | l32i a0, a0, 0 # read pmdval |
5a0015d6 CZ |
1489 | beqz a0, 2f |
1490 | ||
1491 | /* Read ptevaddr and convert to top of page-table page. | |
1492 | * | |
1493 | * vpnval = read_ptevaddr_register() & PAGE_MASK; | |
1494 | * vpnval += DTLB_WAY_PGTABLE; | |
1495 | * pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL); | |
1496 | * write_dtlb_entry (pteval, vpnval); | |
1497 | * | |
1498 | * The messy computation for 'pteval' above really simplifies | |
1499 | * into the following: | |
1500 | * | |
6656920b | 1501 | * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY |
5a0015d6 CZ |
1502 | */ |
1503 | ||
39070cb8 | 1504 | movi a1, (-PAGE_OFFSET) & 0xffffffff |
5a0015d6 CZ |
1505 | add a0, a0, a1 # pmdval - PAGE_OFFSET |
1506 | extui a1, a0, 0, PAGE_SHIFT # ... & PAGE_MASK | |
1507 | xor a0, a0, a1 | |
1508 | ||
01858d1b | 1509 | movi a1, _PAGE_DIRECTORY |
5a0015d6 CZ |
1510 | or a0, a0, a1 # ... | PAGE_DIRECTORY |
1511 | ||
01858d1b | 1512 | /* |
6656920b | 1513 | * We utilize all three wired-ways (7-9) to hold pmd translations. |
01858d1b CZ |
1514 | * Memory regions are mapped to the DTLBs according to bits 28 and 29. |
1515 | * This allows mapping the three most common regions to three different
1516 | * DTLB ways:
1517 | * 0,1 -> way 7 program (0040.0000) and virtual (c000.0000)
1518 | * 2 -> way 8 shared libraries (2000.0000)
1519 | * 3 -> way 9 stack (3000.0000)
1520 | */ | |
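/* Worked example of the way selection below: for a fault address with
 * bits 29..28 = 3 (the 3000.0000 stack region), addx2 gives 3 * 3 = 9,
 * extui keeps 9 >> 2 = 2, and DTLB_WAY_PGD + 2 selects way 9.
 */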
1521 | ||
1522 | extui a3, a3, 28, 2 # addr. bit 28 and 29 0,1,2,3 | |
bc5378fc | 1523 | rsr a1, ptevaddr |
01858d1b | 1524 | addx2 a3, a3, a3 # -> 0,3,6,9 |
5a0015d6 | 1525 | srli a1, a1, PAGE_SHIFT |
01858d1b | 1526 | extui a3, a3, 2, 2 # -> 0,0,1,2 |
5a0015d6 | 1527 | slli a1, a1, PAGE_SHIFT # ptevaddr & PAGE_MASK |
01858d1b CZ |
1528 | addi a3, a3, DTLB_WAY_PGD |
1529 | add a1, a1, a3 # ... + way_number | |
5a0015d6 | 1530 | |
01858d1b | 1531 | 3: wdtlb a0, a1 |
5a0015d6 CZ |
1532 | dsync |
1533 | ||
1534 | /* Exit critical section. */ | |
1535 | ||
99d5040e | 1536 | 4: rsr a3, excsave1 |
5a0015d6 CZ |
1537 | movi a0, 0 |
1538 | s32i a0, a3, EXC_TABLE_FIXUP | |
1539 | ||
1540 | /* Restore the working registers, and return. */ | |
1541 | ||
1542 | l32i a0, a2, PT_AREG0 | |
1543 | l32i a1, a2, PT_AREG1 | |
99d5040e | 1544 | l32i a3, a2, PT_AREG3 |
5a0015d6 | 1545 | l32i a2, a2, PT_DEPC |
5a0015d6 CZ |
1546 | |
1547 | bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f | |
1548 | ||
1549 | /* Restore excsave1 and return. */ | |
1550 | ||
bc5378fc | 1551 | rsr a2, depc |
5a0015d6 CZ |
1552 | rfe |
1553 | ||
1554 | /* Return from double exception. */ | |
1555 | ||
bc5378fc | 1556 | 1: xsr a2, depc |
5a0015d6 CZ |
1557 | esync |
1558 | rfde | |
1559 | ||
1560 | 9: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0 | |
1561 | j 8b | |
1562 | ||
6656920b CZ |
1563 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) |
1564 | ||
1565 | 2: /* Special case for cache aliasing. | |
1566 | * We (should) only get here if a clear_user_page, copy_user_page | |
1567 | * or the aliased cache flush functions got preemptively interrupted | |
1568 | * by another task. Re-establish temporary mapping to the | |
1569 | * TLBTEMP_BASE areas. | |
1570 | */ | |
1571 | ||
1572 | /* We shouldn't be in a double exception */ | |
1573 | ||
1574 | l32i a0, a2, PT_DEPC | |
1575 | bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f | |
1576 | ||
1577 | /* Make sure the exception originated in the special functions */ | |
1578 | ||
1579 | movi a0, __tlbtemp_mapping_start | |
bc5378fc | 1580 | rsr a3, epc1 |
6656920b CZ |
1581 | bltu a3, a0, 2f |
1582 | movi a0, __tlbtemp_mapping_end | |
1583 | bgeu a3, a0, 2f | |
1584 | ||
1585 | /* Check if excvaddr was in one of the TLBTEMP_BASE areas. */ | |
1586 | ||
1587 | movi a3, TLBTEMP_BASE_1 | |
bc5378fc | 1588 | rsr a0, excvaddr |
6656920b CZ |
1589 | bltu a0, a3, 2f |
1590 | ||
1591 | addi a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT)) | |
1592 | bgeu a1, a3, 2f | |
1593 | ||
1594 | /* Check if we have to restore an ITLB mapping. */ | |
1595 | ||
1596 | movi a1, __tlbtemp_mapping_itlb | |
bc5378fc | 1597 | rsr a3, epc1 |
6656920b CZ |
1598 | sub a3, a3, a1 |
1599 | ||
1600 | /* Calculate VPN */ | |
1601 | ||
1602 | movi a1, PAGE_MASK | |
1603 | and a1, a1, a0 | |
1604 | ||
1605 | /* Jump for ITLB entry */ | |
1606 | ||
1607 | bgez a3, 1f | |
1608 | ||
1609 | /* We can use up to two TLBTEMP areas, one for src and one for dst. */ | |
1610 | ||
1611 | extui a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1 | |
1612 | add a1, a3, a1 | |
1613 | ||
1614 | /* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */ | |
1615 | ||
1616 | mov a0, a6 | |
1617 | movnez a0, a7, a3 | |
1618 | j 3b | |
1619 | ||
1620 | /* ITLB entry. We only use dst in a6. */ | |
1621 | ||
1622 | 1: witlb a6, a1 | |
1623 | isync | |
1624 | j 4b | |
1625 | ||
1626 | ||
1627 | #endif // DCACHE_WAY_SIZE > PAGE_SIZE | |
1628 | ||
1629 | ||
5a0015d6 CZ |
1630 | 2: /* Invalid PGD, default exception handling */ |
1631 | ||
bc5378fc | 1632 | rsr a1, depc |
5a0015d6 | 1633 | s32i a1, a2, PT_AREG2 |
5a0015d6 CZ |
1634 | mov a1, a2 |
1635 | ||
bc5378fc | 1636 | rsr a2, ps |
173d6681 | 1637 | bbsi.l a2, PS_UM_BIT, 1f |
5a0015d6 CZ |
1638 | j _kernel_exception |
1639 | 1: j _user_exception | |
1640 | ||
d1538c46 | 1641 | ENDPROC(fast_second_level_miss) |
5a0015d6 CZ |
1642 | |
1643 | /* | |
1644 | * StoreProhibitedException | |
1645 | * | |
1646 | * Update the pte and rewrite the matching dtlb entry for this pte. | |
1647 | * | |
1648 | * Entry condition: | |
1649 | * | |
1650 | * a0: trashed, original value saved on stack (PT_AREG0) | |
1651 | * a1: a1 | |
1652 | * a2: new stack pointer, original in DEPC | |
99d5040e | 1653 | * a3: a3 |
5a0015d6 | 1654 | * depc: a2, original value saved on stack (PT_DEPC) |
99d5040e | 1655 | * excsave_1: dispatch table |
5a0015d6 CZ |
1656 | * |
1657 | * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC | |
1658 | * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception | |
1659 | */ | |
1660 | ||
1661 | ENTRY(fast_store_prohibited) | |
1662 | ||
99d5040e | 1663 | /* Save a1 and a3. */ |
5a0015d6 CZ |
1664 | |
1665 | s32i a1, a2, PT_AREG1 | |
99d5040e | 1666 | s32i a3, a2, PT_AREG3 |
5a0015d6 CZ |
1667 | |
1668 | GET_CURRENT(a1,a2) | |
1669 | l32i a0, a1, TASK_MM # tsk->mm | |
1670 | beqz a0, 9f | |
1671 | ||
bc5378fc | 1672 | 8: rsr a1, excvaddr # fault address |
99d5040e | 1673 | _PGD_OFFSET(a0, a1, a3) |
5a0015d6 | 1674 | l32i a0, a0, 0 |
5a0015d6 CZ |
1675 | beqz a0, 2f |
1676 | ||
51fc41a9 MF |
1677 | /* |
1678 | * Note that we test _PAGE_WRITABLE_BIT only if PTE is present | |
1679 | * and is not PAGE_NONE. See pgtable.h for possible PTE layouts. | |
1680 | */ | |
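/* A sketch of the fast path below, roughly in C:
 *
 *	pteval = *pte;
 *	if ((pteval & _PAGE_CA_INVALID) == _PAGE_CA_INVALID ||
 *	    !(pteval & (1 << _PAGE_WRITABLE_BIT)))
 *		goto slow_path;		handle the fault in C instead
 *	pteval |= _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE;
 *	*pte = pteval;
 *	rewrite the matching dtlb entry (pdtlb/wdtlb below);
 */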
01858d1b | 1681 | |
99d5040e MF |
1682 | _PTE_OFFSET(a0, a1, a3) |
1683 | l32i a3, a0, 0 # read pteval | |
51fc41a9 | 1684 | movi a1, _PAGE_CA_INVALID |
99d5040e MF |
1685 | ball a3, a1, 2f |
1686 | bbci.l a3, _PAGE_WRITABLE_BIT, 2f | |
5a0015d6 | 1687 | |
01858d1b | 1688 | movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE |
99d5040e | 1689 | or a3, a3, a1 |
bc5378fc | 1690 | rsr a1, excvaddr |
99d5040e | 1691 | s32i a3, a0, 0 |
5a0015d6 CZ |
1692 | |
1693 | /* We need to flush the cache if we have page coloring. */ | |
1694 | #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK | |
1695 | dhwb a0, 0 | |
1696 | #endif | |
1697 | pdtlb a0, a1 | |
99d5040e | 1698 | wdtlb a3, a0 |
5a0015d6 CZ |
1699 | |
1700 | /* Exit critical section. */ | |
1701 | ||
1702 | movi a0, 0 | |
99d5040e | 1703 | rsr a3, excsave1 |
5a0015d6 CZ |
1704 | s32i a0, a3, EXC_TABLE_FIXUP |
1705 | ||
1706 | /* Restore the working registers, and return. */ | |
1707 | ||
99d5040e | 1708 | l32i a3, a2, PT_AREG3 |
5a0015d6 CZ |
1709 | l32i a1, a2, PT_AREG1 |
1710 | l32i a0, a2, PT_AREG0 | |
1711 | l32i a2, a2, PT_DEPC | |
1712 | ||
5a0015d6 CZ |
1713 | bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f |
1714 | ||
bc5378fc | 1715 | rsr a2, depc |
5a0015d6 CZ |
1716 | rfe |
1717 | ||
1718 | /* Double exception. Restore FIXUP handler and return. */ | |
1719 | ||
bc5378fc | 1720 | 1: xsr a2, depc |
5a0015d6 CZ |
1721 | esync |
1722 | rfde | |
1723 | ||
1724 | 9: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0 | |
1725 | j 8b | |
1726 | ||
1727 | 2: /* If there was a problem, handle fault in C */ | |
1728 | ||
99d5040e MF |
1729 | rsr a3, depc # still holds a2 |
1730 | s32i a3, a2, PT_AREG2 | |
5a0015d6 CZ |
1731 | mov a1, a2 |
1732 | ||
bc5378fc | 1733 | rsr a2, ps |
173d6681 | 1734 | bbsi.l a2, PS_UM_BIT, 1f |
5a0015d6 CZ |
1735 | j _kernel_exception |
1736 | 1: j _user_exception | |
d1538c46 CZ |
1737 | |
1738 | ENDPROC(fast_store_prohibited) | |
1739 | ||
e5083a63 | 1740 | #endif /* CONFIG_MMU */ |
5a0015d6 | 1741 | |
fc4fb2ad CZ |
1742 | /* |
1743 | * System Calls. | |
1744 | * | |
1745 | * void system_call (struct pt_regs* regs, int exccause) | |
1746 | * a2 a3 | |
1747 | */ | |
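/* What the body below does, expressed as a rough C sketch (the six
 * syscall arguments come from the saved user register file; pt_regs is
 * additionally spilled to the stack as a seventh argument):
 *
 *	regs->syscall = regs->areg[2];
 *	do_syscall_trace_enter(regs);
 *	nr  = regs->syscall;
 *	ret = -ENOSYS;
 *	if (nr < __NR_syscall_count && sys_call_table[nr] != sys_ni_syscall)
 *		ret = sys_call_table[nr](areg[6], areg[3], areg[4],
 *					 areg[5], areg[8], areg[9]);
 *	regs->areg[2] = ret;
 *	do_syscall_trace_leave(regs);
 */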
1748 | ||
1749 | ENTRY(system_call) | |
d1538c46 | 1750 | |
fc4fb2ad CZ |
1751 | entry a1, 32 |
1752 | ||
1753 | /* regs->syscall = regs->areg[2] */ | |
1754 | ||
1755 | l32i a3, a2, PT_AREG2 | |
1756 | mov a6, a2 | |
1757 | movi a4, do_syscall_trace_enter | |
1758 | s32i a3, a2, PT_SYSCALL | |
1759 | callx4 a4 | |
1760 | ||
1761 | /* syscall = sys_call_table[syscall_nr] */ | |
1762 | ||
1763 | movi a4, sys_call_table; | |
1764 | movi a5, __NR_syscall_count | |
1765 | movi a6, -ENOSYS | |
1766 | bgeu a3, a5, 1f | |
1767 | ||
1768 | addx4 a4, a3, a4 | |
1769 | l32i a4, a4, 0 | |
1770 | movi a5, sys_ni_syscall; | |
1771 | beq a4, a5, 1f | |
1772 | ||
1773 | /* Load args: arg0 - arg5 are passed via regs. */ | |
1774 | ||
1775 | l32i a6, a2, PT_AREG6 | |
1776 | l32i a7, a2, PT_AREG3 | |
1777 | l32i a8, a2, PT_AREG4 | |
1778 | l32i a9, a2, PT_AREG5 | |
1779 | l32i a10, a2, PT_AREG8 | |
1780 | l32i a11, a2, PT_AREG9 | |
1781 | ||
1782 | /* Pass one additional argument to the syscall: pt_regs (on stack) */ | |
1783 | s32i a2, a1, 0 | |
1784 | ||
1785 | callx4 a4 | |
1786 | ||
1787 | 1: /* regs->areg[2] = return_value */ | |
1788 | ||
1789 | s32i a6, a2, PT_AREG2 | |
1790 | movi a4, do_syscall_trace_leave | |
1791 | mov a6, a2 | |
1792 | callx4 a4 | |
1793 | retw | |
1794 | ||
d1538c46 CZ |
1795 | ENDPROC(system_call) |
1796 | ||
fc4fb2ad | 1797 | |
5a0015d6 CZ |
1798 | /* |
1799 | * Task switch. | |
1800 | * | |
1801 | * struct task* _switch_to (struct task* prev, struct task* next) | |
1802 | * a2 a2 a3 | |
1803 | */ | |
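/* The switch below, in outline (a sketch, not a line-by-line
 * translation of the assembly):
 *
 *	save extra user state of 'prev' (save_xtregs_user);
 *	prev->thread.ra = a0;  prev->thread.sp = a1;
 *	mask interrupts and enter the fixup critical section;
 *	exchange CPENABLE between prev's and next's thread_info;
 *	spill the live register windows (_spill_registers);
 *	exc_table[EXC_TABLE_KSTK] = next's pt_regs area;
 *	a0/a1 = next->thread.ra / next->thread.sp;
 *	restore extra user state of 'next' (load_xtregs_user);
 *	return prev;
 */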
1804 | ||
1805 | ENTRY(_switch_to) | |
1806 | ||
1807 | entry a1, 16 | |
1808 | ||
c658eac6 CZ |
1809 | mov a12, a2 # preserve 'prev' (a2) |
1810 | mov a13, a3 # and 'next' (a3) | |
5a0015d6 | 1811 | |
c658eac6 CZ |
1812 | l32i a4, a2, TASK_THREAD_INFO |
1813 | l32i a5, a3, TASK_THREAD_INFO | |
5a0015d6 | 1814 | |
c658eac6 | 1815 | save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER |
5a0015d6 | 1816 | |
c658eac6 CZ |
1817 | s32i a0, a12, THREAD_RA # save return address |
1818 | s32i a1, a12, THREAD_SP # save stack pointer | |
1819 | ||
1820 | /* Disable ints while we manipulate the stack pointer. */ | |
1821 | ||
1822 | movi a14, (1 << PS_EXCM_BIT) | LOCKLEVEL | |
bc5378fc MF |
1823 | xsr a14, ps |
1824 | rsr a3, excsave1 | |
5a0015d6 CZ |
1825 | rsync |
1826 | s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */ | |
1827 | ||
c658eac6 CZ |
1828 | /* Switch CPENABLE */ |
1829 | ||
1830 | #if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS) | |
1831 | l32i a3, a5, THREAD_CPENABLE | |
bc5378fc | 1832 | xsr a3, cpenable |
c658eac6 CZ |
1833 | s32i a3, a4, THREAD_CPENABLE |
1834 | #endif | |
1835 | ||
1836 | /* Flush register file. */ | |
1837 | ||
1838 | call0 _spill_registers # destroys a3, a4, and SAR | |
5a0015d6 CZ |
1839 | |
1840 | /* Set kernel stack (and leave critical section) | |
1841 | * Note: It's safe to set it here. The stack will not be overwritten | |
1842 | * because the kernel stack will only be loaded again after | |
1843 | * we return from kernel space. | |
1844 | */ | |
1845 | ||
bc5378fc | 1846 | rsr a3, excsave1 # exc_table |
c658eac6 CZ |
1847 | movi a6, 0 |
1848 | addi a7, a5, PT_REGS_OFFSET | |
1849 | s32i a6, a3, EXC_TABLE_FIXUP | |
1850 | s32i a7, a3, EXC_TABLE_KSTK | |
5a0015d6 | 1851 | |
c50842df | 1852 | /* restore context of the task 'next' */ |
5a0015d6 | 1853 | |
c658eac6 CZ |
1854 | l32i a0, a13, THREAD_RA # restore return address |
1855 | l32i a1, a13, THREAD_SP # restore stack pointer | |
1856 | ||
1857 | load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER | |
5a0015d6 | 1858 | |
bc5378fc | 1859 | wsr a14, ps |
c658eac6 | 1860 | mov a2, a12 # return 'prev' |
5a0015d6 CZ |
1861 | rsync |
1862 | ||
1863 | retw | |
1864 | ||
d1538c46 | 1865 | ENDPROC(_switch_to) |
5a0015d6 CZ |
1866 | |
1867 | ENTRY(ret_from_fork) | |
1868 | ||
1869 | /* void schedule_tail (struct task_struct *prev) | |
1870 | * Note: prev is still in a6 (return value from fake call4 frame) | |
1871 | */ | |
1872 | movi a4, schedule_tail | |
1873 | callx4 a4 | |
1874 | ||
fc4fb2ad CZ |
1875 | movi a4, do_syscall_trace_leave |
1876 | mov a6, a1 | |
5a0015d6 CZ |
1877 | callx4 a4 |
1878 | ||
1879 | j common_exception_return | |
1880 | ||
d1538c46 CZ |
1881 | ENDPROC(ret_from_fork) |
1882 | ||
3306a726 MF |
1883 | /* |
1884 | * Kernel thread creation helper | |
1885 | * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg | |
1886 | * left from _switch_to: a6 = prev | |
1887 | */ | |
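/* Equivalent flow, as a sketch:
 *
 *	schedule_tail(prev);			prev arrives in a6
 *	thread_fn(arg);				a2/a3 set up by copy_thread
 *	jump to common_exception_return if thread_fn returns
 */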
1888 | ENTRY(ret_from_kernel_thread) | |
1889 | ||
1890 | call4 schedule_tail | |
1891 | mov a6, a3 | |
1892 | callx4 a2 | |
f0a1bf08 | 1893 | j common_exception_return |
3306a726 MF |
1894 | |
1895 | ENDPROC(ret_from_kernel_thread) |