1 /*
2 * arch/xtensa/kernel/entry.S
3 *
4 * Low-level exception handling
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2004 - 2008 by Tensilica Inc.
11 *
12 * Chris Zankel <chris@zankel.net>
13 *
14 */
15
16 #include <linux/linkage.h>
17 #include <asm/asm-offsets.h>
18 #include <asm/processor.h>
19 #include <asm/coprocessor.h>
20 #include <asm/thread_info.h>
21 #include <asm/uaccess.h>
22 #include <asm/unistd.h>
23 #include <asm/ptrace.h>
24 #include <asm/current.h>
25 #include <asm/pgtable.h>
26 #include <asm/page.h>
27 #include <asm/signal.h>
28 #include <asm/tlbflush.h>
29 #include <variant/tie-asm.h>
30
31 /* Unimplemented features. */
32
33 #undef KERNEL_STACK_OVERFLOW_CHECK
34
35 /* Not well tested.
36 *
37 * - fast_coprocessor
38 */
39
40 /*
41 * Macro to find first bit set in WINDOWSTART from the left + 1
42 *
43 * 100....0 -> 1
44 * 010....0 -> 2
45 * 000....1 -> WSBITS
46 */
47
48 .macro ffs_ws bit mask
49
50 #if XCHAL_HAVE_NSA
51 nsau \bit, \mask # 32-WSBITS ... 31 (32 iff 0)
52 addi \bit, \bit, WSBITS - 32 + 1 # uppermost bit set -> return 1
53 #else
54 movi \bit, WSBITS
55 #if WSBITS > 16
56 _bltui \mask, 0x10000, 99f
57 addi \bit, \bit, -16
58 extui \mask, \mask, 16, 16
59 #endif
60 #if WSBITS > 8
61 99: _bltui \mask, 0x100, 99f
62 addi \bit, \bit, -8
63 srli \mask, \mask, 8
64 #endif
65 99: _bltui \mask, 0x10, 99f
66 addi \bit, \bit, -4
67 srli \mask, \mask, 4
68 99: _bltui \mask, 0x4, 99f
69 addi \bit, \bit, -2
70 srli \mask, \mask, 2
71 99: _bltui \mask, 0x2, 99f
72 addi \bit, \bit, -1
73 99:
74
75 #endif
76 .endm
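/* A rough C equivalent of the value ffs_ws computes, assuming the mask has
 * exactly one bit set within the low WSBITS bits (an illustrative sketch,
 * not kernel code):
 *
 *	static inline int ffs_ws(uint32_t mask)
 *	{
 *		int p = 31 - __builtin_clz(mask);	// bit index from the right
 *		return WSBITS - p;			// leftmost window bit -> 1
 *	}
 */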
77
78 /* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */
79
80 /*
81 * First-level exception handler for user exceptions.
82 * Save some special registers, extra states and all registers in the AR
83 * register file that were in use in the user task, and jump to the common
84 * exception code.
85 * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
86 * save them for kernel exceptions).
87 *
88 * Entry condition for user_exception:
89 *
90 * a0: trashed, original value saved on stack (PT_AREG0)
91 * a1: a1
92 * a2: new stack pointer, original value in depc
93 * a3: a3
94 * depc: a2, original value saved on stack (PT_DEPC)
95 * excsave1: dispatch table
96 *
97 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
98 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
99 *
100 * Entry condition for _user_exception:
101 *
102 * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
103 * excsave has been restored, and
104 * stack pointer (a1) has been set.
105 *
106 * Note: _user_exception might be at an odd address. Don't use call0..call12
107 */
108
109 ENTRY(user_exception)
110
111 /* Save a1, a2, a3, and set SP. */
112
113 rsr a0, depc
114 s32i a1, a2, PT_AREG1
115 s32i a0, a2, PT_AREG2
116 s32i a3, a2, PT_AREG3
117 mov a1, a2
118
119 .globl _user_exception
120 _user_exception:
121
122 /* Save SAR and turn off single stepping */
123
124 movi a2, 0
125 wsr a2, depc # terminate user stack trace with 0
126 rsr a3, sar
127 xsr a2, icountlevel
128 s32i a3, a1, PT_SAR
129 s32i a2, a1, PT_ICOUNTLEVEL
130
131 #if XCHAL_HAVE_THREADPTR
132 rur a2, threadptr
133 s32i a2, a1, PT_THREADPTR
134 #endif
135
136 /* Rotate ws so that the current windowbase is at bit0. */
137 /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
138
139 rsr a2, windowbase
140 rsr a3, windowstart
141 ssr a2
142 s32i a2, a1, PT_WINDOWBASE
143 s32i a3, a1, PT_WINDOWSTART
144 slli a2, a3, 32-WSBITS
145 src a2, a3, a2
146 srli a2, a2, 32-WSBITS
147 s32i a2, a1, PT_WMASK # needed for restoring registers
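/* Illustrative C sketch of the rotation just stored in PT_WMASK, assuming
 * 0 <= wb < WSBITS < 32 (names here are not kernel API):
 *
 *	wmask = ((ws >> wb) | (ws << (WSBITS - wb))) & ((1u << WSBITS) - 1);
 */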
148
149 /* Save only live registers. */
150
151 _bbsi.l a2, 1, 1f
152 s32i a4, a1, PT_AREG4
153 s32i a5, a1, PT_AREG5
154 s32i a6, a1, PT_AREG6
155 s32i a7, a1, PT_AREG7
156 _bbsi.l a2, 2, 1f
157 s32i a8, a1, PT_AREG8
158 s32i a9, a1, PT_AREG9
159 s32i a10, a1, PT_AREG10
160 s32i a11, a1, PT_AREG11
161 _bbsi.l a2, 3, 1f
162 s32i a12, a1, PT_AREG12
163 s32i a13, a1, PT_AREG13
164 s32i a14, a1, PT_AREG14
165 s32i a15, a1, PT_AREG15
166 _bnei a2, 1, 1f # only one valid frame?
167
168 /* Only one valid frame, skip saving regs. */
169
170 j 2f
171
172 /* Save the remaining registers.
173 * We have to save all registers up to the first '1' from
174 * the right, except the current frame (bit 0).
175 * Assume a2 is: 001001000110001
176 * All register frames starting from the top field to the marked '1'
177 * must be saved.
178 */
179
180 1: addi a3, a2, -1 # eliminate '1' in bit 0: yyyyxxww0
181 neg a3, a3 # yyyyxxww0 -> YYYYXXWW1+1
182 and a3, a3, a2 # max. only one bit is set
183
184 /* Find number of frames to save */
185
186 ffs_ws a0, a3 # number of frames to the '1' from left
187
188 /* Store information into WMASK:
189 * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
190 * bits 4...: number of valid 4-register frames
191 */
192
193 slli a3, a0, 4 # number of frames to save in bits 8..4
194 extui a2, a2, 0, 4 # mask for the first 16 registers
195 or a2, a3, a2
196 s32i a2, a1, PT_WMASK # needed when we restore the reg-file
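/* For the restore path, the word just stored decodes as, in C terms
 * (sketch only):
 *
 *	live_mask = wmask & 0xf;  // low 4 bits of the rotated WINDOWSTART
 *	                          // (bit 0 = current frame)
 *	nframes   = wmask >> 4;   // 4-register frames saved by the loop below
 */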
197
198 /* Save 4 registers at a time */
199
200 1: rotw -1
201 s32i a0, a5, PT_AREG_END - 16
202 s32i a1, a5, PT_AREG_END - 12
203 s32i a2, a5, PT_AREG_END - 8
204 s32i a3, a5, PT_AREG_END - 4
205 addi a0, a4, -1
206 addi a1, a5, -16
207 _bnez a0, 1b
208
209 /* WINDOWBASE still in SAR! */
210
211 rsr a2, sar # original WINDOWBASE
212 movi a3, 1
213 ssl a2
214 sll a3, a3
215 wsr a3, windowstart # set corresponding WINDOWSTART bit
216 wsr a2, windowbase # and WINDOWBASE
217 rsync
218
219 /* We are back to the original stack pointer (a1) */
220
221 2: /* Now, jump to the common exception handler. */
222
223 j common_exception
224
225 ENDPROC(user_exception)
226
227 /*
228 * First-level exception handler for kernel exceptions.
229 * Save special registers and the live window frame.
230 * Note: Even though we change the stack pointer, we don't have to do a
231 * MOVSP here, as we do that when we return from the exception.
232 * (See comment in the kernel exception exit code)
233 *
234 * Entry condition for kernel_exception:
235 *
236 * a0: trashed, original value saved on stack (PT_AREG0)
237 * a1: a1
238 * a2: new stack pointer, original in DEPC
239 * a3: a3
240 * depc: a2, original value saved on stack (PT_DEPC)
241 * excsave_1: dispatch table
242 *
243 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
244 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
245 *
246 * Entry condition for _kernel_exception:
247 *
248 * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
249 * excsave has been restored, and
250 * stack pointer (a1) has been set.
251 *
252 * Note: _kernel_exception might be at an odd address. Don't use call0..call12
253 */
254
255 ENTRY(kernel_exception)
256
257 /* Save a1, a2, a3, and set SP. */
258
259 rsr a0, depc # get a2
260 s32i a1, a2, PT_AREG1
261 s32i a0, a2, PT_AREG2
262 s32i a3, a2, PT_AREG3
263 mov a1, a2
264
265 .globl _kernel_exception
266 _kernel_exception:
267
268 /* Save SAR and turn off single stepping */
269
270 movi a2, 0
271 rsr a3, sar
272 xsr a2, icountlevel
273 s32i a3, a1, PT_SAR
274 s32i a2, a1, PT_ICOUNTLEVEL
275
276 /* Rotate ws so that the current windowbase is at bit0. */
277 /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
278
279 rsr a2, windowbase # don't need to save these, we only
280 rsr a3, windowstart # need shifted windowstart: windowmask
281 ssr a2
282 slli a2, a3, 32-WSBITS
283 src a2, a3, a2
284 srli a2, a2, 32-WSBITS
285 s32i a2, a1, PT_WMASK # needed for kernel_exception_exit
286
287 /* Save only the live window-frame */
288
289 _bbsi.l a2, 1, 1f
290 s32i a4, a1, PT_AREG4
291 s32i a5, a1, PT_AREG5
292 s32i a6, a1, PT_AREG6
293 s32i a7, a1, PT_AREG7
294 _bbsi.l a2, 2, 1f
295 s32i a8, a1, PT_AREG8
296 s32i a9, a1, PT_AREG9
297 s32i a10, a1, PT_AREG10
298 s32i a11, a1, PT_AREG11
299 _bbsi.l a2, 3, 1f
300 s32i a12, a1, PT_AREG12
301 s32i a13, a1, PT_AREG13
302 s32i a14, a1, PT_AREG14
303 s32i a15, a1, PT_AREG15
304
305 _bnei a2, 1, 1f
306
307 /* Copy the spill slots of a0 and a1 to imitate a movsp,
308 * in order to keep the exception stack continuous.
309 */
310 l32i a3, a1, PT_SIZE
311 l32i a0, a1, PT_SIZE + 4
312 s32e a3, a1, -16
313 s32e a0, a1, -12
314 1:
315 l32i a0, a1, PT_AREG0 # restore saved a0
316 wsr a0, depc
317
318 #ifdef KERNEL_STACK_OVERFLOW_CHECK
319
320 /* Stack overflow check, for debugging */
321 extui a2, a1, TASK_SIZE_BITS,XX
322 movi a3, SIZE??
323 _bge a2, a3, out_of_stack_panic
324
325 #endif
326
327 /*
328 * This is the common exception handler.
329 * We get here from the user exception handler or simply by falling through
330 * from the kernel exception handler.
331 * Save the remaining special registers, switch to kernel mode, and jump
332 * to the second-level exception handler.
333 *
334 */
335
336 common_exception:
337
338 /* Save some registers, disable loops and clear the syscall flag. */
339
340 rsr a2, debugcause
341 rsr a3, epc1
342 s32i a2, a1, PT_DEBUGCAUSE
343 s32i a3, a1, PT_PC
344
345 movi a2, -1
346 rsr a3, excvaddr
347 s32i a2, a1, PT_SYSCALL
348 movi a2, 0
349 s32i a3, a1, PT_EXCVADDR
350 xsr a2, lcount
351 s32i a2, a1, PT_LCOUNT
352
353 /* It is now safe to restore the EXC_TABLE_FIXUP variable. */
354
355 rsr a0, exccause
356 movi a3, 0
357 rsr a2, excsave1
358 s32i a0, a1, PT_EXCCAUSE
359 s32i a3, a2, EXC_TABLE_FIXUP
360
361 /* All unrecoverable states are saved on the stack now, and a1 is valid.
362 * Now we can allow exceptions again. In case we've got an interrupt,
363 * PS.INTLEVEL is set to LOCKLEVEL, disabling further interrupts;
364 * otherwise it's left unchanged.
365 *
366 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
367 */
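/* In C terms, the PS update below is roughly (sketch only):
 *
 *	intlevel = (exccause == EXCCAUSE_LEVEL1_INTERRUPT)
 *		   ? LOCKLEVEL
 *		   : (ps >> PS_INTLEVEL_SHIFT) & ((1 << PS_INTLEVEL_WIDTH) - 1);
 *	new_ps	 = intlevel | (1 << PS_WOE_BIT);  // EXCM/UM/RING/OWB all 0
 */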
368
369 rsr a3, ps
370 addi a0, a0, -EXCCAUSE_LEVEL1_INTERRUPT
371 movi a2, LOCKLEVEL
372 extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
373 # a3 = PS.INTLEVEL
374 moveqz a3, a2, a0 # a3 = LOCKLEVEL iff interrupt
375 movi a2, 1 << PS_WOE_BIT
376 or a3, a3, a2
377 rsr a2, exccause
378 /* restore return address (or 0 if return to userspace) */
379 rsr a0, depc
380 xsr a3, ps
381
382 s32i a3, a1, PT_PS # save ps
383
384 /* Save lbeg, lend */
385
386 rsr a4, lbeg
387 rsr a3, lend
388 s32i a4, a1, PT_LBEG
389 s32i a3, a1, PT_LEND
390
391 /* Save SCOMPARE1 */
392
393 #if XCHAL_HAVE_S32C1I
394 rsr a3, scompare1
395 s32i a3, a1, PT_SCOMPARE1
396 #endif
397
398 /* Save optional registers. */
399
400 save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT
401
402 #ifdef CONFIG_TRACE_IRQFLAGS
403 l32i a4, a1, PT_DEPC
404 /* Double exception means we came here with an exception
405 * while PS.EXCM was set, i.e. interrupts disabled.
406 */
407 bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
408 bnei a2, EXCCAUSE_LEVEL1_INTERRUPT, 1f
409 /* Coming here with an interrupt means interrupts were enabled
410 * and we've just disabled them.
411 */
412 movi a4, trace_hardirqs_off
413 callx4 a4
414 1:
415 #endif
416
417 /* Go to second-level dispatcher. Set up parameters to pass to the
418 * exception handler and call the exception handler.
419 */
420
421 rsr a4, excsave1
422 mov a6, a1 # pass stack frame
423 mov a7, a2 # pass EXCCAUSE
424 addx4 a4, a2, a4
425 l32i a4, a4, EXC_TABLE_DEFAULT # load handler
426
427 /* Call the second-level handler */
428
429 callx4 a4
430
431 /* Jump here for exception exit */
432 .global common_exception_return
433 common_exception_return:
434
435 1:
436 rsil a2, LOCKLEVEL
437
438 /* Jump if we are returning from kernel exceptions. */
439
440 l32i a3, a1, PT_PS
441 GET_THREAD_INFO(a2, a1)
442 l32i a4, a2, TI_FLAGS
443 _bbci.l a3, PS_UM_BIT, 6f
444
445 /* Specific to a user exception exit:
446 * We need to check some flags for signal handling and rescheduling,
447 * and have to restore WB and WS, extra states, and all registers
448 * in the register file that were in use in the user task.
449 * Note that we don't disable interrupts here.
450 */
451
452 _bbsi.l a4, TIF_NEED_RESCHED, 3f
453 _bbsi.l a4, TIF_NOTIFY_RESUME, 2f
454 _bbci.l a4, TIF_SIGPENDING, 5f
455
456 2: l32i a4, a1, PT_DEPC
457 bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
458
459 /* Call do_notify_resume() */
460
461 rsil a2, 0
462 movi a4, do_notify_resume # int do_notify_resume(struct pt_regs*)
463 mov a6, a1
464 callx4 a4
465 j 1b
466
467 3: /* Reschedule */
468
469 rsil a2, 0
470 movi a4, schedule # void schedule (void)
471 callx4 a4
472 j 1b
473
474 #ifdef CONFIG_PREEMPT
475 6:
476 _bbci.l a4, TIF_NEED_RESCHED, 4f
477
478 /* Check current_thread_info->preempt_count */
479
480 l32i a4, a2, TI_PRE_COUNT
481 bnez a4, 4f
482 movi a4, preempt_schedule_irq
483 callx4 a4
484 j 1b
485 #endif
486
487 5:
488 #ifdef CONFIG_DEBUG_TLB_SANITY
489 l32i a4, a1, PT_DEPC
490 bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
491 movi a4, check_tlb_sanity
492 callx4 a4
493 #endif
494 6:
495 4:
496 #ifdef CONFIG_TRACE_IRQFLAGS
497 l32i a4, a1, PT_DEPC
498 /* Double exception means we came here with an exception
499 * while PS.EXCM was set, i.e. interrupts disabled.
500 */
501 bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
502 l32i a4, a1, PT_EXCCAUSE
503 bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f
504 /* Coming here with an interrupt means interrupts were enabled
505 * and we'll re-enable them on return.
506 */
507 movi a4, trace_hardirqs_on
508 callx4 a4
509 1:
510 #endif
511 /* Restore optional registers. */
512
513 load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
514
515 /* Restore SCOMPARE1 */
516
517 #if XCHAL_HAVE_S32C1I
518 l32i a2, a1, PT_SCOMPARE1
519 wsr a2, scompare1
520 #endif
521 wsr a3, ps /* disable interrupts */
522
523 _bbci.l a3, PS_UM_BIT, kernel_exception_exit
524
525 user_exception_exit:
526
527 /* Restore the state of the task and return from the exception. */
528
529 /* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */
530
531 l32i a2, a1, PT_WINDOWBASE
532 l32i a3, a1, PT_WINDOWSTART
533 wsr a1, depc # use DEPC as temp storage
534 wsr a3, windowstart # restore WINDOWSTART
535 ssr a2 # preserve user's WB in the SAR
536 wsr a2, windowbase # switch to user's saved WB
537 rsync
538 rsr a1, depc # restore stack pointer
539 l32i a2, a1, PT_WMASK # register frames saved (in bits 4...9)
540 rotw -1 # we restore a4..a7
541 _bltui a6, 16, 1f # only have to restore current window?
542
543 /* The working registers are a0 and a3. We are restoring to
544 * a4..a7. Be careful not to destroy what we have just restored.
545 * Note: wmask has the format YYYYM:
546 * Y: number of registers saved in groups of 4
547 * M: 4 bit mask of first 16 registers
548 */
549
550 mov a2, a6
551 mov a3, a5
552
553 2: rotw -1 # a0..a3 become a4..a7
554 addi a3, a7, -4*4 # next iteration
555 addi a2, a6, -16 # decrementing Y in WMASK
556 l32i a4, a3, PT_AREG_END + 0
557 l32i a5, a3, PT_AREG_END + 4
558 l32i a6, a3, PT_AREG_END + 8
559 l32i a7, a3, PT_AREG_END + 12
560 _bgeui a2, 16, 2b
561
562 /* Clear unrestored registers (don't leak anything to user-land) */
563
564 1: rsr a0, windowbase
565 rsr a3, sar
566 sub a3, a0, a3
567 beqz a3, 2f
568 extui a3, a3, 0, WBBITS
569
570 1: rotw -1
571 addi a3, a7, -1
572 movi a4, 0
573 movi a5, 0
574 movi a6, 0
575 movi a7, 0
576 bgei a3, 1, 1b
577
578 /* We are back where we were when we started.
579 * Note: a2 still contains WMASK (if we've returned to the original
580 * frame where we had loaded a2), or at least the lower 4 bits
581 * (if we have restored WSBITS-1 frames).
582 */
583
584 2:
585 #if XCHAL_HAVE_THREADPTR
586 l32i a3, a1, PT_THREADPTR
587 wur a3, threadptr
588 #endif
589
590 j common_exception_exit
591
592 /* This is the kernel exception exit.
593 * We avoided doing a MOVSP when we entered the exception, but we
594 * have to do it here.
595 */
596
597 kernel_exception_exit:
598
599 /* Check if we have to do a movsp.
600 *
601 * We only have to do a movsp if the previous window-frame has
602 * been spilled to the *temporary* exception stack instead of the
603 * task's stack. This is the case if the corresponding bit in
604 * WINDOWSTART for the previous window-frame was set before
605 * (not spilled) but is zero now (spilled).
606 * If this bit is zero, all other bits except the one for the
607 * current window frame are also zero. So, we can use a simple test:
608 * 'and' WINDOWSTART and WINDOWSTART-1:
609 *
610 * (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
611 *
612 * The result is zero only if one bit was set.
613 *
614 * (Note: We might have gone through several task switches before
615 * we come back to the current task, so WINDOWBASE might be
616 * different from the time the exception occurred.)
617 */
618
619 /* Test WINDOWSTART before and after the exception.
620 * We actually have WMASK, so we only have to test if it is 1 or not.
621 */
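/* As a C sketch, the two tests below amount to (names illustrative):
 *
 *	if (pt_wmask == 1)		// frame spilled before the exception
 *		goto common_exception_exit;
 *	ws = read_windowstart();
 *	if (ws & (ws - 1))		// previous frame still live, not spilled
 *		goto common_exception_exit;
 *	copy_spill_slots();		// the deferred movsp done below
 */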
622
623 l32i a2, a1, PT_WMASK
624 _beqi a2, 1, common_exception_exit # Spilled before exception, jump
625
626 /* Test WINDOWSTART now. If spilled, do the movsp */
627
628 rsr a3, windowstart
629 addi a0, a3, -1
630 and a3, a3, a0
631 _bnez a3, common_exception_exit
632
633 /* Do a movsp (we returned from a call4, so we have at least a0..a7) */
634
635 addi a0, a1, -16
636 l32i a3, a0, 0
637 l32i a4, a0, 4
638 s32i a3, a1, PT_SIZE+0
639 s32i a4, a1, PT_SIZE+4
640 l32i a3, a0, 8
641 l32i a4, a0, 12
642 s32i a3, a1, PT_SIZE+8
643 s32i a4, a1, PT_SIZE+12
644
645 /* Common exception exit.
646 * We restore the special register and the current window frame, and
647 * return from the exception.
648 *
649 * Note: We expect a2 to hold PT_WMASK
650 */
651
652 common_exception_exit:
653
654 /* Restore address registers. */
655
656 _bbsi.l a2, 1, 1f
657 l32i a4, a1, PT_AREG4
658 l32i a5, a1, PT_AREG5
659 l32i a6, a1, PT_AREG6
660 l32i a7, a1, PT_AREG7
661 _bbsi.l a2, 2, 1f
662 l32i a8, a1, PT_AREG8
663 l32i a9, a1, PT_AREG9
664 l32i a10, a1, PT_AREG10
665 l32i a11, a1, PT_AREG11
666 _bbsi.l a2, 3, 1f
667 l32i a12, a1, PT_AREG12
668 l32i a13, a1, PT_AREG13
669 l32i a14, a1, PT_AREG14
670 l32i a15, a1, PT_AREG15
671
672 /* Restore PC, SAR */
673
674 1: l32i a2, a1, PT_PC
675 l32i a3, a1, PT_SAR
676 wsr a2, epc1
677 wsr a3, sar
678
679 /* Restore LBEG, LEND, LCOUNT */
680
681 l32i a2, a1, PT_LBEG
682 l32i a3, a1, PT_LEND
683 wsr a2, lbeg
684 l32i a2, a1, PT_LCOUNT
685 wsr a3, lend
686 wsr a2, lcount
687
688 /* We control single stepping through the ICOUNTLEVEL register. */
689
690 l32i a2, a1, PT_ICOUNTLEVEL
691 movi a3, -2
692 wsr a2, icountlevel
693 wsr a3, icount
694
695 /* Check if it was a double exception. */
696
697 l32i a0, a1, PT_DEPC
698 l32i a3, a1, PT_AREG3
699 l32i a2, a1, PT_AREG2
700 _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
701
702 /* Restore a0...a3 and return */
703
704 l32i a0, a1, PT_AREG0
705 l32i a1, a1, PT_AREG1
706 rfe
707
708 1: wsr a0, depc
709 l32i a0, a1, PT_AREG0
710 l32i a1, a1, PT_AREG1
711 rfde
712
713 ENDPROC(kernel_exception)
714
715 /*
716 * Debug exception handler.
717 *
718 * Currently, we don't support KGDB, so only user applications can be debugged.
719 *
720 * When we get here, a0 is trashed and saved to excsave[debuglevel]
721 */
722
723 ENTRY(debug_exception)
724
725 rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL
726 bbsi.l a0, PS_EXCM_BIT, 1f # exception mode
727
728 /* Set EPC1 and EXCCAUSE */
729
730 wsr a2, depc # save a2 temporarily
731 rsr a2, SREG_EPC + XCHAL_DEBUGLEVEL
732 wsr a2, epc1
733
734 movi a2, EXCCAUSE_MAPPED_DEBUG
735 wsr a2, exccause
736
737 /* Restore PS to the value before the debug exc but with PS.EXCM set.*/
738
739 movi a2, 1 << PS_EXCM_BIT
740 or a2, a0, a2
741 movi a0, debug_exception # restore a0, reset debug jump vector
742 wsr a2, ps
743 xsr a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
744
745 /* Switch to kernel/user stack, restore jump vector, and save a0 */
746
747 bbsi.l a2, PS_UM_BIT, 2f # jump if user mode
748
749 addi a2, a1, -16-PT_SIZE # assume kernel stack
750 s32i a0, a2, PT_AREG0
751 movi a0, 0
752 s32i a1, a2, PT_AREG1
753 s32i a0, a2, PT_DEPC # mark it as a regular exception
754 xsr a0, depc
755 s32i a3, a2, PT_AREG3
756 s32i a0, a2, PT_AREG2
757 mov a1, a2
758 j _kernel_exception
759
760 2: rsr a2, excsave1
761 l32i a2, a2, EXC_TABLE_KSTK # load kernel stack pointer
762 s32i a0, a2, PT_AREG0
763 movi a0, 0
764 s32i a1, a2, PT_AREG1
765 s32i a0, a2, PT_DEPC
766 xsr a0, depc
767 s32i a3, a2, PT_AREG3
768 s32i a0, a2, PT_AREG2
769 mov a1, a2
770 j _user_exception
771
772 /* Debug exception while in exception mode. */
773 1: j 1b // FIXME!!
774
775 ENDPROC(debug_exception)
776
777 /*
778 * We get here in case of an unrecoverable exception.
779 * The only thing we can do is to be nice and print a panic message.
780 * We only produce a single stack frame for panic, so ???
781 *
782 *
783 * Entry conditions:
784 *
785 * - a0 contains the caller address; original value saved in excsave1.
786 * - the original a0 contains a valid return address (backtrace) or 0.
787 * - a2 contains a valid stackpointer
788 *
789 * Notes:
790 *
791 * - If the stack pointer could be invalid, the caller has to setup a
792 * dummy stack pointer (e.g. the stack of the init_task)
793 *
794 * - If the return address could be invalid, the caller has to set it
795 * to 0, so the backtrace would stop.
796 *
797 */
798 .align 4
799 unrecoverable_text:
800 .ascii "Unrecoverable error in exception handler\0"
801
802 ENTRY(unrecoverable_exception)
803
804 movi a0, 1
805 movi a1, 0
806
807 wsr a0, windowstart
808 wsr a1, windowbase
809 rsync
810
811 movi a1, (1 << PS_WOE_BIT) | LOCKLEVEL
812 wsr a1, ps
813 rsync
814
815 movi a1, init_task
816 movi a0, 0
817 addi a1, a1, PT_REGS_OFFSET
818
819 movi a4, panic
820 movi a6, unrecoverable_text
821
822 callx4 a4
823
824 1: j 1b
825
826 ENDPROC(unrecoverable_exception)
827
828 /* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */
829
830 /*
831 * Fast-handler for alloca exceptions
832 *
833 * The ALLOCA handler is entered when user code executes the MOVSP
834 * instruction and the caller's frame is not in the register file.
835 *
836 * This algorithm was taken from Ross Morley's RTOS Porting Layer:
837 *
838 * /home/ross/rtos/porting/XtensaRTOS-PortingLayer-20090507/xtensa_vectors.S
839 *
840 * It leverages the existing window spill/fill routines and their support for
841 * double exceptions. The 'movsp' instruction will only cause an exception if
842 * the next window needs to be loaded. In fact this ALLOCA exception may be
843 * replaced at some point by changing the hardware to do an underflow exception
844 * of the proper size instead.
845 *
846 * This algorithm simply backs out the register changes started by the user
847 * exception handler, making it appear that we have started a window underflow
848 * by rotating the window back and then setting the old window base (OWB) in
849 * the 'ps' register to the rolled-back window base. The 'movsp' instruction
850 * will then be re-executed, and this time, since the next window frame is in
851 * the active AR registers, it won't cause an exception.
852 *
853 * If the WindowUnderflow code gets a TLB miss, the page will get mapped
854 * and the partial WindowUnderflow will be handled in the double exception
855 * handler.
856 *
857 * Entry condition:
858 *
859 * a0: trashed, original value saved on stack (PT_AREG0)
860 * a1: a1
861 * a2: new stack pointer, original in DEPC
862 * a3: a3
863 * depc: a2, original value saved on stack (PT_DEPC)
864 * excsave_1: dispatch table
865 *
866 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
867 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
868 */
869
870 ENTRY(fast_alloca)
871 rsr a0, windowbase
872 rotw -1
873 rsr a2, ps
874 extui a3, a2, PS_OWB_SHIFT, PS_OWB_WIDTH
875 xor a3, a3, a4
876 l32i a4, a6, PT_AREG0
877 l32i a1, a6, PT_DEPC
878 rsr a6, depc
879 wsr a1, depc
880 slli a3, a3, PS_OWB_SHIFT
881 xor a2, a2, a3
882 wsr a2, ps
883 rsync
884
885 _bbci.l a4, 31, 4f
886 rotw -1
887 _bbci.l a8, 30, 8f
888 rotw -1
889 j _WindowUnderflow12
890 8: j _WindowUnderflow8
891 4: j _WindowUnderflow4
892 ENDPROC(fast_alloca)
893
894 /*
895 * fast system calls.
896 *
897 * WARNING: The kernel doesn't save the entire user context before
898 * handling a fast system call. These functions are small and short,
899 * usually offering some functionality not available to user tasks.
900 *
901 * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
902 *
903 * Entry condition:
904 *
905 * a0: trashed, original value saved on stack (PT_AREG0)
906 * a1: a1
907 * a2: new stack pointer, original in DEPC
908 * a3: a3
909 * depc: a2, original value saved on stack (PT_DEPC)
910 * excsave_1: dispatch table
911 */
912
913 ENTRY(fast_syscall_kernel)
914
915 /* Skip syscall. */
916
917 rsr a0, epc1
918 addi a0, a0, 3
919 wsr a0, epc1
920
921 l32i a0, a2, PT_DEPC
922 bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
923
924 rsr a0, depc # get syscall-nr
925 _beqz a0, fast_syscall_spill_registers
926 _beqi a0, __NR_xtensa, fast_syscall_xtensa
927
928 j kernel_exception
929
930 ENDPROC(fast_syscall_kernel)
931
932 ENTRY(fast_syscall_user)
933
934 /* Skip syscall. */
935
936 rsr a0, epc1
937 addi a0, a0, 3
938 wsr a0, epc1
939
940 l32i a0, a2, PT_DEPC
941 bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
942
943 rsr a0, depc # get syscall-nr
944 _beqz a0, fast_syscall_spill_registers
945 _beqi a0, __NR_xtensa, fast_syscall_xtensa
946
947 j user_exception
948
949 ENDPROC(fast_syscall_user)
950
951 ENTRY(fast_syscall_unrecoverable)
952
953 /* Restore all states. */
954
955 l32i a0, a2, PT_AREG0 # restore a0
956 xsr a2, depc # restore a2, depc
957
958 wsr a0, excsave1
959 movi a0, unrecoverable_exception
960 callx0 a0
961
962 ENDPROC(fast_syscall_unrecoverable)
963
964 /*
965 * sysxtensa syscall handler
966 *
967 * int sysxtensa (SYS_XTENSA_ATOMIC_SET, ptr, val, unused);
968 * int sysxtensa (SYS_XTENSA_ATOMIC_ADD, ptr, val, unused);
969 * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val, unused);
970 * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
971 * a2 a6 a3 a4 a5
972 *
973 * Entry condition:
974 *
975 * a0: a2 (syscall-nr), original value saved on stack (PT_AREG0)
976 * a1: a1
977 * a2: new stack pointer, original in a0 and DEPC
978 * a3: a3
979 * a4..a15: unchanged
980 * depc: a2, original value saved on stack (PT_DEPC)
981 * excsave_1: dispatch table
982 *
983 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
984 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
985 *
986 * Note: we don't have to save a2; a2 holds the return value
987 *
988 * We use the two macros TRY and CATCH:
989 *
990 * TRY adds an entry to the __ex_table fixup table for the immediately
991 * following instruction.
992 *
993 * CATCH catches any exception that occurred at one of the preceding TRY
994 * statements and continues from there
995 *
996 * Usage: TRY l32i a0, a1, 0
997 * <other code>
998 * done: rfe
999 * CATCH <set return code>
1000 * j done
1001 */
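/* In C, the four operations behave roughly like this (user access and
 * fault fixup elided; an illustrative sketch, not the implementation):
 *
 *	switch (op) {
 *	case SYS_XTENSA_ATOMIC_SET:     old = *ptr; *ptr = val;       return old;
 *	case SYS_XTENSA_ATOMIC_ADD:
 *	case SYS_XTENSA_ATOMIC_EXG_ADD: old = *ptr; *ptr = old + val; return old;
 *	case SYS_XTENSA_ATOMIC_CMP_SWP: if (*ptr != oldval) return 0;
 *	                                *ptr = newval;                return 1;
 *	default:                        return -EINVAL;
 *	}
 */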
1002
1003 #ifdef CONFIG_FAST_SYSCALL_XTENSA
1004
1005 #define TRY \
1006 .section __ex_table, "a"; \
1007 .word 66f, 67f; \
1008 .text; \
1009 66:
1010
1011 #define CATCH \
1012 67:
1013
1014 ENTRY(fast_syscall_xtensa)
1015
1016 s32i a7, a2, PT_AREG7 # we need an additional register
1017 movi a7, 4 # sizeof(unsigned int)
1018 access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp
1019
1020 _bgeui a6, SYS_XTENSA_COUNT, .Lill
1021 _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp
1022
1023 /* Fall through for ATOMIC_CMP_SWP. */
1024
1025 .Lswp: /* Atomic compare and swap */
1026
1027 TRY l32i a0, a3, 0 # read old value
1028 bne a0, a4, 1f # same as old value? jump
1029 TRY s32i a5, a3, 0 # different, modify value
1030 l32i a7, a2, PT_AREG7 # restore a7
1031 l32i a0, a2, PT_AREG0 # restore a0
1032 movi a2, 1 # and return 1
1033 rfe
1034
1035 1: l32i a7, a2, PT_AREG7 # restore a7
1036 l32i a0, a2, PT_AREG0 # restore a0
1037 movi a2, 0 # return 0
1038 rfe
1039
1040 .Lnswp: /* Atomic set, add, and exg_add. */
1041
1042 TRY l32i a7, a3, 0 # orig
1043 addi a6, a6, -SYS_XTENSA_ATOMIC_SET
1044 add a0, a4, a7 # + arg
1045 moveqz a0, a4, a6 # set
1046 addi a6, a6, SYS_XTENSA_ATOMIC_SET
1047 TRY s32i a0, a3, 0 # write new value
1048
1049 mov a0, a2
1050 mov a2, a7
1051 l32i a7, a0, PT_AREG7 # restore a7
1052 l32i a0, a0, PT_AREG0 # restore a0
1053 rfe
1054
1055 CATCH
1056 .Leac: l32i a7, a2, PT_AREG7 # restore a7
1057 l32i a0, a2, PT_AREG0 # restore a0
1058 movi a2, -EFAULT
1059 rfe
1060
1061 .Lill: l32i a7, a2, PT_AREG7 # restore a7
1062 l32i a0, a2, PT_AREG0 # restore a0
1063 movi a2, -EINVAL
1064 rfe
1065
1066 ENDPROC(fast_syscall_xtensa)
1067
1068 #else /* CONFIG_FAST_SYSCALL_XTENSA */
1069
1070 ENTRY(fast_syscall_xtensa)
1071
1072 l32i a0, a2, PT_AREG0 # restore a0
1073 movi a2, -ENOSYS
1074 rfe
1075
1076 ENDPROC(fast_syscall_xtensa)
1077
1078 #endif /* CONFIG_FAST_SYSCALL_XTENSA */
1079
1080
1081 /* fast_syscall_spill_registers.
1082 *
1083 * Entry condition:
1084 *
1085 * a0: trashed, original value saved on stack (PT_AREG0)
1086 * a1: a1
1087 * a2: new stack pointer, original in DEPC
1088 * a3: a3
1089 * depc: a2, original value saved on stack (PT_DEPC)
1090 * excsave_1: dispatch table
1091 *
1092 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
1093 */
1094
1095 #ifdef CONFIG_FAST_SYSCALL_SPILL_REGISTERS
1096
1097 ENTRY(fast_syscall_spill_registers)
1098
1099 /* Register a FIXUP handler (pass current wb as a parameter) */
1100
1101 xsr a3, excsave1
1102 movi a0, fast_syscall_spill_registers_fixup
1103 s32i a0, a3, EXC_TABLE_FIXUP
1104 rsr a0, windowbase
1105 s32i a0, a3, EXC_TABLE_PARAM
1106 xsr a3, excsave1 # restore a3 and excsave_1
1107
1108 /* Save a3, a4 and SAR on stack. */
1109
1110 rsr a0, sar
1111 s32i a3, a2, PT_AREG3
1112 s32i a0, a2, PT_SAR
1113
1114 /* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */
1115
1116 s32i a4, a2, PT_AREG4
1117 s32i a7, a2, PT_AREG7
1118 s32i a8, a2, PT_AREG8
1119 s32i a11, a2, PT_AREG11
1120 s32i a12, a2, PT_AREG12
1121 s32i a15, a2, PT_AREG15
1122
1123 /*
1124 * Rotate ws so that the current windowbase is at bit 0.
1125 * Assume ws = xxxwww1yy (www1 current window frame).
1126 * Rotate ws right so that a4 = yyxxxwww1.
1127 */
1128
1129 rsr a0, windowbase
1130 rsr a3, windowstart # a3 = xxxwww1yy
1131 ssr a0 # holds WB
1132 slli a0, a3, WSBITS
1133 or a3, a3, a0 # a3 = xxxwww1yyxxxwww1yy
1134 srl a3, a3 # a3 = 00xxxwww1yyxxxwww1
1135
1136 /* We are done if there are no frames other than the current one. */
1137
1138 extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww
1139 movi a0, (1 << (WSBITS-1))
1140 _beqz a3, .Lnospill # only one active frame? jump
1141
1142 /* We want 1 at the top, so that we return to the current windowbase */
1143
1144 or a3, a3, a0 # 1yyxxxwww
1145
1146 /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
1147
1148 wsr a3, windowstart # save shifted windowstart
1149 neg a0, a3
1150 and a3, a0, a3 # first bit set from right: 000010000
1151
1152 ffs_ws a0, a3 # a0: shifts to skip empty frames
1153 movi a3, WSBITS
1154 sub a0, a3, a0 # WSBITS-a0:number of 0-bits from right
1155 ssr a0 # save in SAR for later.
1156
1157 rsr a3, windowbase
1158 add a3, a3, a0
1159 wsr a3, windowbase
1160 rsync
1161
1162 rsr a3, windowstart
1163 srl a3, a3 # shift windowstart
1164
1165 /* WB is now just one frame below the oldest frame in the register
1166 window. WS is shifted so the oldest frame is in bit 0, thus, WB
1167 and WS differ by one 4-register frame. */
1168
1169 /* Save frames. Depending on what call was used (call4, call8, call12),
1170 * we have to save 4, 8, or 12 registers.
1171 */
1172
1173
1174 .Lloop: _bbsi.l a3, 1, .Lc4
1175 _bbci.l a3, 2, .Lc12
1176
1177 .Lc8: s32e a4, a13, -16
1178 l32e a4, a5, -12
1179 s32e a8, a4, -32
1180 s32e a5, a13, -12
1181 s32e a6, a13, -8
1182 s32e a7, a13, -4
1183 s32e a9, a4, -28
1184 s32e a10, a4, -24
1185 s32e a11, a4, -20
1186 srli a11, a3, 2 # shift windowstart by 2
1187 rotw 2
1188 _bnei a3, 1, .Lloop
1189 j .Lexit
1190
1191 .Lc4: s32e a4, a9, -16
1192 s32e a5, a9, -12
1193 s32e a6, a9, -8
1194 s32e a7, a9, -4
1195
1196 srli a7, a3, 1
1197 rotw 1
1198 _bnei a3, 1, .Lloop
1199 j .Lexit
1200
1201 .Lc12: _bbci.l a3, 3, .Linvalid_mask # bit 3 shouldn't be zero!
1202
1203 /* 12-register frame (call12) */
1204
1205 l32e a0, a5, -12
1206 s32e a8, a0, -48
1207 mov a8, a0
1208
1209 s32e a9, a8, -44
1210 s32e a10, a8, -40
1211 s32e a11, a8, -36
1212 s32e a12, a8, -32
1213 s32e a13, a8, -28
1214 s32e a14, a8, -24
1215 s32e a15, a8, -20
1216 srli a15, a3, 3
1217
1218 /* The stack pointer for a4..a7 is out of reach, so we rotate the
1219 * window, grab the stack pointer, and rotate back.
1220 * Alternatively, we could also use the following approach, but that
1221 * makes the fixup routine much more complicated:
1222 * rotw 1
1223 * s32e a0, a13, -16
1224 * ...
1225 * rotw 2
1226 */
1227
1228 rotw 1
1229 mov a4, a13
1230 rotw -1
1231
1232 s32e a4, a8, -16
1233 s32e a5, a8, -12
1234 s32e a6, a8, -8
1235 s32e a7, a8, -4
1236
1237 rotw 3
1238
1239 _beqi a3, 1, .Lexit
1240 j .Lloop
1241
1242 .Lexit:
1243
1244 /* Done. Do the final rotation and set WS */
1245
1246 rotw 1
1247 rsr a3, windowbase
1248 ssl a3
1249 movi a3, 1
1250 sll a3, a3
1251 wsr a3, windowstart
1252 .Lnospill:
1253
1254 /* Advance PC, restore registers and SAR, and return from exception. */
1255
1256 l32i a3, a2, PT_SAR
1257 l32i a0, a2, PT_AREG0
1258 wsr a3, sar
1259 l32i a3, a2, PT_AREG3
1260
1261 /* Restore clobbered registers. */
1262
1263 l32i a4, a2, PT_AREG4
1264 l32i a7, a2, PT_AREG7
1265 l32i a8, a2, PT_AREG8
1266 l32i a11, a2, PT_AREG11
1267 l32i a12, a2, PT_AREG12
1268 l32i a15, a2, PT_AREG15
1269
1270 movi a2, 0
1271 rfe
1272
1273 .Linvalid_mask:
1274
1275 /* We get here because of an unrecoverable error in the window
1276 * registers, so set up a dummy frame and kill the user application.
1277 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
1278 */
1279
1280 movi a0, 1
1281 movi a1, 0
1282
1283 wsr a0, windowstart
1284 wsr a1, windowbase
1285 rsync
1286
1287 movi a0, 0
1288
1289 rsr a3, excsave1
1290 l32i a1, a3, EXC_TABLE_KSTK
1291
1292 movi a4, (1 << PS_WOE_BIT) | LOCKLEVEL
1293 wsr a4, ps
1294 rsync
1295
1296 movi a6, SIGSEGV
1297 movi a4, do_exit
1298 callx4 a4
1299
1300 /* shouldn't return, so panic */
1301
1302 wsr a0, excsave1
1303 movi a0, unrecoverable_exception
1304 callx0 a0 # should not return
1305 1: j 1b
1306
1307
1308 ENDPROC(fast_syscall_spill_registers)
1309
1310 /* Fixup handler.
1311 *
1312 * We get here if the spill routine causes an exception, e.g. tlb miss.
1313 * We basically restore WINDOWBASE and WINDOWSTART to the condition when
1314 * we entered the spill routine and jump to the user exception handler.
1315 *
1316 * Note that we only need to restore the bits in windowstart that have not
1317 * been spilled yet by the _spill_register routine. Luckily, a3 contains a
1318 * rotated windowstart with only those bits set for frames that haven't been
1319 * spilled yet. Because a3 is rotated such that bit 0 represents the register
1320 * frame for the current windowbase - 1, we need to rotate a3 left by the
1321 * value of the current windowbase + 1 and move it to windowstart.
1322 *
1323 * a0: value of depc, original value in depc
1324 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
1325 * a3: exctable, original value in excsave1
1326 */
1327
1328 ENTRY(fast_syscall_spill_registers_fixup)
1329
1330 rsr a2, windowbase # get current windowbase (a2 is saved)
1331 xsr a0, depc # restore depc and a0
1332 ssl a2 # set shift (32 - WB)
1333
1334 /* We need to make sure the current registers (a0-a3) are preserved.
1335 * To do this, we simply set the bit for the current window frame
1336 * in WS, so that the exception handlers save them to the task stack.
1337 *
1338 * Note: we use a3 to set the windowbase, so we take special care
1339 * of it, saving it in the original _spill_registers frame across
1340 * the exception handler call.
1341 */
1342
1343 xsr a3, excsave1 # get spill-mask
1344 slli a3, a3, 1 # shift left by one
1345 addi a3, a3, 1 # set the bit for the current window frame
1346
1347 slli a2, a3, 32-WSBITS
1348 src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy......
1349 wsr a2, windowstart # set corrected windowstart
1350
1351 srli a3, a3, 1
1352 rsr a2, excsave1
1353 l32i a2, a2, EXC_TABLE_DOUBLE_SAVE # restore a2
1354 xsr a2, excsave1
1355 s32i a3, a2, EXC_TABLE_DOUBLE_SAVE # save a3
1356 l32i a3, a2, EXC_TABLE_PARAM # original WB (in user task)
1357 xsr a2, excsave1
1358
1359 /* Return to the original (user task) WINDOWBASE.
1360 * We leave the following frame behind:
1361 * a0, a1, a2 same
1362 * a3: trashed (saved in EXC_TABLE_DOUBLE_SAVE)
1363 * depc: depc (we have to return to that address)
1364 * excsave_1: exctable
1365 */
1366
1367 wsr a3, windowbase
1368 rsync
1369
1370 /* We are now in the original frame when we entered _spill_registers:
1371 * a0: return address
1372 * a1: used, stack pointer
1373 * a2: kernel stack pointer
1374 * a3: available
1375 * depc: exception address
1376 * excsave: exctable
1377 * Note: This frame might be the same as above.
1378 */
1379
1380 /* Setup stack pointer. */
1381
1382 addi a2, a2, -PT_USER_SIZE
1383 s32i a0, a2, PT_AREG0
1384
1385 /* Make sure we return to this fixup handler. */
1386
1387 movi a3, fast_syscall_spill_registers_fixup_return
1388 s32i a3, a2, PT_DEPC # setup depc
1389
1390 /* Jump to the exception handler. */
1391
1392 rsr a3, excsave1
1393 rsr a0, exccause
1394 addx4 a0, a0, a3 # find entry in table
1395 l32i a0, a0, EXC_TABLE_FAST_USER # load handler
1396 l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
1397 jx a0
1398
1399 ENDPROC(fast_syscall_spill_registers_fixup)
1400
1401 ENTRY(fast_syscall_spill_registers_fixup_return)
1402
1403 /* When we return here, all registers have been restored (a2: DEPC) */
1404
1405 wsr a2, depc # exception address
1406
1407 /* Restore fixup handler. */
1408
1409 rsr a2, excsave1
1410 s32i a3, a2, EXC_TABLE_DOUBLE_SAVE
1411 movi a3, fast_syscall_spill_registers_fixup
1412 s32i a3, a2, EXC_TABLE_FIXUP
1413 rsr a3, windowbase
1414 s32i a3, a2, EXC_TABLE_PARAM
1415 l32i a2, a2, EXC_TABLE_KSTK
1416
1417 /* Load WB at the time the exception occurred. */
1418
1419 rsr a3, sar # WB is still in SAR
1420 neg a3, a3
1421 wsr a3, windowbase
1422 rsync
1423
1424 rsr a3, excsave1
1425 l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
1426
1427 rfde
1428
1429 ENDPROC(fast_syscall_spill_registers_fixup_return)
1430
1431 #else /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */
1432
1433 ENTRY(fast_syscall_spill_registers)
1434
1435 l32i a0, a2, PT_AREG0 # restore a0
1436 movi a2, -ENOSYS
1437 rfe
1438
1439 ENDPROC(fast_syscall_spill_registers)
1440
1441 #endif /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */
1442
1443 #ifdef CONFIG_MMU
1444 /*
1445 * We should never get here. Bail out!
1446 */
1447
1448 ENTRY(fast_second_level_miss_double_kernel)
1449
1450 1: movi a0, unrecoverable_exception
1451 callx0 a0 # should not return
1452 1: j 1b
1453
1454 ENDPROC(fast_second_level_miss_double_kernel)
1455
1456 /* First-level entry handler for user, kernel, and double 2nd-level
1457 * TLB miss exceptions. Note that for now, user and kernel miss
1458 * exceptions share the same entry point and are handled identically.
1459 *
1460 * An old, less-efficient C version of this function used to exist.
1461 * We include it below, interleaved as comments, for reference.
1462 *
1463 * Entry condition:
1464 *
1465 * a0: trashed, original value saved on stack (PT_AREG0)
1466 * a1: a1
1467 * a2: new stack pointer, original in DEPC
1468 * a3: a3
1469 * depc: a2, original value saved on stack (PT_DEPC)
1470 * excsave_1: dispatch table
1471 *
1472 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
1473 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
1474 */
1475
1476 ENTRY(fast_second_level_miss)
1477
1478 /* Save a1 and a3. Note: we don't expect a double exception. */
1479
1480 s32i a1, a2, PT_AREG1
1481 s32i a3, a2, PT_AREG3
1482
1483 /* We need to map the page of PTEs for the user task. Find
1484 * the pointer to that page. Also, it's possible for tsk->mm
1485 * to be NULL while tsk->active_mm is nonzero if we faulted on
1486 * a vmalloc address. In that rare case, we must use
1487 * active_mm instead to avoid a fault in this handler. See
1488 *
1489 * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
1490 * (or search Internet on "mm vs. active_mm")
1491 *
1492 * if (!mm)
1493 * mm = tsk->active_mm;
1494 * pgd = pgd_offset (mm, regs->excvaddr);
1495 * pmd = pmd_offset (pgd, regs->excvaddr);
1496 * pmdval = *pmd;
1497 */
1498
1499 GET_CURRENT(a1,a2)
1500 l32i a0, a1, TASK_MM # tsk->mm
1501 beqz a0, 9f
1502
1503 8: rsr a3, excvaddr # fault address
1504 _PGD_OFFSET(a0, a3, a1)
1505 l32i a0, a0, 0 # read pmdval
1506 beqz a0, 2f
1507
1508 /* Read ptevaddr and convert to top of page-table page.
1509 *
1510 * vpnval = read_ptevaddr_register() & PAGE_MASK;
1511 * vpnval += DTLB_WAY_PGTABLE;
1512 * pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
1513 * write_dtlb_entry (pteval, vpnval);
1514 *
1515 * The messy computation for 'pteval' above really simplifies
1516 * into the following:
1517 *
1518 * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY
1519 */
1520
1521 movi a1, (-PAGE_OFFSET) & 0xffffffff
1522 add a0, a0, a1 # pmdval - PAGE_OFFSET
1523 extui a1, a0, 0, PAGE_SHIFT # ... & PAGE_MASK
1524 xor a0, a0, a1
1525
1526 movi a1, _PAGE_DIRECTORY
1527 or a0, a0, a1 # ... | PAGE_DIRECTORY
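/* The extui/xor pair above masks without loading PAGE_MASK, using
 *	x ^ (x & (PAGE_SIZE - 1)) == x & PAGE_MASK,
 * so a0 now holds ((pmdval - PAGE_OFFSET) & PAGE_MASK) | _PAGE_DIRECTORY.
 */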
1528
1529 /*
1530 * We utilize all three wired ways (7-9) to hold pmd translations.
1531 * Memory regions are mapped to the DTLBs according to bits 28 and 29.
1532 * This allows us to map the three most common regions to three different
1533 * DTLB ways:
1534 * 0,1 -> way 7 program (0040.0000) and virtual (c000.0000)
1535 * 2 -> way 8 shared libraries (2000.0000)
1536 * 3 -> way 9 stack (3000.0000)
1537 */
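/* A C sketch of the way selection below (illustrative only):
 *
 *	region = (excvaddr >> 28) & 3;			// address bits 28..29
 *	way    = DTLB_WAY_PGD + ((3 * region) >> 2);	// -> 7, 7, 8, 9
 *	entry  = (ptevaddr & PAGE_MASK) + way;		// target of wdtlb
 */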
1538
1539 extui a3, a3, 28, 2 # addr. bit 28 and 29 0,1,2,3
1540 rsr a1, ptevaddr
1541 addx2 a3, a3, a3 # -> 0,3,6,9
1542 srli a1, a1, PAGE_SHIFT
1543 extui a3, a3, 2, 2 # -> 0,0,1,2
1544 slli a1, a1, PAGE_SHIFT # ptevaddr & PAGE_MASK
1545 addi a3, a3, DTLB_WAY_PGD
1546 add a1, a1, a3 # ... + way_number
1547
1548 3: wdtlb a0, a1
1549 dsync
1550
1551 /* Exit critical section. */
1552
1553 4: rsr a3, excsave1
1554 movi a0, 0
1555 s32i a0, a3, EXC_TABLE_FIXUP
1556
1557 /* Restore the working registers, and return. */
1558
1559 l32i a0, a2, PT_AREG0
1560 l32i a1, a2, PT_AREG1
1561 l32i a3, a2, PT_AREG3
1562 l32i a2, a2, PT_DEPC
1563
1564 bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
1565
1566 /* Restore excsave1 and return. */
1567
1568 rsr a2, depc
1569 rfe
1570
1571 /* Return from double exception. */
1572
1573 1: xsr a2, depc
1574 esync
1575 rfde
1576
1577 9: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
1578 j 8b
1579
1580 #if (DCACHE_WAY_SIZE > PAGE_SIZE)
1581
1582 2: /* Special case for cache aliasing.
1583 * We (should) only get here if a clear_user_page, copy_user_page
1584 * or the aliased cache flush functions got preempted by another task.
1585 * Re-establish the temporary mapping to the
1586 * TLBTEMP_BASE areas.
1587 */
1588
1589 /* We shouldn't be in a double exception */
1590
1591 l32i a0, a2, PT_DEPC
1592 bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f
1593
1594 /* Make sure the exception originated in the special functions */
1595
1596 movi a0, __tlbtemp_mapping_start
1597 rsr a3, epc1
1598 bltu a3, a0, 2f
1599 movi a0, __tlbtemp_mapping_end
1600 bgeu a3, a0, 2f
1601
1602 /* Check if excvaddr was in one of the TLBTEMP_BASE areas. */
1603
1604 movi a3, TLBTEMP_BASE_1
1605 rsr a0, excvaddr
1606 bltu a0, a3, 2f
1607
1608 addi a1, a0, -TLBTEMP_SIZE
1609 bgeu a1, a3, 2f
1610
1611 /* Check if we have to restore an ITLB mapping. */
1612
1613 movi a1, __tlbtemp_mapping_itlb
1614 rsr a3, epc1
1615 sub a3, a3, a1
1616
1617 /* Calculate VPN */
1618
1619 movi a1, PAGE_MASK
1620 and a1, a1, a0
1621
1622 /* Jump for ITLB entry */
1623
1624 bgez a3, 1f
1625
1626 /* We can use up to two TLBTEMP areas, one for src and one for dst. */
1627
1628 extui a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1
1629 add a1, a3, a1
1630
1631 /* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */
1632
1633 mov a0, a6
1634 movnez a0, a7, a3
1635 j 3b
1636
1637 /* ITLB entry. We only use dst in a6. */
1638
1639 1: witlb a6, a1
1640 isync
1641 j 4b
1642
1643
1644 #endif // DCACHE_WAY_SIZE > PAGE_SIZE
1645
1646
1647 2: /* Invalid PGD, default exception handling */
1648
1649 rsr a1, depc
1650 s32i a1, a2, PT_AREG2
1651 mov a1, a2
1652
1653 rsr a2, ps
1654 bbsi.l a2, PS_UM_BIT, 1f
1655 j _kernel_exception
1656 1: j _user_exception
1657
1658 ENDPROC(fast_second_level_miss)
1659
1660 /*
1661 * StoreProhibitedException
1662 *
1663 * Update the pte and invalidate the dtlb mapping for this pte.
1664 *
1665 * Entry condition:
1666 *
1667 * a0: trashed, original value saved on stack (PT_AREG0)
1668 * a1: a1
1669 * a2: new stack pointer, original in DEPC
1670 * a3: a3
1671 * depc: a2, original value saved on stack (PT_DEPC)
1672 * excsave_1: dispatch table
1673 *
1674 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
1675 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
1676 */
1677
1678 ENTRY(fast_store_prohibited)
1679
1680 /* Save a1 and a3. */
1681
1682 s32i a1, a2, PT_AREG1
1683 s32i a3, a2, PT_AREG3
1684
1685 GET_CURRENT(a1,a2)
1686 l32i a0, a1, TASK_MM # tsk->mm
1687 beqz a0, 9f
1688
1689 8: rsr a1, excvaddr # fault address
1690 _PGD_OFFSET(a0, a1, a3)
1691 l32i a0, a0, 0
1692 beqz a0, 2f
1693
1694 /*
1695 * Note that we test _PAGE_WRITABLE_BIT only if PTE is present
1696 * and is not PAGE_NONE. See pgtable.h for possible PTE layouts.
1697 */
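/* In C terms, the fast path below does roughly this (fault fixup elided;
 * sketch only):
 *
 *	pte = *ptep;
 *	if ((pte & _PAGE_CA_INVALID) == _PAGE_CA_INVALID)	// not present
 *		goto handle_fault_in_c;
 *	if (!(pte & (1 << _PAGE_WRITABLE_BIT)))			// not writable
 *		goto handle_fault_in_c;
 *	*ptep = pte | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE;
 */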
1698
1699 _PTE_OFFSET(a0, a1, a3)
1700 l32i a3, a0, 0 # read pteval
1701 movi a1, _PAGE_CA_INVALID
1702 ball a3, a1, 2f
1703 bbci.l a3, _PAGE_WRITABLE_BIT, 2f
1704
1705 movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
1706 or a3, a3, a1
1707 rsr a1, excvaddr
1708 s32i a3, a0, 0
1709
1710 /* We need to flush the cache if we have page coloring. */
1711 #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
1712 dhwb a0, 0
1713 #endif
1714 pdtlb a0, a1
1715 wdtlb a3, a0
1716
1717 /* Exit critical section. */
1718
1719 movi a0, 0
1720 rsr a3, excsave1
1721 s32i a0, a3, EXC_TABLE_FIXUP
1722
1723 /* Restore the working registers, and return. */
1724
1725 l32i a3, a2, PT_AREG3
1726 l32i a1, a2, PT_AREG1
1727 l32i a0, a2, PT_AREG0
1728 l32i a2, a2, PT_DEPC
1729
1730 bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
1731
1732 rsr a2, depc
1733 rfe
1734
1735 /* Double exception. Restore FIXUP handler and return. */
1736
1737 1: xsr a2, depc
1738 esync
1739 rfde
1740
1741 9: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
1742 j 8b
1743
1744 2: /* If there was a problem, handle fault in C */
1745
1746 rsr a3, depc # still holds a2
1747 s32i a3, a2, PT_AREG2
1748 mov a1, a2
1749
1750 rsr a2, ps
1751 bbsi.l a2, PS_UM_BIT, 1f
1752 j _kernel_exception
1753 1: j _user_exception
1754
1755 ENDPROC(fast_store_prohibited)
1756
1757 #endif /* CONFIG_MMU */
1758
1759 /*
1760 * System Calls.
1761 *
1762 * void system_call (struct pt_regs* regs, int exccause)
1763 * a2 a3
1764 */
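/* The body below corresponds roughly to this C (a sketch; the six
 * arguments actually come from regs->areg[6,3,4,5,8,9], and pt_regs is
 * additionally passed on the stack):
 *
 *	regs->syscall = regs->areg[2];
 *	do_syscall_trace_enter(regs);
 *	nr  = regs->syscall;
 *	ret = -ENOSYS;
 *	if (nr < __NR_syscall_count && sys_call_table[nr] != sys_ni_syscall)
 *		ret = sys_call_table[nr](arg0, arg1, arg2, arg3, arg4, arg5);
 *	regs->areg[2] = ret;
 *	do_syscall_trace_leave(regs);
 */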
1765
1766 ENTRY(system_call)
1767
1768 entry a1, 32
1769
1770 /* regs->syscall = regs->areg[2] */
1771
1772 l32i a3, a2, PT_AREG2
1773 mov a6, a2
1774 movi a4, do_syscall_trace_enter
1775 s32i a3, a2, PT_SYSCALL
1776 callx4 a4
1777
1778 /* syscall = sys_call_table[syscall_nr] */
1779
1780 movi a4, sys_call_table;
1781 movi a5, __NR_syscall_count
1782 movi a6, -ENOSYS
1783 bgeu a3, a5, 1f
1784
1785 addx4 a4, a3, a4
1786 l32i a4, a4, 0
1787 movi a5, sys_ni_syscall;
1788 beq a4, a5, 1f
1789
1790 /* Load args: arg0 - arg5 are passed via regs. */
1791
1792 l32i a6, a2, PT_AREG6
1793 l32i a7, a2, PT_AREG3
1794 l32i a8, a2, PT_AREG4
1795 l32i a9, a2, PT_AREG5
1796 l32i a10, a2, PT_AREG8
1797 l32i a11, a2, PT_AREG9
1798
1799 /* Pass one additional argument to the syscall: pt_regs (on stack) */
1800 s32i a2, a1, 0
1801
1802 callx4 a4
1803
1804 1: /* regs->areg[2] = return_value */
1805
1806 s32i a6, a2, PT_AREG2
1807 movi a4, do_syscall_trace_leave
1808 mov a6, a2
1809 callx4 a4
1810 retw
1811
1812 ENDPROC(system_call)
1813
1814 /*
1815 * Spill live registers on the kernel stack macro.
1816 *
1817 * Entry condition: ps.woe is set, ps.excm is cleared
1818 * Exit condition: windowstart has single bit set
1819 * May clobber: a12, a13
1820 */
1821 .macro spill_registers_kernel
1822
1823 #if XCHAL_NUM_AREGS > 16
1824 call12 1f
1825 _j 2f
1826 retw
1827 .align 4
1828 1:
1829 _entry a1, 48
1830 addi a12, a0, 3
1831 #if XCHAL_NUM_AREGS > 32
1832 .rept (XCHAL_NUM_AREGS - 32) / 12
1833 _entry a1, 48
1834 mov a12, a0
1835 .endr
1836 #endif
1837 _entry a1, 48
1838 #if XCHAL_NUM_AREGS % 12 == 0
1839 mov a8, a8
1840 #elif XCHAL_NUM_AREGS % 12 == 4
1841 mov a12, a12
1842 #elif XCHAL_NUM_AREGS % 12 == 8
1843 mov a4, a4
1844 #endif
1845 retw
1846 2:
1847 #else
1848 mov a12, a12
1849 #endif
1850 .endm
1851
1852 /*
1853 * Task switch.
1854 *
1855 * struct task* _switch_to (struct task* prev, struct task* next)
1856 * a2 a2 a3
1857 */
1858
1859 ENTRY(_switch_to)
1860
1861 entry a1, 16
1862
1863 mov a11, a3 # preserve 'next' (a3)
1864
1865 l32i a4, a2, TASK_THREAD_INFO
1866 l32i a5, a3, TASK_THREAD_INFO
1867
1868 save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
1869
1870 #if THREAD_RA > 1020 || THREAD_SP > 1020
1871 addi a10, a2, TASK_THREAD
1872 s32i a0, a10, THREAD_RA - TASK_THREAD # save return address
1873 s32i a1, a10, THREAD_SP - TASK_THREAD # save stack pointer
1874 #else
1875 s32i a0, a2, THREAD_RA # save return address
1876 s32i a1, a2, THREAD_SP # save stack pointer
1877 #endif
1878
1879 /* Disable ints while we manipulate the stack pointer. */
1880
1881 rsil a14, LOCKLEVEL
1882 rsr a3, excsave1
1883 rsync
1884 s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */
1885
1886 /* Switch CPENABLE */
1887
1888 #if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
1889 l32i a3, a5, THREAD_CPENABLE
1890 xsr a3, cpenable
1891 s32i a3, a4, THREAD_CPENABLE
1892 #endif
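/* The three instructions above amount to (C sketch; xchg_cpenable is a
 * hypothetical helper standing in for the xsr):
 *
 *	old = xchg_cpenable(next_ti->cpenable);
 *	prev_ti->cpenable = old;
 */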
1893
1894 /* Flush register file. */
1895
1896 spill_registers_kernel
1897
1898 /* Set kernel stack (and leave critical section)
1899 * Note: It's safe to set it here. The stack will not be overwritten
1900 * because the kernel stack will only be loaded again after
1901 * we return from kernel space.
1902 */
1903
1904 rsr a3, excsave1 # exc_table
1905 movi a6, 0
1906 addi a7, a5, PT_REGS_OFFSET
1907 s32i a6, a3, EXC_TABLE_FIXUP
1908 s32i a7, a3, EXC_TABLE_KSTK
1909
1910 /* restore context of the task 'next' */
1911
1912 l32i a0, a11, THREAD_RA # restore return address
1913 l32i a1, a11, THREAD_SP # restore stack pointer
1914
1915 load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
1916
1917 wsr a14, ps
1918 rsync
1919
1920 retw
1921
1922 ENDPROC(_switch_to)
1923
1924 ENTRY(ret_from_fork)
1925
1926 /* void schedule_tail (struct task_struct *prev)
1927 * Note: prev is still in a6 (return value from fake call4 frame)
1928 */
1929 movi a4, schedule_tail
1930 callx4 a4
1931
1932 movi a4, do_syscall_trace_leave
1933 mov a6, a1
1934 callx4 a4
1935
1936 j common_exception_return
1937
1938 ENDPROC(ret_from_fork)
1939
1940 /*
1941 * Kernel thread creation helper
1942 * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg
1943 * left from _switch_to: a6 = prev
1944 */
1945 ENTRY(ret_from_kernel_thread)
1946
1947 call4 schedule_tail
1948 mov a6, a3
1949 callx4 a2
1950 j common_exception_return
1951
1952 ENDPROC(ret_from_kernel_thread)