/*
 * Low-level system-call handling, trap handlers and context-switching
 *
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
 * Copyright (C) 2001,2002 NEC Corporation
 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 * Heavily modified by John Williams for Microblaze
 */

#include <linux/sys.h>
#include <linux/linkage.h>

#include <asm/entry.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/exceptions.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

#include <asm/page.h>
#include <asm/unistd.h>

#include <linux/errno.h>
#include <asm/signal.h>

#undef DEBUG

/* The size of a state save frame. */
#define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE)

/* The offset of the struct pt_regs in a `state save frame' on the stack. */
#define PTO STATE_SAVE_ARG_SPACE /* 24 the space for args */

#define C_ENTRY(name) .globl name; .align 4; name

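/*
 * For reference, C_ENTRY(foo) simply expands to
 *	.globl foo; .align 4; foo
 * so "C_ENTRY(foo):" declares an aligned, global entry label.
 */
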
/*
 * Various ways of setting and clearing BIP in flags reg.
 * This is mucky, but necessary when using a MicroBlaze version that
 * allows msr ops to write to BIP
 */
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
 .macro clear_bip
 msrclr r11, MSR_BIP
 nop
 .endm

 .macro set_bip
 msrset r11, MSR_BIP
 nop
 .endm

 .macro clear_eip
 msrclr r11, MSR_EIP
 nop
 .endm

 .macro set_ee
 msrset r11, MSR_EE
 nop
 .endm

 .macro disable_irq
 msrclr r11, MSR_IE
 nop
 .endm

 .macro enable_irq
 msrset r11, MSR_IE
 nop
 .endm

 .macro set_ums
 msrset r11, MSR_UMS
 nop
 msrclr r11, MSR_VMS
 nop
 .endm

 .macro set_vms
 msrclr r11, MSR_UMS
 nop
 msrset r11, MSR_VMS
 nop
 .endm

 .macro clear_vms_ums
 msrclr r11, MSR_VMS | MSR_UMS
 nop
 .endm
#else
 .macro clear_bip
 mfs r11, rmsr
 nop
 andi r11, r11, ~MSR_BIP
 mts rmsr, r11
 nop
 .endm

 .macro set_bip
 mfs r11, rmsr
 nop
 ori r11, r11, MSR_BIP
 mts rmsr, r11
 nop
 .endm

 .macro clear_eip
 mfs r11, rmsr
 nop
 andi r11, r11, ~MSR_EIP
 mts rmsr, r11
 nop
 .endm

 .macro set_ee
 mfs r11, rmsr
 nop
 ori r11, r11, MSR_EE
 mts rmsr, r11
 nop
 .endm

 .macro disable_irq
 mfs r11, rmsr
 nop
 andi r11, r11, ~MSR_IE
 mts rmsr, r11
 nop
 .endm

 .macro enable_irq
 mfs r11, rmsr
 nop
 ori r11, r11, MSR_IE
 mts rmsr, r11
 nop
 .endm

 .macro set_ums
 mfs r11, rmsr
 nop
 ori r11, r11, MSR_VMS
 andni r11, r11, MSR_UMS
 mts rmsr, r11
 nop
 .endm

 .macro set_vms
 mfs r11, rmsr
 nop
 ori r11, r11, MSR_VMS
 andni r11, r11, MSR_UMS
 mts rmsr, r11
 nop
 .endm

 .macro clear_vms_ums
 mfs r11, rmsr
 nop
 andni r11, r11, (MSR_VMS|MSR_UMS)
 mts rmsr, r11
 nop
 .endm
#endif
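
/*
 * A note for readers of the macros above (MicroBlaze ISA behaviour):
 * msrset/msrclr copy the old MSR value into the destination register
 * (r11 here) before setting/clearing the immediate bits, so both the
 * msr-instruction and the mfs/mts variants of these macros clobber r11.
 */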

/* Define how to call high-level functions. With MMU, virtual mode must be
 * enabled when calling the high-level function. Clobbers R11.
 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
 */

/* turn on virtual protected mode save */
#define VM_ON \
 set_ums; \
 rted r0, 2f; \
 nop; \
2:

/* turn off virtual protected mode save and user mode save */
#define VM_OFF \
 clear_vms_ums; \
 rted r0, TOPHYS(1f); \
 nop; \
1:

#define SAVE_REGS \
 swi r2, r1, PTO+PT_R2; /* Save SDA */ \
 swi r3, r1, PTO+PT_R3; \
 swi r4, r1, PTO+PT_R4; \
 swi r5, r1, PTO+PT_R5; \
 swi r6, r1, PTO+PT_R6; \
 swi r7, r1, PTO+PT_R7; \
 swi r8, r1, PTO+PT_R8; \
 swi r9, r1, PTO+PT_R9; \
 swi r10, r1, PTO+PT_R10; \
 swi r11, r1, PTO+PT_R11; /* save clobbered regs after rval */\
 swi r12, r1, PTO+PT_R12; \
 swi r13, r1, PTO+PT_R13; /* Save SDA2 */ \
 swi r14, r1, PTO+PT_PC; /* PC, before IRQ/trap */ \
 swi r15, r1, PTO+PT_R15; /* Save LP */ \
 swi r18, r1, PTO+PT_R18; /* Save asm scratch reg */ \
 swi r19, r1, PTO+PT_R19; \
 swi r20, r1, PTO+PT_R20; \
 swi r21, r1, PTO+PT_R21; \
 swi r22, r1, PTO+PT_R22; \
 swi r23, r1, PTO+PT_R23; \
 swi r24, r1, PTO+PT_R24; \
 swi r25, r1, PTO+PT_R25; \
 swi r26, r1, PTO+PT_R26; \
 swi r27, r1, PTO+PT_R27; \
 swi r28, r1, PTO+PT_R28; \
 swi r29, r1, PTO+PT_R29; \
 swi r30, r1, PTO+PT_R30; \
 swi r31, r1, PTO+PT_R31; /* Save current task reg */ \
 mfs r11, rmsr; /* save MSR */ \
 nop; \
 swi r11, r1, PTO+PT_MSR;

#define RESTORE_REGS \
 lwi r11, r1, PTO+PT_MSR; \
 mts rmsr, r11; \
 nop; \
 lwi r2, r1, PTO+PT_R2; /* restore SDA */ \
 lwi r3, r1, PTO+PT_R3; \
 lwi r4, r1, PTO+PT_R4; \
 lwi r5, r1, PTO+PT_R5; \
 lwi r6, r1, PTO+PT_R6; \
 lwi r7, r1, PTO+PT_R7; \
 lwi r8, r1, PTO+PT_R8; \
 lwi r9, r1, PTO+PT_R9; \
 lwi r10, r1, PTO+PT_R10; \
 lwi r11, r1, PTO+PT_R11; /* restore clobbered regs after rval */\
 lwi r12, r1, PTO+PT_R12; \
 lwi r13, r1, PTO+PT_R13; /* restore SDA2 */ \
 lwi r14, r1, PTO+PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\
 lwi r15, r1, PTO+PT_R15; /* restore LP */ \
 lwi r18, r1, PTO+PT_R18; /* restore asm scratch reg */ \
 lwi r19, r1, PTO+PT_R19; \
 lwi r20, r1, PTO+PT_R20; \
 lwi r21, r1, PTO+PT_R21; \
 lwi r22, r1, PTO+PT_R22; \
 lwi r23, r1, PTO+PT_R23; \
 lwi r24, r1, PTO+PT_R24; \
 lwi r25, r1, PTO+PT_R25; \
 lwi r26, r1, PTO+PT_R26; \
 lwi r27, r1, PTO+PT_R27; \
 lwi r28, r1, PTO+PT_R28; \
 lwi r29, r1, PTO+PT_R29; \
 lwi r30, r1, PTO+PT_R30; \
 lwi r31, r1, PTO+PT_R31; /* Restore cur task reg */

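/*
 * Rough sketch of the frame these macros assume, given STATE_SAVE_SIZE and
 * PTO above (not an exact byte map):
 *
 *	new r1 ->  +---------------------------+
 *	           | arg space (PTO bytes)     |
 *	           +---------------------------+
 *	           | struct pt_regs            |  accessed as r1 + PTO + PT_xxx
 *	old r1 ->  +---------------------------+  (= new r1 + STATE_SAVE_SIZE)
 */
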
.text

/*
 * User trap.
 *
 * System calls are handled here.
 *
 * Syscall protocol:
 * Syscall number in r12, args in r5-r10
 * Return value in r3
 *
 * Trap entered via brki instruction, so BIP bit is set, and interrupts
 * are masked. This is nice; it means we don't have to CLI before state save
 */
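/*
 * Illustrative user-space call sequence for this protocol (a sketch, not
 * part of the kernel; __NR_write is just an example number, and the 0x08
 * vector is set up in the .init.ivt table at the end of this file):
 *
 *	addik	r12, r0, __NR_write	# syscall number
 *	addik	r5, r0, ...		# arguments in r5..r10
 *	brki	r14, 0x08		# trap into _user_exception
 *	# on return, r3 holds the result (a negative errno value on error)
 */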
C_ENTRY(_user_exception):
 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
 addi r14, r14, 4 /* return address is 4 byte after call */
 swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */

 mfs r11, rmsr
 nop
 andi r11, r11, MSR_UMS
 bnei r11, 1f

/* Kernel-mode state save - kernel execve */
 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
 tophys(r1,r11);
 swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */

 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
 SAVE_REGS

 addi r11, r0, 1; /* Was in kernel-mode. */
 swi r11, r1, PTO+PT_MODE; /* pt_regs -> kernel mode */
 brid 2f;
 nop; /* Fill delay slot */

/* User-mode state save. */
1:
 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
 tophys(r1,r1);
 lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
/* calculate kernel stack pointer from task struct 8k */
 addik r1, r1, THREAD_SIZE;
 tophys(r1,r1);

 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
 SAVE_REGS

 swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */
 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
 swi r11, r1, PTO+PT_R1; /* Store user SP. */
2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
 /* Save away the syscall number. */
 swi r12, r1, PTO+PT_R0;
 tovirt(r1,r1)

/* where the trap should return need -8 to adjust for rtsd r15, 8*/
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid. The LP
 * register should point to the location where
 * the called function should return. [note that MAKE_SYS_CALL uses label 1] */

 # Step into virtual mode.
 set_vms;
 addik r11, r0, 3f
 rtid r11, 0
 nop
3:
 lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
 lwi r11, r11, TI_FLAGS /* get flags in thread info */
 andi r11, r11, _TIF_WORK_SYSCALL_MASK
 beqi r11, 4f

 addik r3, r0, -ENOSYS
 swi r3, r1, PTO + PT_R3
 brlid r15, do_syscall_trace_enter
 addik r5, r1, PTO + PT_R0

 # do_syscall_trace_enter returns the new syscall nr.
 addk r12, r0, r3
 lwi r5, r1, PTO+PT_R5;
 lwi r6, r1, PTO+PT_R6;
 lwi r7, r1, PTO+PT_R7;
 lwi r8, r1, PTO+PT_R8;
 lwi r9, r1, PTO+PT_R9;
 lwi r10, r1, PTO+PT_R10;
4:
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid.
 * The LP register should point to the location where the called function
 * should return. [note that MAKE_SYS_CALL uses label 1] */
 /* See if the system call number is valid */
 addi r11, r12, -__NR_syscalls;
 bgei r11, 5f;
 /* Figure out which function to use for this system call. */
 /* Note Microblaze barrel shift is optional, so don't rely on it */
 add r12, r12, r12; /* convert num -> ptr */
 add r12, r12, r12;

#ifdef DEBUG
 /* Trace syscalls and store them to r0_ram */
 lwi r3, r12, 0x400 + r0_ram
 addi r3, r3, 1
 swi r3, r12, 0x400 + r0_ram
#endif

 # Find and jump into the syscall handler.
 lwi r12, r12, sys_call_table
 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
 addi r15, r0, ret_from_trap-8
 bra r12

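 /* Roughly equivalent C for the dispatch above (illustrative only):
  *	r3 = sys_call_table[r12](r5, r6, r7, r8, r9, r10);
  * with the callee's return landing at ret_from_trap via r15.
  */
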
 /* The syscall number is invalid, return an error. */
5:
 addi r3, r0, -ENOSYS;
 rtsd r15, 8; /* looks like a normal subroutine return */
 or r0, r0, r0


/* Entry point used to return from a syscall/trap */
/* We re-enable BIP bit before state restore */
C_ENTRY(ret_from_trap):
 swi r3, r1, PTO + PT_R3
 swi r4, r1, PTO + PT_R4

 lwi r11, r1, PTO+PT_MODE;
/* See if returning to kernel mode, if so, skip resched &c. */
 bnei r11, 2f;
 /* We're returning to user mode, so check for various conditions that
  * trigger rescheduling. */
 /* FIXME: Restructure all these flag checks. */
 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
 andi r11, r11, _TIF_WORK_SYSCALL_MASK
 beqi r11, 1f

 brlid r15, do_syscall_trace_leave
 addik r5, r1, PTO + PT_R0
1:
 /* We're returning to user mode, so check for various conditions that
  * trigger rescheduling. */
 /* get thread info from current task */
 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
 andi r11, r11, _TIF_NEED_RESCHED;
 beqi r11, 5f;

 bralid r15, schedule; /* Call scheduler */
 nop; /* delay slot */

 /* Maybe handle a signal */
5: /* get thread info from current task */
 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
 andi r11, r11, _TIF_SIGPENDING;
 beqi r11, 1f; /* No signals to handle, skip signal handling */

 addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
 addi r7, r0, 1; /* Arg 3: int in_syscall */
 bralid r15, do_signal; /* Handle any signals */
 add r6, r0, r0; /* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
1: set_bip; /* Ints masked for state restore */
 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
 VM_OFF;
 tophys(r1,r1);
 RESTORE_REGS;
 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
 lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
 bri 6f;

/* Return to kernel state. */
2: set_bip; /* Ints masked for state restore */
 VM_OFF;
 tophys(r1,r1);
 RESTORE_REGS;
 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
 tovirt(r1,r1);
6:
TRAP_return: /* Make global symbol for debugging */
 rtbd r14, 0; /* Instructions to return from an IRQ */
 nop;


/* These syscalls need access to the struct pt_regs on the stack, so we
 implement them in assembly (they're basically all wrappers anyway). */

C_ENTRY(sys_fork_wrapper):
 addi r5, r0, SIGCHLD /* Arg 0: flags */
 lwi r6, r1, PTO+PT_R1 /* Arg 1: child SP (use parent's) */
 addik r7, r1, PTO /* Arg 2: parent context */
 add r8, r0, r0 /* Arg 3: (unused) */
 add r9, r0, r0; /* Arg 4: (unused) */
 add r10, r0, r0; /* Arg 5: (unused) */
 brid do_fork /* Do real work (tail-call) */
 nop;

/* This is the initial entry point for a new child thread, with an appropriate
 stack in place that makes it look like the child is in the middle of a
 syscall. This function is actually `returned to' from switch_thread
 (copy_thread makes ret_from_fork the return address in each new thread's
 saved context). */
C_ENTRY(ret_from_fork):
 bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
 add r3, r5, r0; /* switch_thread returns the prev task */
 /* ( in the delay slot ) */
 add r3, r0, r0; /* Child's fork call should return 0. */
 brid ret_from_trap; /* Do normal trap return */
 nop;

C_ENTRY(sys_vfork):
 brid microblaze_vfork /* Do real work (tail-call) */
 addik r5, r1, PTO

C_ENTRY(sys_clone):
 bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */
 lwi r6, r1, PTO + PT_R1; /* If so, use parent's stack ptr */
1: addik r7, r1, PTO; /* Arg 2: parent context */
 add r8, r0, r0; /* Arg 3: (unused) */
 add r9, r0, r0; /* Arg 4: (unused) */
 add r10, r0, r0; /* Arg 5: (unused) */
 brid do_fork /* Do real work (tail-call) */
 nop;

C_ENTRY(sys_execve):
 addik r8, r1, PTO; /* add user context as 4th arg */
 brid microblaze_execve; /* Do real work (tail-call).*/
 nop;

C_ENTRY(sys_rt_sigreturn_wrapper):
 swi r3, r1, PTO+PT_R3; /* save r3, r4 registers */
 swi r4, r1, PTO+PT_R4;
 addik r5, r1, PTO; /* add user context as 1st arg */
 brlid r15, sys_rt_sigreturn /* Do real work */
 nop;
 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
 lwi r4, r1, PTO+PT_R4;
 bri ret_from_trap /* fall through will not work here due to align */
 nop;

/*
 * HW EXCEPTION routine start
 */

#define SAVE_STATE \
 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \
 swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */ \
 /* See if already in kernel mode. */ \
 mfs r11, rmsr; \
 nop; \
 andi r11, r11, MSR_UMS; \
 bnei r11, 1f; \
 /* Kernel-mode state save. */ \
 /* Reload kernel stack-ptr. */ \
 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
 tophys(r1,r11); \
 swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */ \
 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
 SAVE_REGS \
 /* PC, before IRQ/trap - this is one instruction above */ \
 swi r17, r1, PTO+PT_PC; \
 \
 addi r11, r0, 1; /* Was in kernel-mode. */ \
 swi r11, r1, PTO+PT_MODE; \
 brid 2f; \
 nop; /* Fill delay slot */ \
1: /* User-mode state save. */ \
 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
 tophys(r1,r1); \
 lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \
 addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */\
 tophys(r1,r1); \
 \
 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
 SAVE_REGS \
 /* PC, before IRQ/trap - this is one instruction above FIXME*/ \
 swi r17, r1, PTO+PT_PC; \
 \
 swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */ \
 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
 swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); \
 /* Save away the syscall number. */ \
 swi r0, r1, PTO+PT_R0; \
 tovirt(r1,r1)

C_ENTRY(full_exception_trap):
 /* adjust exception address for privileged instruction
  * to find where it is */
 addik r17, r17, -4
 SAVE_STATE /* Save registers */
 /* FIXME this can be stored directly in the PT_ESR reg.
  * I tested it but there is a fault */
 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
 addik r15, r0, ret_from_exc - 8
 addik r5, r1, PTO /* parameter struct pt_regs * regs */
 mfs r6, resr
 nop
 mfs r7, rfsr; /* save FSR */
 nop
 mts rfsr, r0; /* Clear sticky fsr */
 nop
 addik r12, r0, full_exception
 set_vms;
 rted r12, 0;
 nop;

/*
 * Unaligned data trap.
 *
 * Unaligned data trap last on 4k page is handled here.
 *
 * Trap entered via exception, so EE bit is set, and interrupts
 * are masked. This is nice; it means we don't have to CLI before state save
 *
 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
 */
C_ENTRY(unaligned_data_trap):
 /* MS: I have to save r11 value and then restore it because
  * set_bip, clear_eip, set_ee use r11 as temp register if MSR
  * instructions are not used. We don't need to do this if MSR instructions
  * are used and they use r0 instead of r11.
  * I am using ENTRY_SP which should be primarily used only for stack
  * pointer saving. */
 swi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
 set_bip; /* equalize initial state for all possible entries */
 clear_eip;
 set_ee;
 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
 SAVE_STATE /* Save registers. */
 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
 addik r15, r0, ret_from_exc-8
 mfs r3, resr /* ESR */
 nop
 mfs r4, rear /* EAR */
 nop
 addik r7, r1, PTO /* parameter struct pt_regs * regs */
 addik r12, r0, _unaligned_data_exception
 set_vms;
 rtbd r12, 0; /* interrupts enabled */
 nop;

/*
 * Page fault traps.
 *
 * If the real exception handler (from hw_exception_handler.S) didn't find
 * the mapping for the process, then we're thrown here to handle such a situation.
 *
 * Trap entered via exceptions, so EE bit is set, and interrupts
 * are masked. This is nice; it means we don't have to CLI before state save
 *
 * Build a standard exception frame for TLB Access errors. All TLB exceptions
 * will bail out to this point if they can't resolve the lightweight TLB fault.
 *
 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
 * void do_page_fault(struct pt_regs *regs,
 *				unsigned long address,
 *				unsigned long error_code)
 */
/* data and instruction trap - which one it is is resolved in fault.c */
C_ENTRY(page_fault_data_trap):
 SAVE_STATE /* Save registers. */
 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
 addik r15, r0, ret_from_exc-8
 addik r5, r1, PTO /* parameter struct pt_regs * regs */
 mfs r6, rear /* parameter unsigned long address */
 nop
 mfs r7, resr /* parameter unsigned long error_code */
 nop
 addik r12, r0, do_page_fault
 set_vms;
 rted r12, 0; /* interrupts enabled */
 nop;

C_ENTRY(page_fault_instr_trap):
 SAVE_STATE /* Save registers. */
 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
 addik r15, r0, ret_from_exc-8
 addik r5, r1, PTO /* parameter struct pt_regs * regs */
 mfs r6, rear /* parameter unsigned long address */
 nop
 ori r7, r0, 0 /* parameter unsigned long error_code */
 addik r12, r0, do_page_fault
 set_vms;
 rted r12, 0; /* interrupts enabled */
 nop;

/* Entry point used to return from an exception. */
C_ENTRY(ret_from_exc):
 lwi r11, r1, PTO+PT_MODE;
 bnei r11, 2f; /* See if returning to kernel mode, */
 /* ... if so, skip resched &c. */

 /* We're returning to user mode, so check for various conditions that
  trigger rescheduling. */
 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
 andi r11, r11, _TIF_NEED_RESCHED;
 beqi r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */
 bralid r15, schedule; /* Call scheduler */
 nop; /* delay slot */

 /* Maybe handle a signal */
5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
 andi r11, r11, _TIF_SIGPENDING;
 beqi r11, 1f; /* No signals to handle, skip signal handling */

 /*
  * Handle a signal return; Pending signals should be in r18.
  *
  * Not all registers are saved by the normal trap/interrupt entry
  * points (for instance, call-saved registers (because the normal
  * C-compiler calling sequence in the kernel makes sure they're
  * preserved), and call-clobbered registers in the case of
  * traps), but signal handlers may want to examine or change the
  * complete register state. Here we save anything not saved by
  * the normal entry sequence, so that it may be safely restored
  * (in a possibly modified form) after do_signal returns. */
 addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
 addi r7, r0, 0; /* Arg 3: int in_syscall */
 bralid r15, do_signal; /* Handle any signals */
 add r6, r0, r0; /* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
1: set_bip; /* Ints masked for state restore */
 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
 VM_OFF;
 tophys(r1,r1);

 RESTORE_REGS;
 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */

 lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
 bri 6f;
/* Return to kernel state. */
2: set_bip; /* Ints masked for state restore */
 VM_OFF;
 tophys(r1,r1);
 RESTORE_REGS;
 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */

 tovirt(r1,r1);
6:
EXC_return: /* Make global symbol for debugging */
 rtbd r14, 0; /* Instructions to return from an IRQ */
 nop;

/*
 * HW EXCEPTION routine end
 */

/*
 * Hardware maskable interrupts.
 *
 * The stack-pointer (r1) should have already been saved to the memory
 * location PER_CPU(ENTRY_SP).
 */
C_ENTRY(_interrupt):
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual.*/
 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
 swi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
 /* MS: See if already in kernel mode. */
 mfs r11, rmsr
 nop
 andi r11, r11, MSR_UMS
 bnei r11, 1f

/* Kernel-mode state save. */
 or r11, r1, r0
 tophys(r1,r11); /* MS: I have the physical address of the stack in r1 */
/* MS: Save original SP - position PT_R1 to next stack frame 4 *1 - 152 */
 swi r11, r1, (PT_R1 - PT_SIZE);
/* MS: restore r11 because of saving in SAVE_REGS */
 lwi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
 /* save registers */
/* MS: Make room on the stack -> activation record */
 addik r1, r1, -STATE_SAVE_SIZE;
 SAVE_REGS
 /* MS: store mode */
 addi r11, r0, 1; /* MS: Was in kernel-mode. */
 swi r11, r1, PTO + PT_MODE; /* MS: and save it */
 brid 2f;
 nop; /* MS: Fill delay slot */

1:
/* User-mode state save. */
/* MS: restore r11 -> FIXME move before SAVE_REG */
 lwi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
 /* MS: get the saved current */
 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
 tophys(r1,r1);
 lwi r1, r1, TS_THREAD_INFO;
 addik r1, r1, THREAD_SIZE;
 tophys(r1,r1);
 /* save registers */
 addik r1, r1, -STATE_SAVE_SIZE;
 SAVE_REGS
 /* calculate mode */
 swi r0, r1, PTO + PT_MODE;
 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
 swi r11, r1, PTO+PT_R1;
2:
 lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
 swi r0, r1, PTO + PT_R0;
 tovirt(r1,r1)
 addik r5, r1, PTO;
 set_vms;
 addik r11, r0, do_IRQ;
 addik r15, r0, irq_call;
irq_call:rtbd r11, 0;
 nop;

/* MS: we are in virtual mode */
ret_from_irq:
 lwi r11, r1, PTO + PT_MODE;
 bnei r11, 2f;

 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
 lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */
 andi r11, r11, _TIF_NEED_RESCHED;
 beqi r11, 5f
 bralid r15, schedule;
 nop; /* delay slot */

 /* Maybe handle a signal */
5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
 andi r11, r11, _TIF_SIGPENDING;
 beqid r11, no_intr_resched
/* Handle a signal return; Pending signals should be in r18. */
 addi r7, r0, 0; /* Arg 3: int in_syscall */
 addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
 bralid r15, do_signal; /* Handle any signals */
 add r6, r0, r0; /* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
no_intr_resched:
 /* Disable interrupts, we are now committed to the state restore */
 disable_irq
 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
 VM_OFF;
 tophys(r1,r1);
 RESTORE_REGS
 addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
 lwi r1, r1, PT_R1 - PT_SIZE;
 bri 6f;
/* MS: Return to kernel state. */
2:
#ifdef CONFIG_PREEMPT
 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
 /* MS: get preempt_count from thread info */
 lwi r5, r11, TI_PREEMPT_COUNT;
 bgti r5, restore;

 lwi r5, r11, TI_FLAGS; /* get flags in thread info */
 andi r5, r5, _TIF_NEED_RESCHED;
 beqi r5, restore /* if zero jump over */

preempt:
 /* interrupts are off, that's why I am calling preempt_schedule_irq */
 bralid r15, preempt_schedule_irq
 nop
 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
 lwi r5, r11, TI_FLAGS; /* get flags in thread info */
 andi r5, r5, _TIF_NEED_RESCHED;
 bnei r5, preempt /* if non zero jump to resched */
restore:
#endif
 VM_OFF /* MS: turn off MMU */
 tophys(r1,r1)
 RESTORE_REGS
 addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
 tovirt(r1,r1);
6:
IRQ_return: /* MS: Make global symbol for debugging */
 rtid r14, 0
 nop

/*
 * `Debug' trap
 * We enter dbtrap in "BIP" (breakpoint) mode.
 * So we exit the breakpoint mode with an 'rtbd' and proceed with the
 * original dbtrap.
 * However, wait to save state first.
 */
C_ENTRY(_debug_exception):
 /* BIP bit is set on entry, no interrupts can occur */
 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))

 swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
 set_bip; /* equalize initial state for all possible entries */
 clear_eip;
 enable_irq;
 mfs r11, rmsr
 nop
 andi r11, r11, MSR_UMS
 bnei r11, 1f
 /* Kernel-mode state save. */
 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
 tophys(r1,r11);
 swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */

 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
 SAVE_REGS;

 addi r11, r0, 1; /* Was in kernel-mode. */
 swi r11, r1, PTO + PT_MODE;
 brid 2f;
 nop; /* Fill delay slot */
1: /* User-mode state save. */
 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
 tophys(r1,r1);
 lwi r1, r1, TS_THREAD_INFO; /* get the thread info */
 addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */
 tophys(r1,r1);

 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
 SAVE_REGS;

 swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */
 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
 swi r11, r1, PTO+PT_R1; /* Store user SP. */
2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
 /* Save away the syscall number. */
 swi r0, r1, PTO+PT_R0;
 tovirt(r1,r1)

 addi r5, r0, SIGTRAP /* Arg 1: send the trap signal */
 add r6, r0, CURRENT_TASK; /* Arg 2: current task */
 addk r7, r0, r0 /* Arg 3: zero */

 set_vms;
 addik r11, r0, send_sig;
 addik r15, r0, dbtrap_call;
dbtrap_call: rtbd r11, 0;
 nop;

 set_bip; /* Ints masked for state restore */
 lwi r11, r1, PTO+PT_MODE;
 bnei r11, 2f;

 /* Get current task ptr into r11 */
 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
 andi r11, r11, _TIF_NEED_RESCHED;
 beqi r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */

 bralid r15, schedule; /* Call scheduler */
 nop; /* delay slot */
 /* XXX Is PT_DTRACE handling needed here? */
 /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */

 /* Maybe handle a signal */
5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
 andi r11, r11, _TIF_SIGPENDING;
 beqi r11, 1f; /* No signals to handle, skip signal handling */

/* Handle a signal return; Pending signals should be in r18. */
 /* Not all registers are saved by the normal trap/interrupt entry
  points (for instance, call-saved registers (because the normal
  C-compiler calling sequence in the kernel makes sure they're
  preserved), and call-clobbered registers in the case of
  traps), but signal handlers may want to examine or change the
  complete register state. Here we save anything not saved by
  the normal entry sequence, so that it may be safely restored
  (in a possibly modified form) after do_signal returns. */

 addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
 addi r7, r0, 0; /* Arg 3: int in_syscall */
 bralid r15, do_signal; /* Handle any signals */
 add r6, r0, r0; /* Arg 2: sigset_t *oldset */


/* Finally, return to user state. */
1:
 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
 VM_OFF;
 tophys(r1,r1);

 RESTORE_REGS
 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */


 lwi r1, r1, PT_R1 - PT_SIZE;
 /* Restore user stack pointer. */
 bri 6f;

/* Return to kernel state. */
2: VM_OFF;
 tophys(r1,r1);
 RESTORE_REGS
 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */

 tovirt(r1,r1);
6:
DBTRAP_return: /* Make global symbol for debugging */
 rtbd r14, 0; /* Instructions to return from an IRQ */
 nop;


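/*
 * Judging from the register usage below, _switch_to is called roughly as
 *	struct task_struct *_switch_to(struct thread_info *prev,
 *				       struct thread_info *next);
 * with the thread_info pointers in r5/r6 and the previous task returned
 * in r3 (this is a reading of the code, not a quoted prototype).
 */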
ENTRY(_switch_to)
 /* prepare return value */
 addk r3, r0, CURRENT_TASK

 /* save registers in cpu_context */
 /* use r11 and r12, volatile registers, as temp register */
 /* give start of cpu_context for previous process */
 addik r11, r5, TI_CPU_CONTEXT
 swi r1, r11, CC_R1
 swi r2, r11, CC_R2
 /* skip volatile registers.
  * they are saved on stack when we jumped to _switch_to() */
 /* dedicated registers */
 swi r13, r11, CC_R13
 swi r14, r11, CC_R14
 swi r15, r11, CC_R15
 swi r16, r11, CC_R16
 swi r17, r11, CC_R17
 swi r18, r11, CC_R18
 /* save non-volatile registers */
 swi r19, r11, CC_R19
 swi r20, r11, CC_R20
 swi r21, r11, CC_R21
 swi r22, r11, CC_R22
 swi r23, r11, CC_R23
 swi r24, r11, CC_R24
 swi r25, r11, CC_R25
 swi r26, r11, CC_R26
 swi r27, r11, CC_R27
 swi r28, r11, CC_R28
 swi r29, r11, CC_R29
 swi r30, r11, CC_R30
 /* special purpose registers */
 mfs r12, rmsr
 nop
 swi r12, r11, CC_MSR
 mfs r12, rear
 nop
 swi r12, r11, CC_EAR
 mfs r12, resr
 nop
 swi r12, r11, CC_ESR
 mfs r12, rfsr
 nop
 swi r12, r11, CC_FSR

 /* update r31 (current) - give me the pointer to the task which will run next */
 lwi CURRENT_TASK, r6, TI_TASK
 /* store it to current_save too */
 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)

 /* get new process' cpu context and restore */
 /* give me the start of the next task's context */
 addik r11, r6, TI_CPU_CONTEXT

 /* non-volatile registers */
 lwi r30, r11, CC_R30
 lwi r29, r11, CC_R29
 lwi r28, r11, CC_R28
 lwi r27, r11, CC_R27
 lwi r26, r11, CC_R26
 lwi r25, r11, CC_R25
 lwi r24, r11, CC_R24
 lwi r23, r11, CC_R23
 lwi r22, r11, CC_R22
 lwi r21, r11, CC_R21
 lwi r20, r11, CC_R20
 lwi r19, r11, CC_R19
 /* dedicated registers */
 lwi r18, r11, CC_R18
 lwi r17, r11, CC_R17
 lwi r16, r11, CC_R16
 lwi r15, r11, CC_R15
 lwi r14, r11, CC_R14
 lwi r13, r11, CC_R13
 /* skip volatile registers */
 lwi r2, r11, CC_R2
 lwi r1, r11, CC_R1

 /* special purpose registers */
 lwi r12, r11, CC_FSR
 mts rfsr, r12
 nop
 lwi r12, r11, CC_MSR
 mts rmsr, r12
 nop

 rtsd r15, 8
 nop

ENTRY(_reset)
 brai 0x70; /* Jump back to FS-boot */

ENTRY(_break)
 mfs r5, rmsr
 nop
 swi r5, r0, 0x250 + TOPHYS(r0_ram)
 mfs r5, resr
 nop
 swi r5, r0, 0x254 + TOPHYS(r0_ram)
 bri 0

 /* These are compiled and loaded into high memory, then
  * copied into place in mach_early_setup */
 .section .init.ivt, "ax"
 .org 0x0
 /* this is very important - here is the reset vector */
 /* in the current MMU branch you don't care what is here - it is
  * used from the bootloader side - but this is correct for FS-BOOT */
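 /* Assumed MicroBlaze vector map that the entries below fill in:
  *	0x00  reset              -> FS-BOOT at 0x70
  *	0x08  user exception     -> _user_exception (syscall entry)
  *	0x10  interrupt          -> _interrupt
  *	0x18  break ("nmi")      -> _break
  *	0x20  HW exception       -> _hw_exception_handler
  *	0x60  debug exception    -> _debug_exception
  */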
 brai 0x70
 nop
 brai TOPHYS(_user_exception); /* syscall handler */
 brai TOPHYS(_interrupt); /* Interrupt handler */
 brai TOPHYS(_break); /* nmi trap handler */
 brai TOPHYS(_hw_exception_handler); /* HW exception handler */

 .org 0x60
 brai TOPHYS(_debug_exception); /* debug trap handler */

.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)

type_SYSCALL:
 .ascii "SYSCALL\0"
type_IRQ:
 .ascii "IRQ\0"
type_IRQ_PREEMPT:
 .ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
 .ascii " SYSCALL (PREEMPTED)\0"

 /*
  * Trap decoding for stack unwinder
  * Tuples are (start addr, end addr, string)
  * If the return address lies in [start addr, end addr],
  * the unwinder displays 'string'
  */

 .align 4
.global microblaze_trap_handlers
microblaze_trap_handlers:
 /* Exact matches come first */
 .word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
 .word ret_from_irq ; .word ret_from_irq ; .word type_IRQ
 /* Fuzzy matches go here */
 .word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
 .word ret_from_trap; .word TRAP_return ; .word type_SYSCALL_PREEMPT
 /* End of table */
 .word 0 ; .word 0 ; .word 0