/*
 * Low-level system-call handling, trap handlers and context-switching
 *
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
 * Copyright (C) 2001,2002 NEC Corporation
 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 * Heavily modified by John Williams for Microblaze
 */

#include <linux/sys.h>
#include <linux/linkage.h>

#include <asm/entry.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/exceptions.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

#include <asm/page.h>
#include <asm/unistd.h>

#include <linux/errno.h>
#include <asm/signal.h>

#undef DEBUG

/* The size of a state save frame. */
#define STATE_SAVE_SIZE	(PT_SIZE + STATE_SAVE_ARG_SPACE)

/* The offset of the struct pt_regs in a `state save frame' on the stack. */
#define PTO	STATE_SAVE_ARG_SPACE /* 24 the space for args */

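/*
 * Layout sketch (inferred from the two defines above): a state save frame
 * on the kernel stack looks roughly like
 *
 *	r1       ->  [ STATE_SAVE_ARG_SPACE bytes of argument space ]
 *	r1 + PTO ->  [ struct pt_regs, PT_SIZE bytes                ]
 *
 * so PTO is simply the offset of pt_regs within the frame.
 */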
#define C_ENTRY(name)	.globl name; .align 4; name

/*
 * Various ways of setting and clearing BIP in flags reg.
 * This is mucky, but necessary on MicroBlaze versions that
 * allow msr ops to write to BIP
 */
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	.macro	clear_bip
	msrclr	r0, MSR_BIP
	nop
	.endm

	.macro	set_bip
	msrset	r0, MSR_BIP
	nop
	.endm

	.macro	clear_eip
	msrclr	r0, MSR_EIP
	nop
	.endm

	.macro	set_ee
	msrset	r0, MSR_EE
	nop
	.endm

	.macro	disable_irq
	msrclr	r0, MSR_IE
	nop
	.endm

	.macro	enable_irq
	msrset	r0, MSR_IE
	nop
	.endm

	.macro	set_ums
	msrset	r0, MSR_UMS
	nop
	msrclr	r0, MSR_VMS
	nop
	.endm

	.macro	set_vms
	msrclr	r0, MSR_UMS
	nop
	msrset	r0, MSR_VMS
	nop
	.endm

	.macro	clear_ums
	msrclr	r0, MSR_UMS
	nop
	.endm

	.macro	clear_vms_ums
	msrclr	r0, MSR_VMS | MSR_UMS
	nop
	.endm
#else
	.macro	clear_bip
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_BIP
	mts	rmsr, r11
	nop
	.endm

	.macro	set_bip
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_BIP
	mts	rmsr, r11
	nop
	.endm

	.macro	clear_eip
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_EIP
	mts	rmsr, r11
	nop
	.endm

	.macro	set_ee
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_EE
	mts	rmsr, r11
	nop
	.endm

	.macro	disable_irq
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_IE
	mts	rmsr, r11
	nop
	.endm

	.macro	enable_irq
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_IE
	mts	rmsr, r11
	nop
	.endm

	.macro	set_ums
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	.macro	set_vms
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	.macro	clear_ums
	mfs	r11, rmsr
	nop
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	.macro	clear_vms_ums
	mfs	r11, rmsr
	nop
	andni	r11, r11, (MSR_VMS|MSR_UMS)
	mts	rmsr, r11
	nop
	.endm
#endif

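/*
 * A note on the two variants above (sketch based on this file alone):
 * cores built with CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR can flip single
 * MSR bits with a single msrset/msrclr, e.g. disable_irq becomes
 *
 *	msrclr	r0, MSR_IE
 *	nop
 *
 * while the fallback variant has to do a read-modify-write of rmsr through
 * r11, which is why some callers (see unaligned_data_trap below) must save
 * r11 around these macros.
 */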
/* Define how to call high-level functions. With MMU, virtual mode must be
 * enabled when calling the high-level function. Clobbers R11.
 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
 */

/* turn on virtual protected mode save */
#define VM_ON		\
	set_ums;	\
	rted	r0, 2f;	\
	nop; \
2:

/* turn off virtual protected mode save and user mode save */
#define VM_OFF			\
	clear_vms_ums;		\
	rted	r0, TOPHYS(1f);	\
	nop; \
1:

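/*
 * How VM_ON/VM_OFF work (sketch; the exact MSR semantics are assumed from
 * the rt*d usage throughout this file): set_ums/clear_vms_ums only change
 * the "save" bits MSR_UMS/MSR_VMS, and the following rted makes them take
 * effect at the branch target, so VM_ON lands at the local label 2: already
 * in virtual mode, while VM_OFF lands at TOPHYS(1f) - the physical address
 * of the label - with the MMU off.
 */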
#define SAVE_REGS \
	swi	r2, r1, PTO+PT_R2;	/* Save SDA */		\
	swi	r3, r1, PTO+PT_R3;				\
	swi	r4, r1, PTO+PT_R4;				\
	swi	r5, r1, PTO+PT_R5;				\
	swi	r6, r1, PTO+PT_R6;				\
	swi	r7, r1, PTO+PT_R7;				\
	swi	r8, r1, PTO+PT_R8;				\
	swi	r9, r1, PTO+PT_R9;				\
	swi	r10, r1, PTO+PT_R10;				\
	swi	r11, r1, PTO+PT_R11; /* save clobbered regs after rval */\
	swi	r12, r1, PTO+PT_R12;				\
	swi	r13, r1, PTO+PT_R13;	/* Save SDA2 */		\
	swi	r14, r1, PTO+PT_PC;	/* PC, before IRQ/trap */ \
	swi	r15, r1, PTO+PT_R15;	/* Save LP */		\
	swi	r18, r1, PTO+PT_R18;	/* Save asm scratch reg */ \
	swi	r19, r1, PTO+PT_R19;				\
	swi	r20, r1, PTO+PT_R20;				\
	swi	r21, r1, PTO+PT_R21;				\
	swi	r22, r1, PTO+PT_R22;				\
	swi	r23, r1, PTO+PT_R23;				\
	swi	r24, r1, PTO+PT_R24;				\
	swi	r25, r1, PTO+PT_R25;				\
	swi	r26, r1, PTO+PT_R26;				\
	swi	r27, r1, PTO+PT_R27;				\
	swi	r28, r1, PTO+PT_R28;				\
	swi	r29, r1, PTO+PT_R29;				\
	swi	r30, r1, PTO+PT_R30;				\
	swi	r31, r1, PTO+PT_R31;	/* Save current task reg */ \
	mfs	r11, rmsr;		/* save MSR */		\
	nop;							\
	swi	r11, r1, PTO+PT_MSR;

#define RESTORE_REGS \
	lwi	r11, r1, PTO+PT_MSR;				\
	mts	rmsr, r11;					\
	nop;							\
	lwi	r2, r1, PTO+PT_R2;	/* restore SDA */	\
	lwi	r3, r1, PTO+PT_R3;				\
	lwi	r4, r1, PTO+PT_R4;				\
	lwi	r5, r1, PTO+PT_R5;				\
	lwi	r6, r1, PTO+PT_R6;				\
	lwi	r7, r1, PTO+PT_R7;				\
	lwi	r8, r1, PTO+PT_R8;				\
	lwi	r9, r1, PTO+PT_R9;				\
	lwi	r10, r1, PTO+PT_R10;				\
	lwi	r11, r1, PTO+PT_R11; /* restore clobbered regs after rval */\
	lwi	r12, r1, PTO+PT_R12;				\
	lwi	r13, r1, PTO+PT_R13;	/* restore SDA2 */	\
	lwi	r14, r1, PTO+PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\
	lwi	r15, r1, PTO+PT_R15;	/* restore LP */	\
	lwi	r18, r1, PTO+PT_R18;	/* restore asm scratch reg */ \
	lwi	r19, r1, PTO+PT_R19;				\
	lwi	r20, r1, PTO+PT_R20;				\
	lwi	r21, r1, PTO+PT_R21;				\
	lwi	r22, r1, PTO+PT_R22;				\
	lwi	r23, r1, PTO+PT_R23;				\
	lwi	r24, r1, PTO+PT_R24;				\
	lwi	r25, r1, PTO+PT_R25;				\
	lwi	r26, r1, PTO+PT_R26;				\
	lwi	r27, r1, PTO+PT_R27;				\
	lwi	r28, r1, PTO+PT_R28;				\
	lwi	r29, r1, PTO+PT_R29;				\
	lwi	r30, r1, PTO+PT_R30;				\
	lwi	r31, r1, PTO+PT_R31;	/* Restore cur task reg */

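/*
 * Note (descriptive, based on the macros above): SAVE_REGS/RESTORE_REGS
 * deliberately skip r0 (hard-wired zero), r1 (stored separately as PT_R1 by
 * the entry paths) and r16/r17; the hardware-exception entries store r17,
 * the exception return address, into PT_PC themselves.  The PT_* offsets
 * come from asm-offsets.h and index into struct pt_regs.
 */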
#define SAVE_STATE	\
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */	\
	/* See if already in kernel mode. */				\
	mfs	r1, rmsr;						\
	nop;								\
	andi	r1, r1, MSR_UMS;					\
	bnei	r1, 1f;							\
	/* Kernel-mode state save. */					\
	/* Reload kernel stack-ptr. */					\
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	tophys(r1,r1);							\
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
	SAVE_REGS							\
	brid	2f;							\
	swi	r1, r1, PTO+PT_MODE;					\
1:	/* User-mode state save. */					\
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
	tophys(r1,r1);							\
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */	\
	addik	r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */\
	tophys(r1,r1);							\
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
	SAVE_REGS							\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	swi	r11, r1, PTO+PT_R1; /* Store user SP. */		\
	swi	r0, r1, PTO + PT_MODE; /* Was in user-mode. */		\
	/* MS: I am clearing UMS even when coming from kernel space */	\
	clear_ums;							\
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));

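/*
 * Note (descriptive): SAVE_STATE leaves r1 as a physical address; the
 * exception entries below follow it with tovirt(r1,r1) once the frame and
 * the saved PC (r17) are in place.
 */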
.text

/*
 * User trap.
 *
 * System calls are handled here.
 *
 * Syscall protocol:
 * Syscall number in r12, args in r5-r10
 * Return value in r3
 *
 * Trap entered via brki instruction, so BIP bit is set, and interrupts
 * are masked. This is nice; it means we don't have to CLI before state save
 */
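/*
 * Illustrative only (not part of the kernel, and the exact user-side stub
 * is an assumption): with the vector table at the end of this file placing
 * _user_exception at offset 0x08, a user-space syscall would look roughly
 * like
 *
 *	addik	r12, r0, __NR_xxx	(syscall number; args in r5..r10)
 *	brki	r14, 0x08		(trap into the kernel)
 *	nop
 *	(result in r3)
 *
 * The "addi r14, r14, 4" at the top of _user_exception below makes the
 * eventual return land on the instruction following the brki.
 */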
C_ENTRY(_user_exception):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	addi	r14, r14, 4	/* return address is 4 bytes after call */

	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f

/* Kernel-mode state save - kernel execve */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr */
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS

	swi	r1, r1, PTO + PT_MODE; /* pt_regs -> kernel mode */
	brid	2f;
	nop;				/* Fill delay slot */

/* User-mode state save. */
1:
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
/* calculate kernel stack pointer from task struct 8k */
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS

	swi	r0, r1, PTO + PT_MODE;		/* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;		/* Store user SP. */
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	/* Save away the syscall number. */
	swi	r12, r1, PTO+PT_R0;
	tovirt(r1,r1)

/* where the trap should return needs -8 to adjust for rtsd r15, 8 */
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid. The LP
 * register should point to the location where
 * the called function should return. [note that MAKE_SYS_CALL uses label 1] */

	# Step into virtual mode.
	set_vms;
	addik	r11, r0, 3f
	rtid	r11, 0
	nop
3:
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
	lwi	r11, r11, TI_FLAGS	/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 4f

	addik	r3, r0, -ENOSYS
	swi	r3, r1, PTO + PT_R3
	brlid	r15, do_syscall_trace_enter
	addik	r5, r1, PTO + PT_R0

	# do_syscall_trace_enter returns the new syscall nr.
	addk	r12, r0, r3
	lwi	r5, r1, PTO+PT_R5;
	lwi	r6, r1, PTO+PT_R6;
	lwi	r7, r1, PTO+PT_R7;
	lwi	r8, r1, PTO+PT_R8;
	lwi	r9, r1, PTO+PT_R9;
	lwi	r10, r1, PTO+PT_R10;
4:
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid.
 * The LP register should point to the location where the called function
 * should return. [note that MAKE_SYS_CALL uses label 1] */
	/* See if the system call number is valid */
	addi	r11, r12, -__NR_syscalls;
	bgei	r11, 5f;
	/* Figure out which function to use for this system call. */
	/* Note Microblaze barrel shift is optional, so don't rely on it */
	add	r12, r12, r12;		/* convert num -> ptr */
	add	r12, r12, r12;

#ifdef DEBUG
	/* Trace syscalls and store them to r0_ram */
	lwi	r3, r12, 0x400 + r0_ram
	addi	r3, r3, 1
	swi	r3, r12, 0x400 + r0_ram
#endif

	# Find and jump into the syscall handler.
	lwi	r12, r12, sys_call_table
	/* where the trap should return needs -8 to adjust for rtsd r15, 8 */
	addi	r15, r0, ret_from_trap-8
	bra	r12

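/*
 * Why ret_from_trap-8 (descriptive): a syscall handler returns with
 * "rtsd r15, 8", i.e. it jumps to r15 + 8, so loading r15 with
 * ret_from_trap - 8 makes every handler return exactly at ret_from_trap.
 */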
	/* The syscall number is invalid, return an error. */
5:
	rtsd	r15, 8;		/* looks like a normal subroutine return */
	addi	r3, r0, -ENOSYS;

/* Entry point used to return from a syscall/trap */
/* We re-enable BIP bit before state restore */
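/*
 * PT_MODE convention (descriptive, from the entry paths above): the
 * kernel-mode entries store a non-zero value (r1) into PT_MODE and the
 * user-mode entries store 0, so "bnei on PT_MODE" below means
 * "returning to kernel mode".
 */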
C_ENTRY(ret_from_trap):
	swi	r3, r1, PTO + PT_R3
	swi	r4, r1, PTO + PT_R4

	lwi	r11, r1, PTO + PT_MODE;
/* See if returning to kernel mode, if so, skip resched &c. */
	bnei	r11, 2f;
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* FIXME: Restructure all these flag checks. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 1f

	brlid	r15, do_syscall_trace_leave
	addik	r5, r1, PTO + PT_R0
1:
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	bralid	r15, schedule;	/* Call scheduler */
	nop;			/* delay slot */

	/* Maybe handle a signal */
5:	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;	/* No signals pending, skip handling */

	addik	r5, r1, PTO;	/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 1;	/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0;	/* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
1:	set_bip;	/* Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state. */
2:	set_bip;	/* Ints masked for state restore */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */
	tovirt(r1,r1);
6:
TRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;


/* These syscalls need access to the struct pt_regs on the stack, so we
   implement them in assembly (they're basically all wrappers anyway). */

C_ENTRY(sys_fork_wrapper):
	addi	r5, r0, SIGCHLD		/* Arg 0: flags */
	lwi	r6, r1, PTO+PT_R1	/* Arg 1: child SP (use parent's) */
	addik	r7, r1, PTO		/* Arg 2: parent context */
	add	r8, r0, r0		/* Arg 3: (unused) */
	add	r9, r0, r0;		/* Arg 4: (unused) */
	brid	do_fork		/* Do real work (tail-call) */
	add	r10, r0, r0;		/* Arg 5: (unused) */

/* This is the initial entry point for a new child thread, with an
   appropriate stack in place that makes it look like the child is in the
   middle of a syscall.  This function is actually `returned to' from
   switch_thread (copy_thread makes ret_from_fork the return address in each
   new thread's saved context). */
C_ENTRY(ret_from_fork):
	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
	add	r3, r5, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	brid	ret_from_trap;	/* Do normal trap return */
	add	r3, r0, r0;	/* Child's fork call should return 0. */

C_ENTRY(sys_vfork):
	brid	microblaze_vfork	/* Do real work (tail-call) */
	addik	r5, r1, PTO

C_ENTRY(sys_clone):
	bnei	r6, 1f;			/* See if child SP arg (arg 1) is 0. */
	lwi	r6, r1, PTO + PT_R1;	/* If so, use parent's stack ptr */
1:	addik	r7, r1, PTO;		/* Arg 2: parent context */
	add	r8, r0, r0;		/* Arg 3: (unused) */
	add	r9, r0, r0;		/* Arg 4: (unused) */
	brid	do_fork		/* Do real work (tail-call) */
	add	r10, r0, r0;		/* Arg 5: (unused) */

C_ENTRY(sys_execve):
	brid	microblaze_execve;	/* Do real work (tail-call). */
	addik	r8, r1, PTO;		/* add user context as 4th arg */

C_ENTRY(sys_rt_sigreturn_wrapper):
	swi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	swi	r4, r1, PTO+PT_R4;
	brlid	r15, sys_rt_sigreturn	/* Do real work */
	addik	r5, r1, PTO;		/* add user context as 1st arg */
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	bri	ret_from_trap /* fall through will not work here due to align */
	nop;

/*
 * HW EXCEPTION routine start
 */
C_ENTRY(full_exception_trap):
	/* adjust exception address for privileged instruction
	 * for finding where it is */
	addik	r17, r17, -4
	SAVE_STATE /* Save registers */
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* FIXME this can be stored directly in the PT_ESR reg.
	 * I tested it but there is a fault */
	/* where the trap should return needs -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc - 8
	mfs	r6, resr
	nop
	mfs	r7, rfsr;	/* save FSR */
	nop
	mts	rfsr, r0;	/* Clear sticky fsr */
	nop
	rted	r0, full_exception
	addik	r5, r1, PTO	/* parameter struct pt_regs * regs */

558/*
559 * Unaligned data trap.
560 *
561 * Unaligned data trap last on 4k page is handled here.
562 *
563 * Trap entered via exception, so EE bit is set, and interrupts
564 * are masked. This is nice, means we don't have to CLI before state save
565 *
566 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
567 */
568C_ENTRY(unaligned_data_trap):
8b110d15
MS
569 /* MS: I have to save r11 value and then restore it because
570 * set_bit, clear_eip, set_ee use r11 as temp register if MSR
571 * instructions are not used. We don't need to do if MSR instructions
572 * are used and they use r0 instead of r11.
573 * I am using ENTRY_SP which should be primary used only for stack
574 * pointer saving. */
575 swi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
576 set_bip; /* equalize initial state for all possible entries */
577 clear_eip;
578 set_ee;
579 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
ca54502b 580 SAVE_STATE /* Save registers.*/
06a54604
MS
581 /* PC, before IRQ/trap - this is one instruction above */
582 swi r17, r1, PTO+PT_PC;
583 tovirt(r1,r1)
ca54502b 584 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
b9ea77e2 585 addik r15, r0, ret_from_exc-8
ca54502b
MS
586 mfs r3, resr /* ESR */
587 nop
588 mfs r4, rear /* EAR */
589 nop
c318d483 590 rtbd r0, _unaligned_data_exception
b9ea77e2 591 addik r7, r1, PTO /* parameter struct pt_regs * regs */
ca54502b
MS
592
/*
 * Page fault traps.
 *
 * If the real exception handler (from hw_exception_handler.S) didn't find
 * the mapping for the process, then we're thrown here to handle such a
 * situation.
 *
 * Trap entered via exceptions, so EE bit is set, and interrupts
 * are masked. This is nice; it means we don't have to CLI before state save
 *
 * Build a standard exception frame for TLB Access errors. All TLB exceptions
 * will bail out to this point if they can't resolve the lightweight TLB fault.
 *
 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
 *	void do_page_fault(struct pt_regs *regs,
 *				unsigned long address,
 *				unsigned long error_code)
 */
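/*
 * Argument passing (descriptive, matching the per-instruction comments
 * below): following the usual first-arguments-in-r5..r10 convention used in
 * this file, r5 carries regs, r6 the faulting address (from rear) and r7
 * the error code (from resr, or 0 for the instruction-fault entry).
 */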
/* data and instruction trap - which one it is gets resolved in fault.c */
C_ENTRY(page_fault_data_trap):
	SAVE_STATE		/* Save registers. */
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* where the trap should return needs -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r6, rear		/* parameter unsigned long address */
	nop
	mfs	r7, resr		/* parameter unsigned long error_code */
	nop
	rted	r0, do_page_fault
	addik	r5, r1, PTO		/* parameter struct pt_regs * regs */

C_ENTRY(page_fault_instr_trap):
	SAVE_STATE		/* Save registers. */
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* where the trap should return needs -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r6, rear		/* parameter unsigned long address */
	nop
	ori	r7, r0, 0		/* parameter unsigned long error_code */
	rted	r0, do_page_fault
	addik	r5, r1, PTO		/* parameter struct pt_regs * regs */

/* Entry point used to return from an exception. */
C_ENTRY(ret_from_exc):
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;	/* See if returning to kernel mode, */
				/* ... if so, skip resched &c. */

	/* We're returning to user mode, so check for various conditions that
	   trigger rescheduling. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;			/* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;	/* No signals pending, skip handling */

	/*
	 * Handle a signal return; Pending signals should be in r18.
	 *
	 * Not all registers are saved by the normal trap/interrupt entry
	 * points (for instance, call-saved registers (because the normal
	 * C-compiler calling sequence in the kernel makes sure they're
	 * preserved), and call-clobbered registers in the case of
	 * traps), but signal handlers may want to examine or change the
	 * complete register state.  Here we save anything not saved by
	 * the normal entry sequence, so that it may be safely restored
	 * (in a possibly modified form) after do_signal returns. */
	addik	r5, r1, PTO;	/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 0;	/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0;	/* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
1:	set_bip;	/* Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */

	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;
/* Return to kernel state. */
2:	set_bip;	/* Ints masked for state restore */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */

	tovirt(r1,r1);
6:
EXC_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;

/*
 * HW EXCEPTION routine end
 */

/*
 * Hardware maskable interrupts.
 *
 * The stack-pointer (r1) should have already been saved to the memory
 * location PER_CPU(ENTRY_SP).
 */
C_ENTRY(_interrupt):
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual. */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	/* MS: See if already in kernel mode. */
	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f

/* Kernel-mode state save. */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	tophys(r1,r1); /* MS: I have in r1 physical address where stack is */
	/* save registers */
/* MS: Make room on the stack -> activation record */
	addik	r1, r1, -STATE_SAVE_SIZE;
	SAVE_REGS
	swi	r1, r1, PTO + PT_MODE; /* 0 - user mode, 1 - kernel mode */
	brid	2f;
	nop;				/* MS: Fill delay slot */

1:
/* User-mode state save. */
	/* MS: get the saved current */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);
	/* save registers */
	addik	r1, r1, -STATE_SAVE_SIZE;
	SAVE_REGS
	/* calculate mode */
	swi	r0, r1, PTO + PT_MODE;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;
2:
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tovirt(r1,r1)
	addik	r5, r1, PTO;
	set_vms;
	addik	r11, r0, do_IRQ;
	addik	r15, r0, irq_call;
irq_call:rtbd	r11, 0;
	nop;

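/*
 * Descriptive note on the sequence above: r15 is pointed at irq_call before
 * jumping to do_IRQ through the rtbd, so when do_IRQ returns with the usual
 * "rtsd r15, 8" it lands 8 bytes past irq_call, i.e. at ret_from_irq below;
 * the rtbd itself also makes the set_vms bits take effect, so do_IRQ runs
 * in virtual mode.
 */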
/* MS: we are in virtual mode */
ret_from_irq:
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;

	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS; /* MS: get flags from thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f
	bralid	r15, schedule;
	nop; /* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
	lwi	r11, r11, TI_FLAGS; /* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqid	r11, no_intr_resched
/* Handle a signal return; Pending signals should be in r18. */
	addi	r7, r0, 0; /* Arg 3: int in_syscall */
	addik	r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0; /* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
no_intr_resched:
	/* Disable interrupts, we are now committed to the state restore */
	disable_irq
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;
	bri	6f;
/* MS: Return to kernel state. */
2:
#ifdef CONFIG_PREEMPT
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	/* MS: get preempt_count from thread info */
	lwi	r5, r11, TI_PREEMPT_COUNT;
	bgti	r5, restore;

	lwi	r5, r11, TI_FLAGS; /* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	beqi	r5, restore /* if zero jump over */

preempt:
	/* interrupts are off, that's why I am calling preempt_schedule_irq */
	bralid	r15, preempt_schedule_irq
	nop
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
	lwi	r5, r11, TI_FLAGS; /* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	bnei	r5, preempt /* if non-zero jump back to resched */
restore:
#endif
	VM_OFF /* MS: turn off MMU */
	tophys(r1,r1)
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* MS: Clean up stack space. */
	tovirt(r1,r1);
6:
IRQ_return: /* MS: Make global symbol for debugging */
	rtid	r14, 0
	nop

/*
 * `Debug' trap
 *  We enter dbtrap in "BIP" (breakpoint) mode.
 *  So we exit the breakpoint mode with an 'rtbd' and proceed with the
 *  original dbtrap.
 *  However, wait to save the state first.
 */
C_ENTRY(_debug_exception):
	/* BIP bit is set on entry, no interrupts can occur */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))

	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f
	/* Kernel-mode state save. */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr */
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS;

	swi	r1, r1, PTO + PT_MODE;
	brid	2f;
	nop;				/* Fill delay slot */
1:	/* User-mode state save. */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS;

	swi	r0, r1, PTO + PT_MODE; /* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1; /* Store user SP. */
2:
	tovirt(r1,r1)

	set_vms;
	addi	r5, r0, SIGTRAP		/* send the trap signal */
	add	r6, r0, CURRENT_TASK;	/* Arg 2: current task */
	addk	r7, r0, r0		/* 3rd param zero */
dbtrap_call:	rtbd	r0, send_sig;
	addik	r15, r0, dbtrap_call;

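/*
 * Descriptive note: as with irq_call above, r15 is set to dbtrap_call so
 * that send_sig's "rtsd r15, 8" return lands 8 bytes later, i.e. on the
 * set_bip below, and the rtbd both makes the set_vms bits take effect and
 * leaves the breakpoint (BIP) state for the duration of the call.
 */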
	set_bip;	/* Ints masked for state restore */
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;

	/* Get current task ptr into r11 */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */

	bralid	r15, schedule;	/* Call scheduler */
	nop;			/* delay slot */
	/* XXX Is PT_DTRACE handling needed here? */
	/* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;	/* No signals pending, skip handling */

/* Handle a signal return; Pending signals should be in r18. */
	/* Not all registers are saved by the normal trap/interrupt entry
	   points (for instance, call-saved registers (because the normal
	   C-compiler calling sequence in the kernel makes sure they're
	   preserved), and call-clobbered registers in the case of
	   traps), but signal handlers may want to examine or change the
	   complete register state.  Here we save anything not saved by
	   the normal entry sequence, so that it may be safely restored
	   (in a possibly modified form) after do_signal returns. */

	addik	r5, r1, PTO;	/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 0;	/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0;	/* Arg 2: sigset_t *oldset */


/* Finally, return to user state. */
1:
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */


	lwi	r1, r1, PT_R1 - PT_SIZE;
					/* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state. */
2:	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */

	tovirt(r1,r1);
6:
DBTRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;


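/*
 * Context switch.  A sketch of the calling convention assumed here, read
 * off the register usage below: r5 = previous thread_info, r6 = next
 * thread_info (only their TI_CPU_CONTEXT/TI_TASK fields are touched), and
 * the previous task pointer is returned in r3 per the usual return-value
 * convention in this file.
 */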
ENTRY(_switch_to)
	/* prepare return value */
	addk	r3, r0, CURRENT_TASK

	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp registers */
	/* get the start of cpu_context for the previous process */
	addik	r11, r5, TI_CPU_CONTEXT
	swi	r1, r11, CC_R1
	swi	r2, r11, CC_R2
	/* skip volatile registers.
	 * they are saved on the stack when we jumped to _switch_to() */
	/* dedicated registers */
	swi	r13, r11, CC_R13
	swi	r14, r11, CC_R14
	swi	r15, r11, CC_R15
	swi	r16, r11, CC_R16
	swi	r17, r11, CC_R17
	swi	r18, r11, CC_R18
	/* save non-volatile registers */
	swi	r19, r11, CC_R19
	swi	r20, r11, CC_R20
	swi	r21, r11, CC_R21
	swi	r22, r11, CC_R22
	swi	r23, r11, CC_R23
	swi	r24, r11, CC_R24
	swi	r25, r11, CC_R25
	swi	r26, r11, CC_R26
	swi	r27, r11, CC_R27
	swi	r28, r11, CC_R28
	swi	r29, r11, CC_R29
	swi	r30, r11, CC_R30
	/* special purpose registers */
	mfs	r12, rmsr
	nop
	swi	r12, r11, CC_MSR
	mfs	r12, rear
	nop
	swi	r12, r11, CC_EAR
	mfs	r12, resr
	nop
	swi	r12, r11, CC_ESR
	mfs	r12, rfsr
	nop
	swi	r12, r11, CC_FSR

	/* update r31 (CURRENT_TASK) with the pointer to the task which will
	 * run next */
	lwi	CURRENT_TASK, r6, TI_TASK
	/* store it to current_save too */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)

	/* get new process' cpu context and restore */
	/* get the start of the next task's cpu context */
	addik	r11, r6, TI_CPU_CONTEXT

	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */
	lwi	r2, r11, CC_R2
	lwi	r1, r11, CC_R1

	/* special purpose registers */
	lwi	r12, r11, CC_FSR
	mts	rfsr, r12
	nop
	lwi	r12, r11, CC_MSR
	mts	rmsr, r12
	nop

	rtsd	r15, 8
	nop

ENTRY(_reset)
	brai	0x70; /* Jump back to FS-boot */

ENTRY(_break)
	mfs	r5, rmsr
	nop
	swi	r5, r0, 0x250 + TOPHYS(r0_ram)
	mfs	r5, resr
	nop
	swi	r5, r0, 0x254 + TOPHYS(r0_ram)
	bri	0

	/* These are compiled and loaded into high memory, then
	 * copied into place in mach_early_setup */
	.section	.init.ivt, "ax"
	.org	0x0
	/* this is very important - here is the reset vector */
	/* in the current MMU branch you don't care what is here - it is
	 * used from the bootloader side - but this is correct for FS-BOOT */
	brai	0x70
	nop
	brai	TOPHYS(_user_exception);	/* syscall handler */
	brai	TOPHYS(_interrupt);		/* Interrupt handler */
	brai	TOPHYS(_break);			/* nmi trap handler */
	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */

	.org	0x60
	brai	TOPHYS(_debug_exception);	/* debug trap handler */

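/*
 * Vector offsets (a sketch, assuming each brai with a 32-bit TOPHYS target
 * assembles to an imm + brai pair of 8 bytes, hence the explicit nop after
 * the short "brai 0x70"): 0x00 reset, 0x08 user exception (syscalls),
 * 0x10 interrupt, 0x18 break/nmi, 0x20 HW exception, 0x60 debug.
 */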
.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)

type_SYSCALL:
	.ascii "SYSCALL\0"
type_IRQ:
	.ascii "IRQ\0"
type_IRQ_PREEMPT:
	.ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
	.ascii " SYSCALL (PREEMPTED)\0"

	/*
	 * Trap decoding for stack unwinder
	 * Tuples are (start addr, end addr, string)
	 * If the return address lies in [start addr, end addr],
	 * the unwinder displays 'string'
	 */

	.align 4
.global microblaze_trap_handlers
microblaze_trap_handlers:
	/* Exact matches come first */
	.word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
	.word ret_from_irq ; .word ret_from_irq  ; .word type_IRQ
	/* Fuzzy matches go here */
	.word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
	.word ret_from_trap; .word TRAP_return    ; .word type_SYSCALL_PREEMPT
	/* End of table */
	.word 0 ; .word 0 ; .word 0