arch/microblaze/kernel/entry.S
1/*
2 * Low-level system-call handling, trap handlers and context-switching
3 *
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008-2009 PetaLogix
6 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
7 * Copyright (C) 2001,2002 NEC Corporation
8 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
9 *
10 * This file is subject to the terms and conditions of the GNU General
11 * Public License. See the file COPYING in the main directory of this
12 * archive for more details.
13 *
14 * Written by Miles Bader <miles@gnu.org>
15 * Heavily modified by John Williams for Microblaze
16 */
17
18#include <linux/sys.h>
19#include <linux/linkage.h>
20
21#include <asm/entry.h>
22#include <asm/current.h>
23#include <asm/processor.h>
24#include <asm/exceptions.h>
25#include <asm/asm-offsets.h>
26#include <asm/thread_info.h>
27
28#include <asm/page.h>
29#include <asm/unistd.h>
30
31#include <linux/errno.h>
32#include <asm/signal.h>
33
34#undef DEBUG
35
36/* The size of a state save frame. */
37#define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE)
38
39/* The offset of the struct pt_regs in a `state save frame' on the stack. */
40#define PTO STATE_SAVE_ARG_SPACE /* 24 the space for args */
41
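/*
 * Rough picture of the save frame laid out by the entry code below
 * (a sketch inferred only from the two defines above; the real offsets
 * come from asm-offsets.h):
 *
 *	r1 (new)  ->	+----------------------------------------+
 *			| STATE_SAVE_ARG_SPACE (arg build space) |
 *	r1 + PTO  ->	+----------------------------------------+
 *			| struct pt_regs (PT_SIZE bytes)         |
 *			+----------------------------------------+  <- r1 on entry
 */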
42#define C_ENTRY(name) .globl name; .align 4; name
43
44/*
45 * Various ways of setting and clearing BIP in flags reg.
46 * This is mucky, but necessary using microblaze version that
47 * allows msr ops to write to BIP
48 */
49#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
50 .macro clear_bip
51 msrclr r11, MSR_BIP
52 nop
53 .endm
54
55 .macro set_bip
56 msrset r11, MSR_BIP
57 nop
58 .endm
59
60 .macro clear_eip
61 msrclr r11, MSR_EIP
62 nop
63 .endm
64
65 .macro set_ee
66 msrset r11, MSR_EE
67 nop
68 .endm
69
70 .macro disable_irq
71 msrclr r11, MSR_IE
72 nop
73 .endm
74
75 .macro enable_irq
76 msrset r11, MSR_IE
77 nop
78 .endm
79
80 .macro set_ums
81 msrset r11, MSR_UMS
82 nop
83 msrclr r11, MSR_VMS
84 nop
85 .endm
86
87 .macro set_vms
88 msrclr r11, MSR_UMS
89 nop
90 msrset r11, MSR_VMS
91 nop
92 .endm
93
94 .macro clear_vms_ums
 95	msrclr	r11, MSR_VMS | MSR_UMS
96 nop
97 .endm
98#else
99 .macro clear_bip
100 mfs r11, rmsr
101 nop
102 andi r11, r11, ~MSR_BIP
103 mts rmsr, r11
104 nop
105 .endm
106
107 .macro set_bip
108 mfs r11, rmsr
109 nop
110 ori r11, r11, MSR_BIP
111 mts rmsr, r11
112 nop
113 .endm
114
115 .macro clear_eip
116 mfs r11, rmsr
117 nop
118 andi r11, r11, ~MSR_EIP
119 mts rmsr, r11
120 nop
121 .endm
122
123 .macro set_ee
124 mfs r11, rmsr
125 nop
126 ori r11, r11, MSR_EE
127 mts rmsr, r11
128 nop
129 .endm
130
131 .macro disable_irq
132 mfs r11, rmsr
133 nop
134 andi r11, r11, ~MSR_IE
135 mts rmsr, r11
136 nop
137 .endm
138
139 .macro enable_irq
140 mfs r11, rmsr
141 nop
142 ori r11, r11, MSR_IE
143 mts rmsr, r11
144 nop
145 .endm
146
147 .macro set_ums
148 mfs r11, rmsr
149 nop
150 ori r11, r11, MSR_VMS
151 andni r11, r11, MSR_UMS
152 mts rmsr, r11
153 nop
154 .endm
155
156 .macro set_vms
157 mfs r11, rmsr
158 nop
159 ori r11, r11, MSR_VMS
160 andni r11, r11, MSR_UMS
161 mts rmsr, r11
162 nop
163 .endm
164
165 .macro clear_vms_ums
166 mfs r11, rmsr
167 nop
168 andni r11, r11, (MSR_VMS|MSR_UMS)
169 mts rmsr,r11
170 nop
171 .endm
172#endif
173
174/* Define how to call high-level functions. With MMU, virtual mode must be
175 * enabled when calling the high-level function. Clobbers R11.
176 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
177 */
178
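/* Usage note (restating how these are used later in this file): the entry
 * paths switch the MMU back on with set_vms plus an rted/rtid into the C
 * handler, while every exit path does VM_OFF and tophys(r1,r1) just before
 * RESTORE_REGS, so the final register restore runs from physical addresses. */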
179/* turn on virtual protected mode save */
180#define VM_ON \
181	set_ums; \
182	rted	r0, 2f; \
183 nop; \
1842:
185
186/* turn off virtual protected mode save and user mode save*/
187#define VM_OFF \
188	clear_vms_ums; \
189	rted	r0, TOPHYS(1f); \
190 nop; \
1911:
192
193#define SAVE_REGS \
194 swi r2, r1, PTO+PT_R2; /* Save SDA */ \
195 swi r3, r1, PTO+PT_R3; \
196 swi r4, r1, PTO+PT_R4; \
197 swi r5, r1, PTO+PT_R5; \
198 swi r6, r1, PTO+PT_R6; \
199 swi r7, r1, PTO+PT_R7; \
200 swi r8, r1, PTO+PT_R8; \
201 swi r9, r1, PTO+PT_R9; \
202 swi r10, r1, PTO+PT_R10; \
203 swi r11, r1, PTO+PT_R11; /* save clobbered regs after rval */\
204 swi r12, r1, PTO+PT_R12; \
205 swi r13, r1, PTO+PT_R13; /* Save SDA2 */ \
206 swi r14, r1, PTO+PT_PC; /* PC, before IRQ/trap */ \
207 swi r15, r1, PTO+PT_R15; /* Save LP */ \
208 swi r18, r1, PTO+PT_R18; /* Save asm scratch reg */ \
209 swi r19, r1, PTO+PT_R19; \
210 swi r20, r1, PTO+PT_R20; \
211 swi r21, r1, PTO+PT_R21; \
212 swi r22, r1, PTO+PT_R22; \
213 swi r23, r1, PTO+PT_R23; \
214 swi r24, r1, PTO+PT_R24; \
215 swi r25, r1, PTO+PT_R25; \
216 swi r26, r1, PTO+PT_R26; \
217 swi r27, r1, PTO+PT_R27; \
218 swi r28, r1, PTO+PT_R28; \
219 swi r29, r1, PTO+PT_R29; \
220 swi r30, r1, PTO+PT_R30; \
221 swi r31, r1, PTO+PT_R31; /* Save current task reg */ \
222 mfs r11, rmsr; /* save MSR */ \
223 nop; \
224 swi r11, r1, PTO+PT_MSR;
225
226#define RESTORE_REGS \
227 lwi r11, r1, PTO+PT_MSR; \
228 mts rmsr , r11; \
229 nop; \
230 lwi r2, r1, PTO+PT_R2; /* restore SDA */ \
231 lwi r3, r1, PTO+PT_R3; \
232 lwi r4, r1, PTO+PT_R4; \
233 lwi r5, r1, PTO+PT_R5; \
234 lwi r6, r1, PTO+PT_R6; \
235 lwi r7, r1, PTO+PT_R7; \
236 lwi r8, r1, PTO+PT_R8; \
237 lwi r9, r1, PTO+PT_R9; \
238 lwi r10, r1, PTO+PT_R10; \
239 lwi r11, r1, PTO+PT_R11; /* restore clobbered regs after rval */\
240 lwi r12, r1, PTO+PT_R12; \
241 lwi r13, r1, PTO+PT_R13; /* restore SDA2 */ \
242 lwi r14, r1, PTO+PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\
243 lwi r15, r1, PTO+PT_R15; /* restore LP */ \
244 lwi r18, r1, PTO+PT_R18; /* restore asm scratch reg */ \
245 lwi r19, r1, PTO+PT_R19; \
246 lwi r20, r1, PTO+PT_R20; \
247 lwi r21, r1, PTO+PT_R21; \
248 lwi r22, r1, PTO+PT_R22; \
249 lwi r23, r1, PTO+PT_R23; \
250 lwi r24, r1, PTO+PT_R24; \
251 lwi r25, r1, PTO+PT_R25; \
252 lwi r26, r1, PTO+PT_R26; \
253 lwi r27, r1, PTO+PT_R27; \
254 lwi r28, r1, PTO+PT_R28; \
255 lwi r29, r1, PTO+PT_R29; \
256 lwi r30, r1, PTO+PT_R30; \
257 lwi r31, r1, PTO+PT_R31; /* Restore cur task reg */
258
259.text
260
261/*
262 * User trap.
263 *
264 * System calls are handled here.
265 *
266 * Syscall protocol:
267 * Syscall number in r12, args in r5-r10
268 * Return value in r3
269 *
270 * Trap entered via brki instruction, so BIP bit is set, and interrupts
271 * are masked. This is nice, means we don't have to CLI before state save
272 */
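/*
 * Illustration only (not stated in this file): user space is expected to
 * reach this entry with a "brki r14, 0x08"-style trap, the slot that
 * brai TOPHYS(_user_exception) occupies in the .init.ivt table at the end
 * of this file; r14 then holds the address of the brki itself, which is
 * why 4 is added below to form the real return address.
 */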
273C_ENTRY(_user_exception):
274 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
275	addi	r14, r14, 4	/* return address is 4 bytes after the call */
276
277	mfs	r1, rmsr
278	nop
279 andi r1, r1, MSR_UMS
280 bnei r1, 1f
281
282/* Kernel-mode state save - kernel execve */
283 lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
284 tophys(r1,r1);
285
286 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
287 SAVE_REGS
288
289 addi r11, r0, 1; /* Was in kernel-mode. */
290 swi r11, r1, PTO+PT_MODE; /* pt_regs -> kernel mode */
291 brid 2f;
292 nop; /* Fill delay slot */
293
294/* User-mode state save. */
2951:
296 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
297 tophys(r1,r1);
298 lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
299/* calculate kernel stack pointer from task struct 8k */
300 addik r1, r1, THREAD_SIZE;
301 tophys(r1,r1);
302
303 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
304 SAVE_REGS
305
306 swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */
307 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
308 swi r11, r1, PTO+PT_R1; /* Store user SP. */
3092:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
310 /* Save away the syscall number. */
311 swi r12, r1, PTO+PT_R0;
312 tovirt(r1,r1)
313
314/* where the trap should return need -8 to adjust for rtsd r15, 8*/
315/* Jump to the appropriate function for the system call number in r12
316 * (r12 is not preserved), or return an error if r12 is not valid. The LP
317 * register should point to the location where
318 * the called function should return. [note that MAKE_SYS_CALL uses label 1] */
319
320 # Step into virtual mode.
321 set_vms;
322 addik r11, r0, 3f
323 rtid r11, 0
324 nop
3253:
326	lwi	r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
327 lwi r11, r11, TI_FLAGS /* get flags in thread info */
328 andi r11, r11, _TIF_WORK_SYSCALL_MASK
329 beqi r11, 4f
330
331 addik r3, r0, -ENOSYS
332 swi r3, r1, PTO + PT_R3
333 brlid r15, do_syscall_trace_enter
334 addik r5, r1, PTO + PT_R0
335
336 # do_syscall_trace_enter returns the new syscall nr.
337 addk r12, r0, r3
338 lwi r5, r1, PTO+PT_R5;
339 lwi r6, r1, PTO+PT_R6;
340 lwi r7, r1, PTO+PT_R7;
341 lwi r8, r1, PTO+PT_R8;
342 lwi r9, r1, PTO+PT_R9;
343 lwi r10, r1, PTO+PT_R10;
3444:
345/* Jump to the appropriate function for the system call number in r12
346 * (r12 is not preserved), or return an error if r12 is not valid.
347 * The LP register should point to the location where the called function
348 * should return. [note that MAKE_SYS_CALL uses label 1] */
349 /* See if the system call number is valid */
350	addi	r11, r12, -__NR_syscalls;
351	bgei	r11,5f;
352 /* Figure out which function to use for this system call. */
353 /* Note Microblaze barrel shift is optional, so don't rely on it */
354 add r12, r12, r12; /* convert num -> ptr */
355 add r12, r12, r12;
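	/* In C terms, the two adds above plus the later
	 *	lwi	r12, r12, sys_call_table
	 * amount to (sketch):  handler = ((void **)sys_call_table)[nr];
	 * the number is scaled by hand because a barrel shifter is an
	 * optional MicroBlaze feature, so a shift-left-by-2 can't be relied on. */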
356
357#ifdef DEBUG
358	/* Trace syscalls and store them to r0_ram */
359	lwi	r3, r12, 0x400 + r0_ram
360	addi	r3, r3, 1
361	swi	r3, r12, 0x400 + r0_ram
362#endif
363
364 # Find and jump into the syscall handler.
365 lwi r12, r12, sys_call_table
366 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
367	addi	r15, r0, ret_from_trap-8
368	bra	r12
369
370	/* The syscall number is invalid, return an error. */
3715:
372 addi r3, r0, -ENOSYS;
373 rtsd r15,8; /* looks like a normal subroutine return */
374 or r0, r0, r0
375
376
377/* Entry point used to return from a syscall/trap */
378/* We re-enable BIP bit before state restore */
379C_ENTRY(ret_from_trap):
380 swi r3, r1, PTO + PT_R3
381 swi r4, r1, PTO + PT_R4
382
383 lwi r11, r1, PTO+PT_MODE;
384/* See if returning to kernel mode, if so, skip resched &c. */
385 bnei r11, 2f;
386 /* We're returning to user mode, so check for various conditions that
387 * trigger rescheduling. */
388 /* FIXME: Restructure all these flag checks. */
389 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
390 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
391 andi r11, r11, _TIF_WORK_SYSCALL_MASK
392 beqi r11, 1f
393
394 brlid r15, do_syscall_trace_leave
395 addik r5, r1, PTO + PT_R0
3961:
397 /* We're returning to user mode, so check for various conditions that
398 * trigger rescheduling. */
399 /* get thread info from current task */
400 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
401 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
402 andi r11, r11, _TIF_NEED_RESCHED;
403 beqi r11, 5f;
404
405 bralid r15, schedule; /* Call scheduler */
406 nop; /* delay slot */
407
408 /* Maybe handle a signal */
4095: /* get thread info from current task*/
410 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
411 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
412 andi r11, r11, _TIF_SIGPENDING;
413	beqi	r11, 1f;	/* No signals pending, skip signal handling */
414
415	addik	r5, r1, PTO;	/* Arg 1: struct pt_regs *regs */
416 addi r7, r0, 1; /* Arg 3: int in_syscall */
417 bralid r15, do_signal; /* Handle any signals */
418	add	r6, r0, r0;	/* Arg 2: sigset_t *oldset */
419
420/* Finally, return to user state. */
4211:	set_bip;	/* Ints masked for state restore */
422	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
423 VM_OFF;
424 tophys(r1,r1);
425 RESTORE_REGS;
426 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
427 lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
428 bri 6f;
429
430/* Return to kernel state. */
4312: set_bip; /* Ints masked for state restore */
432 VM_OFF;
433 tophys(r1,r1);
434 RESTORE_REGS;
435 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
436 tovirt(r1,r1);
4376:
438TRAP_return: /* Make global symbol for debugging */
439 rtbd r14, 0; /* Instructions to return from an IRQ */
440 nop;
441
442
443/* These syscalls need access to the struct pt_regs on the stack, so we
444 implement them in assembly (they're basically all wrappers anyway). */
445
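/* Roughly what the wrapper below does, expressed in C (a sketch only; the
 * argument order follows the generic do_fork() convention of this kernel
 * era, not a real microblaze prototype):
 *
 *	long sys_fork_wrapper(void)
 *	{
 *		struct pt_regs *regs = <frame at r1 + PTO>;
 *		return do_fork(SIGCHLD, regs->r1, regs, 0, NULL, NULL);
 *	}
 */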
446C_ENTRY(sys_fork_wrapper):
447 addi r5, r0, SIGCHLD /* Arg 0: flags */
448 lwi r6, r1, PTO+PT_R1 /* Arg 1: child SP (use parent's) */
449	addik	r7, r1, PTO			/* Arg 2: parent context */
450	add	r8, r0, r0			/* Arg 3: (unused) */
451 add r9, r0, r0; /* Arg 4: (unused) */
452 add r10, r0, r0; /* Arg 5: (unused) */
453 brid do_fork /* Do real work (tail-call) */
454 nop;
455
456/* This is the initial entry point for a new child thread, with an appropriate
457   stack in place that makes it look like the child is in the middle of a
458 syscall. This function is actually `returned to' from switch_thread
459 (copy_thread makes ret_from_fork the return address in each new thread's
460 saved context). */
461C_ENTRY(ret_from_fork):
462 bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
463 add r3, r5, r0; /* switch_thread returns the prev task */
464 /* ( in the delay slot ) */
465 add r3, r0, r0; /* Child's fork call should return 0. */
466 brid ret_from_trap; /* Do normal trap return */
467 nop;
468
469C_ENTRY(sys_vfork):
470 brid microblaze_vfork /* Do real work (tail-call) */
471	addik	r5, r1, PTO
472
473C_ENTRY(sys_clone):
474	bnei	r6, 1f;			/* See if child SP arg (arg 1) is 0. */
475	lwi	r6, r1, PTO + PT_R1;	/* If so, use parent's stack ptr */
4761: addik r7, r1, PTO; /* Arg 2: parent context */
477 add r8, r0, r0; /* Arg 3: (unused) */
478 add r9, r0, r0; /* Arg 4: (unused) */
479 add r10, r0, r0; /* Arg 5: (unused) */
480 brid do_fork /* Do real work (tail-call) */
481 nop;
482
483C_ENTRY(sys_execve):
484	addik	r8, r1, PTO;		/* add user context as 4th arg */
485	brid	microblaze_execve;	/* Do real work (tail-call).*/
486 nop;
487
488C_ENTRY(sys_rt_sigreturn_wrapper):
489 swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
490 swi r4, r1, PTO+PT_R4;
491	addik	r5, r1, PTO;		/* add user context as 1st arg */
492 brlid r15, sys_rt_sigreturn /* Do real work */
493 nop;
494 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
495 lwi r4, r1, PTO+PT_R4;
496 bri ret_from_trap /* fall through will not work here due to align */
497 nop;
498
499/*
500 * HW EXCEPTION routine start
501 */
502
503#define SAVE_STATE \
504	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */	\
505	/* See if already in kernel mode.*/				\
506	mfs	r1, rmsr;						\
507	nop;								\
508 andi r1, r1, MSR_UMS; \
509 bnei r1, 1f; \
510 /* Kernel-mode state save. */ \
511 /* Reload kernel stack-ptr. */ \
512 lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
513 tophys(r1,r1); \
514	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
515 SAVE_REGS \
516 /* PC, before IRQ/trap - this is one instruction above */ \
517 swi r17, r1, PTO+PT_PC; \
518 \
519 addi r11, r0, 1; /* Was in kernel-mode. */ \
520 swi r11, r1, PTO+PT_MODE; \
521 brid 2f; \
522 nop; /* Fill delay slot */ \
5231: /* User-mode state save. */ \
524 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
525 tophys(r1,r1); \
526 lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \
527 addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */\
528 tophys(r1,r1); \
529	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
530 SAVE_REGS \
531 /* PC, before IRQ/trap - this is one instruction above FIXME*/ \
532 swi r17, r1, PTO+PT_PC; \
533 \
534 swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */ \
535 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
536 swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
5372:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));	\
538 tovirt(r1,r1)
539
540C_ENTRY(full_exception_trap):
541	/* adjust exception address for a privileged instruction
542	 * so we can find where it is */
543	addik	r17, r17, -4
544	SAVE_STATE /* Save registers */
545	/* FIXME this can be stored directly in the PT_ESR reg.
546	 * I tested it but there is a fault */
547 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
548 addik r15, r0, ret_from_exc - 8
549 addik r5, r1, PTO /* parameter struct pt_regs * regs */
ca54502b
MS
550 mfs r6, resr
551 nop
552 mfs r7, rfsr; /* save FSR */
553 nop
554 mts rfsr, r0; /* Clear sticky fsr */
555 nop
556	addik	r12, r0, full_exception
557	set_vms;
558	rted	r12, 0;
559 nop;
560
561/*
562 * Unaligned data trap.
563 *
564 * Unaligned data trap last on 4k page is handled here.
565 *
566 * Trap entered via exception, so EE bit is set, and interrupts
567 * are masked. This is nice, means we don't have to CLI before state save
568 *
569 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
570 */
571C_ENTRY(unaligned_data_trap):
572	/* MS: I have to save the r11 value and then restore it because
573	 * set_bip, clear_eip, set_ee use r11 as a temp register if MSR
574	 * instructions are not used. We don't need to do this if MSR instructions
575	 * are used, since they use r0 instead of r11.
576	 * I am using ENTRY_SP, which should primarily be used only for stack
577	 * pointer saving. */
578 swi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
579 set_bip; /* equalize initial state for all possible entries */
580 clear_eip;
581 set_ee;
582 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
583 SAVE_STATE /* Save registers.*/
584 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
585	addik	r15, r0, ret_from_exc-8
586 mfs r3, resr /* ESR */
587 nop
588 mfs r4, rear /* EAR */
589 nop
590 addik r7, r1, PTO /* parameter struct pt_regs * regs */
591 addik r12, r0, _unaligned_data_exception
592 set_vms;
593 rtbd r12, 0; /* interrupts enabled */
594 nop;
595
596/*
597 * Page fault traps.
598 *
599 * If the real exception handler (from hw_exception_handler.S) didn't find
600 * the mapping for the process, then we're thrown here to handle such a situation.
601 *
602 * Trap entered via exceptions, so EE bit is set, and interrupts
603 * are masked. This is nice, means we don't have to CLI before state save
604 *
605 * Build a standard exception frame for TLB Access errors. All TLB exceptions
606 * will bail out to this point if they can't resolve the lightweight TLB fault.
607 *
608 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
609 * void do_page_fault(struct pt_regs *regs,
610 * unsigned long address,
611 * unsigned long error_code)
612 */
613/* data and instruction trap - which one it was is resolved in fault.c */
614C_ENTRY(page_fault_data_trap):
615 SAVE_STATE /* Save registers.*/
616 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
617 addik r15, r0, ret_from_exc-8
618 addik r5, r1, PTO /* parameter struct pt_regs * regs */
619 mfs r6, rear /* parameter unsigned long address */
620 nop
621 mfs r7, resr /* parameter unsigned long error_code */
622 nop
623	addik	r12, r0, do_page_fault
624	set_vms;
625	rted	r12, 0;	/* interrupts enabled */
626 nop;
627
628C_ENTRY(page_fault_instr_trap):
629 SAVE_STATE /* Save registers.*/
630 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
631 addik r15, r0, ret_from_exc-8
632 addik r5, r1, PTO /* parameter struct pt_regs * regs */
633 mfs r6, rear /* parameter unsigned long address */
634 nop
635 ori r7, r0, 0 /* parameter unsigned long error_code */
636	addik	r12, r0, do_page_fault
637	set_vms;
638	rted	r12, 0;	/* interrupts enabled */
639 nop;
640
641/* Entry point used to return from an exception. */
642C_ENTRY(ret_from_exc):
643 lwi r11, r1, PTO+PT_MODE;
644 bnei r11, 2f; /* See if returning to kernel mode, */
645 /* ... if so, skip resched &c. */
646
647 /* We're returning to user mode, so check for various conditions that
648 trigger rescheduling. */
649	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
650 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
651 andi r11, r11, _TIF_NEED_RESCHED;
652 beqi r11, 5f;
653
654/* Call the scheduler before returning from a syscall/trap. */
655 bralid r15, schedule; /* Call scheduler */
656 nop; /* delay slot */
657
658 /* Maybe handle a signal */
6595:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
660 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
661 andi r11, r11, _TIF_SIGPENDING;
662	beqi	r11, 1f;	/* No signals pending, skip signal handling */
663
664 /*
665 * Handle a signal return; Pending signals should be in r18.
666 *
667 * Not all registers are saved by the normal trap/interrupt entry
668 * points (for instance, call-saved registers (because the normal
669 * C-compiler calling sequence in the kernel makes sure they're
670 * preserved), and call-clobbered registers in the case of
671 * traps), but signal handlers may want to examine or change the
672 * complete register state. Here we save anything not saved by
673 * the normal entry sequence, so that it may be safely restored
674	 * (in a possibly modified form) after do_signal returns. */
675	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
676 addi r7, r0, 0; /* Arg 3: int in_syscall */
677 bralid r15, do_signal; /* Handle any signals */
678	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
679
680/* Finally, return to user state. */
6811:	set_bip;	/* Ints masked for state restore */
682	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
683 VM_OFF;
684 tophys(r1,r1);
685
686 RESTORE_REGS;
687 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
688
689 lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
690 bri 6f;
691/* Return to kernel state. */
6922: set_bip; /* Ints masked for state restore */
693 VM_OFF;
694	tophys(r1,r1);
695 RESTORE_REGS;
696 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
697
698 tovirt(r1,r1);
6996:
700EXC_return: /* Make global symbol for debugging */
701 rtbd r14, 0; /* Instructions to return from an IRQ */
702 nop;
703
704/*
705 * HW EXCEPTION routine end
706 */
707
708/*
709 * Hardware maskable interrupts.
710 *
711 * The stack-pointer (r1) should have already been saved to the memory
712 * location PER_CPU(ENTRY_SP).
713 */
714C_ENTRY(_interrupt):
715/* MS: we are in physical address */
716/* Save registers, switch to proper stack, convert SP to virtual.*/
717 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
718	/* MS: See if already in kernel mode. */
719	mfs	r1, rmsr
720	nop
721 andi r1, r1, MSR_UMS
722 bnei r1, 1f
723
724/* Kernel-mode state save. */
725 lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
726 tophys(r1,r1); /* MS: I have in r1 physical address where stack is */
727 /* save registers */
728/* MS: Make room on the stack -> activation record */
729 addik r1, r1, -STATE_SAVE_SIZE;
730 SAVE_REGS
731 /* MS: store mode */
732 addi r11, r0, 1; /* MS: Was in kernel-mode. */
733 swi r11, r1, PTO + PT_MODE; /* MS: and save it */
734 brid 2f;
735 nop; /* MS: Fill delay slot */
736
7371:
738/* User-mode state save. */
739 /* MS: get the saved current */
740 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
741 tophys(r1,r1);
742 lwi r1, r1, TS_THREAD_INFO;
743 addik r1, r1, THREAD_SIZE;
744 tophys(r1,r1);
745 /* save registers */
746 addik r1, r1, -STATE_SAVE_SIZE;
747 SAVE_REGS
748 /* calculate mode */
749 swi r0, r1, PTO + PT_MODE;
750 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
751 swi r11, r1, PTO+PT_R1;
7522:
753	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
754	tovirt(r1,r1)
755	addik	r5, r1, PTO;
756	set_vms;
757 addik r11, r0, do_IRQ;
758 addik r15, r0, irq_call;
759irq_call:rtbd r11, 0;
760 nop;
761
762/* MS: we are in virtual mode */
763ret_from_irq:
764 lwi r11, r1, PTO + PT_MODE;
765 bnei r11, 2f;
766
767	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
768 lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */
769 andi r11, r11, _TIF_NEED_RESCHED;
770 beqi r11, 5f
771 bralid r15, schedule;
772 nop; /* delay slot */
773
774 /* Maybe handle a signal */
7755:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* MS: get thread info */
776 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
777 andi r11, r11, _TIF_SIGPENDING;
778 beqid r11, no_intr_resched
779/* Handle a signal return; Pending signals should be in r18. */
780 addi r7, r0, 0; /* Arg 3: int in_syscall */
781	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
782 bralid r15, do_signal; /* Handle any signals */
783 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
784
785/* Finally, return to user state. */
786no_intr_resched:
787 /* Disable interrupts, we are now committed to the state restore */
788 disable_irq
789	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
790 VM_OFF;
791 tophys(r1,r1);
792 RESTORE_REGS
793 addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
794 lwi r1, r1, PT_R1 - PT_SIZE;
795 bri 6f;
796/* MS: Return to kernel state. */
7972:
798#ifdef CONFIG_PREEMPT
799	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
800 /* MS: get preempt_count from thread info */
801 lwi r5, r11, TI_PREEMPT_COUNT;
802 bgti r5, restore;
803
804 lwi r5, r11, TI_FLAGS; /* get flags in thread info */
805 andi r5, r5, _TIF_NEED_RESCHED;
806 beqi r5, restore /* if zero jump over */
807
808preempt:
809	/* interrupts are off, that's why I am calling preempt_schedule_irq */
810 bralid r15, preempt_schedule_irq
811 nop
812	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
813 lwi r5, r11, TI_FLAGS; /* get flags in thread info */
814 andi r5, r5, _TIF_NEED_RESCHED;
815 bnei r5, preempt /* if non zero jump to resched */
816restore:
817#endif
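	/* The CONFIG_PREEMPT block above is roughly this, in C (sketch):
	 *
	 *	if (!preempt_count())
	 *		while (test_thread_flag(TIF_NEED_RESCHED))
	 *			preempt_schedule_irq();
	 */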
818 VM_OFF /* MS: turn off MMU */
819	tophys(r1,r1)
820 RESTORE_REGS
821 addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
822 tovirt(r1,r1);
8236:
824IRQ_return: /* MS: Make global symbol for debugging */
825 rtid r14, 0
826 nop
827
828/*
829 * `Debug' trap
830 * We enter dbtrap in "BIP" (breakpoint) mode.
831 * So we exit the breakpoint mode with an 'rtbd' and proceed with the
832 * original dbtrap.
833 * however, wait to save state first
834 */
835C_ENTRY(_debug_exception):
836 /* BIP bit is set on entry, no interrupts can occur */
837 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
838
839	mfs	r1, rmsr
840	nop
841 andi r1, r1, MSR_UMS
842 bnei r1, 1f
843	/* Kernel-mode state save. */
844 lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
845 tophys(r1,r1);
846
847 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
848 SAVE_REGS;
849
850 addi r11, r0, 1; /* Was in kernel-mode. */
851 swi r11, r1, PTO + PT_MODE;
852 brid 2f;
853 nop; /* Fill delay slot */
8541: /* User-mode state save. */
855 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
856 tophys(r1,r1);
857 lwi r1, r1, TS_THREAD_INFO; /* get the thread info */
858 addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */
859 tophys(r1,r1);
860
861 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
862 SAVE_REGS;
863
864 swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */
865 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
866 swi r11, r1, PTO+PT_R1; /* Store user SP. */
8672:
868 tovirt(r1,r1)
869
870	set_vms;
871 addi r5, r0, SIGTRAP /* send the trap signal */
872	add	r6, r0, CURRENT_TASK;	/* Arg 2: current task ptr */
873 addk r7, r0, r0 /* 3rd param zero */
874dbtrap_call:	rtbd	r0, send_sig;
875	addik	r15, r0, dbtrap_call;
876
877 set_bip; /* Ints masked for state restore*/
878 lwi r11, r1, PTO+PT_MODE;
879 bnei r11, 2f;
880
881	/* Get thread info ptr into r11 */
882	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
883 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
884 andi r11, r11, _TIF_NEED_RESCHED;
885 beqi r11, 5f;
886
887/* Call the scheduler before returning from a syscall/trap. */
888
889 bralid r15, schedule; /* Call scheduler */
890 nop; /* delay slot */
891 /* XXX Is PT_DTRACE handling needed here? */
892 /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */
893
894 /* Maybe handle a signal */
8955:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
896 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
897 andi r11, r11, _TIF_SIGPENDING;
898	beqi	r11, 1f;	/* No signals pending, skip signal handling */
899
900/* Handle a signal return; Pending signals should be in r18. */
901 /* Not all registers are saved by the normal trap/interrupt entry
902 points (for instance, call-saved registers (because the normal
903 C-compiler calling sequence in the kernel makes sure they're
904 preserved), and call-clobbered registers in the case of
905 traps), but signal handlers may want to examine or change the
906 complete register state. Here we save anything not saved by
907 the normal entry sequence, so that it may be safely restored
908 (in a possibly modified form) after do_signal returns. */
909
910	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
911 addi r7, r0, 0; /* Arg 3: int in_syscall */
912 bralid r15, do_signal; /* Handle any signals */
913	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
914
915
916/* Finally, return to user state. */
9171:
918	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
919 VM_OFF;
920 tophys(r1,r1);
921
922 RESTORE_REGS
923 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
924
925
926 lwi r1, r1, PT_R1 - PT_SIZE;
927 /* Restore user stack pointer. */
928 bri 6f;
929
930/* Return to kernel state. */
9312: VM_OFF;
932 tophys(r1,r1);
933 RESTORE_REGS
934 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
935
936 tovirt(r1,r1);
9376:
938DBTRAP_return: /* Make global symbol for debugging */
939 rtbd r14, 0; /* Instructions to return from an IRQ */
940 nop;
941
942
943
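/*
 * Task switch. A sketch of the C-side view (the exact prototype lives in
 * the arch headers and is not reproduced here):
 *
 *	prev = _switch_to(task_thread_info(prev), task_thread_info(next));
 *
 * r5 = thread_info of the outgoing task, r6 = thread_info of the incoming
 * task; the outgoing task_struct pointer is returned in r3.
 */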
944ENTRY(_switch_to)
945 /* prepare return value */
946	addk	r3, r0, CURRENT_TASK
947
948 /* save registers in cpu_context */
949	/* use r11 and r12, volatile registers, as temp registers */
950	/* get the start of cpu_context for the previous process */
951 addik r11, r5, TI_CPU_CONTEXT
952 swi r1, r11, CC_R1
953 swi r2, r11, CC_R2
954 /* skip volatile registers.
955 * they are saved on stack when we jumped to _switch_to() */
956 /* dedicated registers */
957 swi r13, r11, CC_R13
958 swi r14, r11, CC_R14
959 swi r15, r11, CC_R15
960 swi r16, r11, CC_R16
961 swi r17, r11, CC_R17
962 swi r18, r11, CC_R18
963 /* save non-volatile registers */
964 swi r19, r11, CC_R19
965 swi r20, r11, CC_R20
966 swi r21, r11, CC_R21
967 swi r22, r11, CC_R22
968 swi r23, r11, CC_R23
969 swi r24, r11, CC_R24
970 swi r25, r11, CC_R25
971 swi r26, r11, CC_R26
972 swi r27, r11, CC_R27
973 swi r28, r11, CC_R28
974 swi r29, r11, CC_R29
975 swi r30, r11, CC_R30
976 /* special purpose registers */
977 mfs r12, rmsr
978 nop
979 swi r12, r11, CC_MSR
980 mfs r12, rear
981 nop
982 swi r12, r11, CC_EAR
983 mfs r12, resr
984 nop
985 swi r12, r11, CC_ESR
986 mfs r12, rfsr
987 nop
988 swi r12, r11, CC_FSR
989
990	/* update r31 (CURRENT_TASK) to point to the task that will run next */
991	lwi	CURRENT_TASK, r6, TI_TASK
992	/* store it to current_save too */
993	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
994
995 /* get new process' cpu context and restore */
996	/* get the start of the next task's cpu_context */
997 addik r11, r6, TI_CPU_CONTEXT
998
999 /* non-volatile registers */
1000 lwi r30, r11, CC_R30
1001 lwi r29, r11, CC_R29
1002 lwi r28, r11, CC_R28
1003 lwi r27, r11, CC_R27
1004 lwi r26, r11, CC_R26
1005 lwi r25, r11, CC_R25
1006 lwi r24, r11, CC_R24
1007 lwi r23, r11, CC_R23
1008 lwi r22, r11, CC_R22
1009 lwi r21, r11, CC_R21
1010 lwi r20, r11, CC_R20
1011 lwi r19, r11, CC_R19
1012 /* dedicated registers */
1013 lwi r18, r11, CC_R18
1014 lwi r17, r11, CC_R17
1015 lwi r16, r11, CC_R16
1016 lwi r15, r11, CC_R15
1017 lwi r14, r11, CC_R14
1018 lwi r13, r11, CC_R13
1019 /* skip volatile registers */
1020 lwi r2, r11, CC_R2
1021 lwi r1, r11, CC_R1
1022
1023 /* special purpose registers */
1024 lwi r12, r11, CC_FSR
1025 mts rfsr, r12
1026 nop
1027 lwi r12, r11, CC_MSR
1028 mts rmsr, r12
1029 nop
1030
1031 rtsd r15, 8
1032 nop
1033
1034ENTRY(_reset)
1035 brai 0x70; /* Jump back to FS-boot */
1036
1037ENTRY(_break)
1038 mfs r5, rmsr
1039 nop
1040 swi r5, r0, 0x250 + TOPHYS(r0_ram)
1041 mfs r5, resr
1042 nop
1043 swi r5, r0, 0x254 + TOPHYS(r0_ram)
1044 bri 0
1045
1046 /* These are compiled and loaded into high memory, then
1047 * copied into place in mach_early_setup */
1048 .section .init.ivt, "ax"
1049 .org 0x0
1050 /* this is very important - here is the reset vector */
1051	/* in the current MMU branch you don't care what is here - it is
1052	 * used from the bootloader side - but this is correct for FS-BOOT */
1053 brai 0x70
1054 nop
1055 brai TOPHYS(_user_exception); /* syscall handler */
1056 brai TOPHYS(_interrupt); /* Interrupt handler */
1057 brai TOPHYS(_break); /* nmi trap handler */
1058 brai TOPHYS(_hw_exception_handler); /* HW exception handler */
1059
1060 .org 0x60
1061 brai TOPHYS(_debug_exception); /* debug trap handler*/
1062
1063.section .rodata,"a"
1064#include "syscall_table.S"
1065
1066syscall_table_size=(.-sys_call_table)
1067
1068type_SYSCALL:
1069 .ascii "SYSCALL\0"
1070type_IRQ:
1071 .ascii "IRQ\0"
1072type_IRQ_PREEMPT:
1073 .ascii "IRQ (PREEMPTED)\0"
1074type_SYSCALL_PREEMPT:
1075 .ascii " SYSCALL (PREEMPTED)\0"
1076
1077 /*
1078 * Trap decoding for stack unwinder
1079 * Tuples are (start addr, end addr, string)
1080	 * If the return address lies in [start addr, end addr],
1081 * unwinder displays 'string'
1082 */
1083
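	/* Equivalent C view of one table entry (illustrative sketch; the
	 * field names are assumptions, not copied from the unwinder source):
	 *
	 *	struct trap_entry {
	 *		unsigned long start_addr;
	 *		unsigned long end_addr;
	 *		const char   *name;
	 *	};
	 */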
1084 .align 4
1085.global microblaze_trap_handlers
1086microblaze_trap_handlers:
1087 /* Exact matches come first */
1088 .word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
1089 .word ret_from_irq ; .word ret_from_irq ; .word type_IRQ
1090 /* Fuzzy matches go here */
1091 .word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
1092 .word ret_from_trap; .word TRAP_return ; .word type_SYSCALL_PREEMPT
1093 /* End of table */
1094 .word 0 ; .word 0 ; .word 0