1/*
2 * Low-level system-call handling, trap handlers and context-switching
3 *
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008-2009 PetaLogix
6 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
7 * Copyright (C) 2001,2002 NEC Corporation
8 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
9 *
10 * This file is subject to the terms and conditions of the GNU General
11 * Public License. See the file COPYING in the main directory of this
12 * archive for more details.
13 *
14 * Written by Miles Bader <miles@gnu.org>
15 * Heavily modified by John Williams for Microblaze
16 */
17
18#include <linux/sys.h>
19#include <linux/linkage.h>
20
21#include <asm/entry.h>
22#include <asm/current.h>
23#include <asm/processor.h>
24#include <asm/exceptions.h>
25#include <asm/asm-offsets.h>
26#include <asm/thread_info.h>
27
28#include <asm/page.h>
29#include <asm/unistd.h>
30
31#include <linux/errno.h>
32#include <asm/signal.h>
33
34#undef DEBUG
35
36/* The size of a state save frame. */
37#define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE)
38
39/* The offset of the struct pt_regs in a `state save frame' on the stack. */
 40#define PTO	STATE_SAVE_ARG_SPACE /* 24 bytes of space for args */
41
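/* Layout implied by the two defines above: the entry code drops r1 by
 * STATE_SAVE_SIZE bytes, keeps STATE_SAVE_ARG_SPACE bytes of argument
 * scratch space at the bottom of the frame, and places the struct pt_regs
 * at r1 + PTO. */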
42#define C_ENTRY(name) .globl name; .align 4; name
43
44/*
45 * Various ways of setting and clearing BIP in flags reg.
 46 * This is mucky, but necessary when using a MicroBlaze version that
 47 * allows MSR ops to write to BIP
48 */
49#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
50 .macro clear_bip
51 msrclr r11, MSR_BIP
52 nop
53 .endm
54
55 .macro set_bip
56 msrset r11, MSR_BIP
57 nop
58 .endm
59
60 .macro clear_eip
61 msrclr r11, MSR_EIP
62 nop
63 .endm
64
65 .macro set_ee
66 msrset r11, MSR_EE
67 nop
68 .endm
69
70 .macro disable_irq
71 msrclr r11, MSR_IE
72 nop
73 .endm
74
75 .macro enable_irq
76 msrset r11, MSR_IE
77 nop
78 .endm
79
80 .macro set_ums
81 msrset r11, MSR_UMS
82 nop
83 msrclr r11, MSR_VMS
84 nop
85 .endm
86
87 .macro set_vms
88 msrclr r11, MSR_UMS
89 nop
90 msrset r11, MSR_VMS
91 nop
92 .endm
93
94 .macro clear_vms_ums
 95	msrclr	r11, MSR_VMS | MSR_UMS
96 nop
97 .endm
98#else
99 .macro clear_bip
100 mfs r11, rmsr
101 nop
102 andi r11, r11, ~MSR_BIP
103 mts rmsr, r11
104 nop
105 .endm
106
107 .macro set_bip
108 mfs r11, rmsr
109 nop
110 ori r11, r11, MSR_BIP
111 mts rmsr, r11
112 nop
113 .endm
114
115 .macro clear_eip
116 mfs r11, rmsr
117 nop
118 andi r11, r11, ~MSR_EIP
119 mts rmsr, r11
120 nop
121 .endm
122
123 .macro set_ee
124 mfs r11, rmsr
125 nop
126 ori r11, r11, MSR_EE
127 mts rmsr, r11
128 nop
129 .endm
130
131 .macro disable_irq
132 mfs r11, rmsr
133 nop
134 andi r11, r11, ~MSR_IE
135 mts rmsr, r11
136 nop
137 .endm
138
139 .macro enable_irq
140 mfs r11, rmsr
141 nop
142 ori r11, r11, MSR_IE
143 mts rmsr, r11
144 nop
145 .endm
146
147 .macro set_ums
148 mfs r11, rmsr
149 nop
150 ori r11, r11, MSR_VMS
151 andni r11, r11, MSR_UMS
152 mts rmsr, r11
153 nop
154 .endm
155
156 .macro set_vms
157 mfs r11, rmsr
158 nop
159 ori r11, r11, MSR_VMS
160 andni r11, r11, MSR_UMS
161 mts rmsr, r11
162 nop
163 .endm
164
165 .macro clear_vms_ums
166 mfs r11, rmsr
167 nop
168 andni r11, r11, (MSR_VMS|MSR_UMS)
169 mts rmsr,r11
170 nop
171 .endm
172#endif
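/* Both macro variants above have the same effect on the MSR bits; the
 * msrset/msrclr forms simply avoid the mfs/mts round trip on cores whose
 * MSR instructions are allowed to modify BIP (see the comment above). */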
173
174/* Define how to call high-level functions. With MMU, virtual mode must be
175 * enabled when calling the high-level function. Clobbers R11.
176 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
177 */
178
179/* turn on virtual protected mode save */
180#define VM_ON \
 181	set_ums;						\
 182	rted	r0, 2f;						\
183 nop; \
1842:
185
186/* turn off virtual protected mode save and user mode save*/
187#define VM_OFF \
 188	clear_vms_ums;						\
 189	rted	r0, TOPHYS(1f);					\
190 nop; \
1911:
192
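/* Note: VM_ON/VM_OFF depend on the MicroBlaze return instructions
 * (rted/rtid/rtbd) copying the UMS/VMS shadow bits into UM/VM on return,
 * so setting the shadow bits and returning to the next label is what
 * actually switches the processor's user/virtual mode. */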
193#define SAVE_REGS \
194 swi r2, r1, PTO+PT_R2; /* Save SDA */ \
195 swi r3, r1, PTO+PT_R3; \
196 swi r4, r1, PTO+PT_R4; \
197 swi r5, r1, PTO+PT_R5; \
198 swi r6, r1, PTO+PT_R6; \
199 swi r7, r1, PTO+PT_R7; \
200 swi r8, r1, PTO+PT_R8; \
201 swi r9, r1, PTO+PT_R9; \
202 swi r10, r1, PTO+PT_R10; \
203 swi r11, r1, PTO+PT_R11; /* save clobbered regs after rval */\
204 swi r12, r1, PTO+PT_R12; \
205 swi r13, r1, PTO+PT_R13; /* Save SDA2 */ \
206 swi r14, r1, PTO+PT_PC; /* PC, before IRQ/trap */ \
207 swi r15, r1, PTO+PT_R15; /* Save LP */ \
208 swi r18, r1, PTO+PT_R18; /* Save asm scratch reg */ \
209 swi r19, r1, PTO+PT_R19; \
210 swi r20, r1, PTO+PT_R20; \
211 swi r21, r1, PTO+PT_R21; \
212 swi r22, r1, PTO+PT_R22; \
213 swi r23, r1, PTO+PT_R23; \
214 swi r24, r1, PTO+PT_R24; \
215 swi r25, r1, PTO+PT_R25; \
216 swi r26, r1, PTO+PT_R26; \
217 swi r27, r1, PTO+PT_R27; \
218 swi r28, r1, PTO+PT_R28; \
219 swi r29, r1, PTO+PT_R29; \
220 swi r30, r1, PTO+PT_R30; \
221 swi r31, r1, PTO+PT_R31; /* Save current task reg */ \
222 mfs r11, rmsr; /* save MSR */ \
223 nop; \
224 swi r11, r1, PTO+PT_MSR;
225
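/* Note that SAVE_REGS does not store r1, r16 or r17: the entry paths below
 * write the pre-trap stack pointer to PT_R1 themselves, and the hardware
 * exception entries overwrite PT_PC with r17 after SAVE_STATE. */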
226#define RESTORE_REGS \
227 lwi r11, r1, PTO+PT_MSR; \
228 mts rmsr , r11; \
229 nop; \
230 lwi r2, r1, PTO+PT_R2; /* restore SDA */ \
231 lwi r3, r1, PTO+PT_R3; \
232 lwi r4, r1, PTO+PT_R4; \
233 lwi r5, r1, PTO+PT_R5; \
234 lwi r6, r1, PTO+PT_R6; \
235 lwi r7, r1, PTO+PT_R7; \
236 lwi r8, r1, PTO+PT_R8; \
237 lwi r9, r1, PTO+PT_R9; \
238 lwi r10, r1, PTO+PT_R10; \
239 lwi r11, r1, PTO+PT_R11; /* restore clobbered regs after rval */\
240 lwi r12, r1, PTO+PT_R12; \
241 lwi r13, r1, PTO+PT_R13; /* restore SDA2 */ \
242 lwi r14, r1, PTO+PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\
243 lwi r15, r1, PTO+PT_R15; /* restore LP */ \
244 lwi r18, r1, PTO+PT_R18; /* restore asm scratch reg */ \
245 lwi r19, r1, PTO+PT_R19; \
246 lwi r20, r1, PTO+PT_R20; \
247 lwi r21, r1, PTO+PT_R21; \
248 lwi r22, r1, PTO+PT_R22; \
249 lwi r23, r1, PTO+PT_R23; \
250 lwi r24, r1, PTO+PT_R24; \
251 lwi r25, r1, PTO+PT_R25; \
252 lwi r26, r1, PTO+PT_R26; \
253 lwi r27, r1, PTO+PT_R27; \
254 lwi r28, r1, PTO+PT_R28; \
255 lwi r29, r1, PTO+PT_R29; \
256 lwi r30, r1, PTO+PT_R30; \
257 lwi r31, r1, PTO+PT_R31; /* Restore cur task reg */
258
259.text
260
261/*
262 * User trap.
263 *
264 * System calls are handled here.
265 *
266 * Syscall protocol:
267 * Syscall number in r12, args in r5-r10
268 * Return value in r3
269 *
270 * Trap entered via brki instruction, so BIP bit is set, and interrupts
271 * are masked. This is nice, means we don't have to CLI before state save
272 */
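/* For reference, a user-space syscall therefore looks roughly like this
 * (illustrative sketch only, not part of this file):
 *	addik	r12, r0, __NR_xxx	; syscall number
 *	...				; arguments in r5..r10
 *	brki	r14, 0x08		; trap to _user_exception
 * with the result coming back in r3. */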
273C_ENTRY(_user_exception):
274 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
 275	addi	r14, r14, 4	/* return address is 4 bytes after call */
 276
 277	mfs	r1, rmsr
 278	nop
279 andi r1, r1, MSR_UMS
280 bnei r1, 1f
281
282/* Kernel-mode state save - kernel execve */
283 lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
284 tophys(r1,r1);
285
286 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
287 SAVE_REGS
288
 289	swi	r1, r1, PTO + PT_MODE;	/* pt_regs -> kernel mode */
290 brid 2f;
291 nop; /* Fill delay slot */
292
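/* PT_MODE convention used throughout this file: kernel-mode entries store r1
 * (always non-zero) into PT_MODE, user-mode entries store r0, so the return
 * paths can test it with a simple bnei to decide which state to restore. */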
293/* User-mode state save. */
2941:
295 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
296 tophys(r1,r1);
297 lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
298/* calculate kernel stack pointer from task struct 8k */
299 addik r1, r1, THREAD_SIZE;
300 tophys(r1,r1);
301
302 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
303 SAVE_REGS
304
 305	swi	r0, r1, PTO + PT_MODE;		/* Was in user-mode. */
306 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
307 swi r11, r1, PTO+PT_R1; /* Store user SP. */
 3082:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
309 /* Save away the syscall number. */
310 swi r12, r1, PTO+PT_R0;
311 tovirt(r1,r1)
312
313/* where the trap should return need -8 to adjust for rtsd r15, 8*/
314/* Jump to the appropriate function for the system call number in r12
315 * (r12 is not preserved), or return an error if r12 is not valid. The LP
316 * register should point to the location where
317 * the called function should return. [note that MAKE_SYS_CALL uses label 1] */
318
319 # Step into virtual mode.
320 set_vms;
321 addik r11, r0, 3f
322 rtid r11, 0
323 nop
3243:
 325	lwi	r11, CURRENT_TASK, TS_THREAD_INFO	/* get thread info */
326 lwi r11, r11, TI_FLAGS /* get flags in thread info */
327 andi r11, r11, _TIF_WORK_SYSCALL_MASK
328 beqi r11, 4f
329
330 addik r3, r0, -ENOSYS
331 swi r3, r1, PTO + PT_R3
332 brlid r15, do_syscall_trace_enter
333 addik r5, r1, PTO + PT_R0
334
335 # do_syscall_trace_enter returns the new syscall nr.
336 addk r12, r0, r3
337 lwi r5, r1, PTO+PT_R5;
338 lwi r6, r1, PTO+PT_R6;
339 lwi r7, r1, PTO+PT_R7;
340 lwi r8, r1, PTO+PT_R8;
341 lwi r9, r1, PTO+PT_R9;
342 lwi r10, r1, PTO+PT_R10;
3434:
344/* Jump to the appropriate function for the system call number in r12
345 * (r12 is not preserved), or return an error if r12 is not valid.
346 * The LP register should point to the location where the called function
347 * should return. [note that MAKE_SYS_CALL uses label 1] */
348 /* See if the system call number is valid */
 349	addi	r11, r12, -__NR_syscalls;
 350	bgei	r11, 5f;
351 /* Figure out which function to use for this system call. */
352 /* Note Microblaze barrel shift is optional, so don't rely on it */
353 add r12, r12, r12; /* convert num -> ptr */
354 add r12, r12, r12;
355
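/* r12 now holds syscall number * 4, i.e. the byte offset of the handler
 * address within the word-sized sys_call_table entries loaded below. */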
 356#ifdef DEBUG
 357	/* Trace syscalls and store them to r0_ram */
 358	lwi	r3, r12, 0x400 + r0_ram
 359	addi	r3, r3, 1
 360	swi	r3, r12, 0x400 + r0_ram
 361#endif
362
363 # Find and jump into the syscall handler.
364 lwi r12, r12, sys_call_table
365 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
 366	addi	r15, r0, ret_from_trap-8
 367	bra	r12
 368
 369	/* The syscall number is invalid, return an error. */
 3705:
371 addi r3, r0, -ENOSYS;
372 rtsd r15,8; /* looks like a normal subroutine return */
373 or r0, r0, r0
374
375
 376/* Entry point used to return from a syscall/trap */
377/* We re-enable BIP bit before state restore */
378C_ENTRY(ret_from_trap):
379 swi r3, r1, PTO + PT_R3
380 swi r4, r1, PTO + PT_R4
381
 382	lwi	r11, r1, PTO + PT_MODE;
383/* See if returning to kernel mode, if so, skip resched &c. */
384 bnei r11, 2f;
385 /* We're returning to user mode, so check for various conditions that
386 * trigger rescheduling. */
387 /* FIXME: Restructure all these flag checks. */
388 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
389 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
390 andi r11, r11, _TIF_WORK_SYSCALL_MASK
391 beqi r11, 1f
392
393 brlid r15, do_syscall_trace_leave
394 addik r5, r1, PTO + PT_R0
 3951:
396 /* We're returning to user mode, so check for various conditions that
397 * trigger rescheduling. */
398 /* get thread info from current task */
399 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
400 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
401 andi r11, r11, _TIF_NEED_RESCHED;
402 beqi r11, 5f;
403
404 bralid r15, schedule; /* Call scheduler */
405 nop; /* delay slot */
406
407 /* Maybe handle a signal */
4085: /* get thread info from current task*/
409 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
410 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
411 andi r11, r11, _TIF_SIGPENDING;
 412	beqi	r11, 1f;	/* No signals to handle, skip handling */
413
 414	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
415 addi r7, r0, 1; /* Arg 3: int in_syscall */
416 bralid r15, do_signal; /* Handle any signals */
 417	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
418
419/* Finally, return to user state. */
 4201:	set_bip;			/* Ints masked for state restore */
 421	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
422 VM_OFF;
423 tophys(r1,r1);
424 RESTORE_REGS;
425 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
426 lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
427 bri 6f;
428
429/* Return to kernel state. */
4302: set_bip; /* Ints masked for state restore */
431 VM_OFF;
432 tophys(r1,r1);
433 RESTORE_REGS;
434 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
435 tovirt(r1,r1);
4366:
437TRAP_return: /* Make global symbol for debugging */
438 rtbd r14, 0; /* Instructions to return from an IRQ */
439 nop;
440
441
442/* These syscalls need access to the struct pt_regs on the stack, so we
443 implement them in assembly (they're basically all wrappers anyway). */
444
445C_ENTRY(sys_fork_wrapper):
446 addi r5, r0, SIGCHLD /* Arg 0: flags */
447 lwi r6, r1, PTO+PT_R1 /* Arg 1: child SP (use parent's) */
 448	addik	r7, r1, PTO			/* Arg 2: parent context */
 449	add	r8, r0, r0			/* Arg 3: (unused) */
450 add r9, r0, r0; /* Arg 4: (unused) */
451 add r10, r0, r0; /* Arg 5: (unused) */
452 brid do_fork /* Do real work (tail-call) */
453 nop;
454
 455/* This is the initial entry point for a new child thread, with an appropriate
 456   stack in place that makes it look like the child is in the middle of a
457 syscall. This function is actually `returned to' from switch_thread
458 (copy_thread makes ret_from_fork the return address in each new thread's
459 saved context). */
460C_ENTRY(ret_from_fork):
461 bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
462 add r3, r5, r0; /* switch_thread returns the prev task */
463 /* ( in the delay slot ) */
464 add r3, r0, r0; /* Child's fork call should return 0. */
465 brid ret_from_trap; /* Do normal trap return */
466 nop;
467
468C_ENTRY(sys_vfork):
469 brid microblaze_vfork /* Do real work (tail-call) */
 470	addik	r5, r1, PTO
 471
 472C_ENTRY(sys_clone):
 473	bnei	r6, 1f;			/* See if child SP arg (arg 1) is 0. */
 474	lwi	r6, r1, PTO + PT_R1;	/* If so, use parent's stack ptr */
4751: addik r7, r1, PTO; /* Arg 2: parent context */
476 add r8, r0, r0; /* Arg 3: (unused) */
477 add r9, r0, r0; /* Arg 4: (unused) */
478 add r10, r0, r0; /* Arg 5: (unused) */
479 brid do_fork /* Do real work (tail-call) */
480 nop;
 481
 482C_ENTRY(sys_execve):
 483	addik	r8, r1, PTO;		/* add user context as 4th arg */
 484	brid	microblaze_execve;	/* Do real work (tail-call).*/
485 nop;
486
487C_ENTRY(sys_rt_sigreturn_wrapper):
488 swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
489 swi r4, r1, PTO+PT_R4;
 490	addik	r5, r1, PTO;		/* add user context as 1st arg */
491 brlid r15, sys_rt_sigreturn /* Do real work */
492 nop;
493 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
494 lwi r4, r1, PTO+PT_R4;
495 bri ret_from_trap /* fall through will not work here due to align */
496 nop;
497
498/*
 499 * HW EXCEPTION routine start
500 */
501
502#define SAVE_STATE \
 503	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */	\
 504	/* See if already in kernel mode.*/				\
 505	mfs	r1, rmsr;						\
 506	nop;								\
507 andi r1, r1, MSR_UMS; \
508 bnei r1, 1f; \
509 /* Kernel-mode state save. */ \
510 /* Reload kernel stack-ptr. */ \
511 lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
512 tophys(r1,r1); \
 513	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
 514	SAVE_REGS							\
 515	swi	r1, r1, PTO+PT_MODE;					\
516 brid 2f; \
517 nop; /* Fill delay slot */ \
5181: /* User-mode state save. */ \
519 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
520 tophys(r1,r1); \
521 lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \
522 addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */\
523 tophys(r1,r1); \
 524	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
 525	SAVE_REGS							\
 526	swi	r0, r1, PTO + PT_MODE; /* Was in user-mode. */		\
527 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
528 swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
 5292:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
530
531C_ENTRY(full_exception_trap):
 532	/* adjust exception address for privileged instruction
 533	 * so we can find where it occurred */
534 addik r17, r17, -4
535 SAVE_STATE /* Save registers */
536 /* PC, before IRQ/trap - this is one instruction above */
537 swi r17, r1, PTO+PT_PC;
538 tovirt(r1,r1)
 539	/* FIXME this could be stored directly in the PT_ESR reg.
 540	 * I tested it but there is a fault */
541 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
542 addik r15, r0, ret_from_exc - 8
543 addik r5, r1, PTO /* parameter struct pt_regs * regs */
544 mfs r6, resr
545 nop
546 mfs r7, rfsr; /* save FSR */
547 nop
548 mts rfsr, r0; /* Clear sticky fsr */
549 nop
 550	addik	r12, r0, full_exception
 551	set_vms;
 552	rted	r12, 0;
553 nop;
554
555/*
556 * Unaligned data trap.
557 *
558 * Unaligned data trap last on 4k page is handled here.
559 *
560 * Trap entered via exception, so EE bit is set, and interrupts
561 * are masked. This is nice, means we don't have to CLI before state save
562 *
563 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
564 */
565C_ENTRY(unaligned_data_trap):
 566	/* MS: I have to save the r11 value and then restore it because
 567	 * set_bip, clear_eip, set_ee use r11 as a temp register if MSR
 568	 * instructions are not used. We don't need to do this if MSR
 569	 * instructions are used, since they use r0 instead of r11.
 570	 * I am using ENTRY_SP, which should primarily be used only for
 571	 * stack pointer saving. */
572 swi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
573 set_bip; /* equalize initial state for all possible entries */
574 clear_eip;
575 set_ee;
576 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
 577	SAVE_STATE		/* Save registers.*/
578 /* PC, before IRQ/trap - this is one instruction above */
579 swi r17, r1, PTO+PT_PC;
580 tovirt(r1,r1)
 581	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
 582	addik	r15, r0, ret_from_exc-8
583 mfs r3, resr /* ESR */
584 nop
585 mfs r4, rear /* EAR */
586 nop
587 addik r7, r1, PTO /* parameter struct pt_regs * regs */
588 addik r12, r0, _unaligned_data_exception
589 set_vms;
590 rtbd r12, 0; /* interrupts enabled */
591 nop;
592
593/*
594 * Page fault traps.
595 *
596 * If the real exception handler (from hw_exception_handler.S) didn't find
 597 * the mapping for the process, then we're thrown here to handle this situation.
598 *
599 * Trap entered via exceptions, so EE bit is set, and interrupts
600 * are masked. This is nice, means we don't have to CLI before state save
601 *
602 * Build a standard exception frame for TLB Access errors. All TLB exceptions
603 * will bail out to this point if they can't resolve the lightweight TLB fault.
604 *
605 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
606 * void do_page_fault(struct pt_regs *regs,
607 * unsigned long address,
608 * unsigned long error_code)
609 */
 610/* data and instruction trap - which one it is gets resolved in fault.c */
611C_ENTRY(page_fault_data_trap):
 612	SAVE_STATE		/* Save registers.*/
613 /* PC, before IRQ/trap - this is one instruction above */
614 swi r17, r1, PTO+PT_PC;
615 tovirt(r1,r1)
 616	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
617 addik r15, r0, ret_from_exc-8
618 addik r5, r1, PTO /* parameter struct pt_regs * regs */
619 mfs r6, rear /* parameter unsigned long address */
620 nop
621 mfs r7, resr /* parameter unsigned long error_code */
622 nop
 623	addik	r12, r0, do_page_fault
 624	set_vms;
 625	rted	r12, 0;	/* interrupts enabled */
626 nop;
627
628C_ENTRY(page_fault_instr_trap):
 629	SAVE_STATE		/* Save registers.*/
630 /* PC, before IRQ/trap - this is one instruction above */
631 swi r17, r1, PTO+PT_PC;
632 tovirt(r1,r1)
 633	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
634 addik r15, r0, ret_from_exc-8
635 addik r5, r1, PTO /* parameter struct pt_regs * regs */
636 mfs r6, rear /* parameter unsigned long address */
637 nop
638 ori r7, r0, 0 /* parameter unsigned long error_code */
 639	addik	r12, r0, do_page_fault
 640	set_vms;
 641	rted	r12, 0;	/* interrupts enabled */
642 nop;
643
644/* Entry point used to return from an exception. */
645C_ENTRY(ret_from_exc):
 646	lwi	r11, r1, PTO + PT_MODE;
647 bnei r11, 2f; /* See if returning to kernel mode, */
648 /* ... if so, skip resched &c. */
649
650 /* We're returning to user mode, so check for various conditions that
651 trigger rescheduling. */
 652	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
653 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
654 andi r11, r11, _TIF_NEED_RESCHED;
655 beqi r11, 5f;
656
657/* Call the scheduler before returning from a syscall/trap. */
658 bralid r15, schedule; /* Call scheduler */
659 nop; /* delay slot */
660
661 /* Maybe handle a signal */
 6625:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
663 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
664 andi r11, r11, _TIF_SIGPENDING;
 665	beqi	r11, 1f;	/* No signals to handle, skip handling */
666
667 /*
668 * Handle a signal return; Pending signals should be in r18.
669 *
670 * Not all registers are saved by the normal trap/interrupt entry
671 * points (for instance, call-saved registers (because the normal
672 * C-compiler calling sequence in the kernel makes sure they're
673 * preserved), and call-clobbered registers in the case of
674 * traps), but signal handlers may want to examine or change the
675 * complete register state. Here we save anything not saved by
676 * the normal entry sequence, so that it may be safely restored
 677 * (in a possibly modified form) after do_signal returns. */
 678	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
679 addi r7, r0, 0; /* Arg 3: int in_syscall */
680 bralid r15, do_signal; /* Handle any signals */
 681	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
682
683/* Finally, return to user state. */
 6841:	set_bip;			/* Ints masked for state restore */
 685	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
686 VM_OFF;
687 tophys(r1,r1);
688
689 RESTORE_REGS;
690 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
691
692 lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
693 bri 6f;
694/* Return to kernel state. */
6952: set_bip; /* Ints masked for state restore */
696 VM_OFF;
 697	tophys(r1,r1);
698 RESTORE_REGS;
699 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
700
701 tovirt(r1,r1);
7026:
703EXC_return: /* Make global symbol for debugging */
704 rtbd r14, 0; /* Instructions to return from an IRQ */
705 nop;
706
707/*
 708 * HW EXCEPTION routine end
709 */
710
711/*
712 * Hardware maskable interrupts.
713 *
714 * The stack-pointer (r1) should have already been saved to the memory
715 * location PER_CPU(ENTRY_SP).
716 */
717C_ENTRY(_interrupt):
718/* MS: we are in physical address */
719/* Save registers, switch to proper stack, convert SP to virtual.*/
720 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
 721	/* MS: See if already in kernel mode. */
 722	mfs	r1, rmsr
 723	nop
724 andi r1, r1, MSR_UMS
725 bnei r1, 1f
726
727/* Kernel-mode state save. */
728 lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
 729	tophys(r1,r1); /* MS: r1 now holds the physical address of the stack */
730 /* save registers */
731/* MS: Make room on the stack -> activation record */
732 addik r1, r1, -STATE_SAVE_SIZE;
 733	SAVE_REGS
 734	swi	r1, r1, PTO + PT_MODE; /* 0 - user mode, 1 - kernel mode */
735 brid 2f;
736 nop; /* MS: Fill delay slot */
737
7381:
739/* User-mode state save. */
740 /* MS: get the saved current */
741 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
742 tophys(r1,r1);
743 lwi r1, r1, TS_THREAD_INFO;
744 addik r1, r1, THREAD_SIZE;
745 tophys(r1,r1);
746 /* save registers */
747 addik r1, r1, -STATE_SAVE_SIZE;
748 SAVE_REGS
749 /* calculate mode */
750 swi r0, r1, PTO + PT_MODE;
751 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
752 swi r11, r1, PTO+PT_R1;
 7532:
 754	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
 755	tovirt(r1,r1)
 756	addik	r5, r1, PTO;
 757	set_vms;
758 addik r11, r0, do_IRQ;
759 addik r15, r0, irq_call;
760irq_call:rtbd r11, 0;
761 nop;
762
763/* MS: we are in virtual mode */
764ret_from_irq:
765 lwi r11, r1, PTO + PT_MODE;
766 bnei r11, 2f;
767
 768	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
769 lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */
770 andi r11, r11, _TIF_NEED_RESCHED;
771 beqi r11, 5f
772 bralid r15, schedule;
773 nop; /* delay slot */
774
775 /* Maybe handle a signal */
 7765:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* MS: get thread info */
777 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
778 andi r11, r11, _TIF_SIGPENDING;
779 beqid r11, no_intr_resched
780/* Handle a signal return; Pending signals should be in r18. */
781 addi r7, r0, 0; /* Arg 3: int in_syscall */
 782	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
783 bralid r15, do_signal; /* Handle any signals */
784 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
785
786/* Finally, return to user state. */
787no_intr_resched:
788 /* Disable interrupts, we are now committed to the state restore */
789 disable_irq
 790	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
791 VM_OFF;
792 tophys(r1,r1);
793 RESTORE_REGS
794 addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
795 lwi r1, r1, PT_R1 - PT_SIZE;
796 bri 6f;
797/* MS: Return to kernel state. */
7982:
799#ifdef CONFIG_PREEMPT
 800	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
801 /* MS: get preempt_count from thread info */
802 lwi r5, r11, TI_PREEMPT_COUNT;
803 bgti r5, restore;
804
805 lwi r5, r11, TI_FLAGS; /* get flags in thread info */
806 andi r5, r5, _TIF_NEED_RESCHED;
807 beqi r5, restore /* if zero jump over */
808
809preempt:
 810	/* interrupts are off, that's why I am calling preempt_schedule_irq */
811 bralid r15, preempt_schedule_irq
812 nop
 813	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
814 lwi r5, r11, TI_FLAGS; /* get flags in thread info */
815 andi r5, r5, _TIF_NEED_RESCHED;
816 bnei r5, preempt /* if non zero jump to resched */
817restore:
818#endif
819 VM_OFF /* MS: turn off MMU */
 820	tophys(r1,r1)
821 RESTORE_REGS
822 addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
823 tovirt(r1,r1);
8246:
825IRQ_return: /* MS: Make global symbol for debugging */
826 rtid r14, 0
827 nop
828
829/*
830 * `Debug' trap
831 * We enter dbtrap in "BIP" (breakpoint) mode.
832 * So we exit the breakpoint mode with an 'rtbd' and proceed with the
833 * original dbtrap.
834 * however, wait to save state first
835 */
836C_ENTRY(_debug_exception):
837 /* BIP bit is set on entry, no interrupts can occur */
838 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
839
 840	mfs	r1, rmsr
 841	nop
842 andi r1, r1, MSR_UMS
843 bnei r1, 1f
 844	/* Kernel-mode state save. */
845 lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
846 tophys(r1,r1);
847
848 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
849 SAVE_REGS;
850
 851	swi	r1, r1, PTO + PT_MODE;
852 brid 2f;
853 nop; /* Fill delay slot */
8541: /* User-mode state save. */
855 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
856 tophys(r1,r1);
857 lwi r1, r1, TS_THREAD_INFO; /* get the thread info */
858 addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */
859 tophys(r1,r1);
860
861 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
862 SAVE_REGS;
863
 864	swi	r0, r1, PTO + PT_MODE;	/* Was in user-mode. */
865 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
866 swi r11, r1, PTO+PT_R1; /* Store user SP. */
 8672:
868 tovirt(r1,r1)
869
 870	set_vms;
871 addi r5, r0, SIGTRAP /* send the trap signal */
 872	add	r6, r0, CURRENT_TASK;	/* Arg 2: get current task ptr into r6 */
873 addk r7, r0, r0 /* 3rd param zero */
 874dbtrap_call:	rtbd	r0, send_sig;
 875	addik	r15, r0, dbtrap_call;
876
877 set_bip; /* Ints masked for state restore*/
 878	lwi	r11, r1, PTO + PT_MODE;
879 bnei r11, 2f;
880
 881	/* Get current task's thread_info into r11 */
 882	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
883 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
884 andi r11, r11, _TIF_NEED_RESCHED;
885 beqi r11, 5f;
886
887/* Call the scheduler before returning from a syscall/trap. */
888
889 bralid r15, schedule; /* Call scheduler */
890 nop; /* delay slot */
891 /* XXX Is PT_DTRACE handling needed here? */
892 /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */
893
894 /* Maybe handle a signal */
 8955:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
896 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
897 andi r11, r11, _TIF_SIGPENDING;
 898	beqi	r11, 1f;	/* No signals to handle, skip handling */
899
900/* Handle a signal return; Pending signals should be in r18. */
901 /* Not all registers are saved by the normal trap/interrupt entry
902 points (for instance, call-saved registers (because the normal
903 C-compiler calling sequence in the kernel makes sure they're
904 preserved), and call-clobbered registers in the case of
905 traps), but signal handlers may want to examine or change the
906 complete register state. Here we save anything not saved by
907 the normal entry sequence, so that it may be safely restored
908 (in a possibly modified form) after do_signal returns. */
909
 910	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
911 addi r7, r0, 0; /* Arg 3: int in_syscall */
912 bralid r15, do_signal; /* Handle any signals */
 913	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
914
915
916/* Finally, return to user state. */
 9171:
 918	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
919 VM_OFF;
920 tophys(r1,r1);
921
922 RESTORE_REGS
923 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
924
925
926 lwi r1, r1, PT_R1 - PT_SIZE;
927 /* Restore user stack pointer. */
928 bri 6f;
929
930/* Return to kernel state. */
9312: VM_OFF;
932 tophys(r1,r1);
ca54502b
MS
933 RESTORE_REGS
934 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
935
936 tovirt(r1,r1);
9376:
938DBTRAP_return: /* Make global symbol for debugging */
939 rtbd r14, 0; /* Instructions to return from an IRQ */
940 nop;
941
942
943
944ENTRY(_switch_to)
945 /* prepare return value */
 946	addk	r3, r0, CURRENT_TASK
947
948 /* save registers in cpu_context */
949 /* use r11 and r12, volatile registers, as temp register */
950 /* give start of cpu_context for previous process */
951 addik r11, r5, TI_CPU_CONTEXT
952 swi r1, r11, CC_R1
953 swi r2, r11, CC_R2
954 /* skip volatile registers.
955 * they are saved on stack when we jumped to _switch_to() */
956 /* dedicated registers */
957 swi r13, r11, CC_R13
958 swi r14, r11, CC_R14
959 swi r15, r11, CC_R15
960 swi r16, r11, CC_R16
961 swi r17, r11, CC_R17
962 swi r18, r11, CC_R18
963 /* save non-volatile registers */
964 swi r19, r11, CC_R19
965 swi r20, r11, CC_R20
966 swi r21, r11, CC_R21
967 swi r22, r11, CC_R22
968 swi r23, r11, CC_R23
969 swi r24, r11, CC_R24
970 swi r25, r11, CC_R25
971 swi r26, r11, CC_R26
972 swi r27, r11, CC_R27
973 swi r28, r11, CC_R28
974 swi r29, r11, CC_R29
975 swi r30, r11, CC_R30
976 /* special purpose registers */
977 mfs r12, rmsr
978 nop
979 swi r12, r11, CC_MSR
980 mfs r12, rear
981 nop
982 swi r12, r11, CC_EAR
983 mfs r12, resr
984 nop
985 swi r12, r11, CC_ESR
986 mfs r12, rfsr
987 nop
988 swi r12, r11, CC_FSR
989
 990	/* update r31 (CURRENT_TASK) to point to the task which will run next */
991 lwi CURRENT_TASK, r6, TI_TASK
 992	/* store it to current_save too */
 993	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
994
995 /* get new process' cpu context and restore */
 996	/* get the start of the next task's cpu context */
997 addik r11, r6, TI_CPU_CONTEXT
998
999 /* non-volatile registers */
1000 lwi r30, r11, CC_R30
1001 lwi r29, r11, CC_R29
1002 lwi r28, r11, CC_R28
1003 lwi r27, r11, CC_R27
1004 lwi r26, r11, CC_R26
1005 lwi r25, r11, CC_R25
1006 lwi r24, r11, CC_R24
1007 lwi r23, r11, CC_R23
1008 lwi r22, r11, CC_R22
1009 lwi r21, r11, CC_R21
1010 lwi r20, r11, CC_R20
1011 lwi r19, r11, CC_R19
1012 /* dedicated registers */
1013 lwi r18, r11, CC_R18
1014 lwi r17, r11, CC_R17
1015 lwi r16, r11, CC_R16
1016 lwi r15, r11, CC_R15
1017 lwi r14, r11, CC_R14
1018 lwi r13, r11, CC_R13
1019 /* skip volatile registers */
1020 lwi r2, r11, CC_R2
1021 lwi r1, r11, CC_R1
1022
1023 /* special purpose registers */
1024 lwi r12, r11, CC_FSR
1025 mts rfsr, r12
1026 nop
1027 lwi r12, r11, CC_MSR
1028 mts rmsr, r12
1029 nop
1030
1031 rtsd r15, 8
1032 nop
1033
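/* As used above: _switch_to receives the previous task's thread_info in r5
 * and the next task's thread_info in r6, and hands back the previous task
 * pointer in r3 (the value of CURRENT_TASK on entry). */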
1034ENTRY(_reset)
1035 brai 0x70; /* Jump back to FS-boot */
1036
1037ENTRY(_break)
1038 mfs r5, rmsr
1039 nop
1040 swi r5, r0, 0x250 + TOPHYS(r0_ram)
1041 mfs r5, resr
1042 nop
1043 swi r5, r0, 0x254 + TOPHYS(r0_ram)
1044 bri 0
1045
1046 /* These are compiled and loaded into high memory, then
1047 * copied into place in mach_early_setup */
1048 .section .init.ivt, "ax"
1049 .org 0x0
1050 /* this is very important - here is the reset vector */
 1051	/* in the current MMU branch you don't care what is here - it is
 1052	 * used from the bootloader side - but this is correct for FS-BOOT */
1053 brai 0x70
1054 nop
1055 brai TOPHYS(_user_exception); /* syscall handler */
1056 brai TOPHYS(_interrupt); /* Interrupt handler */
1057 brai TOPHYS(_break); /* nmi trap handler */
1058 brai TOPHYS(_hw_exception_handler); /* HW exception handler */
1059
1060 .org 0x60
1061 brai TOPHYS(_debug_exception); /* debug trap handler*/
1062
1063.section .rodata,"a"
1064#include "syscall_table.S"
1065
1066syscall_table_size=(.-sys_call_table)
1067
1068type_SYSCALL:
1069 .ascii "SYSCALL\0"
1070type_IRQ:
1071 .ascii "IRQ\0"
1072type_IRQ_PREEMPT:
1073 .ascii "IRQ (PREEMPTED)\0"
1074type_SYSCALL_PREEMPT:
1075 .ascii " SYSCALL (PREEMPTED)\0"
1076
1077 /*
1078 * Trap decoding for stack unwinder
1079 * Tuples are (start addr, end addr, string)
 1080 * If the return address lies within [start addr, end addr],
1081 * unwinder displays 'string'
1082 */
1083
1084 .align 4
1085.global microblaze_trap_handlers
1086microblaze_trap_handlers:
1087 /* Exact matches come first */
1088 .word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
1089 .word ret_from_irq ; .word ret_from_irq ; .word type_IRQ
1090 /* Fuzzy matches go here */
1091 .word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
1092 .word ret_from_trap; .word TRAP_return ; .word type_SYSCALL_PREEMPT
1093 /* End of table */
1094 .word 0 ; .word 0 ; .word 0