1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 * Adapted for Power Macintosh by Paul Mackerras.
7 * Low-level exception handlers and MMU support
8 * rewritten by Paul Mackerras.
9 * Copyright (C) 1996 Paul Mackerras.
10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 * This file contains the system call entry code, context switch
13 * code, and exception/interrupt return code for PowerPC.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <linux/errno.h>
23#include <linux/sys.h>
24#include <linux/threads.h>
25#include <asm/reg.h>
26#include <asm/page.h>
27#include <asm/mmu.h>
28#include <asm/cputable.h>
29#include <asm/thread_info.h>
30#include <asm/ppc_asm.h>
31#include <asm/asm-offsets.h>
32#include <asm/unistd.h>
33
34#undef SHOW_SYSCALLS
35#undef SHOW_SYSCALLS_TASK
36
37/*
38 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
39 */
40#if MSR_KERNEL >= 0x10000
41#define LOAD_MSR_KERNEL(r, x) lis r,(x)@h; ori r,r,(x)@l
42#else
43#define LOAD_MSR_KERNEL(r, x) li r,(x)
44#endif
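/*
 * li only takes a 16-bit signed immediate, so an MSR value with bits
 * set above 0xffff (e.g. MSR_CE on 4xx/Book-E) has to be built with a
 * lis/ori pair; a plain li is enough otherwise.
 */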
45
46#ifdef CONFIG_BOOKE
47#include "head_booke.h"
48#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level) \
49 mtspr exc_level##_SPRG,r8; \
50 BOOKE_LOAD_EXC_LEVEL_STACK(exc_level); \
51 lwz r0,GPR10-INT_FRAME_SIZE(r8); \
52 stw r0,GPR10(r11); \
53 lwz r0,GPR11-INT_FRAME_SIZE(r8); \
54 stw r0,GPR11(r11); \
55 mfspr r8,exc_level##_SPRG
56
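/*
 * TRANSFER_TO_HANDLER_EXC_LEVEL copies the r10/r11 values that the
 * exception-level prolog saved on its dedicated stack into the normal
 * exception frame at r11, using r8 as scratch (preserved via the
 * exception level's SPRG), before falling into the common transfer code.
 */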
57 .globl mcheck_transfer_to_handler
58mcheck_transfer_to_handler:
59 TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
60 b transfer_to_handler_full
61
62 .globl debug_transfer_to_handler
63debug_transfer_to_handler:
64 TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
65 b transfer_to_handler_full
66
67 .globl crit_transfer_to_handler
68crit_transfer_to_handler:
69 TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
70 /* fall through */
71#endif
72
73#ifdef CONFIG_40x
74 .globl crit_transfer_to_handler
75crit_transfer_to_handler:
76 lwz r0,crit_r10@l(0)
77 stw r0,GPR10(r11)
78 lwz r0,crit_r11@l(0)
79 stw r0,GPR11(r11)
80 /* fall through */
81#endif
82
83/*
84 * This code finishes saving the registers to the exception frame
85 * and jumps to the appropriate handler for the exception, turning
86 * on address translation.
87 * Note that we rely on the caller having set cr0.eq iff the exception
88 * occurred in kernel mode (i.e. MSR:PR = 0).
89 */
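/*
 * On entry here the exception prolog has left:
 *   r9  = interrupted MSR (stored to _MSR)
 *   r10 = MSR value to run the handler with
 *   r11 = pointer to the exception frame
 *   r12 = interrupted NIP (stored to _NIP)
 *   r1  = kernel stack pointer for the frame
 * and LR points at a two-word transfer table: the handler's virtual
 * address followed by the address to return to when the handler is done.
 */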
90 .globl transfer_to_handler_full
91transfer_to_handler_full:
92 SAVE_NVGPRS(r11)
93 /* fall through */
94
95 .globl transfer_to_handler
96transfer_to_handler:
97 stw r2,GPR2(r11)
98 stw r12,_NIP(r11)
99 stw r9,_MSR(r11)
100 andi. r2,r9,MSR_PR
101 mfctr r12
102 mfspr r2,SPRN_XER
103 stw r12,_CTR(r11)
104 stw r2,_XER(r11)
105 mfspr r12,SPRN_SPRG3
106 addi r2,r12,-THREAD
107 tovirt(r2,r2) /* set r2 to current */
108 beq 2f /* if from user, fix up THREAD.regs */
109 addi r11,r1,STACK_FRAME_OVERHEAD
110 stw r11,PT_REGS(r12)
111#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
112 /* Check to see if the dbcr0 register is set up to debug. Use the
113 internal debug mode bit to do this. */
114 lwz r12,THREAD_DBCR0(r12)
115 andis. r12,r12,DBCR0_IDM@h
116 beq+ 3f
117 /* From user and task is ptraced - load up global dbcr0 */
118 li r12,-1 /* clear all pending debug events */
119 mtspr SPRN_DBSR,r12
120 lis r11,global_dbcr0@ha
121 tophys(r11,r11)
122 addi r11,r11,global_dbcr0@l
123#ifdef CONFIG_SMP
124 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
125 lwz r9,TI_CPU(r9)
126 slwi r9,r9,3
127 add r11,r11,r9
128#endif
129 lwz r12,0(r11)
130 mtspr SPRN_DBCR0,r12
131 lwz r12,4(r11)
132 addi r12,r12,-1
133 stw r12,4(r11)
134#endif
135 b 3f
136
1372: /* if from kernel, check interrupted DOZE/NAP mode and
138 * check for stack overflow
139 */
140 lwz r9,KSP_LIMIT(r12)
141 cmplw r1,r9 /* if r1 <= ksp_limit */
142 ble- stack_ovf /* then the kernel stack overflowed */
1435:
144#ifdef CONFIG_6xx
145 rlwinm r9,r1,0,0,31-THREAD_SHIFT
146 tophys(r9,r9) /* check local flags */
147 lwz r12,TI_LOCAL_FLAGS(r9)
148 mtcrf 0x01,r12
149 bt- 31-TLF_NAPPING,4f
150#endif /* CONFIG_6xx */
151 .globl transfer_to_handler_cont
152transfer_to_handler_cont:
1533:
154 mflr r9
155 lwz r11,0(r9) /* virtual address of handler */
156 lwz r9,4(r9) /* where to go when done */
157 mtspr SPRN_SRR0,r11
158 mtspr SPRN_SRR1,r10
159 mtlr r9
160 SYNC
161 RFI /* jump to handler, enable MMU */
162
163#ifdef CONFIG_6xx
1644: rlwinm r12,r12,0,~_TLF_NAPPING
165 stw r12,TI_LOCAL_FLAGS(r9)
166 b power_save_6xx_restore
167#endif
168
169/*
170 * On kernel stack overflow, load up an initial stack pointer
171 * and call StackOverflow(regs), which should not return.
172 */
173stack_ovf:
174 /* sometimes we use a statically-allocated stack, which is OK. */
175 lis r12,_end@h
176 ori r12,r12,_end@l
177 cmplw r1,r12
178 ble 5b /* r1 <= &_end is OK */
179 SAVE_NVGPRS(r11)
180 addi r3,r1,STACK_FRAME_OVERHEAD
181 lis r1,init_thread_union@ha
182 addi r1,r1,init_thread_union@l
183 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
184 lis r9,StackOverflow@ha
185 addi r9,r9,StackOverflow@l
186 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
187 FIX_SRR1(r10,r12)
188 mtspr SPRN_SRR0,r9
189 mtspr SPRN_SRR1,r10
190 SYNC
191 RFI
192
193/*
194 * Handle a system call.
195 */
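/*
 * On entry the syscall number is in r0 and the arguments are in r3-r8.
 * The handler's return value comes back in r3; on the way out (see
 * ret_from_syscall below) a value in the -errno range is negated and
 * the CR0.SO bit is set, which is what user space tests for failure.
 */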
196 .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
197 .stabs "entry_32.S",N_SO,0,0,0f
1980:
199
200_GLOBAL(DoSyscall)
201 stw r3,ORIG_GPR3(r1)
202 li r12,0
203 stw r12,RESULT(r1)
204 lwz r11,_CCR(r1) /* Clear SO bit in CR */
205 rlwinm r11,r11,0,4,2
206 stw r11,_CCR(r1)
207#ifdef SHOW_SYSCALLS
208 bl do_show_syscall
209#endif /* SHOW_SYSCALLS */
210 rlwinm r10,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
211 lwz r11,TI_FLAGS(r10)
212 andi. r11,r11,_TIF_SYSCALL_T_OR_A
213 bne- syscall_dotrace
214syscall_dotrace_cont:
215 cmplwi 0,r0,NR_syscalls
216 lis r10,sys_call_table@h
217 ori r10,r10,sys_call_table@l
218 slwi r0,r0,2
219 bge- 66f
220 lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
221 mtlr r10
222 addi r9,r1,STACK_FRAME_OVERHEAD
223 PPC440EP_ERR42
224 blrl /* Call handler */
225 .globl ret_from_syscall
226ret_from_syscall:
227#ifdef SHOW_SYSCALLS
228 bl do_show_syscall_exit
229#endif
230 mr r6,r3
231 rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
232 /* disable interrupts so current_thread_info()->flags can't change */
233 LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
234 SYNC
235 MTMSRD(r10)
236 lwz r9,TI_FLAGS(r12)
237 li r8,-_LAST_ERRNO
238 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
239 bne- syscall_exit_work
240 cmplw 0,r3,r8
241 blt+ syscall_exit_cont
242 lwz r11,_CCR(r1) /* Load CR */
243 neg r3,r3
244 oris r11,r11,0x1000 /* Set SO bit in CR */
245 stw r11,_CCR(r1)
246syscall_exit_cont:
247#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
248 /* If the process has its own DBCR0 value, load it up. The internal
249 debug mode bit tells us that dbcr0 should be loaded. */
250 lwz r0,THREAD+THREAD_DBCR0(r2)
251 andis. r10,r0,DBCR0_IDM@h
252 bnel- load_dbcr0
253#endif
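/*
 * The 44x instruction cache is virtually tagged, so other code sets
 * icache_44x_need_flush when the icache may hold stale entries; if the
 * flag is set we invalidate the whole icache (iccci at 2: below) and
 * clear it before returning to user space.
 */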
254#ifdef CONFIG_44x
255 lis r4,icache_44x_need_flush@ha
256 lwz r5,icache_44x_need_flush@l(r4)
257 cmplwi cr0,r5,0
258 bne- 2f
2591:
260#endif /* CONFIG_44x */
261BEGIN_FTR_SECTION
262 lwarx r7,0,r1
263END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
264 stwcx. r0,0,r1 /* to clear the reservation */
265 lwz r4,_LINK(r1)
266 lwz r5,_CCR(r1)
267 mtlr r4
268 mtcr r5
269 lwz r7,_NIP(r1)
270 lwz r8,_MSR(r1)
271 FIX_SRR1(r8, r0)
272 lwz r2,GPR2(r1)
273 lwz r1,GPR1(r1)
274 mtspr SPRN_SRR0,r7
275 mtspr SPRN_SRR1,r8
276 SYNC
277 RFI
278#ifdef CONFIG_44x
2792: li r7,0
280 iccci r0,r0
281 stw r7,icache_44x_need_flush@l(r4)
282 b 1b
283#endif /* CONFIG_44x */
284
28566: li r3,-ENOSYS
286 b ret_from_syscall
287
288 .globl ret_from_fork
289ret_from_fork:
290 REST_NVGPRS(r1)
291 bl schedule_tail
292 li r3,0
293 b ret_from_syscall
294
295/* Traced system call support */
296syscall_dotrace:
297 SAVE_NVGPRS(r1)
298 li r0,0xc00
299 stw r0,_TRAP(r1)
300 addi r3,r1,STACK_FRAME_OVERHEAD
301 bl do_syscall_trace_enter
302 lwz r0,GPR0(r1) /* Restore original registers */
303 lwz r3,GPR3(r1)
304 lwz r4,GPR4(r1)
305 lwz r5,GPR5(r1)
306 lwz r6,GPR6(r1)
307 lwz r7,GPR7(r1)
308 lwz r8,GPR8(r1)
309 REST_NVGPRS(r1)
310 b syscall_dotrace_cont
311
312syscall_exit_work:
313 andi. r0,r9,_TIF_RESTOREALL
314 beq+ 0f
315 REST_NVGPRS(r1)
316 b 2f
3170: cmplw 0,r3,r8
318 blt+ 1f
319 andi. r0,r9,_TIF_NOERROR
320 bne- 1f
321 lwz r11,_CCR(r1) /* Load CR */
322 neg r3,r3
323 oris r11,r11,0x1000 /* Set SO bit in CR */
324 stw r11,_CCR(r1)
325
3261: stw r6,RESULT(r1) /* Save result */
327 stw r3,GPR3(r1) /* Update return value */
3282: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
329 beq 4f
330
331 /* Clear per-syscall TIF flags if any are set. */
332
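	/* The lwarx/stwcx. pair below clears the per-syscall bits in
	   TI_FLAGS atomically: if another update to the flags word gets
	   in between the load and the store, the stwcx. fails and we
	   retry from 3:.  The dcbt is only there to work around IBM 405
	   erratum 77. */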
333 li r11,_TIF_PERSYSCALL_MASK
334 addi r12,r12,TI_FLAGS
3353: lwarx r8,0,r12
336 andc r8,r8,r11
337#ifdef CONFIG_IBM405_ERR77
338 dcbt 0,r12
339#endif
340 stwcx. r8,0,r12
341 bne- 3b
342 subi r12,r12,TI_FLAGS
343
3444: /* Anything which requires enabling interrupts? */
345 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
346 beq ret_from_except
347
348 /* Re-enable interrupts */
349 ori r10,r10,MSR_EE
350 SYNC
351 MTMSRD(r10)
352
353 /* Save NVGPRS if they're not saved already */
354 lwz r4,_TRAP(r1)
355 andi. r4,r4,1
356 beq 5f
357 SAVE_NVGPRS(r1)
358 li r4,0xc00
359 stw r4,_TRAP(r1)
3605:
361 addi r3,r1,STACK_FRAME_OVERHEAD
362 bl do_syscall_trace_leave
363 b ret_from_except_full
364
365#ifdef SHOW_SYSCALLS
366do_show_syscall:
367#ifdef SHOW_SYSCALLS_TASK
368 lis r11,show_syscalls_task@ha
369 lwz r11,show_syscalls_task@l(r11)
370 cmp 0,r2,r11
371 bnelr
372#endif
373 stw r31,GPR31(r1)
374 mflr r31
375 lis r3,7f@ha
376 addi r3,r3,7f@l
377 lwz r4,GPR0(r1)
378 lwz r5,GPR3(r1)
379 lwz r6,GPR4(r1)
380 lwz r7,GPR5(r1)
381 lwz r8,GPR6(r1)
382 lwz r9,GPR7(r1)
383 bl printk
384 lis r3,77f@ha
385 addi r3,r3,77f@l
386 lwz r4,GPR8(r1)
387 mr r5,r2
388 bl printk
389 lwz r0,GPR0(r1)
390 lwz r3,GPR3(r1)
391 lwz r4,GPR4(r1)
392 lwz r5,GPR5(r1)
393 lwz r6,GPR6(r1)
394 lwz r7,GPR7(r1)
395 lwz r8,GPR8(r1)
396 mtlr r31
397 lwz r31,GPR31(r1)
398 blr
399
400do_show_syscall_exit:
401#ifdef SHOW_SYSCALLS_TASK
402 lis r11,show_syscalls_task@ha
403 lwz r11,show_syscalls_task@l(r11)
404 cmp 0,r2,r11
405 bnelr
406#endif
407 stw r31,GPR31(r1)
408 mflr r31
409 stw r3,RESULT(r1) /* Save result */
410 mr r4,r3
411 lis r3,79f@ha
412 addi r3,r3,79f@l
413 bl printk
414 lwz r3,RESULT(r1)
415 mtlr r31
416 lwz r31,GPR31(r1)
417 blr
418
4197: .string "syscall %d(%x, %x, %x, %x, %x, "
42077: .string "%x), current=%p\n"
42179: .string " -> %x\n"
422 .align 2,0
423
424#ifdef SHOW_SYSCALLS_TASK
425 .data
426 .globl show_syscalls_task
427show_syscalls_task:
428 .long -1
429 .text
430#endif
431#endif /* SHOW_SYSCALLS */
432
433/*
434 * The fork/clone functions need to copy the full register set into
435 * the child process. Therefore we need to save all the nonvolatile
436 * registers (r13 - r31) before calling the C code.
437 */
438 .globl ppc_fork
439ppc_fork:
440 SAVE_NVGPRS(r1)
441 lwz r0,_TRAP(r1)
442 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
443 stw r0,_TRAP(r1) /* register set saved */
444 b sys_fork
445
446 .globl ppc_vfork
447ppc_vfork:
448 SAVE_NVGPRS(r1)
449 lwz r0,_TRAP(r1)
450 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
451 stw r0,_TRAP(r1) /* register set saved */
452 b sys_vfork
453
454 .globl ppc_clone
455ppc_clone:
456 SAVE_NVGPRS(r1)
457 lwz r0,_TRAP(r1)
458 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
459 stw r0,_TRAP(r1) /* register set saved */
460 b sys_clone
461
462 .globl ppc_swapcontext
463ppc_swapcontext:
464 SAVE_NVGPRS(r1)
465 lwz r0,_TRAP(r1)
466 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
467 stw r0,_TRAP(r1) /* register set saved */
468 b sys_swapcontext
469
470/*
471 * Top-level page fault handling.
472 * This is in assembler because if do_page_fault tells us that
473 * it is a bad kernel page fault, we want to save the non-volatile
474 * registers before calling bad_page_fault.
475 */
476 .globl handle_page_fault
477handle_page_fault:
478 stw r4,_DAR(r1)
479 addi r3,r1,STACK_FRAME_OVERHEAD
480 bl do_page_fault
481 cmpwi r3,0
482 beq+ ret_from_except
483 SAVE_NVGPRS(r1)
484 lwz r0,_TRAP(r1)
485 clrrwi r0,r0,1
486 stw r0,_TRAP(r1)
487 mr r5,r3
488 addi r3,r1,STACK_FRAME_OVERHEAD
489 lwz r4,_DAR(r1)
490 bl bad_page_fault
491 b ret_from_except_full
492
493/*
494 * This routine switches between two different tasks. The process
495 * state of one is saved on its kernel stack. Then the state
496 * of the other is restored from its kernel stack. The memory
497 * management hardware is updated to the second process's state.
498 * Finally, we can return to the second process.
499 * On entry, r3 points to the THREAD for the current task, r4
500 * points to the THREAD for the new task.
501 *
502 * This routine is always called with interrupts disabled.
503 *
504 * Note: there are two ways to get to the "going out" portion
505 * of this code; either by coming in via the entry (_switch)
506 * or via "fork" which must set up an environment equivalent
507 * to the "_switch" path. If you change this, you'll have to
508 * change the fork code also.
509 *
510 * The code which creates the new task context is in 'copy_thread'
511 * in arch/ppc/kernel/process.c
512 */
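/*
 * Seen from C, this behaves roughly as
 *	struct task_struct *_switch(struct thread_struct *prev,
 *				    struct thread_struct *next);
 * with the return value (r3 on exit) being the task we switched away
 * from, i.e. the 'last' task the caller wants back.
 */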
513_GLOBAL(_switch)
514 stwu r1,-INT_FRAME_SIZE(r1)
515 mflr r0
516 stw r0,INT_FRAME_SIZE+4(r1)
517 /* r3-r12 are caller saved -- Cort */
518 SAVE_NVGPRS(r1)
519 stw r0,_NIP(r1) /* Return to switch caller */
520 mfmsr r11
521 li r0,MSR_FP /* Disable floating-point */
522#ifdef CONFIG_ALTIVEC
523BEGIN_FTR_SECTION
524 oris r0,r0,MSR_VEC@h /* Disable altivec */
525 mfspr r12,SPRN_VRSAVE /* save vrsave register value */
526 stw r12,THREAD+THREAD_VRSAVE(r2)
527END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
528#endif /* CONFIG_ALTIVEC */
529#ifdef CONFIG_SPE
530BEGIN_FTR_SECTION
531 oris r0,r0,MSR_SPE@h /* Disable SPE */
532 mfspr r12,SPRN_SPEFSCR /* save spefscr register value */
533 stw r12,THREAD+THREAD_SPEFSCR(r2)
534END_FTR_SECTION_IFSET(CPU_FTR_SPE)
535#endif /* CONFIG_SPE */
536 and. r0,r0,r11 /* FP or altivec or SPE enabled? */
537 beq+ 1f
538 andc r11,r11,r0
539 MTMSRD(r11)
540 isync
5411: stw r11,_MSR(r1)
542 mfcr r10
543 stw r10,_CCR(r1)
544 stw r1,KSP(r3) /* Set old stack pointer */
545
546#ifdef CONFIG_SMP
547 /* We need a sync somewhere here to make sure that if the
548 * previous task gets rescheduled on another CPU, it sees all
549 * stores it has performed on this one.
550 */
551 sync
552#endif /* CONFIG_SMP */
553
554 tophys(r0,r4)
555 CLR_TOP32(r0)
556 mtspr SPRN_SPRG3,r0 /* Update current THREAD phys addr */
557 lwz r1,KSP(r4) /* Load new stack pointer */
558
559 /* save the old current 'last' for return value */
560 mr r3,r2
561 addi r2,r4,-THREAD /* Update current */
562
563#ifdef CONFIG_ALTIVEC
564BEGIN_FTR_SECTION
565 lwz r0,THREAD+THREAD_VRSAVE(r2)
566 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
567END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
568#endif /* CONFIG_ALTIVEC */
569#ifdef CONFIG_SPE
570BEGIN_FTR_SECTION
571 lwz r0,THREAD+THREAD_SPEFSCR(r2)
572 mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
573END_FTR_SECTION_IFSET(CPU_FTR_SPE)
574#endif /* CONFIG_SPE */
575
576 lwz r0,_CCR(r1)
577 mtcrf 0xFF,r0
578 /* r3-r12 are destroyed -- Cort */
579 REST_NVGPRS(r1)
580
581 lwz r4,_NIP(r1) /* Return to _switch caller in new task */
582 mtlr r4
583 addi r1,r1,INT_FRAME_SIZE
584 blr
585
586 .globl fast_exception_return
587fast_exception_return:
588#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
589 andi. r10,r9,MSR_RI /* check for recoverable interrupt */
590 beq 1f /* if not, we've got problems */
591#endif
592
5932: REST_4GPRS(3, r11)
594 lwz r10,_CCR(r11)
595 REST_GPR(1, r11)
596 mtcr r10
597 lwz r10,_LINK(r11)
598 mtlr r10
599 REST_GPR(10, r11)
600 mtspr SPRN_SRR1,r9
601 mtspr SPRN_SRR0,r12
602 REST_GPR(9, r11)
603 REST_GPR(12, r11)
604 lwz r11,GPR11(r11)
605 SYNC
606 RFI
607
608#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
609/* check if the exception happened in a restartable section */
6101: lis r3,exc_exit_restart_end@ha
611 addi r3,r3,exc_exit_restart_end@l
612 cmplw r12,r3
613 bge 3f
614 lis r4,exc_exit_restart@ha
615 addi r4,r4,exc_exit_restart@l
616 cmplw r12,r4
617 blt 3f
618 lis r3,fee_restarts@ha
619 tophys(r3,r3)
620 lwz r5,fee_restarts@l(r3)
621 addi r5,r5,1
622 stw r5,fee_restarts@l(r3)
623 mr r12,r4 /* restart at exc_exit_restart */
624 b 2b
625
626 .section .bss
627 .align 2
628fee_restarts:
629 .space 4
630 .previous
631
632/* aargh, a nonrecoverable interrupt, panic */
633/* aargh, we don't know which trap this is */
634/* but the 601 doesn't implement the RI bit, so assume it's OK */
6353:
636BEGIN_FTR_SECTION
637 b 2b
638END_FTR_SECTION_IFSET(CPU_FTR_601)
639 li r10,-1
640 stw r10,_TRAP(r11)
641 addi r3,r1,STACK_FRAME_OVERHEAD
642 lis r10,MSR_KERNEL@h
643 ori r10,r10,MSR_KERNEL@l
644 bl transfer_to_handler_full
645 .long nonrecoverable_exception
646 .long ret_from_except
647#endif
648
649 .globl ret_from_except_full
650ret_from_except_full:
651 REST_NVGPRS(r1)
652 /* fall through */
653
654 .globl ret_from_except
655ret_from_except:
656 /* Hard-disable interrupts so that current_thread_info()->flags
657 * can't change between when we test it and when we return
658 * from the interrupt. */
659 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
660 SYNC /* Some chip revs have problems here... */
661 MTMSRD(r10) /* disable interrupts */
662
663 lwz r3,_MSR(r1) /* Returning to user mode? */
664 andi. r0,r3,MSR_PR
665 beq resume_kernel
666
667user_exc_return: /* r10 contains MSR_KERNEL here */
668 /* Check current_thread_info()->flags */
669 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
670 lwz r9,TI_FLAGS(r9)
671 andi. r0,r9,(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NEED_RESCHED)
672 bne do_work
673
674restore_user:
675#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
676 /* Check whether this process has its own DBCR0 value. The internal
677 debug mode bit tells us that dbcr0 should be loaded. */
678 lwz r0,THREAD+THREAD_DBCR0(r2)
679 andis. r10,r0,DBCR0_IDM@h
680 bnel- load_dbcr0
681#endif
682
683#ifdef CONFIG_PREEMPT
684 b restore
685
686/* N.B. the only way to get here is from the beq following ret_from_except. */
687resume_kernel:
688 /* check current_thread_info->preempt_count */
689 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
690 lwz r0,TI_PREEMPT(r9)
691 cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
692 bne restore
693 lwz r0,TI_FLAGS(r9)
694 andi. r0,r0,_TIF_NEED_RESCHED
695 beq+ restore
696 andi. r0,r3,MSR_EE /* interrupts off? */
697 beq restore /* don't schedule if so */
6981: bl preempt_schedule_irq
699 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
700 lwz r3,TI_FLAGS(r9)
701 andi. r0,r3,_TIF_NEED_RESCHED
702 bne- 1b
703#else
704resume_kernel:
705#endif /* CONFIG_PREEMPT */
706
707 /* interrupts are hard-disabled at this point */
708restore:
709#ifdef CONFIG_44x
710 lis r4,icache_44x_need_flush@ha
711 lwz r5,icache_44x_need_flush@l(r4)
712 cmplwi cr0,r5,0
713 beq+ 1f
714 li r6,0
715 iccci r0,r0
716 stw r6,icache_44x_need_flush@l(r4)
7171:
718#endif /* CONFIG_44x */
719 lwz r0,GPR0(r1)
720 lwz r2,GPR2(r1)
721 REST_4GPRS(3, r1)
722 REST_2GPRS(7, r1)
723
724 lwz r10,_XER(r1)
725 lwz r11,_CTR(r1)
726 mtspr SPRN_XER,r10
727 mtctr r11
728
729 PPC405_ERR77(0,r1)
730BEGIN_FTR_SECTION
731 lwarx r11,0,r1
732END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
733 stwcx. r0,0,r1 /* to clear the reservation */
734
735#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
736 lwz r9,_MSR(r1)
737 andi. r10,r9,MSR_RI /* check if this exception occurred */
738 beql nonrecoverable /* at a bad place (MSR:RI = 0) */
739
740 lwz r10,_CCR(r1)
741 lwz r11,_LINK(r1)
742 mtcrf 0xFF,r10
743 mtlr r11
744
745 /*
746 * Once we put values in SRR0 and SRR1, we are in a state
747 * where exceptions are not recoverable, since taking an
748 * exception will trash SRR0 and SRR1. Therefore we clear the
749 * MSR:RI bit to indicate this. If we do take an exception,
750 * we can't return to the point of the exception but we
751 * can restart the exception exit path at the label
752 * exc_exit_restart below. -- paulus
753 */
754 LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
755 SYNC
756 MTMSRD(r10) /* clear the RI bit */
757 .globl exc_exit_restart
758exc_exit_restart:
759 lwz r9,_MSR(r1)
760 lwz r12,_NIP(r1)
761 FIX_SRR1(r9,r10)
762 mtspr SPRN_SRR0,r12
763 mtspr SPRN_SRR1,r9
764 REST_4GPRS(9, r1)
765 lwz r1,GPR1(r1)
766 .globl exc_exit_restart_end
767exc_exit_restart_end:
768 SYNC
769 RFI
770
771#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
772 /*
773 * This is a bit different on 4xx/Book-E because it doesn't have
774 * the RI bit in the MSR.
775 * The TLB miss handler checks if we have interrupted
776 * the exception exit path and restarts it if so
777 * (well maybe one day it will... :).
778 */
779 lwz r11,_LINK(r1)
780 mtlr r11
781 lwz r10,_CCR(r1)
782 mtcrf 0xff,r10
783 REST_2GPRS(9, r1)
784 .globl exc_exit_restart
785exc_exit_restart:
786 lwz r11,_NIP(r1)
787 lwz r12,_MSR(r1)
788exc_exit_start:
789 mtspr SPRN_SRR0,r11
790 mtspr SPRN_SRR1,r12
791 REST_2GPRS(11, r1)
792 lwz r1,GPR1(r1)
793 .globl exc_exit_restart_end
794exc_exit_restart_end:
795 PPC405_ERR77_SYNC
796 rfi
797 b . /* prevent prefetch past rfi */
798
799/*
800 * Returning from a critical interrupt in user mode doesn't need
801 * to be any different from a normal exception. For a critical
802 * interrupt in the kernel, we just return (without checking for
803 * preemption) since the interrupt may have happened at some crucial
804 * place (e.g. inside the TLB miss handler), and because we will be
805 * running with r1 pointing into critical_stack, not the current
806 * process's kernel stack (and therefore current_thread_info() will
807 * give the wrong answer).
808 * We have to restore various SPRs that may have been in use at the
809 * time of the critical interrupt.
810 *
811 */
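/*
 * The RET_FROM_EXC_LEVEL macro below does the whole restore inline:
 * returns to user mode are redirected to user_exc_return, while
 * kernel-mode returns restore DEAR/ESR as well as the GPRs and then
 * use the exception level's own SRR pair with the matching return
 * instruction (RFCI, RFDI or RFMCI) instead of SRR0/SRR1 + RFI.
 */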
812#ifdef CONFIG_40x
813#define PPC_40x_TURN_OFF_MSR_DR \
814 /* avoid any possible TLB misses here by turning off MSR.DR, we \
815 * assume the instructions here are mapped by a pinned TLB entry */ \
816 li r10,MSR_IR; \
817 mtmsr r10; \
818 isync; \
819 tophys(r1, r1);
820#else
821#define PPC_40x_TURN_OFF_MSR_DR
822#endif
823
824#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi) \
825 REST_NVGPRS(r1); \
826 lwz r3,_MSR(r1); \
827 andi. r3,r3,MSR_PR; \
828 LOAD_MSR_KERNEL(r10,MSR_KERNEL); \
829 bne user_exc_return; \
830 lwz r0,GPR0(r1); \
831 lwz r2,GPR2(r1); \
832 REST_4GPRS(3, r1); \
833 REST_2GPRS(7, r1); \
834 lwz r10,_XER(r1); \
835 lwz r11,_CTR(r1); \
836 mtspr SPRN_XER,r10; \
837 mtctr r11; \
838 PPC405_ERR77(0,r1); \
839 stwcx. r0,0,r1; /* to clear the reservation */ \
840 lwz r11,_LINK(r1); \
841 mtlr r11; \
842 lwz r10,_CCR(r1); \
843 mtcrf 0xff,r10; \
844 PPC_40x_TURN_OFF_MSR_DR; \
845 lwz r9,_DEAR(r1); \
846 lwz r10,_ESR(r1); \
847 mtspr SPRN_DEAR,r9; \
848 mtspr SPRN_ESR,r10; \
849 lwz r11,_NIP(r1); \
850 lwz r12,_MSR(r1); \
851 mtspr exc_lvl_srr0,r11; \
852 mtspr exc_lvl_srr1,r12; \
853 lwz r9,GPR9(r1); \
854 lwz r12,GPR12(r1); \
855 lwz r10,GPR10(r1); \
856 lwz r11,GPR11(r1); \
857 lwz r1,GPR1(r1); \
858 PPC405_ERR77_SYNC; \
859 exc_lvl_rfi; \
860 b .; /* prevent prefetch past exc_lvl_rfi */
861
862 .globl ret_from_crit_exc
863ret_from_crit_exc:
864 RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
865
866#ifdef CONFIG_BOOKE
867 .globl ret_from_debug_exc
868ret_from_debug_exc:
869 RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)
870
871 .globl ret_from_mcheck_exc
872ret_from_mcheck_exc:
873 RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
874#endif /* CONFIG_BOOKE */
875
876/*
877 * Load the DBCR0 value for a task that is being ptraced,
878 * having first saved away the global DBCR0. Note that r0
879 * has the dbcr0 value to set upon entry to this.
880 */
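/*
 * global_dbcr0 (in .bss below) holds two words per CPU: word 0 is the
 * saved global DBCR0 value and word 1 counts how many times it has been
 * overridden; on SMP the slot is indexed by the CPU number (TI_CPU) * 8.
 */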
881load_dbcr0:
882 mfmsr r10 /* first disable debug exceptions */
883 rlwinm r10,r10,0,~MSR_DE
884 mtmsr r10
885 isync
886 mfspr r10,SPRN_DBCR0
887 lis r11,global_dbcr0@ha
888 addi r11,r11,global_dbcr0@l
889#ifdef CONFIG_SMP
890 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
891 lwz r9,TI_CPU(r9)
892 slwi r9,r9,3
893 add r11,r11,r9
894#endif
895 stw r10,0(r11)
896 mtspr SPRN_DBCR0,r0
897 lwz r10,4(r11)
898 addi r10,r10,1
899 stw r10,4(r11)
900 li r11,-1
901 mtspr SPRN_DBSR,r11 /* clear all pending debug events */
902 blr
903
904 .section .bss
905 .align 4
906global_dbcr0:
907 .space 8*NR_CPUS
908 .previous
909#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
910
911do_work: /* r10 contains MSR_KERNEL here */
912 andi. r0,r9,_TIF_NEED_RESCHED
913 beq do_user_signal
914
915do_resched: /* r10 contains MSR_KERNEL here */
916 ori r10,r10,MSR_EE
917 SYNC
918 MTMSRD(r10) /* hard-enable interrupts */
919 bl schedule
920recheck:
921 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
922 SYNC
923 MTMSRD(r10) /* disable interrupts */
924 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
925 lwz r9,TI_FLAGS(r9)
926 andi. r0,r9,_TIF_NEED_RESCHED
927 bne- do_resched
928 andi. r0,r9,_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK
929 beq restore_user
930do_user_signal: /* r10 contains MSR_KERNEL here */
931 ori r10,r10,MSR_EE
932 SYNC
933 MTMSRD(r10) /* hard-enable interrupts */
934 /* save r13-r31 in the exception frame, if not already done */
935 lwz r3,_TRAP(r1)
936 andi. r0,r3,1
937 beq 2f
938 SAVE_NVGPRS(r1)
939 rlwinm r3,r3,0,0,30
940 stw r3,_TRAP(r1)
9412: li r3,0
942 addi r4,r1,STACK_FRAME_OVERHEAD
943 bl do_signal
944 REST_NVGPRS(r1)
945 b recheck
946
947/*
948 * We come here when we are at the end of handling an exception
949 * that occurred at a place where taking an exception will lose
950 * state information, such as the contents of SRR0 and SRR1.
951 */
952nonrecoverable:
953 lis r10,exc_exit_restart_end@ha
954 addi r10,r10,exc_exit_restart_end@l
955 cmplw r12,r10
956 bge 3f
957 lis r11,exc_exit_restart@ha
958 addi r11,r11,exc_exit_restart@l
959 cmplw r12,r11
960 blt 3f
961 lis r10,ee_restarts@ha
962 lwz r12,ee_restarts@l(r10)
963 addi r12,r12,1
964 stw r12,ee_restarts@l(r10)
965 mr r12,r11 /* restart at exc_exit_restart */
966 blr
9673: /* OK, we can't recover, kill this process */
968 /* but the 601 doesn't implement the RI bit, so assume it's OK */
969BEGIN_FTR_SECTION
970 blr
971END_FTR_SECTION_IFSET(CPU_FTR_601)
972 lwz r3,_TRAP(r1)
973 andi. r0,r3,1
974 beq 4f
975 SAVE_NVGPRS(r1)
976 rlwinm r3,r3,0,0,30
977 stw r3,_TRAP(r1)
9784: addi r3,r1,STACK_FRAME_OVERHEAD
979 bl nonrecoverable_exception
980 /* shouldn't return */
981 b 4b
982
983 .section .bss
984 .align 2
985ee_restarts:
986 .space 4
987 .previous
988
989/*
990 * PROM code for specific machines follows. Put it
991 * here so it's easy to add arch-specific sections later.
992 * -- Cort
993 */
994#ifdef CONFIG_PPC_RTAS
995/*
996 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
997 * called with the MMU off.
998 */
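/*
 * enter_rtas below saves the caller's MSR, stashes the physical stack
 * pointer in SPRG2, and switches to real mode by loading SRR0/SRR1 with
 * the RTAS entry point and an MSR with IR/DR cleared.  RTAS returns to
 * the physical address set up at label 1:, which restores the original
 * MSR and stack before returning to the caller.
 */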
999_GLOBAL(enter_rtas)
1000 stwu r1,-INT_FRAME_SIZE(r1)
1001 mflr r0
1002 stw r0,INT_FRAME_SIZE+4(r1)
1003 LOAD_REG_ADDR(r4, rtas)
1004 lis r6,1f@ha /* physical return address for rtas */
1005 addi r6,r6,1f@l
1006 tophys(r6,r6)
1007 tophys(r7,r1)
1008 lwz r8,RTASENTRY(r4)
1009 lwz r4,RTASBASE(r4)
1010 mfmsr r9
1011 stw r9,8(r1)
1012 LOAD_MSR_KERNEL(r0,MSR_KERNEL)
1013 SYNC /* disable interrupts so SRR0/1 */
1014 MTMSRD(r0) /* don't get trashed */
1015 li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1016 mtlr r6
1017 mtspr SPRN_SPRG2,r7
1018 mtspr SPRN_SRR0,r8
1019 mtspr SPRN_SRR1,r9
1020 RFI
10211: tophys(r9,r1)
1022 lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
1023 lwz r9,8(r9) /* original msr value */
1024 FIX_SRR1(r9,r0)
1025 addi r1,r1,INT_FRAME_SIZE
1026 li r0,0
1027 mtspr SPRN_SPRG2,r0
1028 mtspr SPRN_SRR0,r8
1029 mtspr SPRN_SRR1,r9
1030 RFI /* return to caller */
1031
1032 .globl machine_check_in_rtas
1033machine_check_in_rtas:
1034 twi 31,0,0
1035 /* XXX load up BATs and panic */
1036
1037#endif /* CONFIG_PPC_RTAS */