arch/powerpc/kernel/entry_32.S
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif

#ifdef CONFIG_BOOKE
#include "head_booke.h"
#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level)	\
	mtspr	exc_level##_SPRG,r8;			\
	BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);		\
	lwz	r0,GPR10-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR10(r11);				\
	lwz	r0,GPR11-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR11(r11);				\
	mfspr	r8,exc_level##_SPRG

	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
	b	transfer_to_handler_full

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
	b	transfer_to_handler_full

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
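/*
 * At this point the exception prologue has set up: r11 = pointer to
 * the exception frame, r12 = interrupted NIP, r9 = interrupted MSR,
 * r10 = MSR value to run the handler with.  The two words following
 * the "bl transfer_to_handler" at the call site hold the handler
 * address and the address to continue at when the handler is done
 * (picked up via mflr at transfer_to_handler_cont below).
 */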
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG3
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
	lwz	r9,THREAD_INFO-THREAD(r12)
	cmplw	r1,r9			/* if r1 <= current->thread_info */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#ifdef CONFIG_6xx
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
#endif /* CONFIG_6xx */
	.globl	transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */

#ifdef CONFIG_6xx
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_6xx_restore
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

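/*
 * On entry r0 holds the system call number and r3-r8 hold its
 * arguments; the same values live in GPR0 and GPR3-GPR8 of the
 * exception frame (and are reloaded from there in syscall_dotrace).
 * The result is returned in r3, with the CR0 SO bit set on error.
 */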
_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	mr	r6,r3
	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	li	r8,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)	/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
syscall_exit_cont:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
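/*
 * On 44x, if an icache flush has been requested, invalidate the
 * icache before returning to user (done at label 2: below).
 */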
#ifdef CONFIG_44x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
1:
#endif /* CONFIG_44x */
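/*
 * On CPUs with CPU_FTR_NEED_PAIRED_STWCX the stwcx. used to clear
 * the reservation is preceded by a dummy lwarx to the same location.
 */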
BEGIN_FTR_SECTION
	lwarx	r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif /* CONFIG_44x */

66:	li	r3,-ENOSYS
	b	ret_from_syscall

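/*
 * A newly forked child resumes here (its initial context is set up
 * by copy_thread): finish the scheduler bookkeeping in schedule_tail,
 * then return 0 to the child via the normal syscall exit path.
 */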
	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	lwz	r0,GPR0(r1)	/* Restore original registers */
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)	/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set. */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)	/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1
	.text
#endif
#endif /* SHOW_SYSCALLS */

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c
 */
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	/* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR	/* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG3,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

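/*
 * Fast exception return path: restores r1, r3-r6, r9-r12, CR and LR
 * from the exception frame.  Expects r11 = exception frame pointer,
 * r9 = MSR to restore (goes into SRR1) and r12 = return address
 * (goes into SRR0).
 */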
	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NEED_RESCHED)
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info->preempt_count */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
1:	bl	preempt_schedule_irq
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif /* CONFIG_44x */
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
	lwarx	r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	lwz	r9,_MSR(r1)
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl	exc_exit_restart
exc_exit_restart:
	lwz	r9,_MSR(r1)
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl	exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl	exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl	exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						\
	/* avoid any possible TLB misses here by turning off MSR.DR, we	\
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						\
	mtmsr	r10;							\
	isync;								\
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

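/*
 * Restore the full register set and return from a critical, debug or
 * machine check exception, using that exception level's save/restore
 * SRR pair and return-from-interrupt instruction.
 */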
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)

#ifdef CONFIG_BOOKE
	.globl	ret_from_debug_exc
ret_from_debug_exc:
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

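/*
 * global_dbcr0 holds two words per CPU: the saved global DBCR0 value
 * (written by load_dbcr0 above) and a use count, which load_dbcr0
 * increments and the exception entry code in transfer_to_handler
 * decrements when it loads the global value back.
 */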
	.section .bss
	.align	4
global_dbcr0:
	.space	8*NR_CPUS
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	do_signal
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	mtspr	SPRN_SPRG2,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG2,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */