arch/powerpc/kernel/entry_32.S
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 * Adapted for Power Macintosh by Paul Mackerras.
7 * Low-level exception handlers and MMU support
8 * rewritten by Paul Mackerras.
9 * Copyright (C) 1996 Paul Mackerras.
10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 * This file contains the system call entry code, context switch
13 * code, and exception/interrupt return code for PowerPC.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <linux/errno.h>
23#include <linux/sys.h>
24#include <linux/threads.h>
25#include <asm/reg.h>
26#include <asm/page.h>
27#include <asm/mmu.h>
28#include <asm/cputable.h>
29#include <asm/thread_info.h>
30#include <asm/ppc_asm.h>
31#include <asm/asm-offsets.h>
32#include <asm/unistd.h>
33#include <asm/ftrace.h>
34#include <asm/ptrace.h>
35
36/*
37 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
38 */
39#if MSR_KERNEL >= 0x10000
40#define LOAD_MSR_KERNEL(r, x) lis r,(x)@h; ori r,r,(x)@l
41#else
42#define LOAD_MSR_KERNEL(r, x) li r,(x)
43#endif
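/*
 * For example, LOAD_MSR_KERNEL(r10,MSR_KERNEL) needs the lis/ori pair
 * whenever MSR_KERNEL has bits set above the low 16 bits (as on
 * 4xx/Book-E), since li can only materialize a sign-extended 16-bit
 * immediate.
 */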
44
45#ifdef CONFIG_BOOKE
46 .globl mcheck_transfer_to_handler
47mcheck_transfer_to_handler:
48 mfspr r0,SPRN_DSRR0
49 stw r0,_DSRR0(r11)
50 mfspr r0,SPRN_DSRR1
51 stw r0,_DSRR1(r11)
52 /* fall through */
53
54 .globl debug_transfer_to_handler
55debug_transfer_to_handler:
56 mfspr r0,SPRN_CSRR0
57 stw r0,_CSRR0(r11)
58 mfspr r0,SPRN_CSRR1
59 stw r0,_CSRR1(r11)
60 /* fall through */
61
62 .globl crit_transfer_to_handler
63crit_transfer_to_handler:
64#ifdef CONFIG_PPC_BOOK3E_MMU
65 mfspr r0,SPRN_MAS0
66 stw r0,MAS0(r11)
67 mfspr r0,SPRN_MAS1
68 stw r0,MAS1(r11)
69 mfspr r0,SPRN_MAS2
70 stw r0,MAS2(r11)
71 mfspr r0,SPRN_MAS3
72 stw r0,MAS3(r11)
73 mfspr r0,SPRN_MAS6
74 stw r0,MAS6(r11)
75#ifdef CONFIG_PHYS_64BIT
76 mfspr r0,SPRN_MAS7
77 stw r0,MAS7(r11)
78#endif /* CONFIG_PHYS_64BIT */
79#endif /* CONFIG_PPC_BOOK3E_MMU */
80#ifdef CONFIG_44x
81 mfspr r0,SPRN_MMUCR
82 stw r0,MMUCR(r11)
83#endif
84 mfspr r0,SPRN_SRR0
85 stw r0,_SRR0(r11)
86 mfspr r0,SPRN_SRR1
87 stw r0,_SRR1(r11)
88
89 /* set the stack limit to the current stack
90 * so that the limit protects the thread_info
91 * struct
92 */
93 mfspr r8,SPRN_SPRG_THREAD
94 lwz r0,KSP_LIMIT(r8)
95 stw r0,SAVED_KSP_LIMIT(r11)
96 rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
97 stw r0,KSP_LIMIT(r8)
98 /* fall through */
99#endif
100
101#ifdef CONFIG_40x
102 .globl crit_transfer_to_handler
103crit_transfer_to_handler:
104 lwz r0,crit_r10@l(0)
105 stw r0,GPR10(r11)
106 lwz r0,crit_r11@l(0)
107 stw r0,GPR11(r11)
108 mfspr r0,SPRN_SRR0
109 stw r0,crit_srr0@l(0)
110 mfspr r0,SPRN_SRR1
111 stw r0,crit_srr1@l(0)
112
113 /* set the stack limit to the current stack
114 * so that the limit protects the thread_info
115 * struct
116 */
117 mfspr r8,SPRN_SPRG_THREAD
118 lwz r0,KSP_LIMIT(r8)
119 stw r0,saved_ksp_limit@l(0)
120 rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
121 stw r0,KSP_LIMIT(r8)
122 /* fall through */
123#endif
124
125/*
126 * This code finishes saving the registers to the exception frame
127 * and jumps to the appropriate handler for the exception, turning
128 * on address translation.
129 * Note that we rely on the caller having set cr0.eq iff the exception
130 * occurred in kernel mode (i.e. MSR:PR = 0).
131 */
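/*
 * transfer_to_handler_full additionally saves the non-volatile GPRs
 * (r13-r31) into the exception frame before falling through to
 * transfer_to_handler.
 */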
132 .globl transfer_to_handler_full
133transfer_to_handler_full:
134 SAVE_NVGPRS(r11)
135 /* fall through */
136
137 .globl transfer_to_handler
138transfer_to_handler:
139 stw r2,GPR2(r11)
140 stw r12,_NIP(r11)
141 stw r9,_MSR(r11)
142 andi. r2,r9,MSR_PR
143 mfctr r12
144 mfspr r2,SPRN_XER
145 stw r12,_CTR(r11)
146 stw r2,_XER(r11)
147 mfspr r12,SPRN_SPRG_THREAD
148 addi r2,r12,-THREAD
149 tovirt(r2,r2) /* set r2 to current */
150 beq 2f /* if from user, fix up THREAD.regs */
151 addi r11,r1,STACK_FRAME_OVERHEAD
152 stw r11,PT_REGS(r12)
153#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
154 /* Check to see if the dbcr0 register is set up to debug. Use the
155 internal debug mode bit to do this. */
156 lwz r12,THREAD_DBCR0(r12)
157 andis. r12,r12,DBCR0_IDM@h
158 beq+ 3f
159 /* From user and task is ptraced - load up global dbcr0 */
160 li r12,-1 /* clear all pending debug events */
161 mtspr SPRN_DBSR,r12
162 lis r11,global_dbcr0@ha
163 tophys(r11,r11)
164 addi r11,r11,global_dbcr0@l
165#ifdef CONFIG_SMP
166 CURRENT_THREAD_INFO(r9, r1)
167 lwz r9,TI_CPU(r9)
168 slwi r9,r9,3
169 add r11,r11,r9
170#endif
171 lwz r12,0(r11)
172 mtspr SPRN_DBCR0,r12
173 lwz r12,4(r11)
174 addi r12,r12,-1
175 stw r12,4(r11)
176#endif
177 b 3f
178
1792: /* if from kernel, check interrupted DOZE/NAP mode and
180 * check for stack overflow
181 */
182 lwz r9,KSP_LIMIT(r12)
183 cmplw r1,r9 /* if r1 <= ksp_limit */
184 ble- stack_ovf /* then the kernel stack overflowed */
1855:
186#if defined(CONFIG_6xx) || defined(CONFIG_E500)
187 CURRENT_THREAD_INFO(r9, r1)
188 tophys(r9,r9) /* check local flags */
189 lwz r12,TI_LOCAL_FLAGS(r9)
190 mtcrf 0x01,r12
191 bt- 31-TLF_NAPPING,4f
192 bt- 31-TLF_SLEEPING,7f
193#endif /* CONFIG_6xx || CONFIG_E500 */
194 .globl transfer_to_handler_cont
195transfer_to_handler_cont:
1963:
197 mflr r9
198 lwz r11,0(r9) /* virtual address of handler */
199 lwz r9,4(r9) /* where to go when done */
200#ifdef CONFIG_TRACE_IRQFLAGS
201 lis r12,reenable_mmu@h
202 ori r12,r12,reenable_mmu@l
203 mtspr SPRN_SRR0,r12
204 mtspr SPRN_SRR1,r10
205 SYNC
206 RFI
207reenable_mmu: /* re-enable mmu so we can */
208 mfmsr r10
209 lwz r12,_MSR(r1)
210 xor r10,r10,r12
211 andi. r10,r10,MSR_EE /* Did EE change? */
212 beq 1f
213
214 /*
215 * trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
216 * If we came from user mode there is only one stack frame on the
217 * stack, and accessing CALLER_ADDR1 will cause an oops, so we
218 * create a dummy stack frame to make trace_hardirqs_off happy.
219 *
220 * This is handy because we also need to save a bunch of GPRs,
221 * r3 can be different from GPR3(r1) at this point, r9 and r11
222 * contain the old MSR and handler address respectively,
223 * r4 & r5 can contain page fault arguments that need to be passed
224 * along as well. r12, CCR, CTR, XER etc. are left clobbered as
225 * they aren't useful past this point (they aren't syscall arguments),
226 * and the rest is restored from the exception frame.
227 */
228 stwu r1,-32(r1)
229 stw r9,8(r1)
230 stw r11,12(r1)
231 stw r3,16(r1)
232 stw r4,20(r1)
233 stw r5,24(r1)
234 bl trace_hardirqs_off
235 lwz r5,24(r1)
236 lwz r4,20(r1)
237 lwz r3,16(r1)
238 lwz r11,12(r1)
239 lwz r9,8(r1)
240 addi r1,r1,32
241 lwz r0,GPR0(r1)
242 lwz r6,GPR6(r1)
243 lwz r7,GPR7(r1)
244 lwz r8,GPR8(r1)
2451: mtctr r11
246 mtlr r9
247 bctr /* jump to handler */
248#else /* CONFIG_TRACE_IRQFLAGS */
249 mtspr SPRN_SRR0,r11
250 mtspr SPRN_SRR1,r10
251 mtlr r9
252 SYNC
253 RFI /* jump to handler, enable MMU */
254#endif /* CONFIG_TRACE_IRQFLAGS */
255
256#if defined (CONFIG_6xx) || defined(CONFIG_E500)
2574: rlwinm r12,r12,0,~_TLF_NAPPING
258 stw r12,TI_LOCAL_FLAGS(r9)
259 b power_save_ppc32_restore
260
2617: rlwinm r12,r12,0,~_TLF_SLEEPING
262 stw r12,TI_LOCAL_FLAGS(r9)
263 lwz r9,_MSR(r11) /* if sleeping, clear MSR.EE */
264 rlwinm r9,r9,0,~MSR_EE
265 lwz r12,_LINK(r11) /* and return to address in LR */
266 b fast_exception_return
267#endif
268
269/*
270 * On kernel stack overflow, load up an initial stack pointer
271 * and call StackOverflow(regs), which should not return.
272 */
273stack_ovf:
274 /* sometimes we use a statically-allocated stack, which is OK. */
275 lis r12,_end@h
276 ori r12,r12,_end@l
277 cmplw r1,r12
278 ble 5b /* r1 <= &_end is OK */
279 SAVE_NVGPRS(r11)
280 addi r3,r1,STACK_FRAME_OVERHEAD
281 lis r1,init_thread_union@ha
282 addi r1,r1,init_thread_union@l
283 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
284 lis r9,StackOverflow@ha
285 addi r9,r9,StackOverflow@l
286 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
287 FIX_SRR1(r10,r12)
288 mtspr SPRN_SRR0,r9
289 mtspr SPRN_SRR1,r10
290 SYNC
291 RFI
292
293/*
294 * Handle a system call.
295 */
296 .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
297 .stabs "entry_32.S",N_SO,0,0,0f
2980:
299
300_GLOBAL(DoSyscall)
301 stw r3,ORIG_GPR3(r1)
302 li r12,0
303 stw r12,RESULT(r1)
304 lwz r11,_CCR(r1) /* Clear SO bit in CR */
305 rlwinm r11,r11,0,4,2
306 stw r11,_CCR(r1)
307#ifdef CONFIG_TRACE_IRQFLAGS
308 /* Returning from a syscall can (and generally will) hard enable
309 * interrupts. You aren't supposed to call a syscall with
310 * interrupts disabled in the first place. However, to ensure
311 * that we get it right vs. lockdep if it happens, we force
312 * that hard enable here with appropriate tracing if we see
313 * that we have been called with interrupts off
314 */
315 mfmsr r11
316 andi. r12,r11,MSR_EE
317 bne+ 1f
318 /* We came in with interrupts disabled, we enable them now */
319 bl trace_hardirqs_on
320 mfmsr r11
321 lwz r0,GPR0(r1)
322 lwz r3,GPR3(r1)
323 lwz r4,GPR4(r1)
324 ori r11,r11,MSR_EE
325 lwz r5,GPR5(r1)
326 lwz r6,GPR6(r1)
327 lwz r7,GPR7(r1)
328 lwz r8,GPR8(r1)
329 mtmsr r11
3301:
331#endif /* CONFIG_TRACE_IRQFLAGS */
332 CURRENT_THREAD_INFO(r10, r1)
333 lwz r11,TI_FLAGS(r10)
334 andi. r11,r11,_TIF_SYSCALL_DOTRACE
335 bne- syscall_dotrace
336syscall_dotrace_cont:
337 cmplwi 0,r0,NR_syscalls
338 lis r10,sys_call_table@h
339 ori r10,r10,sys_call_table@l
340 slwi r0,r0,2
341 bge- 66f
342 lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
343 mtlr r10
344 addi r9,r1,STACK_FRAME_OVERHEAD
345 PPC440EP_ERR42
346 blrl /* Call handler */
347 .globl ret_from_syscall
348ret_from_syscall:
349 mr r6,r3
350 CURRENT_THREAD_INFO(r12, r1)
351 /* disable interrupts so current_thread_info()->flags can't change */
352 LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
353 /* Note: We don't bother telling lockdep about it */
354 SYNC
355 MTMSRD(r10)
356 lwz r9,TI_FLAGS(r12)
357 li r8,-_LAST_ERRNO
358 andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
359 bne- syscall_exit_work
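	/* Return values in [-_LAST_ERRNO, -1] are errno returns; as an
	 * unsigned compare they are >= -_LAST_ERRNO, anything smaller
	 * is a plain success value. */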
360 cmplw 0,r3,r8
361 blt+ syscall_exit_cont
362 lwz r11,_CCR(r1) /* Load CR */
363 neg r3,r3
364 oris r11,r11,0x1000 /* Set SO bit in CR */
365 stw r11,_CCR(r1)
366syscall_exit_cont:
367 lwz r8,_MSR(r1)
368#ifdef CONFIG_TRACE_IRQFLAGS
369 /* If we are going to return from the syscall with interrupts
370 * off, we trace that here. It shouldn't happen, but we
371 * want to catch the bugger if it does.
372 */
373 andi. r10,r8,MSR_EE
374 bne+ 1f
375 stw r3,GPR3(r1)
376 bl trace_hardirqs_off
377 lwz r3,GPR3(r1)
3781:
379#endif /* CONFIG_TRACE_IRQFLAGS */
380#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
381 /* If the process has its own DBCR0 value, load it up. The internal
382 debug mode bit tells us that dbcr0 should be loaded. */
383 lwz r0,THREAD+THREAD_DBCR0(r2)
384 andis. r10,r0,DBCR0_IDM@h
385 bnel- load_dbcr0
386#endif
387#ifdef CONFIG_44x
388BEGIN_MMU_FTR_SECTION
389 lis r4,icache_44x_need_flush@ha
390 lwz r5,icache_44x_need_flush@l(r4)
391 cmplwi cr0,r5,0
392 bne- 2f
3931:
394END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
395#endif /* CONFIG_44x */
396BEGIN_FTR_SECTION
397 lwarx r7,0,r1
398END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
399 stwcx. r0,0,r1 /* to clear the reservation */
400 lwz r4,_LINK(r1)
401 lwz r5,_CCR(r1)
402 mtlr r4
403 mtcr r5
404 lwz r7,_NIP(r1)
405 FIX_SRR1(r8, r0)
406 lwz r2,GPR2(r1)
407 lwz r1,GPR1(r1)
408 mtspr SPRN_SRR0,r7
409 mtspr SPRN_SRR1,r8
410 SYNC
411 RFI
412#ifdef CONFIG_44x
4132: li r7,0
414 iccci r0,r0
415 stw r7,icache_44x_need_flush@l(r4)
416 b 1b
417#endif /* CONFIG_44x */
418
41966: li r3,-ENOSYS
420 b ret_from_syscall
421
422 .globl ret_from_fork
423ret_from_fork:
424 REST_NVGPRS(r1)
425 bl schedule_tail
426 li r3,0
427 b ret_from_syscall
428
429 .globl ret_from_kernel_thread
430ret_from_kernel_thread:
431 REST_NVGPRS(r1)
432 bl schedule_tail
433 mtlr r14
434 mr r3,r15
435 PPC440EP_ERR42
436 blrl
437 li r3,0
438 b ret_from_syscall
439
440/* Traced system call support */
441syscall_dotrace:
442 SAVE_NVGPRS(r1)
443 li r0,0xc00
444 stw r0,_TRAP(r1)
445 addi r3,r1,STACK_FRAME_OVERHEAD
446 bl do_syscall_trace_enter
447 /*
448 * Restore argument registers possibly just changed.
449 * We use the return value of do_syscall_trace_enter
450 * for call number to look up in the table (r0).
451 */
452 mr r0,r3
453 lwz r3,GPR3(r1)
454 lwz r4,GPR4(r1)
455 lwz r5,GPR5(r1)
456 lwz r6,GPR6(r1)
457 lwz r7,GPR7(r1)
458 lwz r8,GPR8(r1)
459 REST_NVGPRS(r1)
460 b syscall_dotrace_cont
461
462syscall_exit_work:
463 andi. r0,r9,_TIF_RESTOREALL
464 beq+ 0f
465 REST_NVGPRS(r1)
466 b 2f
4670: cmplw 0,r3,r8
468 blt+ 1f
469 andi. r0,r9,_TIF_NOERROR
470 bne- 1f
471 lwz r11,_CCR(r1) /* Load CR */
472 neg r3,r3
473 oris r11,r11,0x1000 /* Set SO bit in CR */
474 stw r11,_CCR(r1)
475
4761: stw r6,RESULT(r1) /* Save result */
477 stw r3,GPR3(r1) /* Update return value */
4782: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
479 beq 4f
480
481 /* Clear per-syscall TIF flags if any are set. */
482
483 li r11,_TIF_PERSYSCALL_MASK
484 addi r12,r12,TI_FLAGS
4853: lwarx r8,0,r12
486 andc r8,r8,r11
487#ifdef CONFIG_IBM405_ERR77
488 dcbt 0,r12
489#endif
490 stwcx. r8,0,r12
491 bne- 3b
492 subi r12,r12,TI_FLAGS
493
4944: /* Anything which requires enabling interrupts? */
495 andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
496 beq ret_from_except
497
498 /* Re-enable interrupts. There is no need to trace that with
499 * lockdep as we are supposed to have IRQs on at this point
500 */
501 ori r10,r10,MSR_EE
502 SYNC
503 MTMSRD(r10)
504
505 /* Save NVGPRS if they're not saved already */
506 lwz r4,_TRAP(r1)
507 andi. r4,r4,1
508 beq 5f
509 SAVE_NVGPRS(r1)
510 li r4,0xc00
511 stw r4,_TRAP(r1)
5125:
513 addi r3,r1,STACK_FRAME_OVERHEAD
514 bl do_syscall_trace_leave
515 b ret_from_except_full
516
517/*
518 * The fork/clone functions need to copy the full register set into
519 * the child process. Therefore we need to save all the nonvolatile
520 * registers (r13 - r31) before calling the C code.
521 */
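/*
 * Clearing the low bit of _TRAP records that the full register set
 * (including r13-r31) is now saved in the frame; paths such as
 * syscall_exit_work test this bit to decide whether they still need
 * to SAVE_NVGPRS.
 */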
522 .globl ppc_fork
523ppc_fork:
524 SAVE_NVGPRS(r1)
525 lwz r0,_TRAP(r1)
526 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
527 stw r0,_TRAP(r1) /* register set saved */
528 b sys_fork
529
530 .globl ppc_vfork
531ppc_vfork:
532 SAVE_NVGPRS(r1)
533 lwz r0,_TRAP(r1)
534 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
535 stw r0,_TRAP(r1) /* register set saved */
536 b sys_vfork
537
538 .globl ppc_clone
539ppc_clone:
540 SAVE_NVGPRS(r1)
541 lwz r0,_TRAP(r1)
542 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
543 stw r0,_TRAP(r1) /* register set saved */
544 b sys_clone
545
546 .globl ppc_swapcontext
547ppc_swapcontext:
548 SAVE_NVGPRS(r1)
549 lwz r0,_TRAP(r1)
550 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
551 stw r0,_TRAP(r1) /* register set saved */
552 b sys_swapcontext
553
554/*
555 * Top-level page fault handling.
556 * This is in assembler because if do_page_fault tells us that
557 * it is a bad kernel page fault, we want to save the non-volatile
558 * registers before calling bad_page_fault.
559 */
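/*
 * do_page_fault returns 0 when the fault has been handled; a non-zero
 * return value is the signal number for a bad kernel fault, which is
 * passed to bad_page_fault together with the faulting address saved
 * in _DAR.
 */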
560 .globl handle_page_fault
561handle_page_fault:
562 stw r4,_DAR(r1)
563 addi r3,r1,STACK_FRAME_OVERHEAD
564 bl do_page_fault
565 cmpwi r3,0
566 beq+ ret_from_except
567 SAVE_NVGPRS(r1)
568 lwz r0,_TRAP(r1)
569 clrrwi r0,r0,1
570 stw r0,_TRAP(r1)
571 mr r5,r3
572 addi r3,r1,STACK_FRAME_OVERHEAD
573 lwz r4,_DAR(r1)
574 bl bad_page_fault
575 b ret_from_except_full
576
577/*
578 * This routine switches between two different tasks. The process
579 * state of one is saved on its kernel stack. Then the state
580 * of the other is restored from its kernel stack. The memory
581 * management hardware is updated to the second process's state.
582 * Finally, we can return to the second process.
583 * On entry, r3 points to the THREAD for the current task, r4
584 * points to the THREAD for the new task.
585 *
586 * This routine is always called with interrupts disabled.
587 *
588 * Note: there are two ways to get to the "going out" portion
589 * of this code; either by coming in via the entry (_switch)
590 * or via "fork" which must set up an environment equivalent
591 * to the "_switch" path. If you change this, you'll have to
592 * change the fork code also.
593 *
594 * The code which creates the new task context is in 'copy_thread'
595 * in arch/ppc/kernel/process.c
596 */
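/*
 * In C terms, roughly: prev->ksp = r1; switch SPRG_THREAD and r2 over
 * to the new task; r1 = next->ksp; return prev (in r3).  FP/AltiVec/SPE
 * are left disabled in the MSR image saved for the outgoing task, so
 * that state can be restored lazily when it is next used.
 */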
597_GLOBAL(_switch)
598 stwu r1,-INT_FRAME_SIZE(r1)
599 mflr r0
600 stw r0,INT_FRAME_SIZE+4(r1)
601 /* r3-r12 are caller saved -- Cort */
602 SAVE_NVGPRS(r1)
603 stw r0,_NIP(r1) /* Return to switch caller */
604 mfmsr r11
605 li r0,MSR_FP /* Disable floating-point */
606#ifdef CONFIG_ALTIVEC
607BEGIN_FTR_SECTION
608 oris r0,r0,MSR_VEC@h /* Disable altivec */
609 mfspr r12,SPRN_VRSAVE /* save vrsave register value */
610 stw r12,THREAD+THREAD_VRSAVE(r2)
611END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
612#endif /* CONFIG_ALTIVEC */
613#ifdef CONFIG_SPE
614BEGIN_FTR_SECTION
615 oris r0,r0,MSR_SPE@h /* Disable SPE */
616 mfspr r12,SPRN_SPEFSCR /* save spefscr register value */
617 stw r12,THREAD+THREAD_SPEFSCR(r2)
618END_FTR_SECTION_IFSET(CPU_FTR_SPE)
619#endif /* CONFIG_SPE */
620 and. r0,r0,r11 /* FP or altivec or SPE enabled? */
621 beq+ 1f
622 andc r11,r11,r0
623 MTMSRD(r11)
624 isync
6251: stw r11,_MSR(r1)
626 mfcr r10
627 stw r10,_CCR(r1)
628 stw r1,KSP(r3) /* Set old stack pointer */
629
630#ifdef CONFIG_SMP
631 /* We need a sync somewhere here to make sure that if the
632 * previous task gets rescheduled on another CPU, it sees all
633 * stores it has performed on this one.
634 */
635 sync
636#endif /* CONFIG_SMP */
637
638 tophys(r0,r4)
639 CLR_TOP32(r0)
640 mtspr SPRN_SPRG_THREAD,r0 /* Update current THREAD phys addr */
641 lwz r1,KSP(r4) /* Load new stack pointer */
642
643 /* save the old current 'last' for return value */
644 mr r3,r2
645 addi r2,r4,-THREAD /* Update current */
646
647#ifdef CONFIG_ALTIVEC
648BEGIN_FTR_SECTION
649 lwz r0,THREAD+THREAD_VRSAVE(r2)
650 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
651END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
652#endif /* CONFIG_ALTIVEC */
653#ifdef CONFIG_SPE
654BEGIN_FTR_SECTION
655 lwz r0,THREAD+THREAD_SPEFSCR(r2)
656 mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
657END_FTR_SECTION_IFSET(CPU_FTR_SPE)
658#endif /* CONFIG_SPE */
659
660 lwz r0,_CCR(r1)
661 mtcrf 0xFF,r0
662 /* r3-r12 are destroyed -- Cort */
663 REST_NVGPRS(r1)
664
665 lwz r4,_NIP(r1) /* Return to _switch caller in new task */
666 mtlr r4
667 addi r1,r1,INT_FRAME_SIZE
668 blr
669
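/*
 * fast_exception_return restores only the registers the exception
 * prologue saved (CR, LR, r1, r3-r6, r9-r12) and returns with an RFI,
 * bypassing ret_from_except.  SRR1/SRR0 are taken from r9/r12, which
 * the caller has loaded with the saved MSR and return address.
 */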
670 .globl fast_exception_return
671fast_exception_return:
672#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
673 andi. r10,r9,MSR_RI /* check for recoverable interrupt */
674 beq 1f /* if not, we've got problems */
675#endif
676
6772: REST_4GPRS(3, r11)
678 lwz r10,_CCR(r11)
679 REST_GPR(1, r11)
680 mtcr r10
681 lwz r10,_LINK(r11)
682 mtlr r10
683 REST_GPR(10, r11)
684 mtspr SPRN_SRR1,r9
685 mtspr SPRN_SRR0,r12
686 REST_GPR(9, r11)
687 REST_GPR(12, r11)
688 lwz r11,GPR11(r11)
689 SYNC
690 RFI
691
692#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
693/* check if the exception happened in a restartable section */
6941: lis r3,exc_exit_restart_end@ha
695 addi r3,r3,exc_exit_restart_end@l
696 cmplw r12,r3
697 bge 3f
698 lis r4,exc_exit_restart@ha
699 addi r4,r4,exc_exit_restart@l
700 cmplw r12,r4
701 blt 3f
702 lis r3,fee_restarts@ha
703 tophys(r3,r3)
704 lwz r5,fee_restarts@l(r3)
705 addi r5,r5,1
706 stw r5,fee_restarts@l(r3)
707 mr r12,r4 /* restart at exc_exit_restart */
708 b 2b
709
710 .section .bss
711 .align 2
712fee_restarts:
713 .space 4
714 .previous
715
716/* aargh, a nonrecoverable interrupt, panic */
717/* aargh, we don't know which trap this is */
718/* but the 601 doesn't implement the RI bit, so assume it's OK */
7193:
720BEGIN_FTR_SECTION
721 b 2b
722END_FTR_SECTION_IFSET(CPU_FTR_601)
723 li r10,-1
724 stw r10,_TRAP(r11)
725 addi r3,r1,STACK_FRAME_OVERHEAD
726 lis r10,MSR_KERNEL@h
727 ori r10,r10,MSR_KERNEL@l
728 bl transfer_to_handler_full
729 .long nonrecoverable_exception
730 .long ret_from_except
731#endif
732
733 .globl ret_from_except_full
734ret_from_except_full:
735 REST_NVGPRS(r1)
736 /* fall through */
737
738 .globl ret_from_except
739ret_from_except:
740 /* Hard-disable interrupts so that current_thread_info()->flags
741 * can't change between when we test it and when we return
742 * from the interrupt. */
743 /* Note: We don't bother telling lockdep about it */
744 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
745 SYNC /* Some chip revs have problems here... */
746 MTMSRD(r10) /* disable interrupts */
747
748 lwz r3,_MSR(r1) /* Returning to user mode? */
749 andi. r0,r3,MSR_PR
750 beq resume_kernel
751
752user_exc_return: /* r10 contains MSR_KERNEL here */
753 /* Check current_thread_info()->flags */
754 CURRENT_THREAD_INFO(r9, r1)
755 lwz r9,TI_FLAGS(r9)
756 andi. r0,r9,_TIF_USER_WORK_MASK
757 bne do_work
758
759restore_user:
760#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
761 /* Check whether this process has its own DBCR0 value. The internal
762 debug mode bit tells us that dbcr0 should be loaded. */
763 lwz r0,THREAD+THREAD_DBCR0(r2)
764 andis. r10,r0,DBCR0_IDM@h
765 bnel- load_dbcr0
766#endif
767
768 b restore
769
770/* N.B. the only way to get here is from the beq following ret_from_except. */
771resume_kernel:
772 /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
773 CURRENT_THREAD_INFO(r9, r1)
774 lwz r8,TI_FLAGS(r9)
775 andis. r0,r8,_TIF_EMULATE_STACK_STORE@h
776 beq+ 1f
777
778 addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
779
780 lwz r3,GPR1(r1)
781 subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
782 mr r4,r1 /* src: current exception frame */
783 mr r1,r3 /* Reroute the trampoline frame to r1 */
784
785 /* Copy from the original to the trampoline. */
786 li r5,INT_FRAME_SIZE/4 /* size: INT_FRAME_SIZE */
787 li r6,0 /* start offset: 0 */
788 mtctr r5
7892: lwzx r0,r6,r4
790 stwx r0,r6,r3
791 addi r6,r6,4
792 bdnz 2b
793
794 /* Do real store operation to complete stwu */
795 lwz r5,GPR1(r1)
796 stw r8,0(r5)
797
798 /* Clear _TIF_EMULATE_STACK_STORE flag */
799 lis r11,_TIF_EMULATE_STACK_STORE@h
800 addi r5,r9,TI_FLAGS
8010: lwarx r8,0,r5
802 andc r8,r8,r11
803#ifdef CONFIG_IBM405_ERR77
804 dcbt 0,r5
805#endif
806 stwcx. r8,0,r5
807 bne- 0b
8081:
809
810#ifdef CONFIG_PREEMPT
811 /* check current_thread_info->preempt_count */
812 lwz r0,TI_PREEMPT(r9)
813 cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
814 bne restore
815 andi. r8,r8,_TIF_NEED_RESCHED
816 beq+ restore
817 lwz r3,_MSR(r1)
818 andi. r0,r3,MSR_EE /* interrupts off? */
819 beq restore /* don't schedule if so */
820#ifdef CONFIG_TRACE_IRQFLAGS
821 /* Lockdep thinks irqs are enabled, we need to call
822 * preempt_schedule_irq with IRQs off, so we inform lockdep
823 * now that we -did- turn them off already
824 */
825 bl trace_hardirqs_off
826#endif
8271: bl preempt_schedule_irq
828 CURRENT_THREAD_INFO(r9, r1)
829 lwz r3,TI_FLAGS(r9)
830 andi. r0,r3,_TIF_NEED_RESCHED
831 bne- 1b
832#ifdef CONFIG_TRACE_IRQFLAGS
833 /* And now, to properly rebalance the above, we tell lockdep they
834 * are being turned back on, which will happen when we return
835 */
836 bl trace_hardirqs_on
837#endif
838#endif /* CONFIG_PREEMPT */
839
840 /* interrupts are hard-disabled at this point */
841restore:
842#ifdef CONFIG_44x
843BEGIN_MMU_FTR_SECTION
844 b 1f
845END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
846 lis r4,icache_44x_need_flush@ha
847 lwz r5,icache_44x_need_flush@l(r4)
848 cmplwi cr0,r5,0
849 beq+ 1f
850 li r6,0
851 iccci r0,r0
852 stw r6,icache_44x_need_flush@l(r4)
8531:
854#endif /* CONFIG_44x */
855
856 lwz r9,_MSR(r1)
857#ifdef CONFIG_TRACE_IRQFLAGS
858 /* Lockdep doesn't know about the fact that IRQs are temporarily turned
859 * off in this assembly code while peeking at TI_FLAGS() and such. However
860 * we need to inform it if the exception turned interrupts off, and we
861 * are about to turn them back on.
862 *
863 * The problem here sadly is that we don't know whether the exception was
864 * one that turned interrupts off or not. So we always tell lockdep about
865 * turning them on here when we go back to wherever we came from with EE
866 * on, even if that may mean some redundant calls being tracked. Maybe later
867 * we could encode what the exception did somewhere or test the exception
868 * type in the pt_regs but that sounds overkill
869 */
870 andi. r10,r9,MSR_EE
871 beq 1f
872 /*
873 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
874 * which is the stack frame here, we need to force a stack frame
875 * in case we came from user space.
876 */
877 stwu r1,-32(r1)
878 mflr r0
879 stw r0,4(r1)
880 stwu r1,-32(r1)
881 bl trace_hardirqs_on
882 lwz r1,0(r1)
883 lwz r1,0(r1)
884 lwz r9,_MSR(r1)
8851:
886#endif /* CONFIG_TRACE_IRQFLAGS */
887
888 lwz r0,GPR0(r1)
889 lwz r2,GPR2(r1)
890 REST_4GPRS(3, r1)
891 REST_2GPRS(7, r1)
892
893 lwz r10,_XER(r1)
894 lwz r11,_CTR(r1)
895 mtspr SPRN_XER,r10
896 mtctr r11
897
898 PPC405_ERR77(0,r1)
899BEGIN_FTR_SECTION
900 lwarx r11,0,r1
901END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
902 stwcx. r0,0,r1 /* to clear the reservation */
903
904#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
905 andi. r10,r9,MSR_RI /* check if this exception occurred */
906 beql nonrecoverable /* at a bad place (MSR:RI = 0) */
907
908 lwz r10,_CCR(r1)
909 lwz r11,_LINK(r1)
910 mtcrf 0xFF,r10
911 mtlr r11
912
913 /*
914 * Once we put values in SRR0 and SRR1, we are in a state
915 * where exceptions are not recoverable, since taking an
916 * exception will trash SRR0 and SRR1. Therefore we clear the
917 * MSR:RI bit to indicate this. If we do take an exception,
918 * we can't return to the point of the exception but we
919 * can restart the exception exit path at the label
920 * exc_exit_restart below. -- paulus
921 */
922 LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
923 SYNC
924 MTMSRD(r10) /* clear the RI bit */
925 .globl exc_exit_restart
926exc_exit_restart:
927 lwz r12,_NIP(r1)
928 FIX_SRR1(r9,r10)
929 mtspr SPRN_SRR0,r12
930 mtspr SPRN_SRR1,r9
931 REST_4GPRS(9, r1)
932 lwz r1,GPR1(r1)
933 .globl exc_exit_restart_end
934exc_exit_restart_end:
935 SYNC
936 RFI
937
938#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
939 /*
940 * This is a bit different on 4xx/Book-E because it doesn't have
941 * the RI bit in the MSR.
942 * The TLB miss handler checks if we have interrupted
943 * the exception exit path and restarts it if so
944 * (well maybe one day it will... :).
945 */
946 lwz r11,_LINK(r1)
947 mtlr r11
948 lwz r10,_CCR(r1)
949 mtcrf 0xff,r10
950 REST_2GPRS(9, r1)
951 .globl exc_exit_restart
952exc_exit_restart:
953 lwz r11,_NIP(r1)
954 lwz r12,_MSR(r1)
955exc_exit_start:
956 mtspr SPRN_SRR0,r11
957 mtspr SPRN_SRR1,r12
958 REST_2GPRS(11, r1)
959 lwz r1,GPR1(r1)
960 .globl exc_exit_restart_end
961exc_exit_restart_end:
962 PPC405_ERR77_SYNC
963 rfi
964 b . /* prevent prefetch past rfi */
965
966/*
967 * Returning from a critical interrupt in user mode doesn't need
968 * to be any different from a normal exception. For a critical
969 * interrupt in the kernel, we just return (without checking for
970 * preemption) since the interrupt may have happened at some crucial
971 * place (e.g. inside the TLB miss handler), and because we will be
972 * running with r1 pointing into critical_stack, not the current
973 * process's kernel stack (and therefore current_thread_info() will
974 * give the wrong answer).
975 * We have to restore various SPRs that may have been in use at the
976 * time of the critical interrupt.
977 *
978 */
979#ifdef CONFIG_40x
980#define PPC_40x_TURN_OFF_MSR_DR \
981 /* avoid any possible TLB misses here by turning off MSR.DR, we \
982 * assume the instructions here are mapped by a pinned TLB entry */ \
983 li r10,MSR_IR; \
984 mtmsr r10; \
985 isync; \
986 tophys(r1, r1);
987#else
988#define PPC_40x_TURN_OFF_MSR_DR
989#endif
990
991#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi) \
992 REST_NVGPRS(r1); \
993 lwz r3,_MSR(r1); \
994 andi. r3,r3,MSR_PR; \
995 LOAD_MSR_KERNEL(r10,MSR_KERNEL); \
996 bne user_exc_return; \
997 lwz r0,GPR0(r1); \
998 lwz r2,GPR2(r1); \
999 REST_4GPRS(3, r1); \
1000 REST_2GPRS(7, r1); \
1001 lwz r10,_XER(r1); \
1002 lwz r11,_CTR(r1); \
1003 mtspr SPRN_XER,r10; \
1004 mtctr r11; \
1005 PPC405_ERR77(0,r1); \
1006 stwcx. r0,0,r1; /* to clear the reservation */ \
1007 lwz r11,_LINK(r1); \
1008 mtlr r11; \
1009 lwz r10,_CCR(r1); \
1010 mtcrf 0xff,r10; \
1011 PPC_40x_TURN_OFF_MSR_DR; \
1012 lwz r9,_DEAR(r1); \
1013 lwz r10,_ESR(r1); \
1014 mtspr SPRN_DEAR,r9; \
1015 mtspr SPRN_ESR,r10; \
1016 lwz r11,_NIP(r1); \
1017 lwz r12,_MSR(r1); \
1018 mtspr exc_lvl_srr0,r11; \
1019 mtspr exc_lvl_srr1,r12; \
1020 lwz r9,GPR9(r1); \
1021 lwz r12,GPR12(r1); \
1022 lwz r10,GPR10(r1); \
1023 lwz r11,GPR11(r1); \
1024 lwz r1,GPR1(r1); \
1025 PPC405_ERR77_SYNC; \
1026 exc_lvl_rfi; \
1027 b .; /* prevent prefetch past exc_lvl_rfi */
1028
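/*
 * Each exception-level return below plugs its own save/restore SPRs and
 * return instruction into this macro, e.g. ret_from_crit_exc uses
 * RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI) to return via rfci.
 */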
1029#define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1) \
1030 lwz r9,_##exc_lvl_srr0(r1); \
1031 lwz r10,_##exc_lvl_srr1(r1); \
1032 mtspr SPRN_##exc_lvl_srr0,r9; \
1033 mtspr SPRN_##exc_lvl_srr1,r10;
1034
1035#if defined(CONFIG_PPC_BOOK3E_MMU)
1036#ifdef CONFIG_PHYS_64BIT
1037#define RESTORE_MAS7 \
1038 lwz r11,MAS7(r1); \
1039 mtspr SPRN_MAS7,r11;
1040#else
1041#define RESTORE_MAS7
1042#endif /* CONFIG_PHYS_64BIT */
1043#define RESTORE_MMU_REGS \
1044 lwz r9,MAS0(r1); \
1045 lwz r10,MAS1(r1); \
1046 lwz r11,MAS2(r1); \
1047 mtspr SPRN_MAS0,r9; \
1048 lwz r9,MAS3(r1); \
1049 mtspr SPRN_MAS1,r10; \
1050 lwz r10,MAS6(r1); \
1051 mtspr SPRN_MAS2,r11; \
1052 mtspr SPRN_MAS3,r9; \
1053 mtspr SPRN_MAS6,r10; \
1054 RESTORE_MAS7;
1055#elif defined(CONFIG_44x)
1056#define RESTORE_MMU_REGS \
1057 lwz r9,MMUCR(r1); \
1058 mtspr SPRN_MMUCR,r9;
1059#else
1060#define RESTORE_MMU_REGS
1061#endif
1062
1063#ifdef CONFIG_40x
1064 .globl ret_from_crit_exc
1065ret_from_crit_exc:
1066 mfspr r9,SPRN_SPRG_THREAD
1067 lis r10,saved_ksp_limit@ha;
1068 lwz r10,saved_ksp_limit@l(r10);
1069 tovirt(r9,r9);
1070 stw r10,KSP_LIMIT(r9)
1071 lis r9,crit_srr0@ha;
1072 lwz r9,crit_srr0@l(r9);
1073 lis r10,crit_srr1@ha;
1074 lwz r10,crit_srr1@l(r10);
1075 mtspr SPRN_SRR0,r9;
1076 mtspr SPRN_SRR1,r10;
1077 RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1078#endif /* CONFIG_40x */
1079
1080#ifdef CONFIG_BOOKE
1081 .globl ret_from_crit_exc
1082ret_from_crit_exc:
1083 mfspr r9,SPRN_SPRG_THREAD
1084 lwz r10,SAVED_KSP_LIMIT(r1)
1085 stw r10,KSP_LIMIT(r9)
1086 RESTORE_xSRR(SRR0,SRR1);
1087 RESTORE_MMU_REGS;
1088 RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1089
1090 .globl ret_from_debug_exc
1091ret_from_debug_exc:
1092 mfspr r9,SPRN_SPRG_THREAD
1093 lwz r10,SAVED_KSP_LIMIT(r1)
1094 stw r10,KSP_LIMIT(r9)
1095 lwz r9,THREAD_INFO-THREAD(r9)
1096 CURRENT_THREAD_INFO(r10, r1)
1097 lwz r10,TI_PREEMPT(r10)
1098 stw r10,TI_PREEMPT(r9)
1099 RESTORE_xSRR(SRR0,SRR1);
1100 RESTORE_xSRR(CSRR0,CSRR1);
1101 RESTORE_MMU_REGS;
1102 RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
1103
1104 .globl ret_from_mcheck_exc
1105ret_from_mcheck_exc:
1106 mfspr r9,SPRN_SPRG_THREAD
1107 lwz r10,SAVED_KSP_LIMIT(r1)
1108 stw r10,KSP_LIMIT(r9)
1109 RESTORE_xSRR(SRR0,SRR1);
1110 RESTORE_xSRR(CSRR0,CSRR1);
1111 RESTORE_xSRR(DSRR0,DSRR1);
1112 RESTORE_MMU_REGS;
1113 RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
1114#endif /* CONFIG_BOOKE */
1115
1116/*
1117 * Load the DBCR0 value for a task that is being ptraced,
1118 * having first saved away the global DBCR0. Note that r0
1119 * has the dbcr0 value to set upon entry to this.
1120 */
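/*
 * global_dbcr0 (declared below) holds two words per CPU: word 0 keeps
 * the DBCR0 value saved here, word 1 is a usage count incremented here
 * and decremented on the exception entry path in transfer_to_handler.
 */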
1121load_dbcr0:
1122 mfmsr r10 /* first disable debug exceptions */
1123 rlwinm r10,r10,0,~MSR_DE
1124 mtmsr r10
1125 isync
1126 mfspr r10,SPRN_DBCR0
1127 lis r11,global_dbcr0@ha
1128 addi r11,r11,global_dbcr0@l
1129#ifdef CONFIG_SMP
1130 CURRENT_THREAD_INFO(r9, r1)
1131 lwz r9,TI_CPU(r9)
1132 slwi r9,r9,3
1133 add r11,r11,r9
1134#endif
1135 stw r10,0(r11)
1136 mtspr SPRN_DBCR0,r0
1137 lwz r10,4(r11)
1138 addi r10,r10,1
1139 stw r10,4(r11)
1140 li r11,-1
1141 mtspr SPRN_DBSR,r11 /* clear all pending debug events */
1142 blr
1143
1144 .section .bss
1145 .align 4
1146global_dbcr0:
1147 .space 8*NR_CPUS
1148 .previous
1149#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1150
1151do_work: /* r10 contains MSR_KERNEL here */
1152 andi. r0,r9,_TIF_NEED_RESCHED
1153 beq do_user_signal
1154
1155do_resched: /* r10 contains MSR_KERNEL here */
1156 /* Note: We don't need to inform lockdep that we are enabling
1157 * interrupts here. As far as it knows, they are already enabled
1158 */
1159 ori r10,r10,MSR_EE
1160 SYNC
1161 MTMSRD(r10) /* hard-enable interrupts */
1162 bl schedule
1163recheck:
1164 /* Note: And we don't tell it we are disabling them again
1165 * either. Those disable/enable cycles used to peek at
1166 * TI_FLAGS aren't advertised.
1167 */
1168 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
1169 SYNC
1170 MTMSRD(r10) /* disable interrupts */
1171 CURRENT_THREAD_INFO(r9, r1)
1172 lwz r9,TI_FLAGS(r9)
1173 andi. r0,r9,_TIF_NEED_RESCHED
1174 bne- do_resched
1175 andi. r0,r9,_TIF_USER_WORK_MASK
1176 beq restore_user
1177do_user_signal: /* r10 contains MSR_KERNEL here */
1178 ori r10,r10,MSR_EE
1179 SYNC
1180 MTMSRD(r10) /* hard-enable interrupts */
1181 /* save r13-r31 in the exception frame, if not already done */
1182 lwz r3,_TRAP(r1)
1183 andi. r0,r3,1
1184 beq 2f
1185 SAVE_NVGPRS(r1)
1186 rlwinm r3,r3,0,0,30
1187 stw r3,_TRAP(r1)
11882: addi r3,r1,STACK_FRAME_OVERHEAD
1189 mr r4,r9
1190 bl do_notify_resume
1191 REST_NVGPRS(r1)
1192 b recheck
1193
1194/*
1195 * We come here when we are at the end of handling an exception
1196 * that occurred at a place where taking an exception will lose
1197 * state information, such as the contents of SRR0 and SRR1.
1198 */
1199nonrecoverable:
1200 lis r10,exc_exit_restart_end@ha
1201 addi r10,r10,exc_exit_restart_end@l
1202 cmplw r12,r10
1203 bge 3f
1204 lis r11,exc_exit_restart@ha
1205 addi r11,r11,exc_exit_restart@l
1206 cmplw r12,r11
1207 blt 3f
1208 lis r10,ee_restarts@ha
1209 lwz r12,ee_restarts@l(r10)
1210 addi r12,r12,1
1211 stw r12,ee_restarts@l(r10)
1212 mr r12,r11 /* restart at exc_exit_restart */
1213 blr
12143: /* OK, we can't recover, kill this process */
1215 /* but the 601 doesn't implement the RI bit, so assume it's OK */
1216BEGIN_FTR_SECTION
1217 blr
1218END_FTR_SECTION_IFSET(CPU_FTR_601)
1219 lwz r3,_TRAP(r1)
1220 andi. r0,r3,1
1221 beq 4f
1222 SAVE_NVGPRS(r1)
1223 rlwinm r3,r3,0,0,30
1224 stw r3,_TRAP(r1)
12254: addi r3,r1,STACK_FRAME_OVERHEAD
1226 bl nonrecoverable_exception
1227 /* shouldn't return */
1228 b 4b
1229
1230 .section .bss
1231 .align 2
1232ee_restarts:
1233 .space 4
1234 .previous
1235
1236/*
1237 * PROM code for specific machines follows. Put it
1238 * here so it's easy to add arch-specific sections later.
1239 * -- Cort
1240 */
1241#ifdef CONFIG_PPC_RTAS
1242/*
1243 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1244 * called with the MMU off.
1245 */
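/*
 * enter_rtas saves the caller's MSR on the stack, stashes the physical
 * stack pointer in SPRG_RTAS (non-zero means we are inside RTAS, cf.
 * machine_check_in_rtas below), and RFIs to the RTAS entry point with
 * MSR_IR/MSR_DR cleared; label 1: below is the real-mode return point.
 */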
1246_GLOBAL(enter_rtas)
1247 stwu r1,-INT_FRAME_SIZE(r1)
1248 mflr r0
1249 stw r0,INT_FRAME_SIZE+4(r1)
1250 LOAD_REG_ADDR(r4, rtas)
1251 lis r6,1f@ha /* physical return address for rtas */
1252 addi r6,r6,1f@l
1253 tophys(r6,r6)
1254 tophys(r7,r1)
1255 lwz r8,RTASENTRY(r4)
1256 lwz r4,RTASBASE(r4)
1257 mfmsr r9
1258 stw r9,8(r1)
1259 LOAD_MSR_KERNEL(r0,MSR_KERNEL)
1260 SYNC /* disable interrupts so SRR0/1 */
1261 MTMSRD(r0) /* don't get trashed */
1262 li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1263 mtlr r6
1264 mtspr SPRN_SPRG_RTAS,r7
1265 mtspr SPRN_SRR0,r8
1266 mtspr SPRN_SRR1,r9
1267 RFI
12681: tophys(r9,r1)
1269 lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
1270 lwz r9,8(r9) /* original msr value */
1271 FIX_SRR1(r9,r0)
1272 addi r1,r1,INT_FRAME_SIZE
1273 li r0,0
1274 mtspr SPRN_SPRG_RTAS,r0
1275 mtspr SPRN_SRR0,r8
1276 mtspr SPRN_SRR1,r9
1277 RFI /* return to caller */
1278
1279 .globl machine_check_in_rtas
1280machine_check_in_rtas:
1281 twi 31,0,0
1282 /* XXX load up BATs and panic */
1283
1284#endif /* CONFIG_PPC_RTAS */
1285
1286#ifdef CONFIG_FUNCTION_TRACER
1287#ifdef CONFIG_DYNAMIC_FTRACE
1288_GLOBAL(mcount)
1289_GLOBAL(_mcount)
1290 /*
1291 * It is required that _mcount on PPC32 preserve the
1292 * link register. But we have r0 to play with. We use r0
1293 * to move the address we must return to (in mcount's caller)
1294 * into the ctr register, restore the link register and
1295 * then jump back using the ctr register.
1296 */
1297 mflr r0
1298 mtctr r0
1299 lwz r0, 4(r1)
1300 mtlr r0
1301 bctr
1302
1303_GLOBAL(ftrace_caller)
1304 MCOUNT_SAVE_FRAME
1305 /* r3 ends up with link register */
1306 subi r3, r3, MCOUNT_INSN_SIZE
1307.globl ftrace_call
1308ftrace_call:
1309 bl ftrace_stub
1310 nop
1311#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1312.globl ftrace_graph_call
1313ftrace_graph_call:
1314 b ftrace_graph_stub
1315_GLOBAL(ftrace_graph_stub)
1316#endif
1317 MCOUNT_RESTORE_FRAME
1318 /* old link register ends up in ctr reg */
1319 bctr
1320#else
1321_GLOBAL(mcount)
1322_GLOBAL(_mcount)
1323
1324 MCOUNT_SAVE_FRAME
1325
1326 subi r3, r3, MCOUNT_INSN_SIZE
1327 LOAD_REG_ADDR(r5, ftrace_trace_function)
1328 lwz r5,0(r5)
1329
1330 mtctr r5
1331 bctrl
1332 nop
1333
1334#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1335 b ftrace_graph_caller
1336#endif
1337 MCOUNT_RESTORE_FRAME
1338 bctr
1339#endif
1340
1341_GLOBAL(ftrace_stub)
1342 blr
1343
1344#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1345_GLOBAL(ftrace_graph_caller)
1346 /* load r4 with local address */
1347 lwz r4, 44(r1)
1348 subi r4, r4, MCOUNT_INSN_SIZE
1349
1350 /* Grab the LR out of the caller stack frame */
1351 lwz r3,52(r1)
1352
1353 bl prepare_ftrace_return
1354 nop
1355
b3c18725
AB
1356 /*
1357 * prepare_ftrace_return gives us the address we divert to.
1358 * Change the LR in the callers stack frame to this.
1359 */
1360 stw r3,52(r1)
1361
1362 MCOUNT_RESTORE_FRAME
1363 /* old link register ends up in ctr reg */
1364 bctr
1365
1366_GLOBAL(return_to_handler)
1367 /* need to save return values */
1368 stwu r1, -32(r1)
1369 stw r3, 20(r1)
1370 stw r4, 16(r1)
1371 stw r31, 12(r1)
1372 mr r31, r1
1373
1374 bl ftrace_return_to_handler
1375 nop
1376
1377 /* return value has real return address */
1378 mtlr r3
1379
1380 lwz r3, 20(r1)
1381 lwz r4, 16(r1)
1382 lwz r31,12(r1)
1383 lwz r1, 0(r1)
1384
1385 /* Jump back to real return address */
1386 blr
1387#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1388
1389#endif /* CONFIG_FUNCTION_TRACER */