/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/asm-405.h>
#include <asm/feature-fixups.h>
#include <asm/barrier.h>
#include <asm/kup.h>
#include <asm/bug.h>

#include "head_32.h"

/*
 * Align to 4k to ensure that all functions modifying srr0/srr1 fit into
 * one page, so that we never take a TLB miss between the modification
 * of srr0/srr1 and the associated rfi.
 */
	.align	12

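/*
 * The hazard being avoided looks roughly like this (illustrative only,
 * not executed):
 *
 *	mtspr	SPRN_SRR0,rN	# set return address
 *	mtspr	SPRN_SRR1,rM	# set return MSR
 *	<-- an instruction TLB miss here would itself clobber SRR0/SRR1 -->
 *	rfi
 *
 * Keeping every such sequence within one page means instruction fetch
 * cannot take a TLB miss between writing srr0/srr1 and the rfi.
 */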
#ifdef CONFIG_BOOKE
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	mfspr	r0,SPRN_DSRR0
	stw	r0,_DSRR0(r11)
	mfspr	r0,SPRN_DSRR1
	stw	r0,_DSRR1(r11)
	/* fall through */

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	mfspr	r0,SPRN_CSRR0
	stw	r0,_CSRR0(r11)
	mfspr	r0,SPRN_CSRR1
	stw	r0,_CSRR1(r11)
	/* fall through */

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_PPC_BOOK3E_MMU
	mfspr	r0,SPRN_MAS0
	stw	r0,MAS0(r11)
	mfspr	r0,SPRN_MAS1
	stw	r0,MAS1(r11)
	mfspr	r0,SPRN_MAS2
	stw	r0,MAS2(r11)
	mfspr	r0,SPRN_MAS3
	stw	r0,MAS3(r11)
	mfspr	r0,SPRN_MAS6
	stw	r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
	mfspr	r0,SPRN_MAS7
	stw	r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
	mfspr	r0,SPRN_MMUCR
	stw	r0,MMUCR(r11)
#endif
	mfspr	r0,SPRN_SRR0
	stw	r0,_SRR0(r11)
	mfspr	r0,SPRN_SRR1
	stw	r0,_SRR1(r11)

	/* set the stack limit to the current stack */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,SAVED_KSP_LIMIT(r11)
	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	mfspr	r0,SPRN_SRR0
	stw	r0,crit_srr0@l(0)
	mfspr	r0,SPRN_SRR1
	stw	r0,crit_srr1@l(0)

	/* set the stack limit to the current stack */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,saved_ksp_limit@l(0)
	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif
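/*
 * Note on the 40x variant above: crit_r10/crit_r11 and crit_srr0/crit_srr1
 * are fixed low-memory save slots, hence the @l(0) addressing against a
 * zero base register. The 40x critical exception prolog (in head_40x.S)
 * runs with translation off and stashes r10/r11 there before branching
 * here, since it has no spare SPRG scratch register for this purpose.
 */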

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG_THREAD
	tovirt_vmstack r12, r12
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r2, r12, -THREAD
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.
	   Use the internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
#endif
	ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
#ifdef CONFIG_PPC_BOOK3S_32
	kuep_lock r11, r12
#endif
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	lwz	r9,TASK_CPU(r2)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif

	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
	kuap_save_and_lock r11, r12, r9, r2, r6
	addi	r2, r12, -THREAD
#ifndef CONFIG_VMAP_STACK
	lwz	r9,KSP_LIMIT(r12)
	cmplw	r1,r9			/* if r1 <= ksp_limit */
	ble-	stack_ovf		/* then the kernel stack overflowed */
#endif
5:
#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
	lwz	r12,TI_LOCAL_FLAGS(r2)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
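/*
 * TLF_NAPPING/TLF_SLEEPING are set by the 32-bit idle/powersave code
 * before it naps or sleeps; if this exception interrupted that state,
 * the fixups at labels 4: and 7: further down redirect the return so
 * the idle loop resumes cleanly instead of returning into the nap.
 */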
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	tovirt_novmstack r2, r2		/* set r2 to current */
	tovirt_vmstack r9, r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * When tracing IRQ state (lockdep) we enable the MMU before we call
	 * the IRQ tracing functions as they might access vmalloc space or
	 * perform IOs for console output.
	 *
	 * To speed up the syscall path where interrupts stay on, let's check
	 * first if we are changing the MSR value at all.
	 */
	tophys_novmstack r12, r1
	lwz	r12,_MSR(r12)
	andi.	r12,r12,MSR_EE
	bne	1f

	/* MSR isn't changing, just transition directly */
#endif
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */

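/*
 * Reminder: RFI atomically copies SRR1 into the MSR and resumes at the
 * address in SRR0, so the mtspr pair above both turns translation back
 * on and jumps to the (virtual) handler address in a single operation.
 */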
#ifdef CONFIG_TRACE_IRQFLAGS
1:	/* MSR is changing, re-enable MMU so we can notify lockdep. We need to
	 * keep interrupts disabled at this point otherwise we might risk
	 * taking an interrupt before we tell lockdep they are enabled.
	 */
	lis	r12,reenable_mmu@h
	ori	r12,r12,reenable_mmu@l
	LOAD_REG_IMMEDIATE(r0, MSR_KERNEL)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r0
	SYNC
	RFI

reenable_mmu:
	/*
	 * We save a bunch of GPRs here:
	 * r3 can be different from GPR3(r1) at this point, r9 and r11
	 * contain the old MSR and handler address respectively,
	 * r4 & r5 can contain page fault arguments that need to be passed
	 * along as well. r12, CCR, CTR, XER etc... are left clobbered as
	 * they aren't useful past this point (they aren't syscall arguments),
	 * the rest is restored from the exception frame.
	 */

	stwu	r1,-32(r1)
	stw	r9,8(r1)
	stw	r11,12(r1)
	stw	r3,16(r1)
	stw	r4,20(r1)
	stw	r5,24(r1)

	/* If we are disabling interrupts (normal case), simply log it with
	 * lockdep
	 */
1:	bl	trace_hardirqs_off
2:	lwz	r5,24(r1)
	lwz	r4,20(r1)
	lwz	r3,16(r1)
	lwz	r11,12(r1)
	lwz	r9,8(r1)
	addi	r1,r1,32
	lwz	r0,GPR0(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtctr	r11
	mtlr	r9
	bctr				/* jump to handler */
#endif /* CONFIG_TRACE_IRQFLAGS */


#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	kuap_restore r11, r2, r3, r4, r5
	lwz	r2, GPR2(r11)
	b	fast_exception_return
#endif

#ifndef CONFIG_VMAP_STACK
/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI
#endif
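/*
 * Note: the KSP_LIMIT-based overflow check above only exists for the
 * non-VMAP_STACK case. With CONFIG_VMAP_STACK, kernel stacks live in
 * vmalloc space with an unmapped guard page below them, so an overflow
 * faults immediately instead of silently scribbling over adjacent
 * memory, and this explicit check becomes unnecessary.
 */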

#ifdef CONFIG_TRACE_IRQFLAGS
trace_syscall_entry_irq_off:
	/*
	 * Syscall shouldn't happen while interrupts are disabled,
	 * so let's do a warning here.
	 */
0:	trap
	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
	bl	trace_hardirqs_on

	/* Now enable for real */
	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
	mtmsr	r10

	REST_GPR(0, r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)
	b	DoSyscall
#endif /* CONFIG_TRACE_IRQFLAGS */

	.globl	transfer_to_syscall
transfer_to_syscall:
#ifdef CONFIG_TRACE_IRQFLAGS
	andi.	r12,r9,MSR_EE
	beq-	trace_syscall_entry_irq_off
#endif /* CONFIG_TRACE_IRQFLAGS */

/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Make sure interrupts are enabled */
	mfmsr	r11
	andi.	r12,r11,MSR_EE
	/* We came in with interrupts disabled, so WARN and mark them
	 * enabled for lockdep now */
0:	tweqi	r12, 0
	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
#endif /* CONFIG_TRACE_IRQFLAGS */
	lwz	r11,TI_FLAGS(r2)
	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f

	barrier_nospec_asm
	/*
	 * Prevent the load of the handler below (based on the user-passed
	 * system call number) being speculatively executed until the test
	 * against NR_syscalls and branch to 66f above has
	 * committed.
	 */

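/*
 * The sequence above is the classic Spectre-v1 mitigation shape:
 *
 *	cmplwi	0,r0,NR_syscalls	# bounds-check the index
 *	bge-	66f			# reject out-of-range numbers
 *	barrier_nospec_asm		# fence speculation past the check
 *	lwzx	r10,r10,r0		# only then do the dependent load
 *
 * Without the barrier, a mispredicted bge- could speculatively load an
 * attacker-controlled table entry.
 */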
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef CONFIG_DEBUG_RSEQ
	/* Check whether the syscall is issued inside a restartable sequence */
	stw	r3,GPR3(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	rseq_syscall
	lwz	r3,GPR3(r1)
#endif
	mr	r6,r3
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	/* Note: We don't bother telling lockdep about it */
	SYNC
	mtmsr	r10
	lwz	r9,TI_FLAGS(r2)
	li	r8,-MAX_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)	/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
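/*
 * Error return convention (as implemented above): a handler result in
 * [-MAX_ERRNO, -1] is treated as an error, so it is negated to a
 * positive errno and CR0.SO is set (the oris with 0x1000 hits the SO
 * bit of CR field 0 in the saved CR image); userspace distinguishes
 * errors by testing SO rather than by the value range alone.
 */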
syscall_exit_cont:
	lwz	r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* If we are going to return from the syscall with interrupts
	 * off, we trace that here. It shouldn't normally happen.
	 */
	andi.	r10,r8,MSR_EE
	bne+	1f
	stw	r3,GPR3(r1)
	bl	trace_hardirqs_off
	lwz	r3,GPR3(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
1:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
	lwarx	r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
	ACCOUNT_CPU_USER_EXIT(r2, r5, r7)
#ifdef CONFIG_PPC_BOOK3S_32
	kuep_unlock r5, r7
#endif
	kuap_check r2, r4
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif  /* CONFIG_44x */

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

	.globl	ret_from_kernel_thread
ret_from_kernel_thread:
	REST_NVGPRS(r1)
	bl	schedule_tail
	mtlr	r14
	mr	r3,r15
	PPC440EP_ERR42
	blrl
	li	r3,0
	b	ret_from_syscall
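/*
 * For ret_from_kernel_thread: copy_thread() stores the thread function
 * in the child's gpr[14] slot and its argument in gpr[15], so after
 * REST_NVGPRS above, mtlr r14 / mr r3,r15 / blrl calls function(arg).
 * If the function ever returns, we fall through to the syscall exit
 * path with a result of 0.
 */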

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for call number to look up in the table (r0).
	 */
	mr	r0,r3
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)

	cmplwi	r0,NR_syscalls
	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
	bge-	ret_from_syscall
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000			/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r2,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts. There is no need to trace that with
	 * lockdep as we are supposed to have IRQs on at this point
	 */
	ori	r10,r10,MSR_EE
	SYNC
	mtmsr	r10

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
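/*
 * Convention used throughout this file: the low bit of _TRAP(r1) is
 * set while the nonvolatile GPRs have NOT been saved in the exception
 * frame. The "rlwinm rN,rN,0,0,30" sequences below clear that bit to
 * record that a full register set is now present.
 */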
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_clone3
ppc_clone3:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone3

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_BOOK3S_32
	andis.	r0,r5,DSISR_DABRMATCH@h
	bne-	handle_dabr_fault
#endif
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

#ifdef CONFIG_PPC_BOOK3S_32
	/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	bl	do_break
	b	ret_from_except_full
#endif

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	mtmsr	r11
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

	kuap_check r2, r4
#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r11)
	REST_GPR(10, r11)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
#ifdef CONFIG_PPC_BOOK3S_601
	bge	2b
#else
	bge	3f
#endif
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
#ifdef CONFIG_PPC_BOOK3S_601
	blt	2b
#else
	blt	3f
#endif
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	unrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	/* Note: We don't bother telling lockdep about it */
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r10		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	lwz	r9,TI_FLAGS(r2)
	andi.	r0,r9,_TIF_USER_WORK_MASK
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
	ACCOUNT_CPU_USER_EXIT(r2, r10, r11)
#ifdef CONFIG_PPC_BOOK3S_32
	kuep_unlock	r10, r11
#endif

	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	lwz	r8,TI_FLAGS(r2)
	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	lwz	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	addi	r6,r6,4
	bdnz	2b

	/* Do real store operation to complete stwu */
	lwz	r5,GPR1(r1)
	stw	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r2,TI_FLAGS
0:	lwarx	r8,0,r5
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r5
#endif
	stwcx.	r8,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPTION
	/* check current_thread_info->preempt_count */
	lwz	r0,TI_PREEMPT(r2)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore_kuap
	andi.	r8,r8,_TIF_NEED_RESCHED
	beq+	restore_kuap
	lwz	r3,_MSR(r1)
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore_kuap	/* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep thinks irqs are enabled, we need to call
	 * preempt_schedule_irq with IRQs off, so we inform lockdep
	 * now that we -did- turn them off already
	 */
	bl	trace_hardirqs_off
#endif
	bl	preempt_schedule_irq
#ifdef CONFIG_TRACE_IRQFLAGS
	/* And now, to properly rebalance the above, we tell lockdep they
	 * are being turned back on, which will happen when we return
	 */
	bl	trace_hardirqs_on
#endif
#endif /* CONFIG_PREEMPTION */
restore_kuap:
	kuap_restore r1, r2, r9, r10, r0

	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	b	1f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif  /* CONFIG_44x */

	lwz	r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
	 * off in this assembly code while peeking at TI_FLAGS() and such. However
	 * we need to inform it if the exception turned interrupts off, and we
	 * are about to turn them back on.
	 */
	andi.	r10,r9,MSR_EE
	beq	1f
	stwu	r1,-32(r1)
	mflr	r0
	stw	r0,4(r1)
	bl	trace_hardirqs_on
	addi	r1, r1, 32
	lwz	r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
	lwarx	r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r1)
	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	mtmsr	r10		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r12,_NIP(r1)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :)
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r1)
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we     \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;

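/*
 * Token pasting makes RESTORE_xSRR generic across exception levels;
 * for example, RESTORE_xSRR(CSRR0,CSRR1) expands (roughly) to:
 *
 *	lwz	r9,_CSRR0(r1)
 *	lwz	r10,_CSRR1(r1)
 *	mtspr	SPRN_CSRR0,r9
 *	mtspr	SPRN_CSRR1,r10
 */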
#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);
	stw	r10,KSP_LIMIT(r9)
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	lwz	r9,TASK_CPU(r2)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr
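/*
 * global_dbcr0 layout (one 8-byte slot per CPU, hence the slwi by 3
 * above): word 0 holds the saved global DBCR0 value, word 1 a nesting
 * count, incremented here and decremented on the entry path in
 * transfer_to_handler when the global value is put back.
 */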

	.section .bss
	.align	4
	.global global_dbcr0
global_dbcr0:
	.space	8*NR_CPUS
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
	mfmsr	r10
#endif
	ori	r10,r10,MSR_EE
	SYNC
	mtmsr	r10		/* hard-enable interrupts */
	bl	schedule
recheck:
	/* Note: we don't tell lockdep that we are disabling them again
	 * either. These brief disable/enable cycles used to peek at
	 * TI_FLAGS aren't advertised to it.
	 */
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
	SYNC
	mtmsr	r10		/* disable interrupts */
	lwz	r9,TI_FLAGS(r2)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_USER_WORK_MASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	mtmsr	r10		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r9
	bl	do_notify_resume
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
#ifdef CONFIG_PPC_BOOK3S_601
	bgelr
#else
	bge	3f
#endif
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
#ifdef CONFIG_PPC_BOOK3S_601
	bltlr
#else
	blt	3f
#endif
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	5f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
5:	mfspr	r2,SPRN_SPRG_THREAD
	addi	r2,r2,-THREAD
	tovirt(r2,r2)			/* set back r2 to current */
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	/* shouldn't return */
	b	4b

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys_novmstack r7, r1
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_REG_IMMEDIATE(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	mtmsr	r0		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	stw	r7, THREAD + RTAS_SP(r2)
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys_novmstack r9, r1
#ifdef CONFIG_VMAP_STACK
	li	r0, MSR_KERNEL & ~MSR_IR	/* can take DTLB miss */
	mtmsr	r0
	isync
#endif
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	tophys_novmstack r7, r2
	stw	r0, THREAD + RTAS_SP(r7)
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */