1 /*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 * Adapted for Power Macintosh by Paul Mackerras.
7 * Low-level exception handlers and MMU support
8 * rewritten by Paul Mackerras.
9 * Copyright (C) 1996 Paul Mackerras.
10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 * This file contains the system call entry code, context switch
13 * code, and exception/interrupt return code for PowerPC.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 */
21
22 #include <linux/errno.h>
23 #include <linux/err.h>
24 #include <linux/sys.h>
25 #include <linux/threads.h>
26 #include <asm/reg.h>
27 #include <asm/page.h>
28 #include <asm/mmu.h>
29 #include <asm/cputable.h>
30 #include <asm/thread_info.h>
31 #include <asm/ppc_asm.h>
32 #include <asm/asm-offsets.h>
33 #include <asm/unistd.h>
34 #include <asm/ptrace.h>
35 #include <asm/export.h>
36
37 /*
38 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
39 */
40 #if MSR_KERNEL >= 0x10000
41 #define LOAD_MSR_KERNEL(r, x) lis r,(x)@h; ori r,r,(x)@l
42 #else
43 #define LOAD_MSR_KERNEL(r, x) li r,(x)
44 #endif
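/*
 * LOAD_MSR_KERNEL(r, x) loads the constant x into register r, using a
 * lis/ori pair when the value does not fit in a 16-bit signed immediate.
 */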
45
46 #ifdef CONFIG_BOOKE
47 .globl mcheck_transfer_to_handler
48 mcheck_transfer_to_handler:
49 mfspr r0,SPRN_DSRR0
50 stw r0,_DSRR0(r11)
51 mfspr r0,SPRN_DSRR1
52 stw r0,_DSRR1(r11)
53 /* fall through */
54
55 .globl debug_transfer_to_handler
56 debug_transfer_to_handler:
57 mfspr r0,SPRN_CSRR0
58 stw r0,_CSRR0(r11)
59 mfspr r0,SPRN_CSRR1
60 stw r0,_CSRR1(r11)
61 /* fall through */
62
63 .globl crit_transfer_to_handler
64 crit_transfer_to_handler:
65 #ifdef CONFIG_PPC_BOOK3E_MMU
66 mfspr r0,SPRN_MAS0
67 stw r0,MAS0(r11)
68 mfspr r0,SPRN_MAS1
69 stw r0,MAS1(r11)
70 mfspr r0,SPRN_MAS2
71 stw r0,MAS2(r11)
72 mfspr r0,SPRN_MAS3
73 stw r0,MAS3(r11)
74 mfspr r0,SPRN_MAS6
75 stw r0,MAS6(r11)
76 #ifdef CONFIG_PHYS_64BIT
77 mfspr r0,SPRN_MAS7
78 stw r0,MAS7(r11)
79 #endif /* CONFIG_PHYS_64BIT */
80 #endif /* CONFIG_PPC_BOOK3E_MMU */
81 #ifdef CONFIG_44x
82 mfspr r0,SPRN_MMUCR
83 stw r0,MMUCR(r11)
84 #endif
85 mfspr r0,SPRN_SRR0
86 stw r0,_SRR0(r11)
87 mfspr r0,SPRN_SRR1
88 stw r0,_SRR1(r11)
89
90 /* save the current stack limit, then set a new limit
91 * that covers the current stack so the thread_info
92 * struct at its base is protected
93 */
94 mfspr r8,SPRN_SPRG_THREAD
95 lwz r0,KSP_LIMIT(r8)
96 stw r0,SAVED_KSP_LIMIT(r11)
97 rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
98 stw r0,KSP_LIMIT(r8)
99 /* fall through */
100 #endif
101
102 #ifdef CONFIG_40x
103 .globl crit_transfer_to_handler
104 crit_transfer_to_handler:
105 lwz r0,crit_r10@l(0)
106 stw r0,GPR10(r11)
107 lwz r0,crit_r11@l(0)
108 stw r0,GPR11(r11)
109 mfspr r0,SPRN_SRR0
110 stw r0,crit_srr0@l(0)
111 mfspr r0,SPRN_SRR1
112 stw r0,crit_srr1@l(0)
113
114 /* save the current stack limit, then set a new limit
115 * that covers the current stack so the thread_info
116 * struct at its base is protected
117 */
118 mfspr r8,SPRN_SPRG_THREAD
119 lwz r0,KSP_LIMIT(r8)
120 stw r0,saved_ksp_limit@l(0)
121 rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
122 stw r0,KSP_LIMIT(r8)
123 /* fall through */
124 #endif
125
126 /*
127 * This code finishes saving the registers to the exception frame
128 * and jumps to the appropriate handler for the exception, turning
129 * on address translation.
130 * Note that we rely on the caller having set cr0.eq iff the exception
131 * occurred in kernel mode (i.e. MSR:PR = 0).
132 */
133 .globl transfer_to_handler_full
134 transfer_to_handler_full:
135 SAVE_NVGPRS(r11)
136 /* fall through */
137
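/*
 * Entry conventions, set up by the exception prologs in head_32.S /
 * head_booke.h: r11 points to the exception frame, r12/r9 hold the
 * saved SRR0/SRR1, r10 holds the MSR value the handler will run with,
 * and LR points to a two-word table giving the handler address and
 * the address to return to when the handler is done.
 */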
138 .globl transfer_to_handler
139 transfer_to_handler:
140 stw r2,GPR2(r11)
141 stw r12,_NIP(r11)
142 stw r9,_MSR(r11)
143 andi. r2,r9,MSR_PR
144 mfctr r12
145 mfspr r2,SPRN_XER
146 stw r12,_CTR(r11)
147 stw r2,_XER(r11)
148 mfspr r12,SPRN_SPRG_THREAD
149 addi r2,r12,-THREAD
150 tovirt(r2,r2) /* set r2 to current */
151 beq 2f /* if from kernel, skip fixing up THREAD.regs */
152 addi r11,r1,STACK_FRAME_OVERHEAD
153 stw r11,PT_REGS(r12)
154 #if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
155 /* Check to see if the dbcr0 register is set up to debug. Use the
156 internal debug mode bit to do this. */
157 lwz r12,THREAD_DBCR0(r12)
158 andis. r12,r12,DBCR0_IDM@h
159 beq+ 3f
160 /* From user and task is ptraced - load up global dbcr0 */
161 li r12,-1 /* clear all pending debug events */
162 mtspr SPRN_DBSR,r12
163 lis r11,global_dbcr0@ha
164 tophys(r11,r11)
165 addi r11,r11,global_dbcr0@l
166 #ifdef CONFIG_SMP
167 CURRENT_THREAD_INFO(r9, r1)
168 lwz r9,TI_CPU(r9)
169 slwi r9,r9,3
170 add r11,r11,r9
171 #endif
172 lwz r12,0(r11)
173 mtspr SPRN_DBCR0,r12
174 lwz r12,4(r11)
175 addi r12,r12,-1
176 stw r12,4(r11)
177 #endif
178 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
179 CURRENT_THREAD_INFO(r9, r1)
180 tophys(r9, r9)
181 ACCOUNT_CPU_USER_ENTRY(r9, r11, r12)
182 #endif
183
184 b 3f
185
186 2: /* if from kernel, check interrupted DOZE/NAP mode and
187 * check for stack overflow
188 */
189 lwz r9,KSP_LIMIT(r12)
190 cmplw r1,r9 /* if r1 <= ksp_limit */
191 ble- stack_ovf /* then the kernel stack overflowed */
192 5:
193 #if defined(CONFIG_6xx) || defined(CONFIG_E500)
194 CURRENT_THREAD_INFO(r9, r1)
195 tophys(r9,r9) /* check local flags */
196 lwz r12,TI_LOCAL_FLAGS(r9)
197 mtcrf 0x01,r12
198 bt- 31-TLF_NAPPING,4f
199 bt- 31-TLF_SLEEPING,7f
200 #endif /* CONFIG_6xx || CONFIG_E500 */
201 .globl transfer_to_handler_cont
202 transfer_to_handler_cont:
203 3:
204 mflr r9
205 lwz r11,0(r9) /* virtual address of handler */
206 lwz r9,4(r9) /* where to go when done */
207 #ifdef CONFIG_PPC_8xx_PERF_EVENT
208 mtspr SPRN_NRI, r0
209 #endif
210 #ifdef CONFIG_TRACE_IRQFLAGS
211 lis r12,reenable_mmu@h
212 ori r12,r12,reenable_mmu@l
213 mtspr SPRN_SRR0,r12
214 mtspr SPRN_SRR1,r10
215 SYNC
216 RFI
217 reenable_mmu: /* re-enable mmu so we can */
218 mfmsr r10
219 lwz r12,_MSR(r1)
220 xor r10,r10,r12
221 andi. r10,r10,MSR_EE /* Did EE change? */
222 beq 1f
223
224 /*
225 * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
226 * If from user mode there is only one stack frame on the stack, and
227 * accessing CALLER_ADDR1 will cause an oops. So we need to create a dummy
228 * stack frame to make trace_hardirqs_off happy.
229 *
230 * This is handy because we also need to save a bunch of GPRs,
231 * r3 can be different from GPR3(r1) at this point, r9 and r11
232 * contain the old MSR and handler address respectively,
233 * r4 & r5 can contain page fault arguments that need to be passed
234 * along as well. r12, CCR, CTR, XER etc... are left clobbered as
235 * they aren't useful past this point (aren't syscall arguments),
236 * the rest is restored from the exception frame.
237 */
238 stwu r1,-32(r1)
239 stw r9,8(r1)
240 stw r11,12(r1)
241 stw r3,16(r1)
242 stw r4,20(r1)
243 stw r5,24(r1)
244 bl trace_hardirqs_off
245 lwz r5,24(r1)
246 lwz r4,20(r1)
247 lwz r3,16(r1)
248 lwz r11,12(r1)
249 lwz r9,8(r1)
250 addi r1,r1,32
251 lwz r0,GPR0(r1)
252 lwz r6,GPR6(r1)
253 lwz r7,GPR7(r1)
254 lwz r8,GPR8(r1)
255 1: mtctr r11
256 mtlr r9
257 bctr /* jump to handler */
258 #else /* CONFIG_TRACE_IRQFLAGS */
259 mtspr SPRN_SRR0,r11
260 mtspr SPRN_SRR1,r10
261 mtlr r9
262 SYNC
263 RFI /* jump to handler, enable MMU */
264 #endif /* CONFIG_TRACE_IRQFLAGS */
265
266 #if defined (CONFIG_6xx) || defined(CONFIG_E500)
267 4: rlwinm r12,r12,0,~_TLF_NAPPING
268 stw r12,TI_LOCAL_FLAGS(r9)
269 b power_save_ppc32_restore
270
271 7: rlwinm r12,r12,0,~_TLF_SLEEPING
272 stw r12,TI_LOCAL_FLAGS(r9)
273 lwz r9,_MSR(r11) /* if sleeping, clear MSR.EE */
274 rlwinm r9,r9,0,~MSR_EE
275 lwz r12,_LINK(r11) /* and return to address in LR */
276 b fast_exception_return
277 #endif
278
279 /*
280 * On kernel stack overflow, load up an initial stack pointer
281 * and call StackOverflow(regs), which should not return.
282 */
283 stack_ovf:
284 /* sometimes we use a statically-allocated stack, which is OK. */
285 lis r12,_end@h
286 ori r12,r12,_end@l
287 cmplw r1,r12
288 ble 5b /* r1 <= &_end is OK */
289 SAVE_NVGPRS(r11)
290 addi r3,r1,STACK_FRAME_OVERHEAD
291 lis r1,init_thread_union@ha
292 addi r1,r1,init_thread_union@l
293 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
294 lis r9,StackOverflow@ha
295 addi r9,r9,StackOverflow@l
296 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
297 #ifdef CONFIG_PPC_8xx_PERF_EVENT
298 mtspr SPRN_NRI, r0
299 #endif
300 mtspr SPRN_SRR0,r9
301 mtspr SPRN_SRR1,r10
302 SYNC
303 RFI
304
305 /*
306 * Handle a system call.
307 */
308 .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
309 .stabs "entry_32.S",N_SO,0,0,0f
310 0:
311
312 _GLOBAL(DoSyscall)
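	/*
	 * Syscall calling convention: the syscall number arrives in r0 and
	 * the arguments in r3-r8.  The result is returned in r3, with an
	 * error reported by setting the CR0.SO bit on the exit path below.
	 */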
313 stw r3,ORIG_GPR3(r1)
314 li r12,0
315 stw r12,RESULT(r1)
316 lwz r11,_CCR(r1) /* Clear SO bit in CR */
317 rlwinm r11,r11,0,4,2
318 stw r11,_CCR(r1)
319 #ifdef CONFIG_TRACE_IRQFLAGS
320 /* Return from syscalls can (and generally will) hard enable
321 * interrupts. You aren't supposed to call a syscall with
322 * interrupts disabled in the first place. However, to ensure
323 * that we get it right vs. lockdep if it happens, we force
324 * that hard enable here with appropriate tracing if we see
325 * that we have been called with interrupts off
326 */
327 mfmsr r11
328 andi. r12,r11,MSR_EE
329 bne+ 1f
330 /* We came in with interrupts disabled, we enable them now */
331 bl trace_hardirqs_on
332 mfmsr r11
333 lwz r0,GPR0(r1)
334 lwz r3,GPR3(r1)
335 lwz r4,GPR4(r1)
336 ori r11,r11,MSR_EE
337 lwz r5,GPR5(r1)
338 lwz r6,GPR6(r1)
339 lwz r7,GPR7(r1)
340 lwz r8,GPR8(r1)
341 mtmsr r11
342 1:
343 #endif /* CONFIG_TRACE_IRQFLAGS */
344 CURRENT_THREAD_INFO(r10, r1)
345 lwz r11,TI_FLAGS(r10)
346 andi. r11,r11,_TIF_SYSCALL_DOTRACE
347 bne- syscall_dotrace
348 syscall_dotrace_cont:
349 cmplwi 0,r0,NR_syscalls
350 lis r10,sys_call_table@h
351 ori r10,r10,sys_call_table@l
352 slwi r0,r0,2
353 bge- 66f
354 lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
355 mtlr r10
356 addi r9,r1,STACK_FRAME_OVERHEAD
357 PPC440EP_ERR42
358 blrl /* Call handler */
359 .globl ret_from_syscall
360 ret_from_syscall:
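	/*
	 * r3 holds the syscall return value; keep a copy in r6, which the
	 * syscall_exit_work path stores into RESULT(r1).
	 */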
361 mr r6,r3
362 CURRENT_THREAD_INFO(r12, r1)
363 /* disable interrupts so current_thread_info()->flags can't change */
364 LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
365 /* Note: We don't bother telling lockdep about it */
366 SYNC
367 MTMSRD(r10)
368 lwz r9,TI_FLAGS(r12)
369 li r8,-MAX_ERRNO
370 andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
371 bne- syscall_exit_work
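	/*
	 * Return values in [-MAX_ERRNO, -1] denote failure: negate them into
	 * a positive errno and flag the error by setting CR0.SO below.
	 */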
372 cmplw 0,r3,r8
373 blt+ syscall_exit_cont
374 lwz r11,_CCR(r1) /* Load CR */
375 neg r3,r3
376 oris r11,r11,0x1000 /* Set SO bit in CR */
377 stw r11,_CCR(r1)
378 syscall_exit_cont:
379 lwz r8,_MSR(r1)
380 #ifdef CONFIG_TRACE_IRQFLAGS
381 /* If we are going to return from the syscall with interrupts
382 * off, we trace that here. It shouldn't happen, but we
383 * want to catch the bugger if it does, right?
384 */
385 andi. r10,r8,MSR_EE
386 bne+ 1f
387 stw r3,GPR3(r1)
388 bl trace_hardirqs_off
389 lwz r3,GPR3(r1)
390 1:
391 #endif /* CONFIG_TRACE_IRQFLAGS */
392 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
393 /* If the process has its own DBCR0 value, load it up. The internal
394 debug mode bit tells us that dbcr0 should be loaded. */
395 lwz r0,THREAD+THREAD_DBCR0(r2)
396 andis. r10,r0,DBCR0_IDM@h
397 bnel- load_dbcr0
398 #endif
399 #ifdef CONFIG_44x
400 BEGIN_MMU_FTR_SECTION
401 lis r4,icache_44x_need_flush@ha
402 lwz r5,icache_44x_need_flush@l(r4)
403 cmplwi cr0,r5,0
404 bne- 2f
405 1:
406 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
407 #endif /* CONFIG_44x */
408 BEGIN_FTR_SECTION
409 lwarx r7,0,r1
410 END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
411 stwcx. r0,0,r1 /* to clear the reservation */
412 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
413 andi. r4,r8,MSR_PR
414 beq 3f
415 CURRENT_THREAD_INFO(r4, r1)
416 ACCOUNT_CPU_USER_EXIT(r4, r5, r7)
417 3:
418 #endif
419 lwz r4,_LINK(r1)
420 lwz r5,_CCR(r1)
421 mtlr r4
422 mtcr r5
423 lwz r7,_NIP(r1)
424 lwz r2,GPR2(r1)
425 lwz r1,GPR1(r1)
426 #ifdef CONFIG_PPC_8xx_PERF_EVENT
427 mtspr SPRN_NRI, r0
428 #endif
429 mtspr SPRN_SRR0,r7
430 mtspr SPRN_SRR1,r8
431 SYNC
432 RFI
433 #ifdef CONFIG_44x
434 2: li r7,0
435 iccci r0,r0
436 stw r7,icache_44x_need_flush@l(r4)
437 b 1b
438 #endif /* CONFIG_44x */
439
440 66: li r3,-ENOSYS
441 b ret_from_syscall
442
443 .globl ret_from_fork
444 ret_from_fork:
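	/*
	 * New children set up by copy_thread() resume here: finish the
	 * scheduler handoff and return 0 to the child.
	 */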
445 REST_NVGPRS(r1)
446 bl schedule_tail
447 li r3,0
448 b ret_from_syscall
449
450 .globl ret_from_kernel_thread
451 ret_from_kernel_thread:
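	/*
	 * Kernel threads: copy_thread() placed the thread function in r14
	 * and its argument in r15.  Call it; if it returns, leave through
	 * the syscall exit path with a return value of 0.
	 */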
452 REST_NVGPRS(r1)
453 bl schedule_tail
454 mtlr r14
455 mr r3,r15
456 PPC440EP_ERR42
457 blrl
458 li r3,0
459 b ret_from_syscall
460
461 /* Traced system call support */
462 syscall_dotrace:
463 SAVE_NVGPRS(r1)
464 li r0,0xc00
465 stw r0,_TRAP(r1)
466 addi r3,r1,STACK_FRAME_OVERHEAD
467 bl do_syscall_trace_enter
468 /*
469 * Restore argument registers possibly just changed.
470 * We use the return value of do_syscall_trace_enter
471 * for call number to look up in the table (r0).
472 */
473 mr r0,r3
474 lwz r3,GPR3(r1)
475 lwz r4,GPR4(r1)
476 lwz r5,GPR5(r1)
477 lwz r6,GPR6(r1)
478 lwz r7,GPR7(r1)
479 lwz r8,GPR8(r1)
480 REST_NVGPRS(r1)
481
482 cmplwi r0,NR_syscalls
483 /* Return code is already in r3 thanks to do_syscall_trace_enter() */
484 bge- ret_from_syscall
485 b syscall_dotrace_cont
486
487 syscall_exit_work:
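	/*
	 * Slow syscall exit: honour _TIF_RESTOREALL and _TIF_NOERROR, record
	 * the result, clear the per-syscall TIF flags, and call
	 * do_syscall_trace_leave() if exit tracing or single-step is pending.
	 */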
488 andi. r0,r9,_TIF_RESTOREALL
489 beq+ 0f
490 REST_NVGPRS(r1)
491 b 2f
492 0: cmplw 0,r3,r8
493 blt+ 1f
494 andi. r0,r9,_TIF_NOERROR
495 bne- 1f
496 lwz r11,_CCR(r1) /* Load CR */
497 neg r3,r3
498 oris r11,r11,0x1000 /* Set SO bit in CR */
499 stw r11,_CCR(r1)
500
501 1: stw r6,RESULT(r1) /* Save result */
502 stw r3,GPR3(r1) /* Update return value */
503 2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
504 beq 4f
505
506 /* Clear per-syscall TIF flags if any are set. */
507
508 li r11,_TIF_PERSYSCALL_MASK
509 addi r12,r12,TI_FLAGS
510 3: lwarx r8,0,r12
511 andc r8,r8,r11
512 #ifdef CONFIG_IBM405_ERR77
513 dcbt 0,r12
514 #endif
515 stwcx. r8,0,r12
516 bne- 3b
517 subi r12,r12,TI_FLAGS
518
519 4: /* Anything which requires enabling interrupts? */
520 andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
521 beq ret_from_except
522
523 /* Re-enable interrupts. There is no need to trace that with
524 * lockdep as we are supposed to have IRQs on at this point
525 */
526 ori r10,r10,MSR_EE
527 SYNC
528 MTMSRD(r10)
529
530 /* Save NVGPRS if they're not saved already */
531 lwz r4,_TRAP(r1)
532 andi. r4,r4,1
533 beq 5f
534 SAVE_NVGPRS(r1)
535 li r4,0xc00
536 stw r4,_TRAP(r1)
537 5:
538 addi r3,r1,STACK_FRAME_OVERHEAD
539 bl do_syscall_trace_leave
540 b ret_from_except_full
541
542 /*
543 * The fork/clone functions need to copy the full register set into
544 * the child process. Therefore we need to save all the nonvolatile
545 * registers (r13 - r31) before calling the C code.
546 */
547 .globl ppc_fork
548 ppc_fork:
549 SAVE_NVGPRS(r1)
550 lwz r0,_TRAP(r1)
551 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
552 stw r0,_TRAP(r1) /* register set saved */
553 b sys_fork
554
555 .globl ppc_vfork
556 ppc_vfork:
557 SAVE_NVGPRS(r1)
558 lwz r0,_TRAP(r1)
559 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
560 stw r0,_TRAP(r1) /* register set saved */
561 b sys_vfork
562
563 .globl ppc_clone
564 ppc_clone:
565 SAVE_NVGPRS(r1)
566 lwz r0,_TRAP(r1)
567 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
568 stw r0,_TRAP(r1) /* register set saved */
569 b sys_clone
570
571 .globl ppc_swapcontext
572 ppc_swapcontext:
573 SAVE_NVGPRS(r1)
574 lwz r0,_TRAP(r1)
575 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
576 stw r0,_TRAP(r1) /* register set saved */
577 b sys_swapcontext
578
579 /*
580 * Top-level page fault handling.
581 * This is in assembler because if do_page_fault tells us that
582 * it is a bad kernel page fault, we want to save the non-volatile
583 * registers before calling bad_page_fault.
584 */
585 .globl handle_page_fault
586 handle_page_fault:
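	/*
	 * On entry r4 holds the faulting address (DAR/DEAR) and r5 the fault
	 * status (DSISR, or ESR on 4xx/Book-E).
	 */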
587 stw r4,_DAR(r1)
588 addi r3,r1,STACK_FRAME_OVERHEAD
589 #ifdef CONFIG_6xx
590 andis. r0,r5,DSISR_DABRMATCH@h
591 bne- handle_dabr_fault
592 #endif
593 bl do_page_fault
594 cmpwi r3,0
595 beq+ ret_from_except
596 SAVE_NVGPRS(r1)
597 lwz r0,_TRAP(r1)
598 clrrwi r0,r0,1
599 stw r0,_TRAP(r1)
600 mr r5,r3
601 addi r3,r1,STACK_FRAME_OVERHEAD
602 lwz r4,_DAR(r1)
603 bl bad_page_fault
604 b ret_from_except_full
605
606 #ifdef CONFIG_6xx
607 /* We have a data breakpoint exception - handle it */
608 handle_dabr_fault:
609 SAVE_NVGPRS(r1)
610 lwz r0,_TRAP(r1)
611 clrrwi r0,r0,1
612 stw r0,_TRAP(r1)
613 bl do_break
614 b ret_from_except_full
615 #endif
616
617 /*
618 * This routine switches between two different tasks. The process
619 * state of one is saved on its kernel stack. Then the state
620 * of the other is restored from its kernel stack. The memory
621 * management hardware is updated to the second process's state.
622 * Finally, we can return to the second process.
623 * On entry, r3 points to the THREAD for the current task, r4
624 * points to the THREAD for the new task.
625 *
626 * This routine is always called with interrupts disabled.
627 *
628 * Note: there are two ways to get to the "going out" portion
629 * of this code; either by coming in via the entry (_switch)
630 * or via "fork" which must set up an environment equivalent
631 * to the "_switch" path. If you change this, you'll have to
632 * change the fork code also.
633 *
634 * The code which creates the new task context is in 'copy_thread'
635 * in arch/powerpc/kernel/process.c
636 */
637 _GLOBAL(_switch)
638 stwu r1,-INT_FRAME_SIZE(r1)
639 mflr r0
640 stw r0,INT_FRAME_SIZE+4(r1)
641 /* r3-r12 are caller saved -- Cort */
642 SAVE_NVGPRS(r1)
643 stw r0,_NIP(r1) /* Return to switch caller */
644 mfmsr r11
645 li r0,MSR_FP /* Disable floating-point */
646 #ifdef CONFIG_ALTIVEC
647 BEGIN_FTR_SECTION
648 oris r0,r0,MSR_VEC@h /* Disable altivec */
649 mfspr r12,SPRN_VRSAVE /* save vrsave register value */
650 stw r12,THREAD+THREAD_VRSAVE(r2)
651 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
652 #endif /* CONFIG_ALTIVEC */
653 #ifdef CONFIG_SPE
654 BEGIN_FTR_SECTION
655 oris r0,r0,MSR_SPE@h /* Disable SPE */
656 mfspr r12,SPRN_SPEFSCR /* save spefscr register value */
657 stw r12,THREAD+THREAD_SPEFSCR(r2)
658 END_FTR_SECTION_IFSET(CPU_FTR_SPE)
659 #endif /* CONFIG_SPE */
660 and. r0,r0,r11 /* FP or altivec or SPE enabled? */
661 beq+ 1f
662 andc r11,r11,r0
663 MTMSRD(r11)
664 isync
665 1: stw r11,_MSR(r1)
666 mfcr r10
667 stw r10,_CCR(r1)
668 stw r1,KSP(r3) /* Set old stack pointer */
669
670 #ifdef CONFIG_SMP
671 /* We need a sync somewhere here to make sure that if the
672 * previous task gets rescheduled on another CPU, it sees all
673 * stores it has performed on this one.
674 */
675 sync
676 #endif /* CONFIG_SMP */
677
678 tophys(r0,r4)
679 mtspr SPRN_SPRG_THREAD,r0 /* Update current THREAD phys addr */
680 lwz r1,KSP(r4) /* Load new stack pointer */
681
682 /* save the old current 'last' for return value */
683 mr r3,r2
684 addi r2,r4,-THREAD /* Update current */
685
686 #ifdef CONFIG_ALTIVEC
687 BEGIN_FTR_SECTION
688 lwz r0,THREAD+THREAD_VRSAVE(r2)
689 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
690 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
691 #endif /* CONFIG_ALTIVEC */
692 #ifdef CONFIG_SPE
693 BEGIN_FTR_SECTION
694 lwz r0,THREAD+THREAD_SPEFSCR(r2)
695 mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
696 END_FTR_SECTION_IFSET(CPU_FTR_SPE)
697 #endif /* CONFIG_SPE */
698
699 lwz r0,_CCR(r1)
700 mtcrf 0xFF,r0
701 /* r3-r12 are destroyed -- Cort */
702 REST_NVGPRS(r1)
703
704 lwz r4,_NIP(r1) /* Return to _switch caller in new task */
705 mtlr r4
706 addi r1,r1,INT_FRAME_SIZE
707 blr
708
709 .globl fast_exception_return
710 fast_exception_return:
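	/*
	 * Fast return path for exceptions that saved only the volatile
	 * registers: entered with r11 pointing at the exception frame and
	 * r9/r12 holding the saved MSR/NIP; restore straight from the frame
	 * and rfi.
	 */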
711 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
712 andi. r10,r9,MSR_RI /* check for recoverable interrupt */
713 beq 1f /* if not, we've got problems */
714 #endif
715
716 2: REST_4GPRS(3, r11)
717 lwz r10,_CCR(r11)
718 REST_GPR(1, r11)
719 mtcr r10
720 lwz r10,_LINK(r11)
721 mtlr r10
722 REST_GPR(10, r11)
723 #ifdef CONFIG_PPC_8xx_PERF_EVENT
724 mtspr SPRN_NRI, r0
725 #endif
726 mtspr SPRN_SRR1,r9
727 mtspr SPRN_SRR0,r12
728 REST_GPR(9, r11)
729 REST_GPR(12, r11)
730 lwz r11,GPR11(r11)
731 SYNC
732 RFI
733
734 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
735 /* check if the exception happened in a restartable section */
736 1: lis r3,exc_exit_restart_end@ha
737 addi r3,r3,exc_exit_restart_end@l
738 cmplw r12,r3
739 bge 3f
740 lis r4,exc_exit_restart@ha
741 addi r4,r4,exc_exit_restart@l
742 cmplw r12,r4
743 blt 3f
744 lis r3,fee_restarts@ha
745 tophys(r3,r3)
746 lwz r5,fee_restarts@l(r3)
747 addi r5,r5,1
748 stw r5,fee_restarts@l(r3)
749 mr r12,r4 /* restart at exc_exit_restart */
750 b 2b
751
752 .section .bss
753 .align 2
754 fee_restarts:
755 .space 4
756 .previous
757
758 /* aargh, a nonrecoverable interrupt, panic */
759 /* aargh, we don't know which trap this is */
760 /* but the 601 doesn't implement the RI bit, so assume it's OK */
761 3:
762 BEGIN_FTR_SECTION
763 b 2b
764 END_FTR_SECTION_IFSET(CPU_FTR_601)
765 li r10,-1
766 stw r10,_TRAP(r11)
767 addi r3,r1,STACK_FRAME_OVERHEAD
768 lis r10,MSR_KERNEL@h
769 ori r10,r10,MSR_KERNEL@l
770 bl transfer_to_handler_full
771 .long nonrecoverable_exception
772 .long ret_from_except
773 #endif
774
775 .globl ret_from_except_full
776 ret_from_except_full:
777 REST_NVGPRS(r1)
778 /* fall through */
779
780 .globl ret_from_except
781 ret_from_except:
782 /* Hard-disable interrupts so that current_thread_info()->flags
783 * can't change between when we test it and when we return
784 * from the interrupt. */
785 /* Note: We don't bother telling lockdep about it */
786 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
787 SYNC /* Some chip revs have problems here... */
788 MTMSRD(r10) /* disable interrupts */
789
790 lwz r3,_MSR(r1) /* Returning to user mode? */
791 andi. r0,r3,MSR_PR
792 beq resume_kernel
793
794 user_exc_return: /* r10 contains MSR_KERNEL here */
795 /* Check current_thread_info()->flags */
796 CURRENT_THREAD_INFO(r9, r1)
797 lwz r9,TI_FLAGS(r9)
798 andi. r0,r9,_TIF_USER_WORK_MASK
799 bne do_work
800
801 restore_user:
802 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
803 /* Check whether this process has its own DBCR0 value. The internal
804 debug mode bit tells us that dbcr0 should be loaded. */
805 lwz r0,THREAD+THREAD_DBCR0(r2)
806 andis. r10,r0,DBCR0_IDM@h
807 bnel- load_dbcr0
808 #endif
809 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
810 CURRENT_THREAD_INFO(r9, r1)
811 ACCOUNT_CPU_USER_EXIT(r9, r10, r11)
812 #endif
813
814 b restore
815
816 /* N.B. the only way to get here is from the beq following ret_from_except. */
817 resume_kernel:
818 /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
819 CURRENT_THREAD_INFO(r9, r1)
820 lwz r8,TI_FLAGS(r9)
821 andis. r0,r8,_TIF_EMULATE_STACK_STORE@h
822 beq+ 1f
823
824 addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
825
826 lwz r3,GPR1(r1)
827 subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
828 mr r4,r1 /* src: current exception frame */
829 mr r1,r3 /* Reroute the trampoline frame to r1 */
830
831 /* Copy from the original to the trampoline. */
832 li r5,INT_FRAME_SIZE/4 /* size: INT_FRAME_SIZE */
833 li r6,0 /* start offset: 0 */
834 mtctr r5
835 2: lwzx r0,r6,r4
836 stwx r0,r6,r3
837 addi r6,r6,4
838 bdnz 2b
839
840 /* Do real store operation to complete stwu */
841 lwz r5,GPR1(r1)
842 stw r8,0(r5)
843
844 /* Clear _TIF_EMULATE_STACK_STORE flag */
845 lis r11,_TIF_EMULATE_STACK_STORE@h
846 addi r5,r9,TI_FLAGS
847 0: lwarx r8,0,r5
848 andc r8,r8,r11
849 #ifdef CONFIG_IBM405_ERR77
850 dcbt 0,r5
851 #endif
852 stwcx. r8,0,r5
853 bne- 0b
854 1:
855
856 #ifdef CONFIG_PREEMPT
857 /* check current_thread_info->preempt_count */
858 lwz r0,TI_PREEMPT(r9)
859 cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
860 bne restore
861 andi. r8,r8,_TIF_NEED_RESCHED
862 beq+ restore
863 lwz r3,_MSR(r1)
864 andi. r0,r3,MSR_EE /* interrupts off? */
865 beq restore /* don't schedule if so */
866 #ifdef CONFIG_TRACE_IRQFLAGS
867 /* Lockdep thinks irqs are enabled, we need to call
868 * preempt_schedule_irq with IRQs off, so we inform lockdep
869 * now that we -did- turn them off already
870 */
871 bl trace_hardirqs_off
872 #endif
873 1: bl preempt_schedule_irq
874 CURRENT_THREAD_INFO(r9, r1)
875 lwz r3,TI_FLAGS(r9)
876 andi. r0,r3,_TIF_NEED_RESCHED
877 bne- 1b
878 #ifdef CONFIG_TRACE_IRQFLAGS
879 /* And now, to properly rebalance the above, we tell lockdep they
880 * are being turned back on, which will happen when we return
881 */
882 bl trace_hardirqs_on
883 #endif
884 #endif /* CONFIG_PREEMPT */
885
886 /* interrupts are hard-disabled at this point */
887 restore:
888 #ifdef CONFIG_44x
889 BEGIN_MMU_FTR_SECTION
890 b 1f
891 END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
892 lis r4,icache_44x_need_flush@ha
893 lwz r5,icache_44x_need_flush@l(r4)
894 cmplwi cr0,r5,0
895 beq+ 1f
896 li r6,0
897 iccci r0,r0
898 stw r6,icache_44x_need_flush@l(r4)
899 1:
900 #endif /* CONFIG_44x */
901
902 lwz r9,_MSR(r1)
903 #ifdef CONFIG_TRACE_IRQFLAGS
904 /* Lockdep doesn't know about the fact that IRQs are temporarily turned
905 * off in this assembly code while peeking at TI_FLAGS() and such. However
906 * we need to inform it if the exception turned interrupts off, and we
907 * are about to turn them back on.
908 *
909 * The problem here sadly is that we don't know whether the exception was
910 * one that turned interrupts off or not. So we always tell lockdep about
911 * turning them on here when we go back to wherever we came from with EE
912 * on, even if that may mean some redundant calls being tracked. Maybe later
913 * we could encode what the exception did somewhere or test the exception
914 * type in the pt_regs but that sounds overkill
915 */
916 andi. r10,r9,MSR_EE
917 beq 1f
918 /*
919 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
920 * which is the stack frame here, we need to force a stack frame
921 * in case we came from user space.
922 */
923 stwu r1,-32(r1)
924 mflr r0
925 stw r0,4(r1)
926 stwu r1,-32(r1)
927 bl trace_hardirqs_on
928 lwz r1,0(r1)
929 lwz r1,0(r1)
930 lwz r9,_MSR(r1)
931 1:
932 #endif /* CONFIG_TRACE_IRQFLAGS */
933
934 lwz r0,GPR0(r1)
935 lwz r2,GPR2(r1)
936 REST_4GPRS(3, r1)
937 REST_2GPRS(7, r1)
938
939 lwz r10,_XER(r1)
940 lwz r11,_CTR(r1)
941 mtspr SPRN_XER,r10
942 mtctr r11
943
944 PPC405_ERR77(0,r1)
945 BEGIN_FTR_SECTION
946 lwarx r11,0,r1
947 END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
948 stwcx. r0,0,r1 /* to clear the reservation */
949
950 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
951 andi. r10,r9,MSR_RI /* check if this exception occurred */
952 beql nonrecoverable /* at a bad place (MSR:RI = 0) */
953
954 lwz r10,_CCR(r1)
955 lwz r11,_LINK(r1)
956 mtcrf 0xFF,r10
957 mtlr r11
958
959 /*
960 * Once we put values in SRR0 and SRR1, we are in a state
961 * where exceptions are not recoverable, since taking an
962 * exception will trash SRR0 and SRR1. Therefore we clear the
963 * MSR:RI bit to indicate this. If we do take an exception,
964 * we can't return to the point of the exception but we
965 * can restart the exception exit path at the label
966 * exc_exit_restart below. -- paulus
967 */
968 LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
969 SYNC
970 MTMSRD(r10) /* clear the RI bit */
971 .globl exc_exit_restart
972 exc_exit_restart:
973 lwz r12,_NIP(r1)
974 #ifdef CONFIG_PPC_8xx_PERF_EVENT
975 mtspr SPRN_NRI, r0
976 #endif
977 mtspr SPRN_SRR0,r12
978 mtspr SPRN_SRR1,r9
979 REST_4GPRS(9, r1)
980 lwz r1,GPR1(r1)
981 .globl exc_exit_restart_end
982 exc_exit_restart_end:
983 SYNC
984 RFI
985
986 #else /* !(CONFIG_4xx || CONFIG_BOOKE) */
987 /*
988 * This is a bit different on 4xx/Book-E because it doesn't have
989 * the RI bit in the MSR.
990 * The TLB miss handler checks if we have interrupted
991 * the exception exit path and restarts it if so
992 * (well maybe one day it will... :).
993 */
994 lwz r11,_LINK(r1)
995 mtlr r11
996 lwz r10,_CCR(r1)
997 mtcrf 0xff,r10
998 REST_2GPRS(9, r1)
999 .globl exc_exit_restart
1000 exc_exit_restart:
1001 lwz r11,_NIP(r1)
1002 lwz r12,_MSR(r1)
1003 exc_exit_start:
1004 mtspr SPRN_SRR0,r11
1005 mtspr SPRN_SRR1,r12
1006 REST_2GPRS(11, r1)
1007 lwz r1,GPR1(r1)
1008 .globl exc_exit_restart_end
1009 exc_exit_restart_end:
1010 PPC405_ERR77_SYNC
1011 rfi
1012 b . /* prevent prefetch past rfi */
1013
1014 /*
1015 * Returning from a critical interrupt in user mode doesn't need
1016 * to be any different from a normal exception. For a critical
1017 * interrupt in the kernel, we just return (without checking for
1018 * preemption) since the interrupt may have happened at some crucial
1019 * place (e.g. inside the TLB miss handler), and because we will be
1020 * running with r1 pointing into critical_stack, not the current
1021 * process's kernel stack (and therefore current_thread_info() will
1022 * give the wrong answer).
1023 * We have to restore various SPRs that may have been in use at the
1024 * time of the critical interrupt.
1025 *
1026 */
1027 #ifdef CONFIG_40x
1028 #define PPC_40x_TURN_OFF_MSR_DR \
1029 /* avoid any possible TLB misses here by turning off MSR.DR; we \
1030 * assume the instructions here are mapped by a pinned TLB entry */ \
1031 li r10,MSR_IR; \
1032 mtmsr r10; \
1033 isync; \
1034 tophys(r1, r1);
1035 #else
1036 #define PPC_40x_TURN_OFF_MSR_DR
1037 #endif
1038
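/*
 * RET_FROM_EXC_LEVEL: common return path for the critical, debug and
 * machine-check exception levels.  It restores the full register set and
 * DEAR/ESR from the frame, loads the level's save/restore SRR pair, and
 * returns with the matching rfi variant (rfci/rfdi/rfmci).
 */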
1039 #define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi) \
1040 REST_NVGPRS(r1); \
1041 lwz r3,_MSR(r1); \
1042 andi. r3,r3,MSR_PR; \
1043 LOAD_MSR_KERNEL(r10,MSR_KERNEL); \
1044 bne user_exc_return; \
1045 lwz r0,GPR0(r1); \
1046 lwz r2,GPR2(r1); \
1047 REST_4GPRS(3, r1); \
1048 REST_2GPRS(7, r1); \
1049 lwz r10,_XER(r1); \
1050 lwz r11,_CTR(r1); \
1051 mtspr SPRN_XER,r10; \
1052 mtctr r11; \
1053 PPC405_ERR77(0,r1); \
1054 stwcx. r0,0,r1; /* to clear the reservation */ \
1055 lwz r11,_LINK(r1); \
1056 mtlr r11; \
1057 lwz r10,_CCR(r1); \
1058 mtcrf 0xff,r10; \
1059 PPC_40x_TURN_OFF_MSR_DR; \
1060 lwz r9,_DEAR(r1); \
1061 lwz r10,_ESR(r1); \
1062 mtspr SPRN_DEAR,r9; \
1063 mtspr SPRN_ESR,r10; \
1064 lwz r11,_NIP(r1); \
1065 lwz r12,_MSR(r1); \
1066 mtspr exc_lvl_srr0,r11; \
1067 mtspr exc_lvl_srr1,r12; \
1068 lwz r9,GPR9(r1); \
1069 lwz r12,GPR12(r1); \
1070 lwz r10,GPR10(r1); \
1071 lwz r11,GPR11(r1); \
1072 lwz r1,GPR1(r1); \
1073 PPC405_ERR77_SYNC; \
1074 exc_lvl_rfi; \
1075 b .; /* prevent prefetch past exc_lvl_rfi */
1076
1077 #define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1) \
1078 lwz r9,_##exc_lvl_srr0(r1); \
1079 lwz r10,_##exc_lvl_srr1(r1); \
1080 mtspr SPRN_##exc_lvl_srr0,r9; \
1081 mtspr SPRN_##exc_lvl_srr1,r10;
1082
1083 #if defined(CONFIG_PPC_BOOK3E_MMU)
1084 #ifdef CONFIG_PHYS_64BIT
1085 #define RESTORE_MAS7 \
1086 lwz r11,MAS7(r1); \
1087 mtspr SPRN_MAS7,r11;
1088 #else
1089 #define RESTORE_MAS7
1090 #endif /* CONFIG_PHYS_64BIT */
1091 #define RESTORE_MMU_REGS \
1092 lwz r9,MAS0(r1); \
1093 lwz r10,MAS1(r1); \
1094 lwz r11,MAS2(r1); \
1095 mtspr SPRN_MAS0,r9; \
1096 lwz r9,MAS3(r1); \
1097 mtspr SPRN_MAS1,r10; \
1098 lwz r10,MAS6(r1); \
1099 mtspr SPRN_MAS2,r11; \
1100 mtspr SPRN_MAS3,r9; \
1101 mtspr SPRN_MAS6,r10; \
1102 RESTORE_MAS7;
1103 #elif defined(CONFIG_44x)
1104 #define RESTORE_MMU_REGS \
1105 lwz r9,MMUCR(r1); \
1106 mtspr SPRN_MMUCR,r9;
1107 #else
1108 #define RESTORE_MMU_REGS
1109 #endif
1110
1111 #ifdef CONFIG_40x
1112 .globl ret_from_crit_exc
1113 ret_from_crit_exc:
1114 mfspr r9,SPRN_SPRG_THREAD
1115 lis r10,saved_ksp_limit@ha;
1116 lwz r10,saved_ksp_limit@l(r10);
1117 tovirt(r9,r9);
1118 stw r10,KSP_LIMIT(r9)
1119 lis r9,crit_srr0@ha;
1120 lwz r9,crit_srr0@l(r9);
1121 lis r10,crit_srr1@ha;
1122 lwz r10,crit_srr1@l(r10);
1123 mtspr SPRN_SRR0,r9;
1124 mtspr SPRN_SRR1,r10;
1125 RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1126 #endif /* CONFIG_40x */
1127
1128 #ifdef CONFIG_BOOKE
1129 .globl ret_from_crit_exc
1130 ret_from_crit_exc:
1131 mfspr r9,SPRN_SPRG_THREAD
1132 lwz r10,SAVED_KSP_LIMIT(r1)
1133 stw r10,KSP_LIMIT(r9)
1134 RESTORE_xSRR(SRR0,SRR1);
1135 RESTORE_MMU_REGS;
1136 RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1137
1138 .globl ret_from_debug_exc
1139 ret_from_debug_exc:
1140 mfspr r9,SPRN_SPRG_THREAD
1141 lwz r10,SAVED_KSP_LIMIT(r1)
1142 stw r10,KSP_LIMIT(r9)
1143 lwz r9,THREAD_INFO-THREAD(r9)
1144 CURRENT_THREAD_INFO(r10, r1)
1145 lwz r10,TI_PREEMPT(r10)
1146 stw r10,TI_PREEMPT(r9)
1147 RESTORE_xSRR(SRR0,SRR1);
1148 RESTORE_xSRR(CSRR0,CSRR1);
1149 RESTORE_MMU_REGS;
1150 RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
1151
1152 .globl ret_from_mcheck_exc
1153 ret_from_mcheck_exc:
1154 mfspr r9,SPRN_SPRG_THREAD
1155 lwz r10,SAVED_KSP_LIMIT(r1)
1156 stw r10,KSP_LIMIT(r9)
1157 RESTORE_xSRR(SRR0,SRR1);
1158 RESTORE_xSRR(CSRR0,CSRR1);
1159 RESTORE_xSRR(DSRR0,DSRR1);
1160 RESTORE_MMU_REGS;
1161 RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
1162 #endif /* CONFIG_BOOKE */
1163
1164 /*
1165 * Load the DBCR0 value for a task that is being ptraced,
1166 * having first saved away the global DBCR0. Note that r0
1167 * has the dbcr0 value to set upon entry to this.
1168 */
1169 load_dbcr0:
1170 mfmsr r10 /* first disable debug exceptions */
1171 rlwinm r10,r10,0,~MSR_DE
1172 mtmsr r10
1173 isync
1174 mfspr r10,SPRN_DBCR0
1175 lis r11,global_dbcr0@ha
1176 addi r11,r11,global_dbcr0@l
1177 #ifdef CONFIG_SMP
1178 CURRENT_THREAD_INFO(r9, r1)
1179 lwz r9,TI_CPU(r9)
1180 slwi r9,r9,3
1181 add r11,r11,r9
1182 #endif
1183 stw r10,0(r11)
1184 mtspr SPRN_DBCR0,r0
1185 lwz r10,4(r11)
1186 addi r10,r10,1
1187 stw r10,4(r11)
1188 li r11,-1
1189 mtspr SPRN_DBSR,r11 /* clear all pending debug events */
1190 blr
1191
1192 .section .bss
1193 .align 4
1194 global_dbcr0:
1195 .space 8*NR_CPUS
1196 .previous
1197 #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1198
1199 do_work: /* r10 contains MSR_KERNEL here */
1200 andi. r0,r9,_TIF_NEED_RESCHED
1201 beq do_user_signal
1202
1203 do_resched: /* r10 contains MSR_KERNEL here */
1204 /* Note: We don't need to inform lockdep that we are enabling
1205 * interrupts here. As far as it knows, they are already enabled
1206 */
1207 ori r10,r10,MSR_EE
1208 SYNC
1209 MTMSRD(r10) /* hard-enable interrupts */
1210 bl schedule
1211 recheck:
1212 /* Note: And we don't tell it we are disabling them again
1213 * either. Those disable/enable cycles used to peek at
1214 * TI_FLAGS aren't advertised.
1215 */
1216 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
1217 SYNC
1218 MTMSRD(r10) /* disable interrupts */
1219 CURRENT_THREAD_INFO(r9, r1)
1220 lwz r9,TI_FLAGS(r9)
1221 andi. r0,r9,_TIF_NEED_RESCHED
1222 bne- do_resched
1223 andi. r0,r9,_TIF_USER_WORK_MASK
1224 beq restore_user
1225 do_user_signal: /* r10 contains MSR_KERNEL here */
1226 ori r10,r10,MSR_EE
1227 SYNC
1228 MTMSRD(r10) /* hard-enable interrupts */
1229 /* save r13-r31 in the exception frame, if not already done */
1230 lwz r3,_TRAP(r1)
1231 andi. r0,r3,1
1232 beq 2f
1233 SAVE_NVGPRS(r1)
1234 rlwinm r3,r3,0,0,30
1235 stw r3,_TRAP(r1)
1236 2: addi r3,r1,STACK_FRAME_OVERHEAD
1237 mr r4,r9
1238 bl do_notify_resume
1239 REST_NVGPRS(r1)
1240 b recheck
1241
1242 /*
1243 * We come here when we are at the end of handling an exception
1244 * that occurred at a place where taking an exception will lose
1245 * state information, such as the contents of SRR0 and SRR1.
1246 */
1247 nonrecoverable:
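	/*
	 * If the interrupted NIP (r12) lies inside the exc_exit_restart ..
	 * exc_exit_restart_end window we can simply restart the exception
	 * exit sequence; otherwise the saved SRR state is gone and the
	 * process has to be killed.
	 */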
1248 lis r10,exc_exit_restart_end@ha
1249 addi r10,r10,exc_exit_restart_end@l
1250 cmplw r12,r10
1251 bge 3f
1252 lis r11,exc_exit_restart@ha
1253 addi r11,r11,exc_exit_restart@l
1254 cmplw r12,r11
1255 blt 3f
1256 lis r10,ee_restarts@ha
1257 lwz r12,ee_restarts@l(r10)
1258 addi r12,r12,1
1259 stw r12,ee_restarts@l(r10)
1260 mr r12,r11 /* restart at exc_exit_restart */
1261 blr
1262 3: /* OK, we can't recover, kill this process */
1263 /* but the 601 doesn't implement the RI bit, so assume it's OK */
1264 BEGIN_FTR_SECTION
1265 blr
1266 END_FTR_SECTION_IFSET(CPU_FTR_601)
1267 lwz r3,_TRAP(r1)
1268 andi. r0,r3,1
1269 beq 4f
1270 SAVE_NVGPRS(r1)
1271 rlwinm r3,r3,0,0,30
1272 stw r3,_TRAP(r1)
1273 4: addi r3,r1,STACK_FRAME_OVERHEAD
1274 bl nonrecoverable_exception
1275 /* shouldn't return */
1276 b 4b
1277
1278 .section .bss
1279 .align 2
1280 ee_restarts:
1281 .space 4
1282 .previous
1283
1284 /*
1285 * PROM code for specific machines follows. Put it
1286 * here so it's easy to add arch-specific sections later.
1287 * -- Cort
1288 */
1289 #ifdef CONFIG_PPC_RTAS
1290 /*
1291 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1292 * called with the MMU off.
1293 */
1294 _GLOBAL(enter_rtas)
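	/*
	 * Call into RTAS with the MMU off: save LR and the current MSR, set
	 * LR to the physical return address at label 1, load the RTAS entry
	 * point and base, and rfi to it with MSR_IR/MSR_DR cleared.
	 */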
1295 stwu r1,-INT_FRAME_SIZE(r1)
1296 mflr r0
1297 stw r0,INT_FRAME_SIZE+4(r1)
1298 LOAD_REG_ADDR(r4, rtas)
1299 lis r6,1f@ha /* physical return address for rtas */
1300 addi r6,r6,1f@l
1301 tophys(r6,r6)
1302 tophys(r7,r1)
1303 lwz r8,RTASENTRY(r4)
1304 lwz r4,RTASBASE(r4)
1305 mfmsr r9
1306 stw r9,8(r1)
1307 LOAD_MSR_KERNEL(r0,MSR_KERNEL)
1308 SYNC /* disable interrupts so SRR0/1 */
1309 MTMSRD(r0) /* don't get trashed */
1310 li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1311 mtlr r6
1312 mtspr SPRN_SPRG_RTAS,r7
1313 mtspr SPRN_SRR0,r8
1314 mtspr SPRN_SRR1,r9
1315 RFI
1316 1: tophys(r9,r1)
1317 lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
1318 lwz r9,8(r9) /* original msr value */
1319 addi r1,r1,INT_FRAME_SIZE
1320 li r0,0
1321 mtspr SPRN_SPRG_RTAS,r0
1322 mtspr SPRN_SRR0,r8
1323 mtspr SPRN_SRR1,r9
1324 RFI /* return to caller */
1325
1326 .globl machine_check_in_rtas
1327 machine_check_in_rtas:
1328 twi 31,0,0
1329 /* XXX load up BATs and panic */
1330
1331 #endif /* CONFIG_PPC_RTAS */