/*
 *  PowerPC version
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/ftrace.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>

/*
 * System calls.
 */
	.section	".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

#undef SHOW_SYSCALLS

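/*
 * Register conventions on entry to system_call_common, as set up by the
 * system-call exception prolog and relied on by the code below:
 *   r0       syscall number
 *   r3-r8    syscall arguments
 *   r9       the user's r13 (stored into GPR13 below)
 *   r11/r12  SRR0/SRR1, i.e. the user NIP and MSR (stored into _NIP/_MSR)
 *   r13      our PACA
 * If MSR_PR is set we came from userspace and switch to the kernel stack
 * saved in PACAKSAVE; otherwise we keep using the current (kernel) stack.
 */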
	.globl system_call_common
system_call_common:
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	beq	2f			/* if from kernel mode */
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
2:	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	mfcr	r2
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	std	r9,GPR13(r1)
	mflr	r10
	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r2,r11,28,(63-28)
	li	r11,0xc01
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r3,ORIG_GPR3(r1)
	std	r2,_CCR(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
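	/*
	 * The frame is now fully set up: the STACK_FRAME_REGS_MARKER value
	 * just stored below r9 lets stack unwinders recognise this as an
	 * exception frame holding a saved register set.
	 */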
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
	beq	33f
	/* if from user, see if there are any DTL entries to process */
	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
	ld	r10,LPPACA_DTLIDX(r10)	/* get log write index */
	cmpd	cr1,r11,r10
	beq+	cr1,33f
	bl	.accumulate_stolen_time
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */

	/*
	 * A syscall should always be called with interrupts enabled,
	 * so we just unconditionally hard-enable here. When some kind
	 * of irq tracing is used, we additionally check that this
	 * condition actually holds.
	 */
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
	lbz	r10,PACASOFTIRQEN(r13)
	xori	r10,r10,1
1:	tdnei	r10,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
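	/*
	 * PACASOFTIRQEN is 1 when interrupts are soft-enabled; the xori
	 * above inverts it, so tdnei traps (and the bug entry turns the
	 * trap into a WARN) only if we arrived here soft-disabled.
	 */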
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	ld	r11,PACAKMSR(r13)
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

	/* We do need to set SOFTE in the stack frame or the return
	 * from interrupt will be painful
	 */
	li	r10,1
	std	r10,SOFTE(r1)

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	CURRENT_THREAD_INFO(r11, r1)
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne	syscall_dotrace
.Lsyscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,4
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r10
	bctrl			/* Call handler */

syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	CURRENT_THREAD_INFO(r12, r1)

	ld	r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
#endif
	/*
	 * Disable interrupts so current_thread_info()->flags can't change,
	 * and so that we don't get interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	ld	r10,PACAKMSR(r13)
	/*
	 * For performance reasons we clear RI the same time that we
	 * clear EE. We only need to clear RI just before we restore r13
	 * below, but batching it with EE saves us one expensive mtmsrd call.
	 * We have to be careful to restore RI if we branch anywhere from
	 * here (eg syscall_exit_work).
	 */
	li	r9,MSR_RI
	andc	r11,r10,r9
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
.Lsyscall_error_cont:
	ld	r7,_NIP(r1)
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)

	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	HMT_MEDIUM_LOW_HAS_PPR
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI
	b	.	/* prevent speculative execution */

syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	.Lsyscall_error_cont

/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for the call number to look up in the table (r0).
	 */
	mr	r0,r3
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	CURRENT_THREAD_INFO(r10, r1)
	ld	r10,TI_FLAGS(r10)
	b	.Lsyscall_dotrace_cont

syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit

syscall_exit_work:
#ifdef CONFIG_PPC_BOOK3S
	mtmsrd	r10,1		/* Restore RI */
#endif
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	   If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -_LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set. */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
	SET_DEFAULT_THREAD_PPR(r3, r10)	/* Set thread.ppr = 3 */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite

	/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	ld	r10,PACAKMSR(r13)
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	b	.ret_from_except

/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr


/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace. Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code. Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit

_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit

_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.compat_sys_swapcontext
	b	syscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	syscall_exit

_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit

_GLOBAL(ret_from_kernel_thread)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	ld	r14, 0(r14)
	mtlr	r14
	mr	r3,r15
	blrl
	li	r3,0
	b	syscall_exit

	.section	".toc","aw"
DSCR_DEFAULT:
	.tc dscr_default[TC],dscr_default

	.section	".text"

/*
 * This routine switches between two different tasks. The process
 * state of one is saved on its kernel stack. Then the state
 * of the other is restored from its kernel stack. The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path. If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
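	/*
	 * Build a mask of the "lazy" facility bits (FP, plus VSX and
	 * Altivec where present) and clear any that are set in the MSR:
	 * the state is frozen across the switch, and the incoming task
	 * takes a facility-unavailable exception on first use, at which
	 * point its register state is loaded lazily.
	 */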
	li	r0, MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	mfspr	r25,SPRN_DSCR
	std	r25,THREAD_DSCR(r3)
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
	MTMSRD(r22)
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_FTR_SECTION
	/*
	 * Back up the TAR across context switches. Note that the TAR is not
	 * available for use in the kernel. (To provide this, the TAR should
	 * be backed up/restored on exception entry/exit instead, and be in
	 * pt_regs. FIXME, this should be in pt_regs anyway (for debug).)
	 */
	mfspr	r0,SPRN_TAR
	std	r0,THREAD_TAR(r3)

	/* Event based branch registers */
	mfspr	r0, SPRN_BESCR
	std	r0, THREAD_BESCR(r3)
	mfspr	r0, SPRN_EBBHR
	std	r0, THREAD_EBBHR(r3)
	mfspr	r0, SPRN_EBBRR
	std	r0, THREAD_EBBRR(r3)

	/* PMU registers made user read/(write) by EBB */
	mfspr	r0, SPRN_SIAR
	std	r0, THREAD_SIAR(r3)
	mfspr	r0, SPRN_SDAR
	std	r0, THREAD_SDAR(r3)
	mfspr	r0, SPRN_SIER
	std	r0, THREAD_SIER(r3)
	mfspr	r0, SPRN_MMCR0
	std	r0, THREAD_MMCR0(r3)
	mfspr	r0, SPRN_MMCR2
	std	r0, THREAD_MMCR2(r3)
	mfspr	r0, SPRN_MMCRA
	std	r0, THREAD_MMCRA(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
#endif

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	/*
	 * If we optimise away the clear of the reservation in system
	 * calls because we know the CPU tracks the address of the
	 * reservation, then we need to clear it here to cover the
	 * case that the kernel context switch path has no larx
	 * instructions.
	 */
BEGIN_FTR_SECTION
	ldarx	r6,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S
BEGIN_FTR_SECTION
  BEGIN_FTR_SECTION_NESTED(95)
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
  FTR_SECTION_ELSE_NESTED(95)
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
  ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(MMU_FTR_1T_SEGMENT, 95)
FTR_SECTION_ELSE
	b	2f
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_SLB)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB. No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	std	r7,SLBSHADOW_STACKVSID(r9)	/* Save VSID */
	std	r0,SLBSHADOW_STACKESID(r9)	/* Save ESID */

	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync
2:
#endif /* CONFIG_PPC_BOOK3S */

	CURRENT_THREAD_INFO(r7, r8)	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_FTR_SECTION
	/* Event based branch registers */
	ld	r0, THREAD_BESCR(r4)
	mtspr	SPRN_BESCR, r0
	ld	r0, THREAD_EBBHR(r4)
	mtspr	SPRN_EBBHR, r0
	ld	r0, THREAD_EBBRR(r4)
	mtspr	SPRN_EBBRR, r0

	/* PMU registers made user read/(write) by EBB */
	ld	r0, THREAD_SIAR(r4)
	mtspr	SPRN_SIAR, r0
	ld	r0, THREAD_SDAR(r4)
	mtspr	SPRN_SDAR, r0
	ld	r0, THREAD_SIER(r4)
	mtspr	SPRN_SIER, r0
	ld	r0, THREAD_MMCR0(r4)
	mtspr	SPRN_MMCR0, r0
	ld	r0, THREAD_MMCR2(r4)
	mtspr	SPRN_MMCR2, r0
	ld	r0, THREAD_MMCRA(r4)
	mtspr	SPRN_MMCRA, r0

	ld	r0,THREAD_TAR(r4)
	mtspr	SPRN_TAR,r0
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
#endif

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	lwz	r6,THREAD_DSCR_INHERIT(r4)
	ld	r7,DSCR_DEFAULT@toc(2)
	ld	r0,THREAD_DSCR(r4)
	cmpwi	r6,0
	bne	1f
	ld	r0,0(r7)
1:	cmpd	r0,r25
	beq	2f
	mtspr	SPRN_DSCR,r0
2:
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr
630 | ||
631 | .align 7 | |
632 | _GLOBAL(ret_from_except) | |
633 | ld r11,_TRAP(r1) | |
634 | andi. r0,r11,1 | |
635 | bne .ret_from_except_lite | |
636 | REST_NVGPRS(r1) | |
637 | ||
638 | _GLOBAL(ret_from_except_lite) | |
639 | /* | |
640 | * Disable interrupts so that current_thread_info()->flags | |
641 | * can't change between when we test it and when we return | |
642 | * from the interrupt. | |
643 | */ | |
2d27cfd3 BH |
644 | #ifdef CONFIG_PPC_BOOK3E |
645 | wrteei 0 | |
646 | #else | |
d9ada91a BH |
647 | ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */ |
648 | mtmsrd r10,1 /* Update machine state */ | |
2d27cfd3 | 649 | #endif /* CONFIG_PPC_BOOK3E */ |
9994a338 | 650 | |
9778b696 | 651 | CURRENT_THREAD_INFO(r9, r1) |
9994a338 PM |
652 | ld r3,_MSR(r1) |
653 | ld r4,TI_FLAGS(r9) | |
9994a338 | 654 | andi. r3,r3,MSR_PR |
c58ce2b1 | 655 | beq resume_kernel |
9994a338 PM |
656 | |
657 | /* Check current_thread_info()->flags */ | |
c58ce2b1 TC |
658 | andi. r0,r4,_TIF_USER_WORK_MASK |
659 | beq restore | |
660 | ||
661 | andi. r0,r4,_TIF_NEED_RESCHED | |
662 | beq 1f | |
663 | bl .restore_interrupts | |
5d1c5745 | 664 | SCHEDULE_USER |
c58ce2b1 TC |
665 | b .ret_from_except_lite |
666 | ||
667 | 1: bl .save_nvgprs | |
668 | bl .restore_interrupts | |
669 | addi r3,r1,STACK_FRAME_OVERHEAD | |
670 | bl .do_notify_resume | |
671 | b .ret_from_except | |
672 | ||
resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	CURRENT_THREAD_INFO(r9, r1)
	ld	r8,TI_FLAGS(r9)
	andis.	r8,r8,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	lwz	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	ldx	r0,r6,r4
	stdx	r0,r6,r3
	addi	r6,r6,8
	bdnz	2b

	/* Do real store operation to complete stwu */
	lwz	r5,GPR1(r1)
	std	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	ldarx	r4,0,r5
	andc	r4,r4,r11
	stdcx.	r4,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* Check if we need to preempt */
	andi.	r0,r4,_TIF_NEED_RESCHED
	beq+	restore
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore

	/*
	 * Here we are preempting the current task. We want to make
	 * sure we are soft-disabled first
	 */
	SOFT_DISABLE_INTS(r3,r4)
1:	bl	.preempt_schedule_irq

	/* Re-test flags and eventually loop */
	CURRENT_THREAD_INFO(r9, r1)
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b

	/*
	 * arch_local_irq_restore() from preempt_schedule_irq above may
	 * enable hard interrupts, but we really should disable them again
	 * before we return from the interrupt so that we don't get
	 * interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	ld	r10,PACAKMSR(r13) /* Get kernel MSR without EE */
	mtmsrd	r10,1		  /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
#endif /* CONFIG_PREEMPT */

	.globl	fast_exc_return_irq
fast_exc_return_irq:
restore:
	/*
	 * This is the main kernel exit path. First we check if we
	 * are about to re-enable interrupts
	 */
	ld	r5,SOFTE(r1)
	lbz	r6,PACASOFTIRQEN(r13)
	cmpwi	cr0,r5,0
	beq	restore_irq_off

	/* We are enabling, were we already enabled ? Yes, just return */
	cmpwi	cr0,r6,1
	beq	cr0,do_restore

	/*
	 * We are about to soft-enable interrupts (we are hard disabled
	 * at this point). We check if there's anything that needs to
	 * be replayed first.
	 */
	lbz	r0,PACAIRQHAPPENED(r13)
	cmpwi	cr0,r0,0
	bne-	restore_check_irq_replay

	/*
	 * Get here when nothing happened while soft-disabled, just
	 * soft-enable and move-on. We will hard-enable as a side
	 * effect of rfi
	 */
restore_no_replay:
	TRACE_ENABLE_INTS
	li	r0,1
	stb	r0,PACASOFTIRQEN(r13)

	/*
	 * Final return path. BookE is handled in a different file
	 */
do_restore:
#ifdef CONFIG_PPC_BOOK3E
	b	.exception_return_book3e
#else
	/*
	 * Clear the reservation. If we know the CPU tracks the address of
	 * the reservation then we can potentially save some cycles and use
	 * a larx. On POWER6 and POWER7 this is significantly faster.
	 */
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	/*
	 * Some code path such as load_up_fpu or altivec return directly
	 * here. They run entirely hard disabled and do not alter the
	 * interrupt state. They also don't use lwarx/stwcx. and thus
	 * are known not to leave dangling reservations.
	 */
	.globl	fast_exception_return
fast_exception_return:
	ld	r3,_MSR(r1)
	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	/*
	 * Clear RI before restoring r13. If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	ld	r4,PACAKMSR(r13) /* Get kernel MSR without EE */
	andc	r4,r4,r0	 /* r0 contains MSR_RI here */
	mtmsrd	r4,1

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* TM debug */
	std	r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */
#endif
	/*
	 * r13 is our per cpu area; only restore it if we are returning
	 * to userspace, since the value stored in the stack frame may
	 * belong to another CPU.
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r2, r4)
	RESTORE_PPR(r2, r4)
	REST_GPR(13, r1)
1:
	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

861 | ||
2d27cfd3 BH |
862 | #endif /* CONFIG_PPC_BOOK3E */ |
863 | ||
7c0482e3 BH |
864 | /* |
865 | * We are returning to a context with interrupts soft disabled. | |
866 | * | |
867 | * However, we may also about to hard enable, so we need to | |
868 | * make sure that in this case, we also clear PACA_IRQ_HARD_DIS | |
869 | * or that bit can get out of sync and bad things will happen | |
870 | */ | |
871 | restore_irq_off: | |
872 | ld r3,_MSR(r1) | |
873 | lbz r7,PACAIRQHAPPENED(r13) | |
874 | andi. r0,r3,MSR_EE | |
875 | beq 1f | |
876 | rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS | |
877 | stb r7,PACAIRQHAPPENED(r13) | |
878 | 1: li r0,0 | |
879 | stb r0,PACASOFTIRQEN(r13); | |
880 | TRACE_DISABLE_INTS | |
881 | b do_restore | |
882 | ||
7230c564 BH |
883 | /* |
884 | * Something did happen, check if a re-emit is needed | |
885 | * (this also clears paca->irq_happened) | |
886 | */ | |
887 | restore_check_irq_replay: | |
888 | /* XXX: We could implement a fast path here where we check | |
889 | * for irq_happened being just 0x01, in which case we can | |
890 | * clear it and return. That means that we would potentially | |
891 | * miss a decrementer having wrapped all the way around. | |
892 | * | |
893 | * Still, this might be useful for things like hash_page | |
894 | */ | |
895 | bl .__check_irq_replay | |
896 | cmpwi cr0,r3,0 | |
897 | beq restore_no_replay | |
898 | ||
899 | /* | |
900 | * We need to re-emit an interrupt. We do so by re-using our | |
901 | * existing exception frame. We first change the trap value, | |
902 | * but we need to ensure we preserve the low nibble of it | |
903 | */ | |
904 | ld r4,_TRAP(r1) | |
905 | clrldi r4,r4,60 | |
906 | or r4,r4,r3 | |
907 | std r4,_TRAP(r1) | |
908 | ||
909 | /* | |
910 | * Then find the right handler and call it. Interrupts are | |
911 | * still soft-disabled and we keep them that way. | |
912 | */ | |
913 | cmpwi cr0,r3,0x500 | |
914 | bne 1f | |
915 | addi r3,r1,STACK_FRAME_OVERHEAD; | |
916 | bl .do_IRQ | |
917 | b .ret_from_except | |
918 | 1: cmpwi cr0,r3,0x900 | |
919 | bne 1f | |
920 | addi r3,r1,STACK_FRAME_OVERHEAD; | |
921 | bl .timer_interrupt | |
922 | b .ret_from_except | |
fe9e1d54 IM |
923 | #ifdef CONFIG_PPC_DOORBELL |
924 | 1: | |
7230c564 | 925 | #ifdef CONFIG_PPC_BOOK3E |
fe9e1d54 IM |
926 | cmpwi cr0,r3,0x280 |
927 | #else | |
928 | BEGIN_FTR_SECTION | |
929 | cmpwi cr0,r3,0xe80 | |
930 | FTR_SECTION_ELSE | |
931 | cmpwi cr0,r3,0xa00 | |
932 | ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE) | |
933 | #endif /* CONFIG_PPC_BOOK3E */ | |
7230c564 BH |
934 | bne 1f |
935 | addi r3,r1,STACK_FRAME_OVERHEAD; | |
936 | bl .doorbell_exception | |
937 | b .ret_from_except | |
fe9e1d54 | 938 | #endif /* CONFIG_PPC_DOORBELL */ |
7230c564 BH |
939 | 1: b .ret_from_except /* What else to do here ? */ |
940 | ||
9994a338 PM |
941 | unrecov_restore: |
942 | addi r3,r1,STACK_FRAME_OVERHEAD | |
943 | bl .unrecoverable_exception | |
944 | b unrecov_restore | |
945 | ||
946 | #ifdef CONFIG_PPC_RTAS | |
947 | /* | |
948 | * On CHRP, the Run-Time Abstraction Services (RTAS) have to be | |
949 | * called with the MMU off. | |
950 | * | |
951 | * In addition, we need to be in 32b mode, at least for now. | |
952 | * | |
953 | * Note: r3 is an input parameter to rtas, so don't trash it... | |
954 | */ | |
955 | _GLOBAL(enter_rtas) | |
956 | mflr r0 | |
957 | std r0,16(r1) | |
958 | stdu r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */ | |
959 | ||
960 | /* Because RTAS is running in 32b mode, it clobbers the high order half | |
961 | * of all registers that it saves. We therefore save those registers | |
962 | * RTAS might touch to the stack. (r0, r3-r13 are caller saved) | |
963 | */ | |
964 | SAVE_GPR(2, r1) /* Save the TOC */ | |
965 | SAVE_GPR(13, r1) /* Save paca */ | |
966 | SAVE_8GPRS(14, r1) /* Save the non-volatiles */ | |
967 | SAVE_10GPRS(22, r1) /* ditto */ | |
968 | ||
969 | mfcr r4 | |
970 | std r4,_CCR(r1) | |
971 | mfctr r5 | |
972 | std r5,_CTR(r1) | |
973 | mfspr r6,SPRN_XER | |
974 | std r6,_XER(r1) | |
975 | mfdar r7 | |
976 | std r7,_DAR(r1) | |
977 | mfdsisr r8 | |
978 | std r8,_DSISR(r1) | |
9994a338 | 979 | |
9fe901d1 MK |
980 | /* Temporary workaround to clear CR until RTAS can be modified to |
981 | * ignore all bits. | |
982 | */ | |
983 | li r0,0 | |
984 | mtcr r0 | |
985 | ||
007d88d0 | 986 | #ifdef CONFIG_BUG |
9994a338 PM |
987 | /* There is no way it is acceptable to get here with interrupts enabled, |
988 | * check it with the asm equivalent of WARN_ON | |
989 | */ | |
d04c56f7 | 990 | lbz r0,PACASOFTIRQEN(r13) |
9994a338 | 991 | 1: tdnei r0,0 |
007d88d0 DW |
992 | EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING |
993 | #endif | |
994 | ||
d04c56f7 PM |
995 | /* Hard-disable interrupts */ |
996 | mfmsr r6 | |
997 | rldicl r7,r6,48,1 | |
998 | rotldi r7,r7,16 | |
999 | mtmsrd r7,1 | |
1000 | ||
9994a338 PM |
1001 | /* Unfortunately, the stack pointer and the MSR are also clobbered, |
1002 | * so they are saved in the PACA which allows us to restore | |
1003 | * our original state after RTAS returns. | |
1004 | */ | |
1005 | std r1,PACAR1(r13) | |
1006 | std r6,PACASAVEDMSR(r13) | |
1007 | ||
1008 | /* Setup our real return addr */ | |
e58c3495 DG |
1009 | LOAD_REG_ADDR(r4,.rtas_return_loc) |
1010 | clrldi r4,r4,2 /* convert to realmode address */ | |
9994a338 PM |
1011 | mtlr r4 |
1012 | ||
1013 | li r0,0 | |
1014 | ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI | |
1015 | andc r0,r6,r0 | |
1016 | ||
1017 | li r9,1 | |
1018 | rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG) | |
44c9f3cc | 1019 | ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI |
9994a338 | 1020 | andc r6,r0,r9 |
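	/*
	 * At this point r0 holds the current MSR with EE/SE/BE/RI
	 * cleared (used for the transition below), and r6 additionally
	 * drops SF, IR/DR, FP and the FE bits: the MSR that RTAS itself
	 * will run with, i.e. 32-bit real mode.
	 */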
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	GET_PACA(r4)
	clrldi	r4,r4,2		/* convert to realmode address */

	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)	/* get &.rtas_restore_regs */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

	.align	3
1:	.llong	.rtas_restore_regs

_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	GET_PACA(r13)

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */
1086 | ||
9994a338 PM |
1087 | _GLOBAL(enter_prom) |
1088 | mflr r0 | |
1089 | std r0,16(r1) | |
1090 | stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */ | |
1091 | ||
1092 | /* Because PROM is running in 32b mode, it clobbers the high order half | |
1093 | * of all registers that it saves. We therefore save those registers | |
1094 | * PROM might touch to the stack. (r0, r3-r13 are caller saved) | |
1095 | */ | |
6c171994 | 1096 | SAVE_GPR(2, r1) |
9994a338 PM |
1097 | SAVE_GPR(13, r1) |
1098 | SAVE_8GPRS(14, r1) | |
1099 | SAVE_10GPRS(22, r1) | |
6c171994 | 1100 | mfcr r10 |
9994a338 | 1101 | mfmsr r11 |
6c171994 | 1102 | std r10,_CCR(r1) |
9994a338 PM |
1103 | std r11,_MSR(r1) |
1104 | ||
1105 | /* Get the PROM entrypoint */ | |
6c171994 | 1106 | mtlr r4 |
9994a338 PM |
1107 | |
1108 | /* Switch MSR to 32 bits mode | |
1109 | */ | |
2d27cfd3 BH |
1110 | #ifdef CONFIG_PPC_BOOK3E |
1111 | rlwinm r11,r11,0,1,31 | |
1112 | mtmsr r11 | |
1113 | #else /* CONFIG_PPC_BOOK3E */ | |
9994a338 PM |
1114 | mfmsr r11 |
1115 | li r12,1 | |
1116 | rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG) | |
1117 | andc r11,r11,r12 | |
1118 | li r12,1 | |
1119 | rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG) | |
1120 | andc r11,r11,r12 | |
1121 | mtmsrd r11 | |
2d27cfd3 | 1122 | #endif /* CONFIG_PPC_BOOK3E */ |
9994a338 PM |
1123 | isync |
1124 | ||
6c171994 | 1125 | /* Enter PROM here... */ |
9994a338 PM |
1126 | blrl |
1127 | ||
1128 | /* Just make sure that r1 top 32 bits didn't get | |
1129 | * corrupt by OF | |
1130 | */ | |
1131 | rldicl r1,r1,0,32 | |
1132 | ||
1133 | /* Restore the MSR (back to 64 bits) */ | |
1134 | ld r0,_MSR(r1) | |
6c171994 | 1135 | MTMSRD(r0) |
9994a338 PM |
1136 | isync |
1137 | ||
1138 | /* Restore other registers */ | |
1139 | REST_GPR(2, r1) | |
1140 | REST_GPR(13, r1) | |
1141 | REST_8GPRS(14, r1) | |
1142 | REST_10GPRS(22, r1) | |
1143 | ld r4,_CCR(r1) | |
1144 | mtcr r4 | |
9994a338 PM |
1145 | |
1146 | addi r1,r1,PROM_FRAME_SIZE | |
1147 | ld r0,16(r1) | |
1148 | mtlr r0 | |
1149 | blr | |
4e491d14 | 1150 | |
606576ce | 1151 | #ifdef CONFIG_FUNCTION_TRACER |
4e491d14 SR |
1152 | #ifdef CONFIG_DYNAMIC_FTRACE |
1153 | _GLOBAL(mcount) | |
1154 | _GLOBAL(_mcount) | |
4e491d14 SR |
1155 | blr |
1156 | ||
1157 | _GLOBAL(ftrace_caller) | |
1158 | /* Taken from output of objdump from lib64/glibc */ | |
1159 | mflr r3 | |
1160 | ld r11, 0(r1) | |
1161 | stdu r1, -112(r1) | |
1162 | std r3, 128(r1) | |
1163 | ld r4, 16(r11) | |
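	/*
	 * Per the 64-bit ABI, 0(r1) is the back chain to the caller's
	 * frame and offset 16 of a frame is the LR save slot, so after
	 * the two loads above r3 holds the traced function's return
	 * address (the callsite) and r4 holds its caller's return
	 * address (the parent ip), the two values the tracer expects.
	 */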
	subi	r3, r3, MCOUNT_INSN_SIZE
	.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr
#else
_GLOBAL(mcount)
	blr

_GLOBAL(_mcount)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5,ftrace_trace_function)
	ld	r5,0(r5)
	ld	r5,0(r5)
	mtctr	r5
	bctrl
	nop


#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	ld	r4, 128(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* get the parent address */
	ld	r11, 112(r1)
	addi	r3, r11, 16

	bl	.prepare_ftrace_return
	nop

	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
	blr

_GLOBAL(return_to_handler)
	/* need to save return values */
	std	r4,  -24(r1)
	std	r3,  -16(r1)
	std	r31, -8(r1)
	mr	r31, r1
	stdu	r1, -112(r1)

	bl	.ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	ld	r1, 0(r1)
	ld	r4,  -24(r1)
	ld	r3,  -16(r1)
	ld	r31, -8(r1)

	/* Jump back to real return address */
	blr

_GLOBAL(mod_return_to_handler)
	/* need to save return values */
	std	r4,  -32(r1)
	std	r3,  -24(r1)
	/* save TOC */
	std	r2,  -16(r1)
	std	r31, -8(r1)
	mr	r31, r1
	stdu	r1, -112(r1)

	/*
	 * We are in a module using the module's TOC.
	 * Switch to our TOC to run inside the core kernel.
	 */
	ld	r2, PACATOC(r13)

	bl	.ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	ld	r1, 0(r1)
	ld	r4,  -32(r1)
	ld	r3,  -24(r1)
	ld	r2,  -16(r1)
	ld	r31, -8(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */