/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *   Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 *   Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
#include <asm/tm.h>
#include <asm/ppc-opcode.h>
#include <asm/export.h>

/*
 * System calls.
 */
	.section	".toc","aw"
SYS_CALL_TABLE:
	.tc sys_call_table[TC],sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

	.globl system_call_common
system_call_common:
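	/*
	 * A sketch of the register conventions on entry, as set up by the
	 * exception prologue (see exceptions-64s.S): r0 holds the syscall
	 * number, r3-r8 the arguments, r9 the caller's r13, r11 the saved
	 * SRR0 (NIP), r12 the saved SRR1 (MSR), and r13 the PACA.
	 */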
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
	bne	.Ltabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	beq	2f			/* if from kernel mode */
	ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
2:	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	mfcr	r2
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	std	r9,GPR13(r1)
	mflr	r10
	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r2,r11,28,(63-28)
	li	r11,0xc01
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r3,ORIG_GPR3(r1)
	std	r2,_CCR(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
	beq	33f
	/* if from user, see if there are any DTL entries to process */
	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
	addi	r10,r10,LPPACA_DTLIDX
	LDX_BE	r10,0,r10		/* get log write index */
	cmpd	cr1,r11,r10
	beq+	cr1,33f
	bl	accumulate_stolen_time
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */

	/*
	 * A syscall should always be called with interrupts enabled
	 * so we just unconditionally hard-enable here. When some kind
	 * of irq tracing is used, we additionally check that the
	 * condition is correct.
	 */
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
	lbz	r10,PACASOFTIRQEN(r13)
	xori	r10,r10,1
1:	tdnei	r10,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	li	r11,MSR_RI
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

system_call:			/* label this so stack traces look sane */
/* We do need to set SOFTE in the stack frame, or the return
 * from interrupt will be painful.
 */
	li	r10,1
	std	r10,SOFTE(r1)

	CURRENT_THREAD_INFO(r11, r1)
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_DOTRACE
	bne	.Lsyscall_dotrace	/* does not return */
	cmpldi	0,r0,NR_syscalls
	bge-	.Lsyscall_enosys

.Lsyscall:
/*
 * Need to vector to the 32-bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
	ld	r11,SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
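	/*
	 * Each sys_call_table entry is 16 bytes: the native handler followed
	 * by the compat one, hence the shift by 4 below and the +8 offset
	 * applied above for 32-bit tasks.
	 */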
	slwi	r0,r0,4
	ldx	r12,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r12
	bctrl			/* Call handler */

.Lsyscall_exit:
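	/* r3 holds the handler's return value; stash it in pt_regs->result. */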
	std	r3,RESULT(r1)
	CURRENT_THREAD_INFO(r12, r1)

	ld	r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
#endif

/*
 * This is a few instructions into the actual syscall exit path (which actually
 * starts at .Lsyscall_exit) to cater to kprobe blacklisting and to reduce the
 * number of visible symbols for profiling purposes.
 *
 * We can probe from system_call until this point as MSR_RI is set. But once it
 * is cleared below, we won't be able to take a trap.
 *
 * This is blacklisted from kprobes further below with _ASM_NOKPROBE_SYMBOL().
 */
system_call_exit:
	/*
	 * Disable interrupts so current_thread_info()->flags can't change,
	 * and so that we don't get interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	/*
	 * For performance reasons we clear RI at the same time that we
	 * clear EE. We only need to clear RI just before we restore r13
	 * below, but batching it with EE saves us one expensive mtmsrd call.
	 * We have to be careful to restore RI if we branch anywhere from
	 * here (e.g. syscall_exit_work).
	 */
	li	r11,0
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

	ld	r9,TI_FLAGS(r12)
	li	r11,-MAX_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	.Lsyscall_exit_work

	/* If MSR_FP and MSR_VEC are set in user msr, then no need to restore */
	li	r7,MSR_FP
#ifdef CONFIG_ALTIVEC
	oris	r7,r7,MSR_VEC@h
#endif
	and	r0,r8,r7
	cmpd	r0,r7
	bne	.Lsyscall_restore_math
.Lsyscall_restore_math_cont:

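	/*
	 * Return values in [-MAX_ERRNO, -1] denote an error; the unsigned
	 * compare below catches them so CR0.SO can be set and r3 negated.
	 */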
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	.Lsyscall_error
.Lsyscall_error_cont:
	ld	r7,_NIP(r1)
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)

	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r13, r11, r12)

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI
	b	.	/* prevent speculative execution */

.Lsyscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	.Lsyscall_error_cont

.Lsyscall_restore_math:
	/*
	 * Some initial tests from restore_math to avoid the heavyweight
	 * C code entry and MSR manipulations.
	 */
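	/*
	 * If a transaction is active, always take the slow path: the
	 * THREAD_LOAD_FP/VEC shortcuts below presumably cannot be trusted
	 * while there is checkpointed state to consider.
	 */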
	LOAD_REG_IMMEDIATE(r0, MSR_TS_MASK)
	and.	r0,r0,r8
	bne	1f

	ld	r7,PACACURRENT(r13)
	lbz	r0,THREAD+THREAD_LOAD_FP(r7)
#ifdef CONFIG_ALTIVEC
	lbz	r6,THREAD+THREAD_LOAD_VEC(r7)
	add	r0,r0,r6
#endif
	cmpdi	r0,0
	beq	.Lsyscall_restore_math_cont

1:	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_BOOK3S
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Restore RI */
#endif
	bl	restore_math
#ifdef CONFIG_PPC_BOOK3S
	li	r11,0
	mtmsrd	r11,1
#endif
	/* Restore volatiles, reload MSR from updated one */
	ld	r8,_MSR(r1)
	ld	r3,RESULT(r1)
	li	r11,-MAX_ERRNO
	b	.Lsyscall_restore_math_cont

/* Traced system call support */
.Lsyscall_dotrace:
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter

	/*
	 * We use the return value of do_syscall_trace_enter() as the syscall
	 * number. If the syscall was rejected for any reason do_syscall_trace_enter()
	 * returns an invalid syscall number and the test below against
	 * NR_syscalls will fail.
	 */
	mr	r0,r3

	/* Restore argument registers just clobbered and/or possibly changed. */
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)

	/* Repopulate r9 and r10 for the syscall path */
	addi	r9,r1,STACK_FRAME_OVERHEAD
	CURRENT_THREAD_INFO(r10, r1)
	ld	r10,TI_FLAGS(r10)

	cmpldi	r0,NR_syscalls
	blt+	.Lsyscall

	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
	b	.Lsyscall_exit


.Lsyscall_enosys:
	li	r3,-ENOSYS
	b	.Lsyscall_exit

.Lsyscall_exit_work:
#ifdef CONFIG_PPC_BOOK3S
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Restore RI */
#endif
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	   If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -MAX_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set. */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
BEGIN_FTR_SECTION
	lis	r3,INIT_PPR@highest	/* Set thread.ppr = 3 */
	ld	r10,PACACURRENT(r13)
	sldi	r3,r3,32	/* bits 11-13 are used for ppr */
	std	r3,TASKTHREADPPR(r10)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
	beq	ret_from_except_lite

	/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	li	r10,MSR_RI
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
.Ltabort_syscall:
	/* Firstly we need to enable TM in the kernel */
	mfmsr	r10
	li	r9, 1
	rldimi	r10, r9, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r10, 0

	/* tabort, this dooms the transaction, nothing else */
	li	r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
	TABORT(R9)

	/*
	 * Return directly to userspace. We have corrupted user register state,
	 * but userspace will never see that register state. Execution will
	 * resume after the tbegin of the aborted transaction with the
	 * checkpointed register state.
	 */
	li	r9, MSR_RI
	andc	r10, r10, r9
	mtmsrd	r10, 1
	mtspr	SPRN_SRR0, r11
	mtspr	SPRN_SRR1, r12

	rfid
	b	.	/* prevent speculative execution */
#endif
_ASM_NOKPROBE_SYMBOL(system_call_common);
_ASM_NOKPROBE_SYMBOL(system_call_exit);

/* Save non-volatile GPRs, if not already saved. */
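/* The low bit of _TRAP flags a frame whose non-volatile GPRs have not been
 * saved yet; it is cleared below once they are (cf. FULL_REGS in ptrace.h).
 */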
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace. Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code. Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	save_nvgprs
	bl	sys_fork
	b	.Lsyscall_exit

_GLOBAL(ppc_vfork)
	bl	save_nvgprs
	bl	sys_vfork
	b	.Lsyscall_exit

_GLOBAL(ppc_clone)
	bl	save_nvgprs
	bl	sys_clone
	b	.Lsyscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	save_nvgprs
	bl	compat_sys_swapcontext
	b	.Lsyscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	save_nvgprs
	bl	sys_swapcontext
	b	.Lsyscall_exit

_GLOBAL(ppc_switch_endian)
	bl	save_nvgprs
	bl	sys_switch_endian
	b	.Lsyscall_exit

_GLOBAL(ret_from_fork)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	.Lsyscall_exit

_GLOBAL(ret_from_kernel_thread)
	bl	schedule_tail
	REST_NVGPRS(r1)
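	/*
	 * copy_thread() parked the thread function in r14 and its argument
	 * in r15 (non-volatile, so they survive schedule_tail). Under the
	 * ELFv2 ABI the callee also expects its own entry address in r12.
	 */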
	mtlr	r14
	mr	r3,r15
#ifdef PPC64_ELF_ABI_v2
	mr	r12,r14
#endif
	blrl
	li	r3,0
	b	.Lsyscall_exit

/*
 * This routine switches between two different tasks. The process
 * state of one is saved on its kernel stack. Then the state
 * of the other is restored from its kernel stack. The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path. If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	std	r0,_NIP(r1)	/* Return to switch caller */
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

	/*
	 * On SMP kernels, care must be taken because a task may be
	 * scheduled off CPUx and on to CPUy. Memory ordering must be
	 * considered.
	 *
	 * Cacheable stores on CPUx will be visible when the task is
	 * scheduled on CPUy by virtue of the core scheduler barriers
	 * (see "Notes on Program-Order guarantees on SMP systems." in
	 * kernel/sched/core.c).
	 *
	 * Uncacheable stores in the case of involuntary preemption must
	 * be taken care of. The smp_mb__before_spin_lock() in __schedule()
	 * is implemented as hwsync on powerpc, which orders MMIO too. So
	 * long as there is an hwsync in the context switch path, it will
	 * be executed on the source CPU after the task has performed
	 * all MMIO ops on that CPU, and on the destination CPU before the
	 * task performs any MMIO ops there.
	 */

	/*
	 * The kernel context switch path must contain a spin_lock,
	 * which contains larx/stcx, which will clear any reservation
	 * of the task being switched.
	 */
#ifdef CONFIG_PPC_BOOK3S
/* Cancel all explicit user streams as they will have no use after context
 * switch and will stop the HW from creating streams itself
 */
	DCBT_STOP_ALL_STREAM_IDS(r6)
#endif

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_STD_MMU_64
BEGIN_MMU_FTR_SECTION
	b	2f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
FTR_SECTION_ELSE
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB. No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	li	r12,SLBSHADOW_STACKVSID
	STDX_BE	r7,r12,r9			/* Save VSID */
	li	r12,SLBSHADOW_STACKESID
	STDX_BE	r0,r12,r9			/* Save ESID */

	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync
2:
#endif /* CONFIG_PPC_STD_MMU_64 */

	CURRENT_THREAD_INFO(r7, r8)	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	/*
	 * PMU interrupts in radix may come in here. They will use r1, not
	 * PACAKSAVE, so this stack switch will not cause a problem. They
	 * will store to the process stack, which may then be migrated to
	 * another CPU. However the rq lock release on this CPU paired with
	 * the rq lock acquire on the new CPU before the stack becomes
	 * active on the new CPU, will order those stores.
	 */
	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */

	CURRENT_THREAD_INFO(r9, r1)
	ld	r3,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3E
	ld	r10,PACACURRENT(r13)
#endif /* CONFIG_PPC_BOOK3E */
	ld	r4,TI_FLAGS(r9)
	andi.	r3,r3,MSR_PR
	beq	resume_kernel
#ifdef CONFIG_PPC_BOOK3E
	lwz	r3,(THREAD+THREAD_DBCR0)(r10)
#endif /* CONFIG_PPC_BOOK3E */

	/* Check current_thread_info()->flags */
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	1f
#ifdef CONFIG_PPC_BOOK3E
	/*
	 * Check to see if the dbcr0 register is set up to debug.
	 * Use the internal debug mode bit to do this.
	 */
	andis.	r0,r3,DBCR0_IDM@h
	beq	restore
	mfmsr	r0
	rlwinm	r0,r0,0,~MSR_DE	/* Clear MSR.DE */
	mtmsr	r0
	mtspr	SPRN_DBCR0,r3
	li	r10, -1
	mtspr	SPRN_DBSR,r10
	b	restore
#else
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_math
	b	restore
#endif
1:	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	2f
	bl	restore_interrupts
	SCHEDULE_USER
	b	ret_from_except_lite
2:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	andi.	r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
	bne	3f		/* only restore TM if nothing else to do */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_tm_state
	b	restore
3:
#endif
	bl	save_nvgprs
	/*
	 * Use a non-volatile GPR to save and restore our thread_info flags
	 * across the call to restore_interrupts.
	 */
	mr	r30,r4
	bl	restore_interrupts
	mr	r4,r30
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_notify_resume
	b	ret_from_except

resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	andis.	r8,r4,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

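	/*
	 * An emulated stdu (e.g. one displaced by a kprobe) deferred a stack
	 * pointer update. Build a duplicate exception frame below the new
	 * stack pointer and complete the store there, so that we return from
	 * a frame that sits on the updated stack.
	 */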
	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	ld	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src: current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	ldx	r0,r6,r4
	stdx	r0,r6,r3
	addi	r6,r6,8
	bdnz	2b

	/* Do real store operation to complete stdu */
	ld	r5,GPR1(r1)
	std	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	ldarx	r4,0,r5
	andc	r4,r4,r11
	stdcx.	r4,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* Check if we need to preempt */
	andi.	r0,r4,_TIF_NEED_RESCHED
	beq+	restore
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore

	/*
	 * Here we are preempting the current task. We want to make
	 * sure we are soft-disabled first and reconcile irq state.
	 */
	RECONCILE_IRQ_STATE(r3,r4)
1:	bl	preempt_schedule_irq

	/* Re-test flags and loop again if necessary */
	CURRENT_THREAD_INFO(r9, r1)
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b

	/*
	 * arch_local_irq_restore() from preempt_schedule_irq above may
	 * enable hard interrupts, but we really should disable interrupts
	 * when we return from the interrupt, so that we don't get
	 * interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
#endif /* CONFIG_PREEMPT */


	.globl	fast_exc_return_irq
fast_exc_return_irq:
restore:
	/*
	 * This is the main kernel exit path. First we check if we
	 * are about to re-enable interrupts.
	 */
	ld	r5,SOFTE(r1)
	lbz	r6,PACASOFTIRQEN(r13)
	cmpwi	cr0,r5,0
	beq	restore_irq_off

	/* We are enabling, were we already enabled? Yes, just return */
	cmpwi	cr0,r6,1
	beq	cr0,do_restore

	/*
	 * We are about to soft-enable interrupts (we are hard disabled
	 * at this point). We check if there's anything that needs to
	 * be replayed first.
	 */
	lbz	r0,PACAIRQHAPPENED(r13)
	cmpwi	cr0,r0,0
	bne-	restore_check_irq_replay

	/*
	 * Get here when nothing happened while soft-disabled, just
	 * soft-enable and move on. We will hard-enable as a side
	 * effect of rfi.
	 */
restore_no_replay:
	TRACE_ENABLE_INTS
	li	r0,1
	stb	r0,PACASOFTIRQEN(r13);

	/*
	 * Final return path. BookE is handled in a different file.
	 */
do_restore:
#ifdef CONFIG_PPC_BOOK3E
	b	exception_return_book3e
#else
	/*
	 * Clear the reservation. If we know the CPU tracks the address of
	 * the reservation then we can potentially save some cycles and use
	 * a larx. On POWER6 and POWER7 this is significantly faster.
	 */
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	/*
	 * Some code paths, such as load_up_fpu or altivec, return directly
	 * here. They run entirely hard disabled and do not alter the
	 * interrupt state. They also don't use lwarx/stwcx. and thus
	 * are known not to leave dangling reservations.
	 */
	.globl	fast_exception_return
fast_exception_return:
	ld	r3,_MSR(r1)
	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	/* Load PPR from thread struct before we clear MSR:RI */
BEGIN_FTR_SECTION
	ld	r2,PACACURRENT(r13)
	ld	r2,TASKTHREADPPR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * Clear RI before restoring r13. If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	li	r4,0
	mtmsrd	r4,1

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* TM debug */
	std	r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */
#endif
	/*
	 * r13 is our per cpu area; only restore it if we are returning to
	 * userspace, since the value stored in the stack frame may belong
	 * to another CPU.
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
BEGIN_FTR_SECTION
	mtspr	SPRN_PPR,r2	/* Restore PPR */
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
	REST_GPR(13, r1)
1:
	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

#endif /* CONFIG_PPC_BOOK3E */

/*
 * We are returning to a context with interrupts soft disabled.
 *
 * However, we may also be about to hard enable, so we need to
 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS,
 * or that bit can get out of sync and bad things will happen.
 */
restore_irq_off:
	ld	r3,_MSR(r1)
	lbz	r7,PACAIRQHAPPENED(r13)
	andi.	r0,r3,MSR_EE
	beq	1f
	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
	stb	r7,PACAIRQHAPPENED(r13)
1:	li	r0,0
	stb	r0,PACASOFTIRQEN(r13);
	TRACE_DISABLE_INTS
	b	do_restore

/*
 * Something did happen, check if a re-emit is needed
 * (this also clears paca->irq_happened)
 */
restore_check_irq_replay:
	/* XXX: We could implement a fast path here where we check
	 * for irq_happened being just 0x01, in which case we can
	 * clear it and return. That means that we would potentially
	 * miss a decrementer having wrapped all the way around.
	 *
	 * Still, this might be useful for things like hash_page
	 */
	bl	__check_irq_replay
	cmpwi	cr0,r3,0
	beq	restore_no_replay

	/*
	 * We need to re-emit an interrupt. We do so by re-using our
	 * existing exception frame. We first change the trap value,
	 * but we need to ensure we preserve the low nibble of it
	 */
	ld	r4,_TRAP(r1)
	clrldi	r4,r4,60
	or	r4,r4,r3
	std	r4,_TRAP(r1)

	/*
	 * Then find the right handler and call it. Interrupts are
	 * still soft-disabled and we keep them that way.
	 */
	cmpwi	cr0,r3,0x500
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	do_IRQ
	b	ret_from_except
1:	cmpwi	cr0,r3,0xe60
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	handle_hmi_exception
	b	ret_from_except
1:	cmpwi	cr0,r3,0x900
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	timer_interrupt
	b	ret_from_except
#ifdef CONFIG_PPC_DOORBELL
1:
#ifdef CONFIG_PPC_BOOK3E
	cmpwi	cr0,r3,0x280
#else
BEGIN_FTR_SECTION
	cmpwi	cr0,r3,0xe80
FTR_SECTION_ELSE
	cmpwi	cr0,r3,0xa00
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
#endif /* CONFIG_PPC_BOOK3E */
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	doorbell_exception
	b	ret_from_except
#endif /* CONFIG_PPC_DOORBELL */
1:	b	ret_from_except /* What else to do here? */

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves. We therefore save those registers
	 * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
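	/* Rotate MSR_EE up to the MSB, mask it off, then rotate it back. */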
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
	andc	r6,r0,r9
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

rtas_return_loc:
	FIXUP_ENDIAN

	/* relocation is off at this point */
	GET_PACA(r4)
	clrldi	r4,r4,2			/* convert to realmode address */

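	/* Pick up &rtas_restore_regs PC-relatively; relocation is still off here. */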
	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)		/* get &rtas_restore_regs */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

	.align	3
1:	.llong	rtas_restore_regs

rtas_restore_regs:
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	GET_PACA(r13)

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves. We therefore save those registers
	 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r10
	mfmsr	r11
	std	r10,_CCR(r1)
	std	r11,_MSR(r1)

	/* Put PROM address in SRR0 */
	mtsrr0	r4

	/* Setup our trampoline return addr in LR */
	bcl	20,31,$+4
0:	mflr	r4
	addi	r4,r4,(1f - 0b)
	mtlr	r4

	/* Prepare a 32-bit mode big endian MSR */
#ifdef CONFIG_PPC_BOOK3E
	rlwinm	r11,r11,0,1,31
	mtsrr1	r11
	rfi
#else /* CONFIG_PPC_BOOK3E */
	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
	andc	r11,r11,r12
	mtsrr1	r11
	rfid
#endif /* CONFIG_PPC_BOOK3E */

1:	/* Return from OF */
	FIXUP_ENDIAN

	/* Just make sure that r1's top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	MTMSRD(r0)
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr