/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *  Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 *  Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
#include <asm/tm.h>
#include <asm/ppc-opcode.h>
#include <asm/export.h>

/*
 * System calls.
 */
	.section	".toc","aw"
SYS_CALL_TABLE:
	.tc sys_call_table[TC],sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

	.globl system_call_common
system_call_common:
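	/*
	 * Register state on entry (set up by the system call exception
	 * prologue, as the stores below rely on):
	 *   r0       syscall number
	 *   r3-r8    syscall arguments
	 *   r9       caller's r13 (saved so r13 can hold the PACA)
	 *   r11, r12 saved SRR0 (NIP) and SRR1 (MSR)
	 *   r13      PACA pointer
	 *   r1       still the caller's stack pointer
	 */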
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
	bne	.Ltabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	beq	2f			/* if from kernel mode */
	ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
2:	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	mfcr	r2
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	std	r9,GPR13(r1)
	mflr	r10
	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r2,r11,28,(63-28)
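	/*
	 * 0xc00 is the system call vector; the low bit of the trap value
	 * flags that the non-volatile GPRs have not been saved yet
	 * (save_nvgprs clears it once they are).
	 */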
	li	r11,0xc01
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r3,ORIG_GPR3(r1)
	std	r2,_CCR(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
	beq	33f
	/* if from user, see if there are any DTL entries to process */
	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
	addi	r10,r10,LPPACA_DTLIDX
	LDX_BE	r10,0,r10		/* get log write index */
	cmpd	cr1,r11,r10
	beq+	cr1,33f
	bl	accumulate_stolen_time
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */

	/*
	 * A syscall should always be called with interrupts enabled
	 * so we just unconditionally hard-enable here. When some kind
	 * of irq tracing is used, we additionally check that condition
	 * is correct
	 */
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
	lbz	r10,PACASOFTIRQEN(r13)
	xori	r10,r10,1
1:	tdnei	r10,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	li	r11,MSR_RI
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

	/* We do need to set SOFTE in the stack frame or the return
	 * from interrupt will be painful
	 */
	li	r10,1
	std	r10,SOFTE(r1)

	CURRENT_THREAD_INFO(r11, r1)
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_DOTRACE
	bne	.Lsyscall_dotrace		/* does not return */
	cmpldi	0,r0,NR_syscalls
	bge-	.Lsyscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
	ld	r11,SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
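	/*
	 * Each sys_call_table entry is 16 bytes: the native 64-bit handler
	 * followed by the 32-bit/compat one (hence the "addi r11,r11,8"
	 * above), so the syscall number is scaled by 16 here.
	 */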
	slwi	r0,r0,4
	ldx	r12,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r12
	bctrl			/* Call handler */

.Lsyscall_exit:
	std	r3,RESULT(r1)
	CURRENT_THREAD_INFO(r12, r1)

	ld	r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
#endif
	/*
	 * Disable interrupts so current_thread_info()->flags can't change,
	 * and so that we don't get interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	/*
	 * For performance reasons we clear RI the same time that we
	 * clear EE. We only need to clear RI just before we restore r13
	 * below, but batching it with EE saves us one expensive mtmsrd call.
	 * We have to be careful to restore RI if we branch anywhere from
	 * here (eg syscall_exit_work).
	 */
	li	r11,0
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

	ld	r9,TI_FLAGS(r12)
	li	r11,-MAX_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	.Lsyscall_exit_work

	/* If MSR_FP and MSR_VEC are set in user msr, then no need to restore */
	li	r7,MSR_FP
#ifdef CONFIG_ALTIVEC
	oris	r7,r7,MSR_VEC@h
#endif
	and	r0,r8,r7
	cmpd	r0,r7
	bne	.Lsyscall_restore_math
.Lsyscall_restore_math_cont:

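	/*
	 * Return values in the range [-MAX_ERRNO, -1] (i.e. unsigned values
	 * >= -MAX_ERRNO, held in r11) are errors: they get negated and
	 * CR0.SO is set at .Lsyscall_error.
	 */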
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	.Lsyscall_error
.Lsyscall_error_cont:
	ld	r7,_NIP(r1)
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)

	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r13, r11, r12)

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI
	b	.	/* prevent speculative execution */

.Lsyscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	.Lsyscall_error_cont

.Lsyscall_restore_math:
	/*
	 * Some initial tests from restore_math to avoid the heavyweight
	 * C code entry and MSR manipulations.
	 */
	LOAD_REG_IMMEDIATE(r0, MSR_TS_MASK)
	and.	r0,r0,r8
	bne	1f

	ld	r7,PACACURRENT(r13)
	lbz	r0,THREAD+THREAD_LOAD_FP(r7)
#ifdef CONFIG_ALTIVEC
	lbz	r6,THREAD+THREAD_LOAD_VEC(r7)
	add	r0,r0,r6
#endif
	cmpdi	r0,0
	beq	.Lsyscall_restore_math_cont

1:	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_BOOK3S
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Restore RI */
#endif
	bl	restore_math
#ifdef CONFIG_PPC_BOOK3S
	li	r11,0
	mtmsrd	r11,1
#endif
	/* Restore volatiles, reload MSR from updated one */
	ld	r8,_MSR(r1)
	ld	r3,RESULT(r1)
	li	r11,-MAX_ERRNO
	b	.Lsyscall_restore_math_cont

/* Traced system call support */
.Lsyscall_dotrace:
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter

	/*
	 * We use the return value of do_syscall_trace_enter() as the syscall
	 * number. If the syscall was rejected for any reason do_syscall_trace_enter()
	 * returns an invalid syscall number and the test below against
	 * NR_syscalls will fail.
	 */
	mr	r0,r3

	/* Restore argument registers just clobbered and/or possibly changed. */
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)

	/* Repopulate r9 and r10 for the system_call path */
	addi	r9,r1,STACK_FRAME_OVERHEAD
	CURRENT_THREAD_INFO(r10, r1)
	ld	r10,TI_FLAGS(r10)

	cmpldi	r0,NR_syscalls
	blt+	system_call

	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
	b	.Lsyscall_exit


.Lsyscall_enosys:
	li	r3,-ENOSYS
	b	.Lsyscall_exit

.Lsyscall_exit_work:
#ifdef CONFIG_PPC_BOOK3S
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Restore RI */
#endif
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	 If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -MAX_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
BEGIN_FTR_SECTION
	lis	r3,INIT_PPR@highest	/* Set thread.ppr = 3 */
	ld	r10,PACACURRENT(r13)
	sldi	r3,r3,32	/* bits 11-13 are used for ppr */
	std	r3,TASKTHREADPPR(r10)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
	beq	ret_from_except_lite

	/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	li	r10,MSR_RI
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
.Ltabort_syscall:
	/* Firstly we need to enable TM in the kernel */
	mfmsr	r10
	li	r9, 1
	rldimi	r10, r9, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r10, 0

	/* tabort, this dooms the transaction, nothing else */
	li	r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
	TABORT(R9)

	/*
	 * Return directly to userspace. We have corrupted user register state,
	 * but userspace will never see that register state. Execution will
	 * resume after the tbegin of the aborted transaction with the
	 * checkpointed register state.
	 */
	li	r9, MSR_RI
	andc	r10, r10, r9
	mtmsrd	r10, 1
	mtspr	SPRN_SRR0, r11
	mtspr	SPRN_SRR1, r12

	rfid
	b	.	/* prevent speculative execution */
#endif
_ASM_NOKPROBE_SYMBOL(system_call_common);
_ASM_NOKPROBE_SYMBOL(system_call);

/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
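	/*
	 * The low bit of the trap value in the frame is set while the
	 * non-volatile GPRs are not saved; clear it once they are.
	 */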
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr


/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	save_nvgprs
	bl	sys_fork
	b	.Lsyscall_exit

_GLOBAL(ppc_vfork)
	bl	save_nvgprs
	bl	sys_vfork
	b	.Lsyscall_exit

_GLOBAL(ppc_clone)
	bl	save_nvgprs
	bl	sys_clone
	b	.Lsyscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	save_nvgprs
	bl	compat_sys_swapcontext
	b	.Lsyscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	save_nvgprs
	bl	sys_swapcontext
	b	.Lsyscall_exit

_GLOBAL(ppc_switch_endian)
	bl	save_nvgprs
	bl	sys_switch_endian
	b	.Lsyscall_exit

_GLOBAL(ret_from_fork)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	.Lsyscall_exit

_GLOBAL(ret_from_kernel_thread)
	bl	schedule_tail
	REST_NVGPRS(r1)
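	/*
	 * For a kernel thread, copy_thread() left the thread function in
	 * r14 and its argument in r15; under the ELFv2 ABI r12 must also
	 * hold the entry point of the function being called.
	 */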
	mtlr	r14
	mr	r3,r15
#ifdef PPC64_ELF_ABI_v2
	mr	r12,r14
#endif
	blrl
	li	r3,0
	b	.Lsyscall_exit

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	std	r0,_NIP(r1)	/* Return to switch caller */
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

	/*
	 * On SMP kernels, care must be taken because a task may be
	 * scheduled off CPUx and on to CPUy. Memory ordering must be
	 * considered.
	 *
	 * Cacheable stores on CPUx will be visible when the task is
	 * scheduled on CPUy by virtue of the core scheduler barriers
	 * (see "Notes on Program-Order guarantees on SMP systems." in
	 * kernel/sched/core.c).
	 *
	 * Uncacheable stores in the case of involuntary preemption must
	 * be taken care of. The smp_mb__before_spin_lock() in __schedule()
	 * is implemented as hwsync on powerpc, which orders MMIO too. So
	 * long as there is an hwsync in the context switch path, it will
	 * be executed on the source CPU after the task has performed
	 * all MMIO ops on that CPU, and on the destination CPU before the
	 * task performs any MMIO ops there.
	 */

	/*
	 * The kernel context switch path must contain a spin_lock,
	 * which contains larx/stcx, which will clear any reservation
	 * of the task being switched.
	 */
#ifdef CONFIG_PPC_BOOK3S
/* Cancel all explicit user streams as they will have no use after context
 * switch and will stop the HW from creating streams itself
 */
	DCBT_STOP_ALL_STREAM_IDS(r6)
#endif

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_STD_MMU_64
BEGIN_MMU_FTR_SECTION
	b	2f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
FTR_SECTION_ELSE
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	li	r12,SLBSHADOW_STACKVSID
	STDX_BE	r7,r12,r9			/* Save VSID */
	li	r12,SLBSHADOW_STACKESID
	STDX_BE	r0,r12,r9			/* Save ESID */

	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync
2:
#endif /* CONFIG_PPC_STD_MMU_64 */

	CURRENT_THREAD_INFO(r7, r8)  /* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	/*
	 * PMU interrupts in radix may come in here. They will use r1, not
	 * PACAKSAVE, so this stack switch will not cause a problem. They
	 * will store to the process stack, which may then be migrated to
	 * another CPU. However the rq lock release on this CPU paired with
	 * the rq lock acquire on the new CPU before the stack becomes
	 * active on the new CPU, will order those stores.
	 */
	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */

	CURRENT_THREAD_INFO(r9, r1)
	ld	r3,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3E
	ld	r10,PACACURRENT(r13)
#endif /* CONFIG_PPC_BOOK3E */
	ld	r4,TI_FLAGS(r9)
	andi.	r3,r3,MSR_PR
	beq	resume_kernel
#ifdef CONFIG_PPC_BOOK3E
	lwz	r3,(THREAD+THREAD_DBCR0)(r10)
#endif /* CONFIG_PPC_BOOK3E */

	/* Check current_thread_info()->flags */
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	1f
#ifdef CONFIG_PPC_BOOK3E
	/*
	 * Check to see if the dbcr0 register is set up to debug.
	 * Use the internal debug mode bit to do this.
	 */
	andis.	r0,r3,DBCR0_IDM@h
	beq	restore
	mfmsr	r0
	rlwinm	r0,r0,0,~MSR_DE	/* Clear MSR.DE */
	mtmsr	r0
	mtspr	SPRN_DBCR0,r3
	li	r10, -1
	mtspr	SPRN_DBSR,r10
	b	restore
#else
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_math
	b	restore
#endif
1:	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	2f
	bl	restore_interrupts
	SCHEDULE_USER
	b	ret_from_except_lite
2:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	andi.	r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
	bne	3f		/* only restore TM if nothing else to do */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_tm_state
	b	restore
3:
#endif
	bl	save_nvgprs
	/*
	 * Use a non volatile GPR to save and restore our thread_info flags
	 * across the call to restore_interrupts.
	 */
	mr	r30,r4
	bl	restore_interrupts
	mr	r4,r30
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_notify_resume
	b	ret_from_except

resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	andis.	r8,r4,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

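	/*
	 * An earlier emulate_step() of a "stdu r1,-frame(r1)" (e.g. while
	 * single-stepping a kprobed function prologue) updated r1 but
	 * deferred the store. Rebuild the exception frame at the new stack
	 * location, then complete the store of the back-chain pointer.
	 */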
	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	ld	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	ldx	r0,r6,r4
	stdx	r0,r6,r3
	addi	r6,r6,8
	bdnz	2b

	/* Do real store operation to complete stdu */
	ld	r5,GPR1(r1)
	std	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	ldarx	r4,0,r5
	andc	r4,r4,r11
	stdcx.	r4,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* Check if we need to preempt */
	andi.	r0,r4,_TIF_NEED_RESCHED
	beq+	restore
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore

	/*
	 * Here we are preempting the current task. We want to make
	 * sure we are soft-disabled first and reconcile irq state.
	 */
	RECONCILE_IRQ_STATE(r3,r4)
1:	bl	preempt_schedule_irq

	/* Re-test flags and eventually loop */
	CURRENT_THREAD_INFO(r9, r1)
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b

	/*
	 * arch_local_irq_restore() from preempt_schedule_irq above may
	 * enable hard interrupt but we really should disable interrupts
	 * when we return from the interrupt, and so that we don't get
	 * interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
#endif /* CONFIG_PREEMPT */

	.globl	fast_exc_return_irq
fast_exc_return_irq:
restore:
	/*
	 * This is the main kernel exit path. First we check if we
	 * are about to re-enable interrupts
	 */
	ld	r5,SOFTE(r1)
	lbz	r6,PACASOFTIRQEN(r13)
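	/*
	 * r5 = soft-enable state we are returning to (from the frame),
	 * r6 = current soft-enable state in the PACA (1 = enabled).
	 */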
	cmpwi	cr0,r5,0
	beq	restore_irq_off

	/* We are enabling, were we already enabled ? Yes, just return */
	cmpwi	cr0,r6,1
	beq	cr0,do_restore

	/*
	 * We are about to soft-enable interrupts (we are hard disabled
	 * at this point). We check if there's anything that needs to
	 * be replayed first.
	 */
	lbz	r0,PACAIRQHAPPENED(r13)
	cmpwi	cr0,r0,0
	bne-	restore_check_irq_replay

	/*
	 * Get here when nothing happened while soft-disabled, just
	 * soft-enable and move-on. We will hard-enable as a side
	 * effect of rfi
	 */
restore_no_replay:
	TRACE_ENABLE_INTS
	li	r0,1
	stb	r0,PACASOFTIRQEN(r13);

	/*
	 * Final return path. BookE is handled in a different file
	 */
do_restore:
#ifdef CONFIG_PPC_BOOK3E
	b	exception_return_book3e
#else
	/*
	 * Clear the reservation. If we know the CPU tracks the address of
	 * the reservation then we can potentially save some cycles and use
	 * a larx. On POWER6 and POWER7 this is significantly faster.
	 */
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	/*
	 * Some code path such as load_up_fpu or altivec return directly
	 * here. They run entirely hard disabled and do not alter the
	 * interrupt state. They also don't use lwarx/stwcx. and thus
	 * are known not to leave dangling reservations.
	 */
	.globl	fast_exception_return
fast_exception_return:
	ld	r3,_MSR(r1)
	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)
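	/*
	 * REST_8GPRS above restores r5-r12; r0, r2-r4 (and r13 if returning
	 * to userspace) are restored below, once SRR0/SRR1 have been set up.
	 */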

	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	/* Load PPR from thread struct before we clear MSR:RI */
BEGIN_FTR_SECTION
	ld	r2,PACACURRENT(r13)
	ld	r2,TASKTHREADPPR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	li	r4,0
	mtmsrd	r4,1

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* TM debug */
	std	r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */
#endif
	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace - the value stored in the stack frame may belong to
	 * another CPU.
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
BEGIN_FTR_SECTION
	mtspr	SPRN_PPR,r2	/* Restore PPR */
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
	REST_GPR(13, r1)
1:
	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

#endif /* CONFIG_PPC_BOOK3E */

	/*
	 * We are returning to a context with interrupts soft disabled.
	 *
	 * However, we may also be about to hard enable, so we need to
	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
	 * or that bit can get out of sync and bad things will happen
	 */
restore_irq_off:
	ld	r3,_MSR(r1)
	lbz	r7,PACAIRQHAPPENED(r13)
	andi.	r0,r3,MSR_EE
	beq	1f
	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
	stb	r7,PACAIRQHAPPENED(r13)
1:	li	r0,0
	stb	r0,PACASOFTIRQEN(r13);
	TRACE_DISABLE_INTS
	b	do_restore

	/*
	 * Something did happen, check if a re-emit is needed
	 * (this also clears paca->irq_happened)
	 */
restore_check_irq_replay:
	/* XXX: We could implement a fast path here where we check
	 * for irq_happened being just 0x01, in which case we can
	 * clear it and return. That means that we would potentially
	 * miss a decrementer having wrapped all the way around.
	 *
	 * Still, this might be useful for things like hash_page
	 */
	bl	__check_irq_replay
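	/*
	 * r3 now holds the vector of the interrupt to replay (0x500
	 * external, 0x900 decrementer, 0xe60 HMI, or a doorbell vector),
	 * or 0 if there is nothing to replay.
	 */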
	cmpwi	cr0,r3,0
	beq	restore_no_replay

	/*
	 * We need to re-emit an interrupt. We do so by re-using our
	 * existing exception frame. We first change the trap value,
	 * but we need to ensure we preserve the low nibble of it
	 */
	ld	r4,_TRAP(r1)
	clrldi	r4,r4,60
	or	r4,r4,r3
	std	r4,_TRAP(r1)

	/*
	 * Then find the right handler and call it. Interrupts are
	 * still soft-disabled and we keep them that way.
	 */
	cmpwi	cr0,r3,0x500
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	do_IRQ
	b	ret_from_except
1:	cmpwi	cr0,r3,0xe60
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	handle_hmi_exception
	b	ret_from_except
1:	cmpwi	cr0,r3,0x900
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	timer_interrupt
	b	ret_from_except
#ifdef CONFIG_PPC_DOORBELL
1:
#ifdef CONFIG_PPC_BOOK3E
	cmpwi	cr0,r3,0x280
#else
	BEGIN_FTR_SECTION
		cmpwi	cr0,r3,0xe80
	FTR_SECTION_ELSE
		cmpwi	cr0,r3,0xa00
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
#endif /* CONFIG_PPC_BOOK3E */
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	doorbell_exception
	b	ret_from_except
#endif /* CONFIG_PPC_DOORBELL */
1:	b	ret_from_except /* What else to do here ? */

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

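	/*
	 * Build the two MSR values used below: r0 is the current MSR with
	 * EE/SE/BE/RI cleared (switched to immediately, so SRR0/SRR1 can't
	 * be clobbered by an interrupt), and r6 is the MSR for RTAS itself
	 * with SF (64-bit mode), IR/DR (relocation), FP/FE0/FE1 and LE
	 * additionally cleared, i.e. 32-bit big-endian real mode.
	 */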
	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
	andc	r6,r0,r9
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

rtas_return_loc:
	FIXUP_ENDIAN

	/* relocation is off at this point */
	GET_PACA(r4)
	clrldi	r4,r4,2			/* convert to realmode address */

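	/*
	 * bcl 20,31,$+4 is a branch-and-link to the next instruction; it is
	 * used here purely to get the current address into LR so the
	 * rtas_restore_regs pointer can be loaded position-independently.
	 */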
	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)		/* get &rtas_restore_regs */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

	.align	3
1:	.llong	rtas_restore_regs

rtas_restore_regs:
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	GET_PACA(r13)

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r10
	mfmsr	r11
	std	r10,_CCR(r1)
	std	r11,_MSR(r1)

	/* Put PROM address in SRR0 */
	mtsrr0	r4

	/* Setup our trampoline return addr in LR */
	bcl	20,31,$+4
0:	mflr	r4
	addi	r4,r4,(1f - 0b)
	mtlr	r4

	/* Prepare a 32-bit mode big endian MSR
	 */
#ifdef CONFIG_PPC_BOOK3E
	rlwinm	r11,r11,0,1,31
	mtsrr1	r11
	rfi
#else /* CONFIG_PPC_BOOK3E */
	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
	andc	r11,r11,r12
	mtsrr1	r11
	rfid
#endif /* CONFIG_PPC_BOOK3E */

1:	/* Return from OF */
	FIXUP_ENDIAN

	/* Just make sure that r1 top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	MTMSRD(r0)
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr