/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/magic.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/ftrace.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
#include <asm/tm.h>
#include <asm/ppc-opcode.h>
#include <asm/export.h>

/*
 * System calls.
 */
	.section	".toc","aw"
SYS_CALL_TABLE:
	.tc sys_call_table[TC],sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

	.globl system_call_common
system_call_common:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
	bne	tabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	beq	2f			/* if from kernel mode */
	ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
2:	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	mfcr	r2
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	std	r9,GPR13(r1)
	mflr	r10
	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r2,r11,28,(63-28)
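	/*
	 * Note: r11 is zero here, so the rldimi above deposits a zero into
	 * the 0x10000000 bit of the CR image; the error path (syscall_error)
	 * sets the same bit with oris r5,r5,0x1000 so userspace sees SO set.
	 */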
	li	r11,0xc01
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r3,ORIG_GPR3(r1)
	std	r2,_CCR(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
	beq	33f
	/* if from user, see if there are any DTL entries to process */
	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
	addi	r10,r10,LPPACA_DTLIDX
	LDX_BE	r10,0,r10		/* get log write index */
	cmpd	cr1,r11,r10
	beq+	cr1,33f
	bl	accumulate_stolen_time
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */
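	/*
	 * accumulate_stolen_time() folds any dispatch trace log (DTL)
	 * entries written by the hypervisor into stolen-time accounting;
	 * the DTL only exists on shared-processor LPARs, hence the
	 * FW_FEATURE_SPLPAR section above.
	 */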

	/*
	 * A syscall should always be called with interrupts enabled
	 * so we just unconditionally hard-enable here. When some kind
	 * of irq tracing is used, we additionally check that this
	 * condition is correct
	 */
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
	lbz	r10,PACASOFTIRQEN(r13)
	xori	r10,r10,1
1:	tdnei	r10,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	li	r11,MSR_RI
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

	/* We do need to set SOFTE in the stack frame or the return
	 * from interrupt will be painful
	 */
	li	r10,1
	std	r10,SOFTE(r1)
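	/*
	 * SOFTE(r1) records the soft-enable state the interrupted context
	 * ran with; the lazy-interrupt exit path (restore, below) compares
	 * it against PACASOFTIRQEN to decide whether to re-enable.
	 */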

	CURRENT_THREAD_INFO(r11, r1)
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_DOTRACE
	bne	syscall_dotrace		/* does not return */
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
	ld	r11,SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
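	/* Compat tasks pass 32-bit arguments; the clrldi sequence above
	 * zero-extends them so handlers never see stale upper halves.
	 */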
15:
	slwi	r0,r0,4
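	/*
	 * Each sys_call_table slot is 16 bytes: the 64-bit entry at offset
	 * 0 and the 32-bit (compat) entry at offset 8, which is why r11 was
	 * advanced by 8 above and the syscall number is scaled by 16 here.
	 */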
	ldx	r12,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r12
	bctrl			/* Call handler */

.Lsyscall_exit:
	std	r3,RESULT(r1)
	CURRENT_THREAD_INFO(r12, r1)

	ld	r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
#endif
	/*
	 * Disable interrupts so current_thread_info()->flags can't change,
	 * and so that we don't get interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	/*
	 * For performance reasons we clear RI the same time that we
	 * clear EE. We only need to clear RI just before we restore r13
	 * below, but batching it with EE saves us one expensive mtmsrd call.
	 * We have to be careful to restore RI if we branch anywhere from
	 * here (eg syscall_exit_work).
	 */
	li	r11,0
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

	ld	r9,TI_FLAGS(r12)
	li	r11,-MAX_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work

	andi.	r0,r8,MSR_FP
	beq	2f
#ifdef CONFIG_ALTIVEC
	andis.	r0,r8,MSR_VEC@h
	bne	3f
#endif
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_BOOK3S
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Restore RI */
#endif
	bl	restore_math
#ifdef CONFIG_PPC_BOOK3S
	li	r11,0
	mtmsrd	r11,1
#endif
	ld	r8,_MSR(r1)
	ld	r3,RESULT(r1)
	li	r11,-MAX_ERRNO
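	/*
	 * r8, r3 and the -MAX_ERRNO bound are reloaded above because
	 * restore_math() is a C call: it clobbers the volatile registers
	 * and may have updated regs->msr.
	 */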

3:	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
.Lsyscall_error_cont:
	ld	r7,_NIP(r1)
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)

	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r13, r11, r12)

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI
	b	.	/* prevent speculative execution */

syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	.Lsyscall_error_cont

/* Traced system call support */
syscall_dotrace:
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter

	/*
	 * We use the return value of do_syscall_trace_enter() as the syscall
	 * number. If the syscall was rejected for any reason do_syscall_trace_enter()
	 * returns an invalid syscall number and the test below against
	 * NR_syscalls will fail.
	 */
	mr	r0,r3

	/* Restore argument registers just clobbered and/or possibly changed. */
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)

	/* Repopulate r9 and r10 for the system_call path */
	addi	r9,r1,STACK_FRAME_OVERHEAD
	CURRENT_THREAD_INFO(r10, r1)
	ld	r10,TI_FLAGS(r10)

	cmpldi	r0,NR_syscalls
	blt+	system_call

	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
	b	.Lsyscall_exit


syscall_enosys:
	li	r3,-ENOSYS
	b	.Lsyscall_exit

syscall_exit_work:
#ifdef CONFIG_PPC_BOOK3S
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Restore RI */
#endif
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	   If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -MAX_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS
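	/* i.e. atomically: current_thread_info()->flags &= ~_TIF_PERSYSCALL_MASK */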

4:	/* Anything else left to do? */
BEGIN_FTR_SECTION
	lis	r3,INIT_PPR@highest	/* Set thread.ppr = 3 */
	ld	r10,PACACURRENT(r13)
	sldi	r3,r3,32	/* bits 11-13 are used for ppr */
	std	r3,TASKTHREADPPR(r10)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
	beq	ret_from_except_lite

	/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	li	r10,MSR_RI
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
tabort_syscall:
	/* Firstly we need to enable TM in the kernel */
	mfmsr	r10
	li	r9, 1
	rldimi	r10, r9, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r10, 0

	/* tabort, this dooms the transaction, nothing else */
	li	r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
	TABORT(R9)

	/*
	 * Return directly to userspace. We have corrupted user register state,
	 * but userspace will never see that register state. Execution will
	 * resume after the tbegin of the aborted transaction with the
	 * checkpointed register state.
	 */
	li	r9, MSR_RI
	andc	r10, r10, r9
	mtmsrd	r10, 1
	mtspr	SPRN_SRR0, r11
	mtspr	SPRN_SRR1, r12

	rfid
	b	.	/* prevent speculative execution */
#endif

/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr
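	/*
	 * The low bit of _TRAP flags a frame whose non-volatile GPRs have
	 * not been saved; clrrdi above clears it once SAVE_NVGPRS is done,
	 * so repeated calls (the beqlr- above) are cheap no-ops.
	 */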

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace. Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code. Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	save_nvgprs
	bl	sys_fork
	b	.Lsyscall_exit

_GLOBAL(ppc_vfork)
	bl	save_nvgprs
	bl	sys_vfork
	b	.Lsyscall_exit

_GLOBAL(ppc_clone)
	bl	save_nvgprs
	bl	sys_clone
	b	.Lsyscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	save_nvgprs
	bl	compat_sys_swapcontext
	b	.Lsyscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	save_nvgprs
	bl	sys_swapcontext
	b	.Lsyscall_exit

_GLOBAL(ppc_switch_endian)
	bl	save_nvgprs
	bl	sys_switch_endian
	b	.Lsyscall_exit

_GLOBAL(ret_from_fork)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	.Lsyscall_exit

_GLOBAL(ret_from_kernel_thread)
	bl	schedule_tail
	REST_NVGPRS(r1)
	mtlr	r14
	mr	r3,r15
#ifdef PPC64_ELF_ABI_v2
	mr	r12,r14
#endif
	blrl
	li	r3,0
	b	.Lsyscall_exit
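	/*
	 * r14 (thread function) and r15 (argument) used above were set up
	 * by copy_thread for kernel threads; under the ELFv2 ABI the callee
	 * also expects its own entry address in r12 to derive its TOC.
	 */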

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	std	r0,_NIP(r1)	/* Return to switch caller */
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	/*
	 * If we optimise away the clear of the reservation in system
	 * calls because we know the CPU tracks the address of the
	 * reservation, then we need to clear it here to cover the
	 * case that the kernel context switch path has no larx
	 * instructions.
	 */
BEGIN_FTR_SECTION
	ldarx	r6,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)

BEGIN_FTR_SECTION
/*
 * A cp_abort (copy paste abort) here ensures that when context switching, a
 * copy from one process can't leak into the paste of another.
 */
	PPC_CP_ABORT
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

#ifdef CONFIG_PPC_BOOK3S
/* Cancel all explicit user streams as they will have no use after context
 * switch and will stop the HW from creating streams itself
 */
	DCBT_STOP_ALL_STREAM_IDS(r6)
#endif

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_STD_MMU_64
BEGIN_MMU_FTR_SECTION
	b	2f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
FTR_SECTION_ELSE
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	li	r12,SLBSHADOW_STACKVSID
	STDX_BE	r7,r12,r9			/* Save VSID */
	li	r12,SLBSHADOW_STACKESID
	STDX_BE	r0,r12,r9			/* Save ESID */

	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync
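	/*
	 * The sequence above swaps the bolted SLB entry over to the new
	 * stack's segment, so kernel stack accesses never take SLB misses
	 * at points where one could not be serviced.
	 */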
2:
#endif /* CONFIG_PPC_STD_MMU_64 */

	CURRENT_THREAD_INFO(r7, r8)  /* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */

	CURRENT_THREAD_INFO(r9, r1)
	ld	r3,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3E
	ld	r10,PACACURRENT(r13)
#endif /* CONFIG_PPC_BOOK3E */
	ld	r4,TI_FLAGS(r9)
	andi.	r3,r3,MSR_PR
	beq	resume_kernel
#ifdef CONFIG_PPC_BOOK3E
	lwz	r3,(THREAD+THREAD_DBCR0)(r10)
#endif /* CONFIG_PPC_BOOK3E */

	/* Check current_thread_info()->flags */
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	1f
#ifdef CONFIG_PPC_BOOK3E
	/*
	 * Check to see if the dbcr0 register is set up to debug.
	 * Use the internal debug mode bit to do this.
	 */
	andis.	r0,r3,DBCR0_IDM@h
	beq	restore
	mfmsr	r0
	rlwinm	r0,r0,0,~MSR_DE	/* Clear MSR.DE */
	mtmsr	r0
	mtspr	SPRN_DBCR0,r3
	li	r10, -1
	mtspr	SPRN_DBSR,r10
	b	restore
#else
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_math
	b	restore
#endif
1:	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	2f
	bl	restore_interrupts
	SCHEDULE_USER
	b	ret_from_except_lite
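	/*
	 * SCHEDULE_USER expands to a schedule() call (via schedule_user()
	 * when context tracking is enabled, see asm/context_tracking.h);
	 * we then re-run the whole exit path, as flags may have changed.
	 */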
2:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	andi.	r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
	bne	3f		/* only restore TM if nothing else to do */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_tm_state
	b	restore
3:
#endif
	bl	save_nvgprs
	/*
	 * Use a non volatile GPR to save and restore our thread_info flags
	 * across the call to restore_interrupts.
	 */
	mr	r30,r4
	bl	restore_interrupts
	mr	r4,r30
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_notify_resume
	b	ret_from_except

resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	andis.	r8,r4,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	lwz	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	ldx	r0,r6,r4
	stdx	r0,r6,r3
	addi	r6,r6,8
	bdnz	2b

	/* Do real store operation to complete stwu */
	lwz	r5,GPR1(r1)
	std	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	ldarx	r4,0,r5
	andc	r4,r4,r11
	stdcx.	r4,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* Check if we need to preempt */
	andi.	r0,r4,_TIF_NEED_RESCHED
	beq+	restore
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
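	/*
	 * After the crandc, cr0.eq is set only when preempt_count() == 0
	 * AND interrupts were soft-enabled (SOFTE != 0); the bne below
	 * skips preemption in every other case.
	 */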
	bne	restore

	/*
	 * Here we are preempting the current task. We want to make
	 * sure we are soft-disabled first and reconcile irq state.
	 */
	RECONCILE_IRQ_STATE(r3,r4)
1:	bl	preempt_schedule_irq

	/* Re-test flags and eventually loop */
	CURRENT_THREAD_INFO(r9, r1)
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b

	/*
	 * arch_local_irq_restore() from preempt_schedule_irq above may
	 * have hard-enabled interrupts, but we must return from this
	 * interrupt with them disabled, so that we don't get
	 * interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
#endif /* CONFIG_PREEMPT */

	.globl	fast_exc_return_irq
fast_exc_return_irq:
restore:
	/*
	 * This is the main kernel exit path. First we check if we
	 * are about to re-enable interrupts
	 */
	ld	r5,SOFTE(r1)
	lbz	r6,PACASOFTIRQEN(r13)
	cmpwi	cr0,r5,0
	beq	restore_irq_off

	/* We are enabling, were we already enabled ? Yes, just return */
	cmpwi	cr0,r6,1
	beq	cr0,do_restore

	/*
	 * We are about to soft-enable interrupts (we are hard disabled
	 * at this point). We check if there's anything that needs to
	 * be replayed first.
	 */
	lbz	r0,PACAIRQHAPPENED(r13)
	cmpwi	cr0,r0,0
	bne-	restore_check_irq_replay

	/*
	 * Get here when nothing happened while soft-disabled, just
	 * soft-enable and move-on. We will hard-enable as a side
	 * effect of rfi
	 */
restore_no_replay:
	TRACE_ENABLE_INTS
	li	r0,1
	stb	r0,PACASOFTIRQEN(r13);

	/*
	 * Final return path. BookE is handled in a different file
	 */
do_restore:
#ifdef CONFIG_PPC_BOOK3E
	b	exception_return_book3e
#else
	/*
	 * Clear the reservation. If we know the CPU tracks the address of
	 * the reservation then we can potentially save some cycles and use
	 * a larx. On POWER6 and POWER7 this is significantly faster.
	 */
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	/*
	 * Some code path such as load_up_fpu or altivec return directly
	 * here. They run entirely hard disabled and do not alter the
	 * interrupt state. They also don't use lwarx/stwcx. and thus
	 * are known not to leave dangling reservations.
	 */
	.globl	fast_exception_return
fast_exception_return:
	ld	r3,_MSR(r1)
	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	/* Load PPR from thread struct before we clear MSR:RI */
BEGIN_FTR_SECTION
	ld	r2,PACACURRENT(r13)
	ld	r2,TASKTHREADPPR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * Clear RI before restoring r13. If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	li	r4,0
	mtmsrd	r4,1

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* TM debug */
	std	r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */
#endif
	/*
	 * r13 is our per cpu area; only restore it if we are returning to
	 * userspace, as the value stored in the stack frame may belong to
	 * another CPU.
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
BEGIN_FTR_SECTION
	mtspr	SPRN_PPR,r2	/* Restore PPR */
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
	REST_GPR(13, r1)
1:
	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

#endif /* CONFIG_PPC_BOOK3E */

	/*
	 * We are returning to a context with interrupts soft disabled.
	 *
	 * However, we may also be about to hard enable, so we need to
	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
	 * or that bit can get out of sync and bad things will happen
	 */
restore_irq_off:
	ld	r3,_MSR(r1)
	lbz	r7,PACAIRQHAPPENED(r13)
	andi.	r0,r3,MSR_EE
	beq	1f
	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
	stb	r7,PACAIRQHAPPENED(r13)
1:	li	r0,0
	stb	r0,PACASOFTIRQEN(r13);
	TRACE_DISABLE_INTS
	b	do_restore

	/*
	 * Something did happen, check if a re-emit is needed
	 * (this also clears paca->irq_happened)
	 */
restore_check_irq_replay:
	/* XXX: We could implement a fast path here where we check
	 * for irq_happened being just 0x01, in which case we can
	 * clear it and return. That means that we would potentially
	 * miss a decrementer having wrapped all the way around.
	 *
	 * Still, this might be useful for things like hash_page
	 */
	bl	__check_irq_replay
	cmpwi	cr0,r3,0
	beq	restore_no_replay

	/*
	 * We need to re-emit an interrupt. We do so by re-using our
	 * existing exception frame. We first change the trap value,
	 * but we need to ensure we preserve the low nibble of it
	 */
	ld	r4,_TRAP(r1)
	clrldi	r4,r4,60
	or	r4,r4,r3
	std	r4,_TRAP(r1)

	/*
	 * Then find the right handler and call it. Interrupts are
	 * still soft-disabled and we keep them that way.
	 */
	cmpwi	cr0,r3,0x500
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	do_IRQ
	b	ret_from_except
MS
9351: cmpwi cr0,r3,0xe60
936 bne 1f
937 addi r3,r1,STACK_FRAME_OVERHEAD;
938 bl handle_hmi_exception
939 b ret_from_except
7230c564
BH
9401: cmpwi cr0,r3,0x900
941 bne 1f
942 addi r3,r1,STACK_FRAME_OVERHEAD;
b1576fec
AB
943 bl timer_interrupt
944 b ret_from_except
fe9e1d54
IM
945#ifdef CONFIG_PPC_DOORBELL
9461:
7230c564 947#ifdef CONFIG_PPC_BOOK3E
fe9e1d54
IM
948 cmpwi cr0,r3,0x280
949#else
950 BEGIN_FTR_SECTION
951 cmpwi cr0,r3,0xe80
952 FTR_SECTION_ELSE
953 cmpwi cr0,r3,0xa00
954 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
955#endif /* CONFIG_PPC_BOOK3E */
7230c564
BH
956 bne 1f
957 addi r3,r1,STACK_FRAME_OVERHEAD;
b1576fec
AB
958 bl doorbell_exception
959 b ret_from_except
fe9e1d54 960#endif /* CONFIG_PPC_DOORBELL */
b1576fec 9611: b ret_from_except /* What else to do here ? */
7230c564 962
9994a338
PM
963unrecov_restore:
964 addi r3,r1,STACK_FRAME_OVERHEAD
b1576fec 965 bl unrecoverable_exception
9994a338
PM
966 b unrecov_restore
967
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
	andc	r6,r0,r9
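	/*
	 * r0 is now the current MSR with EE/SE/BE/RI cleared (used for the
	 * local mtmsrd below); r6 additionally drops SF/IR/DR/FP/LE etc.,
	 * giving the 32-bit big-endian real-mode MSR that RTAS is entered
	 * with via SRR1.
	 */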
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

rtas_return_loc:
	FIXUP_ENDIAN

	/* relocation is off at this point */
	GET_PACA(r4)
	clrldi	r4,r4,2			/* convert to realmode address */

	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)		/* get &rtas_restore_regs */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

	.align	3
1:	.llong	rtas_restore_regs

rtas_restore_regs:
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	GET_PACA(r13)

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r10
	mfmsr	r11
	std	r10,_CCR(r1)
	std	r11,_MSR(r1)

	/* Put PROM address in SRR0 */
	mtsrr0	r4

	/* Setup our trampoline return addr in LR */
	bcl	20,31,$+4
0:	mflr	r4
	addi	r4,r4,(1f - 0b)
	mtlr	r4

	/* Prepare a 32-bit mode big endian MSR
	 */
#ifdef CONFIG_PPC_BOOK3E
	rlwinm	r11,r11,0,1,31
	mtsrr1	r11
	rfi
#else /* CONFIG_PPC_BOOK3E */
	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
	andc	r11,r11,r12
	mtsrr1	r11
	rfid
#endif /* CONFIG_PPC_BOOK3E */

1:	/* Return from OF */
	FIXUP_ENDIAN

	/* Just make sure that r1 top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	MTMSRD(r0)
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
EXPORT_SYMBOL(_mcount)
	mflr	r12
	mtctr	r12
	mtlr	r0
	bctr
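	/*
	 * With DYNAMIC_FTRACE the default _mcount just restores the
	 * caller's LR from r0 and branches back via CTR; call sites are
	 * patched to nops (or redirected to ftrace_caller) at runtime.
	 */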

#ifndef CC_USING_MPROFILE_KERNEL
_GLOBAL_TOC(ftrace_caller)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)
	subi	r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112

#else /* CC_USING_MPROFILE_KERNEL */
/*
 * ftrace_caller() is the function that replaces _mcount() when ftrace is
 * active.
 *
 * We arrive here after a function A calls function B, and we are the trace
 * function for B. When we enter r1 points to A's stack frame, B has not yet
 * had a chance to allocate one.
 *
 * Additionally r2 may point either to the TOC for A, or B, depending on
 * whether B did a TOC setup sequence before calling us.
 *
 * On entry the LR points back to the _mcount() call site, and r0 holds the
 * saved LR as it was on entry to B, ie. the original return address at the
 * call site in A.
 *
 * Our job is to save the register state into a struct pt_regs (on the stack)
 * and then arrange for the ftrace function to be called.
 */
_GLOBAL(ftrace_caller)
	/* Save the original return address in A's stack frame */
	std	r0,LRSAVE(r1)

	/* Create our stack frame + pt_regs */
	stdu	r1,-SWITCH_FRAME_SIZE(r1)

	/* Save all gprs to pt_regs */
	SAVE_8GPRS(0,r1)
	SAVE_8GPRS(8,r1)
	SAVE_8GPRS(16,r1)
	SAVE_8GPRS(24,r1)

	/* Load special regs for save below */
	mfmsr	r8
	mfctr	r9
	mfxer	r10
	mfcr	r11

	/* Get the _mcount() call site out of LR */
	mflr	r7
	/* Save it as pt_regs->nip & pt_regs->link */
	std	r7, _NIP(r1)
	std	r7, _LINK(r1)

	/* Save callee's TOC in the ABI compliant location */
	std	r2, 24(r1)
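	/* (24(r1) is the TOC pointer doubleword in the ELFv2 stack frame) */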
	ld	r2,PACATOC(r13)	/* get kernel TOC in r2 */

	addis	r3,r2,function_trace_op@toc@ha
	addi	r3,r3,function_trace_op@toc@l
	ld	r5,0(r3)

#ifdef CONFIG_LIVEPATCH
	mr	r14,r7		/* remember old NIP */
#endif
	/* Calculate ip from nip-4 into r3 for call below */
	subi	r3, r7, MCOUNT_INSN_SIZE

	/* Put the original return address in r4 as parent_ip */
	mr	r4, r0

	/* Save special regs */
	std	r8, _MSR(r1)
	std	r9, _CTR(r1)
	std	r10, _XER(r1)
	std	r11, _CCR(r1)

	/* Load &pt_regs in r6 for call below */
	addi	r6, r1 ,STACK_FRAME_OVERHEAD

	/* ftrace_call(r3, r4, r5, r6) */
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop

	/* Load ctr with the possibly modified NIP */
	ld	r3, _NIP(r1)
	mtctr	r3
#ifdef CONFIG_LIVEPATCH
	cmpd	r14,r3		/* has NIP been altered? */
#endif

	/* Restore gprs */
	REST_8GPRS(0,r1)
	REST_8GPRS(8,r1)
	REST_8GPRS(16,r1)
	REST_8GPRS(24,r1)

	/* Restore callee's TOC */
	ld	r2, 24(r1)

	/* Pop our stack frame */
	addi	r1, r1, SWITCH_FRAME_SIZE

	/* Restore original LR for return to B */
	ld	r0, LRSAVE(r1)
	mtlr	r0

#ifdef CONFIG_LIVEPATCH
	/* Based on the cmpd above, if the NIP was altered handle livepatch */
	bne-	livepatch_handler
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	stdu	r1, -112(r1)
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
	addi	r1, r1, 112
#endif

	ld	r0,LRSAVE(r1)	/* restore callee's lr at _mcount site */
	mtlr	r0
	bctr			/* jump after _mcount site */
#endif /* CC_USING_MPROFILE_KERNEL */

_GLOBAL(ftrace_stub)
	blr

#ifdef CONFIG_LIVEPATCH
	/*
	 * This function runs in the mcount context, between two functions. As
	 * such it can only clobber registers which are volatile and used in
	 * function linkage.
	 *
	 * We get here when a function A, calls another function B, but B has
	 * been live patched with a new function C.
	 *
	 * On entry:
	 *  - we have no stack frame and can not allocate one
	 *  - LR points back to the original caller (in A)
	 *  - CTR holds the new NIP in C
	 *  - r0 & r12 are free
	 *
	 * r0 can't be used as the base register for a DS-form load or store, so
	 * we temporarily shuffle r1 (stack pointer) into r0 and then put it back.
	 */
livepatch_handler:
	CURRENT_THREAD_INFO(r12, r1)

	/* Save stack pointer into r0 */
	mr	r0, r1

	/* Allocate 3 x 8 bytes */
	ld	r1, TI_livepatch_sp(r12)
	addi	r1, r1, 24
	std	r1, TI_livepatch_sp(r12)

	/* Save toc & real LR on livepatch stack */
	std	r2, -24(r1)
	mflr	r12
	std	r12, -16(r1)

	/* Store stack end marker */
	lis	r12, STACK_END_MAGIC@h
	ori	r12, r12, STACK_END_MAGIC@l
	std	r12, -8(r1)
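	/*
	 * Livepatch stack entry as laid out above:
	 *   -24(r1) saved TOC, -16(r1) real LR, -8(r1) STACK_END_MAGIC
	 * sentinel, checked on the return path below.
	 */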

	/* Restore real stack pointer */
	mr	r1, r0

	/* Put ctr in r12 for global entry and branch there */
	mfctr	r12
	bctrl

	/*
	 * Now we are returning from the patched function to the original
	 * caller A. We are free to use r0 and r12, and we can use r2 until we
	 * restore it.
	 */

	CURRENT_THREAD_INFO(r12, r1)

	/* Save stack pointer into r0 */
	mr	r0, r1

	ld	r1, TI_livepatch_sp(r12)

	/* Check stack marker hasn't been trashed */
	lis	r2,  STACK_END_MAGIC@h
	ori	r2,  r2, STACK_END_MAGIC@l
	ld	r12, -8(r1)
1:	tdne	r12, r2
	EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0

	/* Restore LR & toc from livepatch stack */
	ld	r12, -16(r1)
	mtlr	r12
	ld	r2, -24(r1)

	/* Pop livepatch stack frame */
	CURRENT_THREAD_INFO(r12, r0)
	subi	r1, r1, 24
	std	r1, TI_livepatch_sp(r12)

	/* Restore real stack pointer */
	mr	r1, r0

	/* Return to original caller of live patched function */
	blr
#endif

#else
_GLOBAL_TOC(_mcount)
EXPORT_SYMBOL(_mcount)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5,ftrace_trace_function)
	ld	r5,0(r5)
	ld	r5,0(r5)
	mtctr	r5
	bctrl
	nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifndef CC_USING_MPROFILE_KERNEL
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	ld	r4, 128(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* Grab the LR out of the caller stack frame */
	ld	r11, 112(r1)
	ld	r3, 16(r11)

	bl	prepare_ftrace_return
	nop

	/*
	 * prepare_ftrace_return gives us the address we divert to.
	 * Change the LR in the caller's stack frame to this.
	 */
	ld	r11, 112(r1)
	std	r3, 16(r11)

	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
	blr

#else /* CC_USING_MPROFILE_KERNEL */
_GLOBAL(ftrace_graph_caller)
	/* with -mprofile-kernel, parameter regs are still alive at _mcount */
	std	r10, 104(r1)
	std	r9, 96(r1)
	std	r8, 88(r1)
	std	r7, 80(r1)
	std	r6, 72(r1)
	std	r5, 64(r1)
	std	r4, 56(r1)
	std	r3, 48(r1)

	/* Save callee's TOC in the ABI compliant location */
	std	r2, 24(r1)
	ld	r2, PACATOC(r13)	/* get kernel TOC in r2 */

	mfctr	r4		/* ftrace_caller has moved local addr here */
	std	r4, 40(r1)
	mflr	r3		/* ftrace_caller has restored LR from stack */
	subi	r4, r4, MCOUNT_INSN_SIZE

	bl	prepare_ftrace_return
	nop

	/*
	 * prepare_ftrace_return gives us the address we divert to.
	 * Change the LR to this.
	 */
	mtlr	r3

	ld	r0, 40(r1)
	mtctr	r0
	ld	r10, 104(r1)
	ld	r9, 96(r1)
	ld	r8, 88(r1)
	ld	r7, 80(r1)
	ld	r6, 72(r1)
	ld	r5, 64(r1)
	ld	r4, 56(r1)
	ld	r3, 48(r1)

	/* Restore callee's TOC */
	ld	r2, 24(r1)

	addi	r1, r1, 112
	mflr	r0
	std	r0, LRSAVE(r1)
	bctr
#endif /* CC_USING_MPROFILE_KERNEL */

_GLOBAL(return_to_handler)
	/* need to save return values */
	std	r4,  -32(r1)
	std	r3,  -24(r1)
	/* save TOC */
	std	r2,  -16(r1)
	std	r31, -8(r1)
	mr	r31, r1
	stdu	r1, -112(r1)

	/*
	 * We might be called from a module.
	 * Switch to our TOC to run inside the core kernel.
	 */
	ld	r2, PACATOC(r13)

	bl	ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	ld	r1, 0(r1)
	ld	r4,  -32(r1)
	ld	r3,  -24(r1)
	ld	r2,  -16(r1)
	ld	r31, -8(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */