/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/ftrace.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
#include <asm/tm.h>

/*
 * System calls.
 */
	.section	".toc","aw"
SYS_CALL_TABLE:
	.tc sys_call_table[TC],sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

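/*
 * System call entry.  On entry (as set up by the system call exception
 * prologue):
 *   r0       syscall number
 *   r3-r8    syscall arguments
 *   r9       caller's r13 (saved into GPR13 of the frame below)
 *   r11,r12  SRR0 (user NIP) and SRR1 (user MSR)
 *   r13      PACA pointer, r1 = stack pointer at the time of the syscall
 */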
	.globl system_call_common
system_call_common:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
	bne	tabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	beq	2f			/* if from kernel mode */
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
2:	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	mfcr	r2
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	std	r9,GPR13(r1)
	mflr	r10
	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r2,r11,28,(63-28)
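	/* trap value 0xc01: system call vector 0xc00, low bit set = NVGPRs not yet saved */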
	li	r11,0xc01
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r3,ORIG_GPR3(r1)
	std	r2,_CCR(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
	beq	33f
	/* if from user, see if there are any DTL entries to process */
	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
	addi	r10,r10,LPPACA_DTLIDX
	LDX_BE	r10,0,r10		/* get log write index */
	cmpd	cr1,r11,r10
	beq+	cr1,33f
	bl	accumulate_stolen_time
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */

	/*
	 * A syscall should always be called with interrupts enabled
	 * so we just unconditionally hard-enable here. When some kind
	 * of irq tracing is used, we additionally check that condition
	 * is correct
	 */
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
	lbz	r10,PACASOFTIRQEN(r13)
	xori	r10,r10,1
1:	tdnei	r10,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	ld	r11,PACAKMSR(r13)
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

	/* We do need to set SOFTE in the stack frame or the return
	 * from interrupt will be painful
	 */
	li	r10,1
	std	r10,SOFTE(r1)

	CURRENT_THREAD_INFO(r11, r1)
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_DOTRACE
	bne	syscall_dotrace		/* does not return */
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
	ld	r11,SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
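	/* each sys_call_table slot is 16 bytes: the 64-bit entry at +0, the compat entry at +8 */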
	slwi	r0,r0,4
	ldx	r12,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r12
	bctrl			/* Call handler */

.Lsyscall_exit:
	std	r3,RESULT(r1)
	CURRENT_THREAD_INFO(r12, r1)

	ld	r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
#endif
	/*
	 * Disable interrupts so current_thread_info()->flags can't change,
	 * and so that we don't get interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	ld	r10,PACAKMSR(r13)
	/*
	 * For performance reasons we clear RI the same time that we
	 * clear EE. We only need to clear RI just before we restore r13
	 * below, but batching it with EE saves us one expensive mtmsrd call.
	 * We have to be careful to restore RI if we branch anywhere from
	 * here (eg syscall_exit_work).
	 */
	li	r9,MSR_RI
	andc	r11,r10,r9
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

	ld	r9,TI_FLAGS(r12)
	li	r11,-MAX_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
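	/* a return value in the range [-MAX_ERRNO, -1] marks a failed syscall */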
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
.Lsyscall_error_cont:
	ld	r7,_NIP(r1)
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)

	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI
	b	.	/* prevent speculative execution */

syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	.Lsyscall_error_cont

/* Traced system call support */
syscall_dotrace:
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter

	/*
	 * We use the return value of do_syscall_trace_enter() as the syscall
	 * number. If the syscall was rejected for any reason do_syscall_trace_enter()
	 * returns an invalid syscall number and the test below against
	 * NR_syscalls will fail.
	 */
	mr	r0,r3

	/* Restore argument registers just clobbered and/or possibly changed. */
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)

	/* Repopulate r9 and r10 for the system_call path */
	addi	r9,r1,STACK_FRAME_OVERHEAD
	CURRENT_THREAD_INFO(r10, r1)
	ld	r10,TI_FLAGS(r10)

	cmpldi	r0,NR_syscalls
	blt+	system_call

	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
	b	.Lsyscall_exit


syscall_enosys:
	li	r3,-ENOSYS
	b	.Lsyscall_exit

syscall_exit_work:
#ifdef CONFIG_PPC_BOOK3S
	mtmsrd	r10,1		/* Restore RI */
#endif
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	 If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -MAX_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
BEGIN_FTR_SECTION
	lis	r3,INIT_PPR@highest	/* Set thread.ppr = 3 */
	ld	r10,PACACURRENT(r13)
	sldi	r3,r3,32	/* bits 11-13 are used for ppr */
	std	r3,TASKTHREADPPR(r10)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
	beq	ret_from_except_lite

	/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	ld	r10,PACAKMSR(r13)
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
tabort_syscall:
	/* Firstly we need to enable TM in the kernel */
	mfmsr	r10
	li	r13, 1
	rldimi	r10, r13, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r10, 0

	/* tabort, this dooms the transaction, nothing else */
	li	r13, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
	TABORT(R13)

	/*
	 * Return directly to userspace. We have corrupted user register state,
	 * but userspace will never see that register state. Execution will
	 * resume after the tbegin of the aborted transaction with the
	 * checkpointed register state.
	 */
	li	r13, MSR_RI
	andc	r10, r10, r13
	mtmsrd	r10, 1
	mtspr	SPRN_SRR0, r11
	mtspr	SPRN_SRR1, r12

	rfid
	b	.	/* prevent speculative execution */
#endif

/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
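	/* bit 0 of _TRAP is set while the non-volatile GPRs have not yet been
	 * saved; it is cleared below once they are stored into the frame.
	 */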
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr


/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	save_nvgprs
	bl	sys_fork
	b	.Lsyscall_exit

_GLOBAL(ppc_vfork)
	bl	save_nvgprs
	bl	sys_vfork
	b	.Lsyscall_exit

_GLOBAL(ppc_clone)
	bl	save_nvgprs
	bl	sys_clone
	b	.Lsyscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	save_nvgprs
	bl	compat_sys_swapcontext
	b	.Lsyscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	save_nvgprs
	bl	sys_swapcontext
	b	.Lsyscall_exit

_GLOBAL(ppc_switch_endian)
	bl	save_nvgprs
	bl	sys_switch_endian
	b	.Lsyscall_exit

_GLOBAL(ret_from_fork)
	bl	schedule_tail
	REST_NVGPRS(r1)
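	/* the child returns 0 from fork/vfork/clone */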
	li	r3,0
	b	.Lsyscall_exit

_GLOBAL(ret_from_kernel_thread)
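	/* r14 = kernel thread function, r15 = its argument (both set up by copy_thread()) */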
	bl	schedule_tail
	REST_NVGPRS(r1)
	mtlr	r14
	mr	r3,r15
#if defined(_CALL_ELF) && _CALL_ELF == 2
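	/* the ELFv2 ABI requires r12 to hold the entry point of the called function */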
	mr	r12,r14
#endif
	blrl
	li	r3,0
	b	.Lsyscall_exit

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	std	r0,_NIP(r1)	/* Return to switch caller */
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	/*
	 * If we optimise away the clear of the reservation in system
	 * calls because we know the CPU tracks the address of the
	 * reservation, then we need to clear it here to cover the
	 * case that the kernel context switch path has no larx
	 * instructions.
	 */
BEGIN_FTR_SECTION
	ldarx	r6,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)

#ifdef CONFIG_PPC_BOOK3S
/* Cancel all explicit user streams as they will have no use after context
 * switch and will stop the HW from creating streams itself
 */
	DCBT_STOP_ALL_STREAM_IDS(r6)
#endif

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
FTR_SECTION_ELSE
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	li	r12,SLBSHADOW_STACKVSID
	STDX_BE	r7,r12,r9			/* Save VSID */
	li	r12,SLBSHADOW_STACKESID
	STDX_BE	r0,r12,r9			/* Save ESID */

	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync
2:
#endif /* !CONFIG_PPC_BOOK3S */

	CURRENT_THREAD_INFO(r7, r8)  /* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	ld	r10,PACAKMSR(r13) /* Get kernel MSR without EE */
	mtmsrd	r10,1		  /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */

	CURRENT_THREAD_INFO(r9, r1)
	ld	r3,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3E
	ld	r10,PACACURRENT(r13)
#endif /* CONFIG_PPC_BOOK3E */
	ld	r4,TI_FLAGS(r9)
	andi.	r3,r3,MSR_PR
	beq	resume_kernel
#ifdef CONFIG_PPC_BOOK3E
	lwz	r3,(THREAD+THREAD_DBCR0)(r10)
#endif /* CONFIG_PPC_BOOK3E */

	/* Check current_thread_info()->flags */
	andi.	r0,r4,_TIF_USER_WORK_MASK
#ifdef CONFIG_PPC_BOOK3E
	bne	1f
	/*
	 * Check to see if the dbcr0 register is set up to debug.
	 * Use the internal debug mode bit to do this.
	 */
	andis.	r0,r3,DBCR0_IDM@h
	beq	restore
	mfmsr	r0
	rlwinm	r0,r0,0,~MSR_DE	/* Clear MSR.DE */
	mtmsr	r0
	mtspr	SPRN_DBCR0,r3
	li	r10, -1
	mtspr	SPRN_DBSR,r10
	b	restore
#else
	beq	restore
#endif
1:	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	2f
	bl	restore_interrupts
	SCHEDULE_USER
	b	ret_from_except_lite
2:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	andi.	r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
	bne	3f		/* only restore TM if nothing else to do */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_tm_state
	b	restore
3:
#endif
	bl	save_nvgprs
	/*
	 * Use a non volatile GPR to save and restore our thread_info flags
	 * across the call to restore_interrupts.
	 */
	mr	r30,r4
	bl	restore_interrupts
	mr	r4,r30
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_notify_resume
	b	ret_from_except

resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	andis.	r8,r4,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	lwz	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	ldx	r0,r6,r4
	stdx	r0,r6,r3
	addi	r6,r6,8
	bdnz	2b

	/* Do real store operation to complete stwu */
	lwz	r5,GPR1(r1)
	std	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	ldarx	r4,0,r5
	andc	r4,r4,r11
	stdcx.	r4,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* Check if we need to preempt */
	andi.	r0,r4,_TIF_NEED_RESCHED
	beq+	restore
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore

	/*
	 * Here we are preempting the current task. We want to make
	 * sure we are soft-disabled first and reconcile irq state.
	 */
	RECONCILE_IRQ_STATE(r3,r4)
1:	bl	preempt_schedule_irq

	/* Re-test flags and eventually loop */
	CURRENT_THREAD_INFO(r9, r1)
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b

	/*
	 * arch_local_irq_restore() from preempt_schedule_irq above may
	 * enable hard interrupt but we really should disable interrupts
	 * when we return from the interrupt, and so that we don't get
	 * interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	ld	r10,PACAKMSR(r13) /* Get kernel MSR without EE */
	mtmsrd	r10,1		  /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
#endif /* CONFIG_PREEMPT */

	.globl	fast_exc_return_irq
fast_exc_return_irq:
restore:
	/*
	 * This is the main kernel exit path. First we check if we
	 * are about to re-enable interrupts
	 */
	ld	r5,SOFTE(r1)
	lbz	r6,PACASOFTIRQEN(r13)
	cmpwi	cr0,r5,0
	beq	restore_irq_off

	/* We are enabling, were we already enabled ? Yes, just return */
	cmpwi	cr0,r6,1
	beq	cr0,do_restore

	/*
	 * We are about to soft-enable interrupts (we are hard disabled
	 * at this point). We check if there's anything that needs to
	 * be replayed first.
	 */
	lbz	r0,PACAIRQHAPPENED(r13)
	cmpwi	cr0,r0,0
	bne-	restore_check_irq_replay

	/*
	 * Get here when nothing happened while soft-disabled, just
	 * soft-enable and move-on. We will hard-enable as a side
	 * effect of rfi
	 */
restore_no_replay:
	TRACE_ENABLE_INTS
	li	r0,1
	stb	r0,PACASOFTIRQEN(r13);

	/*
	 * Final return path. BookE is handled in a different file
	 */
do_restore:
#ifdef CONFIG_PPC_BOOK3E
	b	exception_return_book3e
#else
	/*
	 * Clear the reservation. If we know the CPU tracks the address of
	 * the reservation then we can potentially save some cycles and use
	 * a larx. On POWER6 and POWER7 this is significantly faster.
	 */
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	/*
	 * Some code path such as load_up_fpu or altivec return directly
	 * here. They run entirely hard disabled and do not alter the
	 * interrupt state. They also don't use lwarx/stwcx. and thus
	 * are known not to leave dangling reservations.
	 */
	.globl	fast_exception_return
fast_exception_return:
	ld	r3,_MSR(r1)
	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	/* Load PPR from thread struct before we clear MSR:RI */
BEGIN_FTR_SECTION
	ld	r2,PACACURRENT(r13)
	ld	r2,TASKTHREADPPR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	ld	r4,PACAKMSR(r13) /* Get kernel MSR without EE */
	andc	r4,r4,r0	 /* r0 contains MSR_RI here */
	mtmsrd	r4,1

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* TM debug */
	std	r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */
#endif
	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace, as the value stored in the stack frame may belong to
	 * another CPU.
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
BEGIN_FTR_SECTION
	mtspr	SPRN_PPR,r2	/* Restore PPR */
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ACCOUNT_CPU_USER_EXIT(r2, r4)
	REST_GPR(13, r1)
1:
	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

#endif /* CONFIG_PPC_BOOK3E */

	/*
	 * We are returning to a context with interrupts soft disabled.
	 *
	 * However, we may also be about to hard enable, so we need to
	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
	 * or that bit can get out of sync and bad things will happen
	 */
restore_irq_off:
	ld	r3,_MSR(r1)
	lbz	r7,PACAIRQHAPPENED(r13)
	andi.	r0,r3,MSR_EE
	beq	1f
	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
	stb	r7,PACAIRQHAPPENED(r13)
1:	li	r0,0
	stb	r0,PACASOFTIRQEN(r13);
	TRACE_DISABLE_INTS
	b	do_restore

	/*
	 * Something did happen, check if a re-emit is needed
	 * (this also clears paca->irq_happened)
	 */
restore_check_irq_replay:
	/* XXX: We could implement a fast path here where we check
	 * for irq_happened being just 0x01, in which case we can
	 * clear it and return. That means that we would potentially
	 * miss a decrementer having wrapped all the way around.
	 *
	 * Still, this might be useful for things like hash_page
	 */
	bl	__check_irq_replay
	cmpwi	cr0,r3,0
	beq	restore_no_replay

	/*
	 * We need to re-emit an interrupt. We do so by re-using our
	 * existing exception frame. We first change the trap value,
	 * but we need to ensure we preserve the low nibble of it
	 */
	ld	r4,_TRAP(r1)
	clrldi	r4,r4,60
	or	r4,r4,r3
	std	r4,_TRAP(r1)

	/*
	 * Then find the right handler and call it. Interrupts are
	 * still soft-disabled and we keep them that way.
	 */
	cmpwi	cr0,r3,0x500
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	do_IRQ
	b	ret_from_except
1:	cmpwi	cr0,r3,0xe60
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	handle_hmi_exception
	b	ret_from_except
1:	cmpwi	cr0,r3,0x900
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	timer_interrupt
	b	ret_from_except
#ifdef CONFIG_PPC_DOORBELL
1:
#ifdef CONFIG_PPC_BOOK3E
	cmpwi	cr0,r3,0x280
#else
	BEGIN_FTR_SECTION
		cmpwi	cr0,r3,0xe80
	FTR_SECTION_ELSE
		cmpwi	cr0,r3,0xa00
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
#endif /* CONFIG_PPC_BOOK3E */
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	doorbell_exception
	b	ret_from_except
#endif /* CONFIG_PPC_DOORBELL */
1:	b	ret_from_except /* What else to do here ? */

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

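	/*
	 * Build the MSRs for the RTAS call: r0 keeps the current MSR with
	 * EE/SE/BE/RI cleared (used for the mtmsrd below), while r6 also
	 * drops SF/IR/DR/FP/FE0/FE1/RI/LE to give RTAS a 32-bit, real-mode,
	 * big-endian MSR via SRR1.
	 */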
	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li      r9,1
	rldicr  r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
	andc	r6,r0,r9
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

rtas_return_loc:
	FIXUP_ENDIAN

	/* relocation is off at this point */
	GET_PACA(r4)
	clrldi	r4,r4,2			/* convert to realmode address */

	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)		/* get &rtas_restore_regs */

	mfmsr   r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd  r6

	ld	r1,PACAR1(r4)           /* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)     /* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

	.align	3
1:	.llong	rtas_restore_regs

rtas_restore_regs:
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	GET_PACA(r13)

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr    r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r10
	mfmsr	r11
	std	r10,_CCR(r1)
	std	r11,_MSR(r1)

	/* Put PROM address in SRR0 */
	mtsrr0	r4

	/* Setup our trampoline return addr in LR */
	bcl	20,31,$+4
0:	mflr	r4
	addi	r4,r4,(1f - 0b)
	mtlr	r4

	/* Prepare a 32-bit mode big endian MSR
	 */
#ifdef CONFIG_PPC_BOOK3E
	rlwinm	r11,r11,0,1,31
	mtsrr1	r11
	rfi
#else /* CONFIG_PPC_BOOK3E */
	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
	andc	r11,r11,r12
	mtsrr1	r11
	rfid
#endif /* CONFIG_PPC_BOOK3E */

1:	/* Return from OF */
	FIXUP_ENDIAN

	/* Just make sure that r1 top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	MTMSRD(r0)
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr    r0
	blr

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
	blr

_GLOBAL_TOC(ftrace_caller)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)
	subi	r3, r3, MCOUNT_INSN_SIZE
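	/* r3 = ip of the mcount call site in the traced function,
	 * r4 = return address into its caller (parent ip), as passed
	 * to the ftrace callback below.
	 */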
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr
#else
_GLOBAL_TOC(_mcount)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5,ftrace_trace_function)
	ld	r5,0(r5)
	ld	r5,0(r5)
	mtctr	r5
	bctrl
	nop


#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	ld	r4, 128(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* Grab the LR out of the caller stack frame */
	ld	r11, 112(r1)
	ld	r3, 16(r11)

	bl	prepare_ftrace_return
	nop

	/*
	 * prepare_ftrace_return gives us the address we divert to.
	 * Change the LR in the callers stack frame to this.
	 */
	ld	r11, 112(r1)
	std	r3, 16(r11)

	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
	blr

_GLOBAL(return_to_handler)
	/* need to save return values */
	std	r4,  -32(r1)
	std	r3,  -24(r1)
	/* save TOC */
	std	r2,  -16(r1)
	std	r31, -8(r1)
	mr	r31, r1
	stdu	r1, -112(r1)

	/*
	 * We might be called from a module.
	 * Switch to our TOC to run inside the core kernel.
	 */
	ld	r2, PACATOC(r13)

	bl	ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	ld	r1, 0(r1)
	ld	r4,  -32(r1)
	ld	r3,  -24(r1)
	ld	r2,  -16(r1)
	ld	r31, -8(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */