1 /*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
5 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
6 * Adapted for Power Macintosh by Paul Mackerras.
7 * Low-level exception handlers and MMU support
8 * rewritten by Paul Mackerras.
9 * Copyright (C) 1996 Paul Mackerras.
10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 * This file contains the system call entry code, context switch
13 * code, and exception/interrupt return code for PowerPC.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 */
20
21 #include <linux/errno.h>
22 #include <linux/err.h>
23 #include <linux/magic.h>
24 #include <asm/unistd.h>
25 #include <asm/processor.h>
26 #include <asm/page.h>
27 #include <asm/mmu.h>
28 #include <asm/thread_info.h>
29 #include <asm/ppc_asm.h>
30 #include <asm/asm-offsets.h>
31 #include <asm/cputable.h>
32 #include <asm/firmware.h>
33 #include <asm/bug.h>
34 #include <asm/ptrace.h>
35 #include <asm/irqflags.h>
36 #include <asm/ftrace.h>
37 #include <asm/hw_irq.h>
38 #include <asm/context_tracking.h>
39 #include <asm/tm.h>
40 #include <asm/ppc-opcode.h>
41 #include <asm/export.h>
42
43 /*
44 * System calls.
45 */
46 .section ".toc","aw"
47 SYS_CALL_TABLE:
48 .tc sys_call_table[TC],sys_call_table
49
50 /* This value is used to mark exception frames on the stack. */
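/* STACK_FRAME_REGS_MARKER is the ASCII string "regshere"; it sits just
 * below a saved pt_regs so stack unwinders can recognise exception frames.
 */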
51 exception_marker:
52 .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
53
54 .section ".text"
55 .align 7
56
57 .globl system_call_common
58 system_call_common:
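	/*
	 * On entry (set up by the syscall exception prolog):
	 *   r0       syscall number
	 *   r3-r8    syscall arguments
	 *   r9       userspace r13 (saved so r13 can hold the PACA)
	 *   r11,r12  SRR0 (NIP) and SRR1 (MSR) at the time of the syscall
	 *   r13      PACA pointer
	 */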
59 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
60 BEGIN_FTR_SECTION
61 extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
62 bne tabort_syscall
63 END_FTR_SECTION_IFSET(CPU_FTR_TM)
64 #endif
65 andi. r10,r12,MSR_PR
66 mr r10,r1
67 addi r1,r1,-INT_FRAME_SIZE
68 beq- 1f
69 ld r1,PACAKSAVE(r13)
70 1: std r10,0(r1)
71 std r11,_NIP(r1)
72 std r12,_MSR(r1)
73 std r0,GPR0(r1)
74 std r10,GPR1(r1)
75 beq 2f /* if from kernel mode */
76 ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
77 2: std r2,GPR2(r1)
78 std r3,GPR3(r1)
79 mfcr r2
80 std r4,GPR4(r1)
81 std r5,GPR5(r1)
82 std r6,GPR6(r1)
83 std r7,GPR7(r1)
84 std r8,GPR8(r1)
85 li r11,0
86 std r11,GPR9(r1)
87 std r11,GPR10(r1)
88 std r11,GPR11(r1)
89 std r11,GPR12(r1)
90 std r11,_XER(r1)
91 std r11,_CTR(r1)
92 std r9,GPR13(r1)
93 mflr r10
94 /*
95 * This clears CR0.SO (bit 28), which is the error indication on
96 * return from this system call.
97 */
98 rldimi r2,r11,28,(63-28)
99 li r11,0xc01
100 std r10,_LINK(r1)
101 std r11,_TRAP(r1)
102 std r3,ORIG_GPR3(r1)
103 std r2,_CCR(r1)
104 ld r2,PACATOC(r13)
105 addi r9,r1,STACK_FRAME_OVERHEAD
106 ld r11,exception_marker@toc(r2)
107 std r11,-16(r9) /* "regshere" marker */
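	/*
	 * On shared-processor LPARs, fold any dispatch trace log (DTL)
	 * entries the hypervisor has added since we last looked into the
	 * stolen-time accounting before running the syscall.
	 */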
108 #if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
109 BEGIN_FW_FTR_SECTION
110 beq 33f
111 /* if from user, see if there are any DTL entries to process */
112 ld r10,PACALPPACAPTR(r13) /* get ptr to VPA */
113 ld r11,PACA_DTL_RIDX(r13) /* get log read index */
114 addi r10,r10,LPPACA_DTLIDX
115 LDX_BE r10,0,r10 /* get log write index */
116 cmpd cr1,r11,r10
117 beq+ cr1,33f
118 bl accumulate_stolen_time
119 REST_GPR(0,r1)
120 REST_4GPRS(3,r1)
121 REST_2GPRS(7,r1)
122 addi r9,r1,STACK_FRAME_OVERHEAD
123 33:
124 END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
125 #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */
126
127 /*
128 * A syscall should always be called with interrupts enabled
129 * so we just unconditionally hard-enable here. When some kind
130 * of irq tracing is used, we additionally check that the condition
131 * holds (i.e. interrupts really were enabled).
132 */
133 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
134 lbz r10,PACASOFTIRQEN(r13)
135 xori r10,r10,1
136 1: tdnei r10,0
137 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
138 #endif
139
140 #ifdef CONFIG_PPC_BOOK3E
141 wrteei 1
142 #else
143 li r11,MSR_RI
144 ori r11,r11,MSR_EE
145 mtmsrd r11,1
146 #endif /* CONFIG_PPC_BOOK3E */
147
148 /* We do need to set SOFTE in the stack frame or the return
149 * from interrupt will be painful
150 */
151 li r10,1
152 std r10,SOFTE(r1)
153
154 CURRENT_THREAD_INFO(r11, r1)
155 ld r10,TI_FLAGS(r11)
156 andi. r11,r10,_TIF_SYSCALL_DOTRACE
157 bne syscall_dotrace /* does not return */
158 cmpldi 0,r0,NR_syscalls
159 bge- syscall_enosys
160
161 system_call: /* label this so stack traces look sane */
162 /*
163 * Need to vector to the 32-bit or default sys_call_table here,
164 * based on caller's run-mode / personality.
165 */
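	/*
	 * sys_call_table interleaves two 8-byte entries per syscall: the
	 * 64-bit handler followed by its 32-bit (compat) handler.  Hence
	 * the index is the syscall number * 16, with +8 selecting the
	 * compat entry for 32-bit tasks.
	 */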
166 ld r11,SYS_CALL_TABLE@toc(2)
167 andi. r10,r10,_TIF_32BIT
168 beq 15f
169 addi r11,r11,8 /* use 32-bit syscall entries */
170 clrldi r3,r3,32
171 clrldi r4,r4,32
172 clrldi r5,r5,32
173 clrldi r6,r6,32
174 clrldi r7,r7,32
175 clrldi r8,r8,32
176 15:
177 slwi r0,r0,4
178 ldx r12,r11,r0 /* Fetch system call handler [ptr] */
179 mtctr r12
180 bctrl /* Call handler */
181
182 .Lsyscall_exit:
183 std r3,RESULT(r1)
184 CURRENT_THREAD_INFO(r12, r1)
185
186 ld r8,_MSR(r1)
187 #ifdef CONFIG_PPC_BOOK3S
188 /* No MSR:RI on BookE */
189 andi. r10,r8,MSR_RI
190 beq- unrecov_restore
191 #endif
192 /*
193 * Disable interrupts so current_thread_info()->flags can't change,
194 * and so that we don't get interrupted after loading SRR0/1.
195 */
196 #ifdef CONFIG_PPC_BOOK3E
197 wrteei 0
198 #else
199 /*
200 * For performance reasons we clear RI the same time that we
201 * clear EE. We only need to clear RI just before we restore r13
202 * below, but batching it with EE saves us one expensive mtmsrd call.
203 * We have to be careful to restore RI if we branch anywhere from
204 * here (eg syscall_exit_work).
205 */
206 li r11,0
207 mtmsrd r11,1
208 #endif /* CONFIG_PPC_BOOK3E */
209
210 ld r9,TI_FLAGS(r12)
211 li r11,-MAX_ERRNO
212 andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
213 bne- syscall_exit_work
214
215 andi. r0,r8,MSR_FP
216 beq 2f
217 #ifdef CONFIG_ALTIVEC
218 andis. r0,r8,MSR_VEC@h
219 bne 3f
220 #endif
221 2: addi r3,r1,STACK_FRAME_OVERHEAD
222 #ifdef CONFIG_PPC_BOOK3S
223 li r10,MSR_RI
224 mtmsrd r10,1 /* Restore RI */
225 #endif
226 bl restore_math
227 #ifdef CONFIG_PPC_BOOK3S
228 li r11,0
229 mtmsrd r11,1
230 #endif
231 ld r8,_MSR(r1)
232 ld r3,RESULT(r1)
233 li r11,-MAX_ERRNO
234
235 3: cmpld r3,r11
236 ld r5,_CCR(r1)
237 bge- syscall_error
238 .Lsyscall_error_cont:
239 ld r7,_NIP(r1)
240 BEGIN_FTR_SECTION
241 stdcx. r0,0,r1 /* to clear the reservation */
242 END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
243 andi. r6,r8,MSR_PR
244 ld r4,_LINK(r1)
245
246 beq- 1f
247 ACCOUNT_CPU_USER_EXIT(r13, r11, r12)
248
249 BEGIN_FTR_SECTION
250 HMT_MEDIUM_LOW
251 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
252
253 ld r13,GPR13(r1) /* only restore r13 if returning to usermode */
254 1: ld r2,GPR2(r1)
255 ld r1,GPR1(r1)
256 mtlr r4
257 mtcr r5
258 mtspr SPRN_SRR0,r7
259 mtspr SPRN_SRR1,r8
260 RFI
261 b . /* prevent speculative execution */
262
263 syscall_error:
264 oris r5,r5,0x1000 /* Set SO bit in CR */
265 neg r3,r3
266 std r5,_CCR(r1)
267 b .Lsyscall_error_cont
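	/*
	 * Userspace sees a failing syscall as CR0.SO set with a positive
	 * errno in r3.  A typical libc stub looks roughly like (sketch,
	 * not taken from this tree):
	 *	sc
	 *	bnslr			# SO clear: success, result in r3
	 *	b	handle_error	# hypothetical label; r3 = errno
	 */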
268
269 /* Traced system call support */
270 syscall_dotrace:
271 bl save_nvgprs
272 addi r3,r1,STACK_FRAME_OVERHEAD
273 bl do_syscall_trace_enter
274
275 /*
276 * We use the return value of do_syscall_trace_enter() as the syscall
277 * number. If the syscall was rejected for any reason do_syscall_trace_enter()
278 * returns an invalid syscall number and the test below against
279 * NR_syscalls will fail.
280 */
281 mr r0,r3
282
283 /* Restore argument registers just clobbered and/or possibly changed. */
284 ld r3,GPR3(r1)
285 ld r4,GPR4(r1)
286 ld r5,GPR5(r1)
287 ld r6,GPR6(r1)
288 ld r7,GPR7(r1)
289 ld r8,GPR8(r1)
290
291 /* Repopulate r9 and r10 for the system_call path */
292 addi r9,r1,STACK_FRAME_OVERHEAD
293 CURRENT_THREAD_INFO(r10, r1)
294 ld r10,TI_FLAGS(r10)
295
296 cmpldi r0,NR_syscalls
297 blt+ system_call
298
299 /* Return code is already in r3 thanks to do_syscall_trace_enter() */
300 b .Lsyscall_exit
301
302
303 syscall_enosys:
304 li r3,-ENOSYS
305 b .Lsyscall_exit
306
307 syscall_exit_work:
308 #ifdef CONFIG_PPC_BOOK3S
309 li r10,MSR_RI
310 mtmsrd r10,1 /* Restore RI */
311 #endif
312 /* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
313 If TIF_NOERROR is set, just save r3 as it is. */
314
315 andi. r0,r9,_TIF_RESTOREALL
316 beq+ 0f
317 REST_NVGPRS(r1)
318 b 2f
319 0: cmpld r3,r11 /* r11 is -MAX_ERRNO */
320 blt+ 1f
321 andi. r0,r9,_TIF_NOERROR
322 bne- 1f
323 ld r5,_CCR(r1)
324 neg r3,r3
325 oris r5,r5,0x1000 /* Set SO bit in CR */
326 std r5,_CCR(r1)
327 1: std r3,GPR3(r1)
328 2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
329 beq 4f
330
331 /* Clear per-syscall TIF flags if any are set. */
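	/* Other TI_FLAGS bits (e.g. _TIF_NEED_RESCHED set from another CPU)
	 * can change concurrently, so clear our per-syscall bits with a
	 * larx/stcx. loop rather than a plain load/store.
	 */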
332
333 li r11,_TIF_PERSYSCALL_MASK
334 addi r12,r12,TI_FLAGS
335 3: ldarx r10,0,r12
336 andc r10,r10,r11
337 stdcx. r10,0,r12
338 bne- 3b
339 subi r12,r12,TI_FLAGS
340
341 4: /* Anything else left to do? */
342 BEGIN_FTR_SECTION
343 lis r3,INIT_PPR@highest /* Set thread.ppr = 3 */
344 ld r10,PACACURRENT(r13)
345 sldi r3,r3,32 /* bits 11-13 are used for ppr */
346 std r3,TASKTHREADPPR(r10)
347 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
348
349 andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
350 beq ret_from_except_lite
351
352 /* Re-enable interrupts */
353 #ifdef CONFIG_PPC_BOOK3E
354 wrteei 1
355 #else
356 li r10,MSR_RI
357 ori r10,r10,MSR_EE
358 mtmsrd r10,1
359 #endif /* CONFIG_PPC_BOOK3E */
360
361 bl save_nvgprs
362 addi r3,r1,STACK_FRAME_OVERHEAD
363 bl do_syscall_trace_leave
364 b ret_from_except
365
366 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
367 tabort_syscall:
368 /* Firstly we need to enable TM in the kernel */
369 mfmsr r10
370 li r9, 1
371 rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG
372 mtmsrd r10, 0
373
374 /* tabort, this dooms the transaction, nothing else */
375 li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
376 TABORT(R9)
377
378 /*
379 * Return directly to userspace. We have corrupted user register state,
380 * but userspace will never see that register state. Execution will
381 * resume after the tbegin of the aborted transaction with the
382 * checkpointed register state.
383 */
384 li r9, MSR_RI
385 andc r10, r10, r9
386 mtmsrd r10, 1
387 mtspr SPRN_SRR0, r11
388 mtspr SPRN_SRR1, r12
389
390 rfid
391 b . /* prevent speculative execution */
392 #endif
393
394 /* Save non-volatile GPRs, if not already saved. */
395 _GLOBAL(save_nvgprs)
396 ld r11,_TRAP(r1)
397 andi. r0,r11,1
398 beqlr-
399 SAVE_NVGPRS(r1)
400 clrrdi r0,r11,1
401 std r0,_TRAP(r1)
402 blr
403
404
405 /*
406 * The sigsuspend and rt_sigsuspend system calls can call do_signal
407 * and thus put the process into the stopped state where we might
408 * want to examine its user state with ptrace. Therefore we need
409 * to save all the nonvolatile registers (r14 - r31) before calling
410 * the C code. Similarly, fork, vfork and clone need the full
411 * register state on the stack so that it can be copied to the child.
412 */
413
414 _GLOBAL(ppc_fork)
415 bl save_nvgprs
416 bl sys_fork
417 b .Lsyscall_exit
418
419 _GLOBAL(ppc_vfork)
420 bl save_nvgprs
421 bl sys_vfork
422 b .Lsyscall_exit
423
424 _GLOBAL(ppc_clone)
425 bl save_nvgprs
426 bl sys_clone
427 b .Lsyscall_exit
428
429 _GLOBAL(ppc32_swapcontext)
430 bl save_nvgprs
431 bl compat_sys_swapcontext
432 b .Lsyscall_exit
433
434 _GLOBAL(ppc64_swapcontext)
435 bl save_nvgprs
436 bl sys_swapcontext
437 b .Lsyscall_exit
438
439 _GLOBAL(ppc_switch_endian)
440 bl save_nvgprs
441 bl sys_switch_endian
442 b .Lsyscall_exit
443
444 _GLOBAL(ret_from_fork)
445 bl schedule_tail
446 REST_NVGPRS(r1)
447 li r3,0
448 b .Lsyscall_exit
449
450 _GLOBAL(ret_from_kernel_thread)
451 bl schedule_tail
452 REST_NVGPRS(r1)
453 mtlr r14
454 mr r3,r15
455 #ifdef PPC64_ELF_ABI_v2
456 mr r12,r14
457 #endif
458 blrl
459 li r3,0
460 b .Lsyscall_exit
461
462 /*
463 * This routine switches between two different tasks. The process
464 * state of one is saved on its kernel stack. Then the state
465 * of the other is restored from its kernel stack. The memory
466 * management hardware is updated to the second process's state.
467 * Finally, we can return to the second process, via ret_from_except.
468 * On entry, r3 points to the THREAD for the current task, r4
469 * points to the THREAD for the new task.
470 *
471 * Note: there are two ways to get to the "going out" portion
472 * of this code; either by coming in via the entry (_switch)
473 * or via "fork" which must set up an environment equivalent
474 * to the "_switch" path. If you change this you'll have to change
475 * the fork code also.
476 *
477 * The code which creates the new task context is in 'copy_thread'
478 * in arch/powerpc/kernel/process.c
479 */
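/*
 * On return, r3 holds the task_struct of the previously running task
 * (the old THREAD pointer converted back at the end of this function).
 */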
480 .align 7
481 _GLOBAL(_switch)
482 mflr r0
483 std r0,16(r1)
484 stdu r1,-SWITCH_FRAME_SIZE(r1)
485 /* r3-r13 are caller saved -- Cort */
486 SAVE_8GPRS(14, r1)
487 SAVE_10GPRS(22, r1)
488 std r0,_NIP(r1) /* Return to switch caller */
489 mfcr r23
490 std r23,_CCR(r1)
491 std r1,KSP(r3) /* Set old stack pointer */
492
493 #ifdef CONFIG_SMP
494 /* We need a sync somewhere here to make sure that if the
495 * previous task gets rescheduled on another CPU, it sees all
496 * stores it has performed on this one.
497 */
498 sync
499 #endif /* CONFIG_SMP */
500
501 /*
502 * If we optimise away the clear of the reservation in system
503 * calls because we know the CPU tracks the address of the
504 * reservation, then we need to clear it here to cover the
505 * case that the kernel context switch path has no larx
506 * instructions.
507 */
508 BEGIN_FTR_SECTION
509 ldarx r6,0,r1
510 END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)
511
512 BEGIN_FTR_SECTION
513 /*
514 * A cp_abort (copy paste abort) here ensures that when context switching, a
515 * copy from one process can't leak into the paste of another.
516 */
517 PPC_CP_ABORT
518 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
519
520 #ifdef CONFIG_PPC_BOOK3S
521 /* Cancel all explicit user streams as they will have no use after context
522 * switch and will stop the HW from creating streams itself
523 */
524 DCBT_STOP_ALL_STREAM_IDS(r6)
525 #endif
526
527 addi r6,r4,-THREAD /* Convert THREAD to 'current' */
528 std r6,PACACURRENT(r13) /* Set new 'current' */
529
530 ld r8,KSP(r4) /* new stack pointer */
531 #ifdef CONFIG_PPC_STD_MMU_64
532 BEGIN_MMU_FTR_SECTION
533 b 2f
534 END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
535 BEGIN_FTR_SECTION
536 clrrdi r6,r8,28 /* get its ESID */
537 clrrdi r9,r1,28 /* get current sp ESID */
538 FTR_SECTION_ELSE
539 clrrdi r6,r8,40 /* get its 1T ESID */
540 clrrdi r9,r1,40 /* get current sp 1T ESID */
541 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
542 clrldi. r0,r6,2 /* is new ESID c00000000? */
543 cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */
544 cror eq,4*cr1+eq,eq
545 beq 2f /* if yes, don't slbie it */
546
547 /* Bolt in the new stack SLB entry */
548 ld r7,KSP_VSID(r4) /* Get new stack's VSID */
549 oris r0,r6,(SLB_ESID_V)@h
550 ori r0,r0,(SLB_NUM_BOLTED-1)@l
551 BEGIN_FTR_SECTION
552 li r9,MMU_SEGSIZE_1T /* insert B field */
553 oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
554 rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0
555 END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
556
557 /* Update the last bolted SLB. No write barriers are needed
558 * here, provided we only update the current CPU's SLB shadow
559 * buffer.
560 */
561 ld r9,PACA_SLBSHADOWPTR(r13)
562 li r12,0
563 std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
564 li r12,SLBSHADOW_STACKVSID
565 STDX_BE r7,r12,r9 /* Save VSID */
566 li r12,SLBSHADOW_STACKESID
567 STDX_BE r0,r12,r9 /* Save ESID */
568
569 /* No need to check for MMU_FTR_NO_SLBIE_B here, since when
570 * we have 1TB segments, the only CPUs known to have the errata
571 * only support less than 1TB of system memory and we'll never
572 * actually hit this code path.
573 */
574
575 slbie r6
576 slbie r6 /* Workaround POWER5 < DD2.1 issue */
577 slbmte r7,r0
578 isync
579 2:
580 #endif /* CONFIG_PPC_STD_MMU_64 */
581
582 CURRENT_THREAD_INFO(r7, r8) /* base of new stack */
583 /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
584 because we don't need to leave the 288-byte ABI gap at the
585 top of the kernel stack. */
586 addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
587
588 mr r1,r8 /* start using new stack pointer */
589 std r7,PACAKSAVE(r13)
590
591 ld r6,_CCR(r1)
592 mtcrf 0xFF,r6
593
594 /* r3-r13 are destroyed -- Cort */
595 REST_8GPRS(14, r1)
596 REST_10GPRS(22, r1)
597
598 /* convert old thread to its task_struct for return value */
599 addi r3,r3,-THREAD
600 ld r7,_NIP(r1) /* Return to _switch caller in new task */
601 mtlr r7
602 addi r1,r1,SWITCH_FRAME_SIZE
603 blr
604
605 .align 7
606 _GLOBAL(ret_from_except)
607 ld r11,_TRAP(r1)
608 andi. r0,r11,1
609 bne ret_from_except_lite
610 REST_NVGPRS(r1)
611
612 _GLOBAL(ret_from_except_lite)
613 /*
614 * Disable interrupts so that current_thread_info()->flags
615 * can't change between when we test it and when we return
616 * from the interrupt.
617 */
618 #ifdef CONFIG_PPC_BOOK3E
619 wrteei 0
620 #else
621 li r10,MSR_RI
622 mtmsrd r10,1 /* Update machine state */
623 #endif /* CONFIG_PPC_BOOK3E */
624
625 CURRENT_THREAD_INFO(r9, r1)
626 ld r3,_MSR(r1)
627 #ifdef CONFIG_PPC_BOOK3E
628 ld r10,PACACURRENT(r13)
629 #endif /* CONFIG_PPC_BOOK3E */
630 ld r4,TI_FLAGS(r9)
631 andi. r3,r3,MSR_PR
632 beq resume_kernel
633 #ifdef CONFIG_PPC_BOOK3E
634 lwz r3,(THREAD+THREAD_DBCR0)(r10)
635 #endif /* CONFIG_PPC_BOOK3E */
636
637 /* Check current_thread_info()->flags */
638 andi. r0,r4,_TIF_USER_WORK_MASK
639 bne 1f
640 #ifdef CONFIG_PPC_BOOK3E
641 /*
642 * Check to see if the dbcr0 register is set up to debug.
643 * Use the internal debug mode bit to do this.
644 */
645 andis. r0,r3,DBCR0_IDM@h
646 beq restore
647 mfmsr r0
648 rlwinm r0,r0,0,~MSR_DE /* Clear MSR.DE */
649 mtmsr r0
650 mtspr SPRN_DBCR0,r3
651 li r10, -1
652 mtspr SPRN_DBSR,r10
653 b restore
654 #else
655 addi r3,r1,STACK_FRAME_OVERHEAD
656 bl restore_math
657 b restore
658 #endif
659 1: andi. r0,r4,_TIF_NEED_RESCHED
660 beq 2f
661 bl restore_interrupts
662 SCHEDULE_USER
663 b ret_from_except_lite
664 2:
665 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
666 andi. r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
667 bne 3f /* only restore TM if nothing else to do */
668 addi r3,r1,STACK_FRAME_OVERHEAD
669 bl restore_tm_state
670 b restore
671 3:
672 #endif
673 bl save_nvgprs
674 /*
675 * Use a non volatile GPR to save and restore our thread_info flags
676 * across the call to restore_interrupts.
677 */
678 mr r30,r4
679 bl restore_interrupts
680 mr r4,r30
681 addi r3,r1,STACK_FRAME_OVERHEAD
682 bl do_notify_resume
683 b ret_from_except
684
685 resume_kernel:
686 /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
687 andis. r8,r4,_TIF_EMULATE_STACK_STORE@h
688 beq+ 1f
689
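	/*
	 * The kernel emulated a store that updates the stack pointer (e.g.
	 * an stdu of r1 single-stepped for a probe).  The store could not be
	 * performed at emulation time because it would have landed on this
	 * exception frame, so copy the frame below the new stack pointer
	 * first and only then complete the store.
	 */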
690 addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
691
692 lwz r3,GPR1(r1)
693 subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
694 mr r4,r1 /* src: current exception frame */
695 mr r1,r3 /* Reroute the trampoline frame to r1 */
696
697 /* Copy from the original to the trampoline. */
698 li r5,INT_FRAME_SIZE/8 /* size: INT_FRAME_SIZE */
699 li r6,0 /* start offset: 0 */
700 mtctr r5
701 2: ldx r0,r6,r4
702 stdx r0,r6,r3
703 addi r6,r6,8
704 bdnz 2b
705
706 /* Do the real store operation to complete the emulated stwu/stdu */
707 lwz r5,GPR1(r1)
708 std r8,0(r5)
709
710 /* Clear _TIF_EMULATE_STACK_STORE flag */
711 lis r11,_TIF_EMULATE_STACK_STORE@h
712 addi r5,r9,TI_FLAGS
713 0: ldarx r4,0,r5
714 andc r4,r4,r11
715 stdcx. r4,0,r5
716 bne- 0b
717 1:
718
719 #ifdef CONFIG_PREEMPT
720 /* Check if we need to preempt */
721 andi. r0,r4,_TIF_NEED_RESCHED
722 beq+ restore
723 /* Check that preempt_count() == 0 and interrupts are enabled */
724 lwz r8,TI_PREEMPT(r9)
725 cmpwi cr1,r8,0
726 ld r0,SOFTE(r1)
727 cmpdi r0,0
728 crandc eq,cr1*4+eq,eq
729 bne restore
730
731 /*
732 * Here we are preempting the current task. We want to make
733 * sure we are soft-disabled first and reconcile irq state.
734 */
735 RECONCILE_IRQ_STATE(r3,r4)
736 1: bl preempt_schedule_irq
737
738 /* Re-test flags and loop again if needed */
739 CURRENT_THREAD_INFO(r9, r1)
740 ld r4,TI_FLAGS(r9)
741 andi. r0,r4,_TIF_NEED_RESCHED
742 bne 1b
743
744 /*
745 * arch_local_irq_restore() from preempt_schedule_irq above may
746 * hard-enable interrupts, but we really should have them disabled
747 * when we return from the interrupt, so that we don't get
748 * interrupted after loading SRR0/1.
749 */
750 #ifdef CONFIG_PPC_BOOK3E
751 wrteei 0
752 #else
753 li r10,MSR_RI
754 mtmsrd r10,1 /* Update machine state */
755 #endif /* CONFIG_PPC_BOOK3E */
756 #endif /* CONFIG_PREEMPT */
757
758 .globl fast_exc_return_irq
759 fast_exc_return_irq:
760 restore:
761 /*
762 * This is the main kernel exit path. First we check if we
763 * are about to re-enable interrupts
764 */
765 ld r5,SOFTE(r1)
766 lbz r6,PACASOFTIRQEN(r13)
767 cmpwi cr0,r5,0
768 beq restore_irq_off
769
770 /* We are enabling, were we already enabled? If so, just return */
771 cmpwi cr0,r6,1
772 beq cr0,do_restore
773
774 /*
775 * We are about to soft-enable interrupts (we are hard disabled
776 * at this point). We check if there's anything that needs to
777 * be replayed first.
778 */
779 lbz r0,PACAIRQHAPPENED(r13)
780 cmpwi cr0,r0,0
781 bne- restore_check_irq_replay
782
783 /*
784 * Get here when nothing happened while soft-disabled, just
785 * soft-enable and move on. We will hard-enable as a side
786 * effect of rfi
787 */
788 restore_no_replay:
789 TRACE_ENABLE_INTS
790 li r0,1
791 stb r0,PACASOFTIRQEN(r13);
792
793 /*
794 * Final return path. BookE is handled in a different file
795 */
796 do_restore:
797 #ifdef CONFIG_PPC_BOOK3E
798 b exception_return_book3e
799 #else
800 /*
801 * Clear the reservation. If we know the CPU tracks the address of
802 * the reservation then we can potentially save some cycles and use
803 * a larx. On POWER6 and POWER7 this is significantly faster.
804 */
805 BEGIN_FTR_SECTION
806 stdcx. r0,0,r1 /* to clear the reservation */
807 FTR_SECTION_ELSE
808 ldarx r4,0,r1
809 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
810
811 /*
812 * Some code paths such as load_up_fpu or altivec return directly
813 * here. They run entirely hard disabled and do not alter the
814 * interrupt state. They also don't use lwarx/stwcx. and thus
815 * are known not to leave dangling reservations.
816 */
817 .globl fast_exception_return
818 fast_exception_return:
819 ld r3,_MSR(r1)
820 ld r4,_CTR(r1)
821 ld r0,_LINK(r1)
822 mtctr r4
823 mtlr r0
824 ld r4,_XER(r1)
825 mtspr SPRN_XER,r4
826
827 REST_8GPRS(5, r1)
828
829 andi. r0,r3,MSR_RI
830 beq- unrecov_restore
831
832 /* Load PPR from thread struct before we clear MSR:RI */
833 BEGIN_FTR_SECTION
834 ld r2,PACACURRENT(r13)
835 ld r2,TASKTHREADPPR(r2)
836 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
837
838 /*
839 * Clear RI before restoring r13. If we are returning to
840 * userspace and we take an exception after restoring r13,
841 * we end up corrupting the userspace r13 value.
842 */
843 li r4,0
844 mtmsrd r4,1
845
846 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
847 /* TM debug */
848 std r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */
849 #endif
850 /*
851 * r13 is our per cpu area, only restore it if we are returning to
852 * userspace; otherwise the value stored in the stack frame may belong
853 * to another CPU.
854 */
855 andi. r0,r3,MSR_PR
856 beq 1f
857 BEGIN_FTR_SECTION
858 mtspr SPRN_PPR,r2 /* Restore PPR */
859 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
860 ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
861 REST_GPR(13, r1)
862 1:
863 mtspr SPRN_SRR1,r3
864
865 ld r2,_CCR(r1)
866 mtcrf 0xFF,r2
867 ld r2,_NIP(r1)
868 mtspr SPRN_SRR0,r2
869
870 ld r0,GPR0(r1)
871 ld r2,GPR2(r1)
872 ld r3,GPR3(r1)
873 ld r4,GPR4(r1)
874 ld r1,GPR1(r1)
875
876 rfid
877 b . /* prevent speculative execution */
878
879 #endif /* CONFIG_PPC_BOOK3E */
880
881 /*
882 * We are returning to a context with interrupts soft disabled.
883 *
884 * However, we may also be about to hard-enable, so we need to
885 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
886 * or that bit can get out of sync and bad things will happen
887 */
888 restore_irq_off:
889 ld r3,_MSR(r1)
890 lbz r7,PACAIRQHAPPENED(r13)
891 andi. r0,r3,MSR_EE
892 beq 1f
893 rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS
894 stb r7,PACAIRQHAPPENED(r13)
895 1: li r0,0
896 stb r0,PACASOFTIRQEN(r13);
897 TRACE_DISABLE_INTS
898 b do_restore
899
900 /*
901 * Something did happen, check if a re-emit is needed
902 * (this also clears paca->irq_happened)
903 */
904 restore_check_irq_replay:
905 /* XXX: We could implement a fast path here where we check
906 * for irq_happened being just 0x01, in which case we can
907 * clear it and return. That means that we would potentially
908 * miss a decrementer having wrapped all the way around.
909 *
910 * Still, this might be useful for things like hash_page
911 */
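	/*
	 * __check_irq_replay() consumes paca->irq_happened and returns the
	 * trap vector of an interrupt to replay (external, decrementer, HMI
	 * or doorbell), or 0 if there is nothing to do.
	 */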
912 bl __check_irq_replay
913 cmpwi cr0,r3,0
914 beq restore_no_replay
915
916 /*
917 * We need to re-emit an interrupt. We do so by re-using our
918 * existing exception frame. We first change the trap value,
919 * but we need to ensure we preserve the low nibble of it
920 */
921 ld r4,_TRAP(r1)
922 clrldi r4,r4,60
923 or r4,r4,r3
924 std r4,_TRAP(r1)
925
926 /*
927 * Then find the right handler and call it. Interrupts are
928 * still soft-disabled and we keep them that way.
929 */
930 cmpwi cr0,r3,0x500
931 bne 1f
932 addi r3,r1,STACK_FRAME_OVERHEAD;
933 bl do_IRQ
934 b ret_from_except
935 1: cmpwi cr0,r3,0xe60
936 bne 1f
937 addi r3,r1,STACK_FRAME_OVERHEAD;
938 bl handle_hmi_exception
939 b ret_from_except
940 1: cmpwi cr0,r3,0x900
941 bne 1f
942 addi r3,r1,STACK_FRAME_OVERHEAD;
943 bl timer_interrupt
944 b ret_from_except
945 #ifdef CONFIG_PPC_DOORBELL
946 1:
947 #ifdef CONFIG_PPC_BOOK3E
948 cmpwi cr0,r3,0x280
949 #else
950 BEGIN_FTR_SECTION
951 cmpwi cr0,r3,0xe80
952 FTR_SECTION_ELSE
953 cmpwi cr0,r3,0xa00
954 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
955 #endif /* CONFIG_PPC_BOOK3E */
956 bne 1f
957 addi r3,r1,STACK_FRAME_OVERHEAD;
958 bl doorbell_exception
959 b ret_from_except
960 #endif /* CONFIG_PPC_DOORBELL */
961 1: b ret_from_except /* What else to do here? */
962
963 unrecov_restore:
964 addi r3,r1,STACK_FRAME_OVERHEAD
965 bl unrecoverable_exception
966 b unrecov_restore
967
968 #ifdef CONFIG_PPC_RTAS
969 /*
970 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
971 * called with the MMU off.
972 *
973 * In addition, we need to be in 32b mode, at least for now.
974 *
975 * Note: r3 is an input parameter to rtas, so don't trash it...
976 */
977 _GLOBAL(enter_rtas)
978 mflr r0
979 std r0,16(r1)
980 stdu r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */
981
982 /* Because RTAS is running in 32b mode, it clobbers the high order half
983 * of all registers that it saves. We therefore save those registers
984 * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
985 */
986 SAVE_GPR(2, r1) /* Save the TOC */
987 SAVE_GPR(13, r1) /* Save paca */
988 SAVE_8GPRS(14, r1) /* Save the non-volatiles */
989 SAVE_10GPRS(22, r1) /* ditto */
990
991 mfcr r4
992 std r4,_CCR(r1)
993 mfctr r5
994 std r5,_CTR(r1)
995 mfspr r6,SPRN_XER
996 std r6,_XER(r1)
997 mfdar r7
998 std r7,_DAR(r1)
999 mfdsisr r8
1000 std r8,_DSISR(r1)
1001
1002 /* Temporary workaround to clear CR until RTAS can be modified to
1003 * ignore all bits.
1004 */
1005 li r0,0
1006 mtcr r0
1007
1008 #ifdef CONFIG_BUG
1009 /* It is never acceptable to get here with interrupts enabled;
1010 * check that with the asm equivalent of WARN_ON
1011 */
1012 lbz r0,PACASOFTIRQEN(r13)
1013 1: tdnei r0,0
1014 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
1015 #endif
1016
1017 /* Hard-disable interrupts */
1018 mfmsr r6
1019 rldicl r7,r6,48,1
1020 rotldi r7,r7,16
1021 mtmsrd r7,1
1022
1023 /* Unfortunately, the stack pointer and the MSR are also clobbered,
1024 * so they are saved in the PACA which allows us to restore
1025 * our original state after RTAS returns.
1026 */
1027 std r1,PACAR1(r13)
1028 std r6,PACASAVEDMSR(r13)
1029
1030 /* Setup our real return addr */
1031 LOAD_REG_ADDR(r4,rtas_return_loc)
1032 clrldi r4,r4,2 /* convert to realmode address */
1033 mtlr r4
1034
1035 li r0,0
1036 ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
1037 andc r0,r6,r0
1038
1039 li r9,1
1040 rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
1041 ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
1042 andc r6,r0,r9
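	/* r6 now holds the MSR RTAS will run with: 32-bit (SF clear), real
	 * mode (IR/DR clear), big-endian, with FP and interrupts disabled.
	 */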
1043 sync /* disable interrupts so SRR0/1 */
1044 mtmsrd r0 /* don't get trashed */
1045
1046 LOAD_REG_ADDR(r4, rtas)
1047 ld r5,RTASENTRY(r4) /* get the rtas->entry value */
1048 ld r4,RTASBASE(r4) /* get the rtas->base value */
1049
1050 mtspr SPRN_SRR0,r5
1051 mtspr SPRN_SRR1,r6
1052 rfid
1053 b . /* prevent speculative execution */
1054
1055 rtas_return_loc:
1056 FIXUP_ENDIAN
1057
1058 /* relocation is off at this point */
1059 GET_PACA(r4)
1060 clrldi r4,r4,2 /* convert to realmode address */
1061
1062 bcl 20,31,$+4
1063 0: mflr r3
1064 ld r3,(1f-0b)(r3) /* get &rtas_restore_regs */
1065
1066 mfmsr r6
1067 li r0,MSR_RI
1068 andc r6,r6,r0
1069 sync
1070 mtmsrd r6
1071
1072 ld r1,PACAR1(r4) /* Restore our SP */
1073 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
1074
1075 mtspr SPRN_SRR0,r3
1076 mtspr SPRN_SRR1,r4
1077 rfid
1078 b . /* prevent speculative execution */
1079
1080 .align 3
1081 1: .llong rtas_restore_regs
1082
1083 rtas_restore_regs:
1084 /* relocation is on at this point */
1085 REST_GPR(2, r1) /* Restore the TOC */
1086 REST_GPR(13, r1) /* Restore paca */
1087 REST_8GPRS(14, r1) /* Restore the non-volatiles */
1088 REST_10GPRS(22, r1) /* ditto */
1089
1090 GET_PACA(r13)
1091
1092 ld r4,_CCR(r1)
1093 mtcr r4
1094 ld r5,_CTR(r1)
1095 mtctr r5
1096 ld r6,_XER(r1)
1097 mtspr SPRN_XER,r6
1098 ld r7,_DAR(r1)
1099 mtdar r7
1100 ld r8,_DSISR(r1)
1101 mtdsisr r8
1102
1103 addi r1,r1,RTAS_FRAME_SIZE /* Unstack our frame */
1104 ld r0,16(r1) /* get return address */
1105
1106 mtlr r0
1107 blr /* return to caller */
1108
1109 #endif /* CONFIG_PPC_RTAS */
1110
1111 _GLOBAL(enter_prom)
1112 mflr r0
1113 std r0,16(r1)
1114 stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
1115
1116 /* Because PROM is running in 32b mode, it clobbers the high order half
1117 * of all registers that it saves. We therefore save those registers
1118 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
1119 */
1120 SAVE_GPR(2, r1)
1121 SAVE_GPR(13, r1)
1122 SAVE_8GPRS(14, r1)
1123 SAVE_10GPRS(22, r1)
1124 mfcr r10
1125 mfmsr r11
1126 std r10,_CCR(r1)
1127 std r11,_MSR(r1)
1128
1129 /* Put PROM address in SRR0 */
1130 mtsrr0 r4
1131
1132 /* Setup our trampoline return addr in LR */
1133 bcl 20,31,$+4
1134 0: mflr r4
1135 addi r4,r4,(1f - 0b)
1136 mtlr r4
1137
1138 /* Prepare a 32-bit mode big endian MSR
1139 */
1140 #ifdef CONFIG_PPC_BOOK3E
1141 rlwinm r11,r11,0,1,31
1142 mtsrr1 r11
1143 rfi
1144 #else /* CONFIG_PPC_BOOK3E */
1145 LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
1146 andc r11,r11,r12
1147 mtsrr1 r11
1148 rfid
1149 #endif /* CONFIG_PPC_BOOK3E */
1150
1151 1: /* Return from OF */
1152 FIXUP_ENDIAN
1153
1154 /* Just make sure that the top 32 bits of r1 didn't get
1155 * corrupted by OF
1156 */
1157 rldicl r1,r1,0,32
1158
1159 /* Restore the MSR (back to 64 bits) */
1160 ld r0,_MSR(r1)
1161 MTMSRD(r0)
1162 isync
1163
1164 /* Restore other registers */
1165 REST_GPR(2, r1)
1166 REST_GPR(13, r1)
1167 REST_8GPRS(14, r1)
1168 REST_10GPRS(22, r1)
1169 ld r4,_CCR(r1)
1170 mtcr r4
1171
1172 addi r1,r1,PROM_FRAME_SIZE
1173 ld r0,16(r1)
1174 mtlr r0
1175 blr
1176
1177 #ifdef CONFIG_FUNCTION_TRACER
1178 #ifdef CONFIG_DYNAMIC_FTRACE
1179 _GLOBAL(mcount)
1180 _GLOBAL(_mcount)
1181 EXPORT_SYMBOL(_mcount)
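	/*
	 * With DYNAMIC_FTRACE the compiler-inserted _mcount call sites are
	 * patched to nops at boot; if this stub is ever reached it simply
	 * restores the caller's LR from r0 and returns to the call site
	 * via ctr.
	 */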
1182 mflr r12
1183 mtctr r12
1184 mtlr r0
1185 bctr
1186
1187 #ifndef CC_USING_MPROFILE_KERNEL
1188 _GLOBAL_TOC(ftrace_caller)
1189 /* Taken from output of objdump from lib64/glibc */
1190 mflr r3
1191 ld r11, 0(r1)
1192 stdu r1, -112(r1)
1193 std r3, 128(r1)
1194 ld r4, 16(r11)
1195 subi r3, r3, MCOUNT_INSN_SIZE
1196 .globl ftrace_call
1197 ftrace_call:
1198 bl ftrace_stub
1199 nop
1200 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1201 .globl ftrace_graph_call
1202 ftrace_graph_call:
1203 b ftrace_graph_stub
1204 _GLOBAL(ftrace_graph_stub)
1205 #endif
1206 ld r0, 128(r1)
1207 mtlr r0
1208 addi r1, r1, 112
1209
1210 #else /* CC_USING_MPROFILE_KERNEL */
1211 /*
1212 *
1213 * ftrace_caller() is the function that replaces _mcount() when ftrace is
1214 * active.
1215 *
1216 * We arrive here after a function A calls function B, and we are the trace
1217 * function for B. When we enter, r1 points to A's stack frame, and B has
1218 * not yet had a chance to allocate one.
1219 *
1220 * Additionally r2 may point either to the TOC for A, or B, depending on
1221 * whether B did a TOC setup sequence before calling us.
1222 *
1223 * On entry the LR points back to the _mcount() call site, and r0 holds the
1224 * saved LR as it was on entry to B, ie. the original return address at the
1225 * call site in A.
1226 *
1227 * Our job is to save the register state into a struct pt_regs (on the stack)
1228 * and then arrange for the ftrace function to be called.
1229 */
1230 _GLOBAL(ftrace_caller)
1231 /* Save the original return address in A's stack frame */
1232 std r0,LRSAVE(r1)
1233
1234 /* Create our stack frame + pt_regs */
1235 stdu r1,-SWITCH_FRAME_SIZE(r1)
1236
1237 /* Save all gprs to pt_regs */
1238 SAVE_8GPRS(0,r1)
1239 SAVE_8GPRS(8,r1)
1240 SAVE_8GPRS(16,r1)
1241 SAVE_8GPRS(24,r1)
1242
1243 /* Load special regs for save below */
1244 mfmsr r8
1245 mfctr r9
1246 mfxer r10
1247 mfcr r11
1248
1249 /* Get the _mcount() call site out of LR */
1250 mflr r7
1251 /* Save it as pt_regs->nip & pt_regs->link */
1252 std r7, _NIP(r1)
1253 std r7, _LINK(r1)
1254
1255 /* Save callee's TOC in the ABI compliant location */
1256 std r2, 24(r1)
1257 ld r2,PACATOC(r13) /* get kernel TOC in r2 */
1258
1259 addis r3,r2,function_trace_op@toc@ha
1260 addi r3,r3,function_trace_op@toc@l
1261 ld r5,0(r3)
1262
1263 #ifdef CONFIG_LIVEPATCH
1264 mr r14,r7 /* remember old NIP */
1265 #endif
1266 /* Calculate ip from nip-4 into r3 for call below */
1267 subi r3, r7, MCOUNT_INSN_SIZE
1268
1269 /* Put the original return address in r4 as parent_ip */
1270 mr r4, r0
1271
1272 /* Save special regs */
1273 std r8, _MSR(r1)
1274 std r9, _CTR(r1)
1275 std r10, _XER(r1)
1276 std r11, _CCR(r1)
1277
1278 /* Load &pt_regs in r6 for call below */
1279 addi r6, r1 ,STACK_FRAME_OVERHEAD
1280
1281 /* ftrace_call(r3, r4, r5, r6) */
1282 .globl ftrace_call
1283 ftrace_call:
1284 bl ftrace_stub
1285 nop
1286
1287 /* Load ctr with the possibly modified NIP */
1288 ld r3, _NIP(r1)
1289 mtctr r3
1290 #ifdef CONFIG_LIVEPATCH
1291 cmpd r14,r3 /* has NIP been altered? */
1292 #endif
1293
1294 /* Restore gprs */
1295 REST_8GPRS(0,r1)
1296 REST_8GPRS(8,r1)
1297 REST_8GPRS(16,r1)
1298 REST_8GPRS(24,r1)
1299
1300 /* Restore callee's TOC */
1301 ld r2, 24(r1)
1302
1303 /* Pop our stack frame */
1304 addi r1, r1, SWITCH_FRAME_SIZE
1305
1306 /* Restore original LR for return to B */
1307 ld r0, LRSAVE(r1)
1308 mtlr r0
1309
1310 #ifdef CONFIG_LIVEPATCH
1311 /* Based on the cmpd above, if the NIP was altered handle livepatch */
1312 bne- livepatch_handler
1313 #endif
1314
1315 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1316 stdu r1, -112(r1)
1317 .globl ftrace_graph_call
1318 ftrace_graph_call:
1319 b ftrace_graph_stub
1320 _GLOBAL(ftrace_graph_stub)
1321 addi r1, r1, 112
1322 #endif
1323
1324 ld r0,LRSAVE(r1) /* restore callee's lr at _mcount site */
1325 mtlr r0
1326 bctr /* jump after _mcount site */
1327 #endif /* CC_USING_MPROFILE_KERNEL */
1328
1329 _GLOBAL(ftrace_stub)
1330 blr
1331
1332 #ifdef CONFIG_LIVEPATCH
1333 /*
1334 * This function runs in the mcount context, between two functions. As
1335 * such it can only clobber registers which are volatile and used in
1336 * function linkage.
1337 *
1338 * We get here when a function A calls another function B, but B has
1339 * been live patched with a new function C.
1340 *
1341 * On entry:
1342 * - we have no stack frame and cannot allocate one
1343 * - LR points back to the original caller (in A)
1344 * - CTR holds the new NIP in C
1345 * - r0 & r12 are free
1346 *
1347 * r0 can't be used as the base register for a DS-form load or store, so
1348 * we temporarily shuffle r1 (stack pointer) into r0 and then put it back.
1349 */
1350 livepatch_handler:
1351 CURRENT_THREAD_INFO(r12, r1)
1352
1353 /* Save stack pointer into r0 */
1354 mr r0, r1
1355
1356 /* Allocate 3 x 8 bytes */
1357 ld r1, TI_livepatch_sp(r12)
1358 addi r1, r1, 24
1359 std r1, TI_livepatch_sp(r12)
1360
1361 /* Save toc & real LR on livepatch stack */
1362 std r2, -24(r1)
1363 mflr r12
1364 std r12, -16(r1)
1365
1366 /* Store stack end marker */
1367 lis r12, STACK_END_MAGIC@h
1368 ori r12, r12, STACK_END_MAGIC@l
1369 std r12, -8(r1)
1370
1371 /* Restore real stack pointer */
1372 mr r1, r0
1373
1374 /* Put ctr in r12 for global entry and branch there */
1375 mfctr r12
1376 bctrl
1377
1378 /*
1379 * Now we are returning from the patched function to the original
1380 * caller A. We are free to use r0 and r12, and we can use r2 until we
1381 * restore it.
1382 */
1383
1384 CURRENT_THREAD_INFO(r12, r1)
1385
1386 /* Save stack pointer into r0 */
1387 mr r0, r1
1388
1389 ld r1, TI_livepatch_sp(r12)
1390
1391 /* Check stack marker hasn't been trashed */
1392 lis r2, STACK_END_MAGIC@h
1393 ori r2, r2, STACK_END_MAGIC@l
1394 ld r12, -8(r1)
1395 1: tdne r12, r2
1396 EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0
1397
1398 /* Restore LR & toc from livepatch stack */
1399 ld r12, -16(r1)
1400 mtlr r12
1401 ld r2, -24(r1)
1402
1403 /* Pop livepatch stack frame */
1404 CURRENT_THREAD_INFO(r12, r0)
1405 subi r1, r1, 24
1406 std r1, TI_livepatch_sp(r12)
1407
1408 /* Restore real stack pointer */
1409 mr r1, r0
1410
1411 /* Return to original caller of live patched function */
1412 blr
1413 #endif
1414
1415
1416 #else
1417 _GLOBAL_TOC(_mcount)
1418 EXPORT_SYMBOL(_mcount)
1419 /* Taken from output of objdump from lib64/glibc */
1420 mflr r3
1421 ld r11, 0(r1)
1422 stdu r1, -112(r1)
1423 std r3, 128(r1)
1424 ld r4, 16(r11)
1425
1426 subi r3, r3, MCOUNT_INSN_SIZE
1427 LOAD_REG_ADDR(r5,ftrace_trace_function)
1428 ld r5,0(r5)
1429 ld r5,0(r5)
1430 mtctr r5
1431 bctrl
1432 nop
1433
1434
1435 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1436 b ftrace_graph_caller
1437 #endif
1438 ld r0, 128(r1)
1439 mtlr r0
1440 addi r1, r1, 112
1441 _GLOBAL(ftrace_stub)
1442 blr
1443
1444 #endif /* CONFIG_DYNAMIC_FTRACE */
1445
1446 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1447 #ifndef CC_USING_MPROFILE_KERNEL
1448 _GLOBAL(ftrace_graph_caller)
1449 /* load r4 with local address */
1450 ld r4, 128(r1)
1451 subi r4, r4, MCOUNT_INSN_SIZE
1452
1453 /* Grab the LR out of the caller stack frame */
1454 ld r11, 112(r1)
1455 ld r3, 16(r11)
1456
1457 bl prepare_ftrace_return
1458 nop
1459
1460 /*
1461 * prepare_ftrace_return gives us the address we divert to.
1462 * Change the LR in the caller's stack frame to this.
1463 */
1464 ld r11, 112(r1)
1465 std r3, 16(r11)
1466
1467 ld r0, 128(r1)
1468 mtlr r0
1469 addi r1, r1, 112
1470 blr
1471
1472 #else /* CC_USING_MPROFILE_KERNEL */
1473 _GLOBAL(ftrace_graph_caller)
1474 /* with -mprofile-kernel, parameter regs are still alive at _mcount */
1475 std r10, 104(r1)
1476 std r9, 96(r1)
1477 std r8, 88(r1)
1478 std r7, 80(r1)
1479 std r6, 72(r1)
1480 std r5, 64(r1)
1481 std r4, 56(r1)
1482 std r3, 48(r1)
1483
1484 /* Save callee's TOC in the ABI compliant location */
1485 std r2, 24(r1)
1486 ld r2, PACATOC(r13) /* get kernel TOC in r2 */
1487
1488 mfctr r4 /* ftrace_caller has moved local addr here */
1489 std r4, 40(r1)
1490 mflr r3 /* ftrace_caller has restored LR from stack */
1491 subi r4, r4, MCOUNT_INSN_SIZE
1492
1493 bl prepare_ftrace_return
1494 nop
1495
1496 /*
1497 * prepare_ftrace_return gives us the address we divert to.
1498 * Change the LR to this.
1499 */
1500 mtlr r3
1501
1502 ld r0, 40(r1)
1503 mtctr r0
1504 ld r10, 104(r1)
1505 ld r9, 96(r1)
1506 ld r8, 88(r1)
1507 ld r7, 80(r1)
1508 ld r6, 72(r1)
1509 ld r5, 64(r1)
1510 ld r4, 56(r1)
1511 ld r3, 48(r1)
1512
1513 /* Restore callee's TOC */
1514 ld r2, 24(r1)
1515
1516 addi r1, r1, 112
1517 mflr r0
1518 std r0, LRSAVE(r1)
1519 bctr
1520 #endif /* CC_USING_MPROFILE_KERNEL */
1521
1522 _GLOBAL(return_to_handler)
1523 /* need to save return values */
1524 std r4, -32(r1)
1525 std r3, -24(r1)
1526 /* save TOC */
1527 std r2, -16(r1)
1528 std r31, -8(r1)
1529 mr r31, r1
1530 stdu r1, -112(r1)
1531
1532 /*
1533 * We might be called from a module.
1534 * Switch to our TOC to run inside the core kernel.
1535 */
1536 ld r2, PACATOC(r13)
1537
1538 bl ftrace_return_to_handler
1539 nop
1540
1541 /* return value has real return address */
1542 mtlr r3
1543
1544 ld r1, 0(r1)
1545 ld r4, -32(r1)
1546 ld r3, -24(r1)
1547 ld r2, -16(r1)
1548 ld r31, -8(r1)
1549
1550 /* Jump back to real return address */
1551 blr
1552 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1553 #endif /* CONFIG_FUNCTION_TRACER */