1 /*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
5 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
6 * Adapted for Power Macintosh by Paul Mackerras.
7 * Low-level exception handlers and MMU support
8 * rewritten by Paul Mackerras.
9 * Copyright (C) 1996 Paul Mackerras.
10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 * This file contains the system call entry code, context switch
13 * code, and exception/interrupt return code for PowerPC.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 */
20
21 #include <linux/errno.h>
22 #include <linux/err.h>
23 #include <asm/unistd.h>
24 #include <asm/processor.h>
25 #include <asm/page.h>
26 #include <asm/mmu.h>
27 #include <asm/thread_info.h>
28 #include <asm/code-patching-asm.h>
29 #include <asm/ppc_asm.h>
30 #include <asm/asm-offsets.h>
31 #include <asm/cputable.h>
32 #include <asm/firmware.h>
33 #include <asm/bug.h>
34 #include <asm/ptrace.h>
35 #include <asm/irqflags.h>
36 #include <asm/hw_irq.h>
37 #include <asm/context_tracking.h>
38 #include <asm/tm.h>
39 #include <asm/ppc-opcode.h>
40 #include <asm/barrier.h>
41 #include <asm/export.h>
42 #include <asm/asm-compat.h>
43 #ifdef CONFIG_PPC_BOOK3S
44 #include <asm/exception-64s.h>
45 #else
46 #include <asm/exception-64e.h>
47 #endif
48 #include <asm/feature-fixups.h>
49
50 /*
51 * System calls.
52 */
53 .section ".toc","aw"
54 SYS_CALL_TABLE:
55 .tc sys_call_table[TC],sys_call_table
56
57 COMPAT_SYS_CALL_TABLE:
58 .tc compat_sys_call_table[TC],compat_sys_call_table
59
60 /* This value is used to mark exception frames on the stack. */
61 exception_marker:
62 .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
63
64 .section ".text"
65 .align 7
66
67 .globl system_call_common
68 system_call_common:
69 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
70 BEGIN_FTR_SECTION
71 extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
72 bne .Ltabort_syscall
73 END_FTR_SECTION_IFSET(CPU_FTR_TM)
74 #endif
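	/*
	 * If we came from user mode (MSR_PR set in r12), switch to the
	 * kernel stack pointer saved in the PACA (PACAKSAVE); if we came
	 * from the kernel, just carve an INT_FRAME_SIZE frame on the
	 * current stack. r10 keeps the original r1 so it can be stored
	 * into the new frame below.
	 */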
75 andi. r10,r12,MSR_PR
76 mr r10,r1
77 addi r1,r1,-INT_FRAME_SIZE
78 beq- 1f
79 ld r1,PACAKSAVE(r13)
80 1: std r10,0(r1)
81 std r11,_NIP(r1)
82 std r12,_MSR(r1)
83 std r0,GPR0(r1)
84 std r10,GPR1(r1)
85 beq 2f /* if from kernel mode */
86 #ifdef CONFIG_PPC_FSL_BOOK3E
87 START_BTB_FLUSH_SECTION
88 BTB_FLUSH(r10)
89 END_BTB_FLUSH_SECTION
90 #endif
91 ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
92 2: std r2,GPR2(r1)
93 std r3,GPR3(r1)
94 mfcr r2
95 std r4,GPR4(r1)
96 std r5,GPR5(r1)
97 std r6,GPR6(r1)
98 std r7,GPR7(r1)
99 std r8,GPR8(r1)
100 li r11,0
101 std r11,GPR9(r1)
102 std r11,GPR10(r1)
103 std r11,GPR11(r1)
104 std r11,GPR12(r1)
105 std r11,_XER(r1)
106 std r11,_CTR(r1)
107 std r9,GPR13(r1)
108 mflr r10
109 /*
110 * This clears CR0.SO (bit 28), which is the error indication on
111 * return from this system call.
112 */
113 rldimi r2,r11,28,(63-28)
114 li r11,0xc01
115 std r10,_LINK(r1)
116 std r11,_TRAP(r1)
117 std r3,ORIG_GPR3(r1)
118 std r2,_CCR(r1)
119 ld r2,PACATOC(r13)
120 addi r9,r1,STACK_FRAME_OVERHEAD
121 ld r11,exception_marker@toc(r2)
122 std r11,-16(r9) /* "regshere" marker */
123 #if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
124 BEGIN_FW_FTR_SECTION
125 beq 33f
126 /* if from user, see if there are any DTL entries to process */
127 ld r10,PACALPPACAPTR(r13) /* get ptr to VPA */
128 ld r11,PACA_DTL_RIDX(r13) /* get log read index */
129 addi r10,r10,LPPACA_DTLIDX
130 LDX_BE r10,0,r10 /* get log write index */
131 cmpd cr1,r11,r10
132 beq+ cr1,33f
133 bl accumulate_stolen_time
134 REST_GPR(0,r1)
135 REST_4GPRS(3,r1)
136 REST_2GPRS(7,r1)
137 addi r9,r1,STACK_FRAME_OVERHEAD
138 33:
139 END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
140 #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */
141
142 /*
143 * A syscall should always be called with interrupts enabled
144 * so we just unconditionally hard-enable here. When some kind
145 * of irq tracing is used, we additionally check that condition
146 * is correct
147 */
148 #if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
149 lbz r10,PACAIRQSOFTMASK(r13)
150 1: tdnei r10,IRQS_ENABLED
151 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
152 #endif
153
154 #ifdef CONFIG_PPC_BOOK3E
155 wrteei 1
156 #else
157 li r11,MSR_RI
158 ori r11,r11,MSR_EE
159 mtmsrd r11,1
160 #endif /* CONFIG_PPC_BOOK3E */
161
162 system_call: /* label this so stack traces look sane */
163 /* We do need to set SOFTE in the stack frame or the return
164 * from interrupt will be painful
165 */
166 li r10,IRQS_ENABLED
167 std r10,SOFTE(r1)
168
169 ld r11, PACA_THREAD_INFO(r13)
170 ld r10,TI_FLAGS(r11)
171 andi. r11,r10,_TIF_SYSCALL_DOTRACE
172 bne .Lsyscall_dotrace /* does not return */
173 cmpldi 0,r0,NR_syscalls
174 bge- .Lsyscall_enosys
175
176 .Lsyscall:
177 /*
178 * Need to vector to the 32-bit (compat) or the default sys_call_table
179 * here, based on the caller's run-mode / personality.
180 */
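	/*
	 * Roughly equivalent C, as an illustrative sketch only (the names
	 * "table", "nr", "handler" and "ret" are not from this file):
	 *
	 *	table = (ti_flags & _TIF_32BIT) ? compat_sys_call_table
	 *					: sys_call_table;
	 *	if (ti_flags & _TIF_32BIT)
	 *		args r3..r8 are truncated to their low 32 bits;
	 *	handler = table[nr];	// nr is r0, scaled by 8 below
	 *	ret = handler(r3, r4, r5, r6, r7, r8);
	 */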
181 ld r11,SYS_CALL_TABLE@toc(2)
182 andis. r10,r10,_TIF_32BIT@h
183 beq 15f
184 ld r11,COMPAT_SYS_CALL_TABLE@toc(2)
185 clrldi r3,r3,32
186 clrldi r4,r4,32
187 clrldi r5,r5,32
188 clrldi r6,r6,32
189 clrldi r7,r7,32
190 clrldi r8,r8,32
191 15:
192 slwi r0,r0,3
193
194 barrier_nospec_asm
195 /*
196 * Prevent the load of the handler below (based on the user-passed
197 * system call number) being speculatively executed until the test
198 * against NR_syscalls and branch to .Lsyscall_enosys above has
199 * committed.
200 */
201
202 ldx r12,r11,r0 /* Fetch system call handler [ptr] */
203 mtctr r12
204 bctrl /* Call handler */
205
206 .Lsyscall_exit:
207 std r3,RESULT(r1)
208
209 #ifdef CONFIG_DEBUG_RSEQ
210 /* Check whether the syscall is issued inside a restartable sequence */
211 addi r3,r1,STACK_FRAME_OVERHEAD
212 bl rseq_syscall
213 ld r3,RESULT(r1)
214 #endif
215
216 ld r12, PACA_THREAD_INFO(r13)
217
218 ld r8,_MSR(r1)
219 #ifdef CONFIG_PPC_BOOK3S
220 /* No MSR:RI on BookE */
221 andi. r10,r8,MSR_RI
222 beq- .Lunrecov_restore
223 #endif
224
225 /*
226 * This is a few instructions into the actual syscall exit path (which actually
227 * starts at .Lsyscall_exit) to cater to kprobe blacklisting and to reduce the
228 * number of visible symbols for profiling purposes.
229 *
230 * We can probe from system_call until this point as MSR_RI is set. But once it
231 * is cleared below, we won't be able to take a trap.
232 *
233 * This is blacklisted from kprobes further below with _ASM_NOKPROBE_SYMBOL().
234 */
235 system_call_exit:
236 /*
237 * Disable interrupts so current_thread_info()->flags can't change,
238 * and so that we don't get interrupted after loading SRR0/1.
239 *
240 * Leave MSR_RI enabled for now, because with THREAD_INFO_IN_TASK we
241 * could fault on the load of the TI_FLAGS below.
242 */
243 #ifdef CONFIG_PPC_BOOK3E
244 wrteei 0
245 #else
246 li r11,MSR_RI
247 mtmsrd r11,1
248 #endif /* CONFIG_PPC_BOOK3E */
249
250 ld r9,TI_FLAGS(r12)
251 li r11,-MAX_ERRNO
252 andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
253 bne- .Lsyscall_exit_work
254
255 andi. r0,r8,MSR_FP
256 beq 2f
257 #ifdef CONFIG_ALTIVEC
258 andis. r0,r8,MSR_VEC@h
259 bne 3f
260 #endif
261 2: addi r3,r1,STACK_FRAME_OVERHEAD
262 bl restore_math
263 ld r8,_MSR(r1)
264 ld r3,RESULT(r1)
265 li r11,-MAX_ERRNO
266
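	/*
	 * r11 = -MAX_ERRNO, so the unsigned compare below catches return
	 * values in the range [-MAX_ERRNO, -1], which are treated as
	 * errors: .Lsyscall_error negates r3 (so userspace sees a positive
	 * errno) and sets CR0[SO], the bit the C library tests to tell an
	 * error return from a success.
	 */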
267 3: cmpld r3,r11
268 ld r5,_CCR(r1)
269 bge- .Lsyscall_error
270 .Lsyscall_error_cont:
271 ld r7,_NIP(r1)
272 BEGIN_FTR_SECTION
273 stdcx. r0,0,r1 /* to clear the reservation */
274 END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
275 andi. r6,r8,MSR_PR
276 ld r4,_LINK(r1)
277
278 #ifdef CONFIG_PPC_BOOK3S
279 /*
280 * Clear MSR_RI; MSR_EE is already disabled and remains so. We could do
281 * this later, but testing shows that doing it here causes less slowdown
282 * than doing it closer to the rfid.
283 */
284 li r11,0
285 mtmsrd r11,1
286 #endif
287
288 beq- 1f
289 ACCOUNT_CPU_USER_EXIT(r13, r11, r12)
290
291 BEGIN_FTR_SECTION
292 HMT_MEDIUM_LOW
293 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
294
295 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
296 std r8, PACATMSCRATCH(r13)
297 #endif
298
299 ld r13,GPR13(r1) /* only restore r13 if returning to usermode */
300 ld r2,GPR2(r1)
301 ld r1,GPR1(r1)
302 mtlr r4
303 mtcr r5
304 mtspr SPRN_SRR0,r7
305 mtspr SPRN_SRR1,r8
306 RFI_TO_USER
307 b . /* prevent speculative execution */
308
309 /* exit to kernel */
310 1: ld r2,GPR2(r1)
311 ld r1,GPR1(r1)
312 mtlr r4
313 mtcr r5
314 mtspr SPRN_SRR0,r7
315 mtspr SPRN_SRR1,r8
316 RFI_TO_KERNEL
317 b . /* prevent speculative execution */
318
319 .Lsyscall_error:
320 oris r5,r5,0x1000 /* Set SO bit in CR */
321 neg r3,r3
322 std r5,_CCR(r1)
323 b .Lsyscall_error_cont
324
325 /* Traced system call support */
326 .Lsyscall_dotrace:
327 bl save_nvgprs
328 addi r3,r1,STACK_FRAME_OVERHEAD
329 bl do_syscall_trace_enter
330
331 /*
332 * We use the return value of do_syscall_trace_enter() as the syscall
333 * number. If the syscall was rejected for any reason, do_syscall_trace_enter()
334 * returns an invalid syscall number and the test below against
335 * NR_syscalls will fail.
336 */
337 mr r0,r3
338
339 /* Restore argument registers just clobbered and/or possibly changed. */
340 ld r3,GPR3(r1)
341 ld r4,GPR4(r1)
342 ld r5,GPR5(r1)
343 ld r6,GPR6(r1)
344 ld r7,GPR7(r1)
345 ld r8,GPR8(r1)
346
347 /* Repopulate r9 and r10 for the syscall path */
348 addi r9,r1,STACK_FRAME_OVERHEAD
349 ld r10, PACA_THREAD_INFO(r13)
350 ld r10,TI_FLAGS(r10)
351
352 cmpldi r0,NR_syscalls
353 blt+ .Lsyscall
354
355 /* Return code is already in r3 thanks to do_syscall_trace_enter() */
356 b .Lsyscall_exit
357
358
359 .Lsyscall_enosys:
360 li r3,-ENOSYS
361 b .Lsyscall_exit
362
363 .Lsyscall_exit_work:
364 /* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
365 If TIF_NOERROR is set, just save r3 as it is. */
366
367 andi. r0,r9,_TIF_RESTOREALL
368 beq+ 0f
369 REST_NVGPRS(r1)
370 b 2f
371 0: cmpld r3,r11 /* r11 is -MAX_ERRNO */
372 blt+ 1f
373 andi. r0,r9,_TIF_NOERROR
374 bne- 1f
375 ld r5,_CCR(r1)
376 neg r3,r3
377 oris r5,r5,0x1000 /* Set SO bit in CR */
378 std r5,_CCR(r1)
379 1: std r3,GPR3(r1)
380 2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
381 beq 4f
382
383 /* Clear per-syscall TIF flags if any are set. */
384
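	/*
	 * The ldarx/stdcx. loop below is a hand-rolled atomic
	 * "flags &= ~_TIF_PERSYSCALL_MASK" on thread_info->flags, retried
	 * until the store-conditional succeeds. Illustrative C sketch only
	 * (not the code actually used here):
	 *
	 *	do {
	 *		old = ti->flags;
	 *	} while (cmpxchg(&ti->flags, old,
	 *			 old & ~_TIF_PERSYSCALL_MASK) != old);
	 */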
385 li r11,_TIF_PERSYSCALL_MASK
386 addi r12,r12,TI_FLAGS
387 3: ldarx r10,0,r12
388 andc r10,r10,r11
389 stdcx. r10,0,r12
390 bne- 3b
391 subi r12,r12,TI_FLAGS
392
393 4: /* Anything else left to do? */
394 BEGIN_FTR_SECTION
395 lis r3,DEFAULT_PPR@highest /* Set default PPR */
396 sldi r3,r3,32 /* bits 11-13 are used for ppr */
397 std r3,_PPR(r1)
398 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
399
400 andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
401 beq ret_from_except_lite
402
403 /* Re-enable interrupts */
404 #ifdef CONFIG_PPC_BOOK3E
405 wrteei 1
406 #else
407 li r10,MSR_RI
408 ori r10,r10,MSR_EE
409 mtmsrd r10,1
410 #endif /* CONFIG_PPC_BOOK3E */
411
412 bl save_nvgprs
413 addi r3,r1,STACK_FRAME_OVERHEAD
414 bl do_syscall_trace_leave
415 b ret_from_except
416
417 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
418 .Ltabort_syscall:
419 /* Firstly we need to enable TM in the kernel */
420 mfmsr r10
421 li r9, 1
422 rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG
423 mtmsrd r10, 0
424
425 /* tabort, this dooms the transaction, nothing else */
426 li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
427 TABORT(R9)
428
429 /*
430 * Return directly to userspace. We have corrupted user register state,
431 * but userspace will never see that register state. Execution will
432 * resume after the tbegin of the aborted transaction with the
433 * checkpointed register state.
434 */
435 li r9, MSR_RI
436 andc r10, r10, r9
437 mtmsrd r10, 1
438 mtspr SPRN_SRR0, r11
439 mtspr SPRN_SRR1, r12
440 RFI_TO_USER
441 b . /* prevent speculative execution */
442 #endif
443 _ASM_NOKPROBE_SYMBOL(system_call_common);
444 _ASM_NOKPROBE_SYMBOL(system_call_exit);
445
446 /* Save non-volatile GPRs, if not already saved. */
447 _GLOBAL(save_nvgprs)
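	/*
	 * Bit 0 of the saved trap word is set while the non-volatile GPRs
	 * have not been saved into this frame: return early if it is
	 * already clear, otherwise save them and clear it.
	 */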
448 ld r11,_TRAP(r1)
449 andi. r0,r11,1
450 beqlr-
451 SAVE_NVGPRS(r1)
452 clrrdi r0,r11,1
453 std r0,_TRAP(r1)
454 blr
455 _ASM_NOKPROBE_SYMBOL(save_nvgprs);
456
457
458 /*
459 * The sigsuspend and rt_sigsuspend system calls can call do_signal
460 * and thus put the process into the stopped state where we might
461 * want to examine its user state with ptrace. Therefore we need
462 * to save all the nonvolatile registers (r14 - r31) before calling
463 * the C code. Similarly, fork, vfork and clone need the full
464 * register state on the stack so that it can be copied to the child.
465 */
466
467 _GLOBAL(ppc_fork)
468 bl save_nvgprs
469 bl sys_fork
470 b .Lsyscall_exit
471
472 _GLOBAL(ppc_vfork)
473 bl save_nvgprs
474 bl sys_vfork
475 b .Lsyscall_exit
476
477 _GLOBAL(ppc_clone)
478 bl save_nvgprs
479 bl sys_clone
480 b .Lsyscall_exit
481
482 _GLOBAL(ppc32_swapcontext)
483 bl save_nvgprs
484 bl compat_sys_swapcontext
485 b .Lsyscall_exit
486
487 _GLOBAL(ppc64_swapcontext)
488 bl save_nvgprs
489 bl sys_swapcontext
490 b .Lsyscall_exit
491
492 _GLOBAL(ppc_switch_endian)
493 bl save_nvgprs
494 bl sys_switch_endian
495 b .Lsyscall_exit
496
497 _GLOBAL(ret_from_fork)
498 bl schedule_tail
499 REST_NVGPRS(r1)
500 li r3,0
501 b .Lsyscall_exit
502
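/*
 * For a kernel thread, copy_thread() places the function to run in r14 and
 * its argument in r15; the ELFv2 ABI additionally wants the entry address
 * in r12 so the callee can set up its TOC via the global entry point.
 */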
503 _GLOBAL(ret_from_kernel_thread)
504 bl schedule_tail
505 REST_NVGPRS(r1)
506 mtlr r14
507 mr r3,r15
508 #ifdef PPC64_ELF_ABI_v2
509 mr r12,r14
510 #endif
511 blrl
512 li r3,0
513 b .Lsyscall_exit
514
515 #ifdef CONFIG_PPC_BOOK3S_64
516
517 #define FLUSH_COUNT_CACHE \
518 1: nop; \
519 patch_site 1b, patch__call_flush_count_cache
520
521
522 #define BCCTR_FLUSH .long 0x4c400420
523
524 .macro nops number
525 .rept \number
526 nop
527 .endr
528 .endm
529
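/*
 * Software flush of the branch target predictor ("count cache") and link
 * stack, used as a Spectre v2 style mitigation. It only runs when the
 * mitigation is enabled: the FLUSH_COUNT_CACHE site in _switch is patched
 * (via patch__call_flush_count_cache) to call this routine. The 64 "bl .+4"
 * calls displace the link stack, and the long run of BCCTR_FLUSH
 * (bcctr 2,0,0) patterns below serves to displace count cache entries.
 */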
530 .balign 32
531 .global flush_count_cache
532 flush_count_cache:
533 /* Save LR into r9 */
534 mflr r9
535
536 .rept 64
537 bl .+4
538 .endr
539 b 1f
540 nops 6
541
542 .balign 32
543 /* Restore LR */
544 1: mtlr r9
545 li r9,0x7fff
546 mtctr r9
547
548 BCCTR_FLUSH
549
550 2: nop
551 patch_site 2b patch__flush_count_cache_return
552
553 nops 3
554
555 .rept 278
556 .balign 32
557 BCCTR_FLUSH
558 nops 7
559 .endr
560
561 blr
562 #else
563 #define FLUSH_COUNT_CACHE
564 #endif /* CONFIG_PPC_BOOK3S_64 */
565
566 /*
567 * This routine switches between two different tasks. The process
568 * state of one is saved on its kernel stack. Then the state
569 * of the other is restored from its kernel stack. The memory
570 * management hardware is updated to the second process's state.
571 * Finally, we can return to the second process, via ret_from_except.
572 * On entry, r3 points to the THREAD for the current task, r4
573 * points to the THREAD for the new task.
574 *
575 * Note: there are two ways to get to the "going out" portion
576 * of this code; either by coming in via the entry (_switch)
577 * or via "fork" which must set up an environment equivalent
578 * to the "_switch" path. If you change this you'll have to change
579 * the fork code also.
580 *
581 * The code which creates the new task context is in 'copy_thread'
582 * in arch/powerpc/kernel/process.c
583 */
584 .align 7
585 _GLOBAL(_switch)
586 mflr r0
587 std r0,16(r1)
588 stdu r1,-SWITCH_FRAME_SIZE(r1)
589 /* r3-r13 are caller saved -- Cort */
590 SAVE_8GPRS(14, r1)
591 SAVE_10GPRS(22, r1)
592 std r0,_NIP(r1) /* Return to switch caller */
593 mfcr r23
594 std r23,_CCR(r1)
595 std r1,KSP(r3) /* Set old stack pointer */
596
597 FLUSH_COUNT_CACHE
598
599 /*
600 * On SMP kernels, care must be taken because a task may be
601 * scheduled off CPUx and on to CPUy. Memory ordering must be
602 * considered.
603 *
604 * Cacheable stores on CPUx will be visible when the task is
605 * scheduled on CPUy by virtue of the core scheduler barriers
606 * (see "Notes on Program-Order guarantees on SMP systems." in
607 * kernel/sched/core.c).
608 *
609 * Uncacheable stores in the case of involuntary preemption must
610 * be taken care of. The smp_mb__after_spinlock() in __schedule()
611 * is implemented as hwsync on powerpc, which orders MMIO too. So
612 * long as there is an hwsync in the context switch path, it will
613 * be executed on the source CPU after the task has performed
614 * all MMIO ops on that CPU, and on the destination CPU before the
615 * task performs any MMIO ops there.
616 */
617
618 /*
619 * The kernel context switch path must contain a spin_lock,
620 * which contains larx/stcx, which will clear any reservation
621 * of the task being switched.
622 */
623 #ifdef CONFIG_PPC_BOOK3S
624 /* Cancel all explicit user streams, as they will have no use after the
625 * context switch, and stop the HW from creating streams itself.
626 */
627 DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r6)
628 #endif
629
630 addi r6,r4,-THREAD /* Convert THREAD to 'current' */
631 std r6,PACACURRENT(r13) /* Set new 'current' */
632 #if defined(CONFIG_STACKPROTECTOR)
633 ld r6, TASK_CANARY(r6)
634 std r6, PACA_CANARY(r13)
635 #endif
636
637 ld r8,KSP(r4) /* new stack pointer */
638 #ifdef CONFIG_PPC_BOOK3S_64
639 BEGIN_MMU_FTR_SECTION
640 b 2f
641 END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
642 BEGIN_FTR_SECTION
643 clrrdi r6,r8,28 /* get its ESID */
644 clrrdi r9,r1,28 /* get current sp ESID */
645 FTR_SECTION_ELSE
646 clrrdi r6,r8,40 /* get its 1T ESID */
647 clrrdi r9,r1,40 /* get current sp 1T ESID */
648 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
649 clrldi. r0,r6,2 /* is new ESID c00000000? */
650 cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */
651 cror eq,4*cr1+eq,eq
652 beq 2f /* if yes, don't slbie it */
653
654 /* Bolt in the new stack SLB entry */
655 ld r7,KSP_VSID(r4) /* Get new stack's VSID */
656 oris r0,r6,(SLB_ESID_V)@h
657 ori r0,r0,(SLB_NUM_BOLTED-1)@l
658 BEGIN_FTR_SECTION
659 li r9,MMU_SEGSIZE_1T /* insert B field */
660 oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
661 rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0
662 END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
663
664 /* Update the last bolted SLB. No write barriers are needed
665 * here, provided we only update the current CPU's SLB shadow
666 * buffer.
667 */
668 ld r9,PACA_SLBSHADOWPTR(r13)
669 li r12,0
670 std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
671 li r12,SLBSHADOW_STACKVSID
672 STDX_BE r7,r12,r9 /* Save VSID */
673 li r12,SLBSHADOW_STACKESID
674 STDX_BE r0,r12,r9 /* Save ESID */
675
676 /* No need to check for MMU_FTR_NO_SLBIE_B here, since when
677 * we have 1TB segments, the only CPUs known to have the errata
678 * only support less than 1TB of system memory and we'll never
679 * actually hit this code path.
680 */
681
682 isync
683 slbie r6
684 BEGIN_FTR_SECTION
685 slbie r6 /* Workaround POWER5 < DD2.1 issue */
686 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
687 slbmte r7,r0
688 isync
689 2:
690 #endif /* CONFIG_PPC_BOOK3S_64 */
691
692 clrrdi r7, r8, THREAD_SHIFT /* base of new stack */
693 /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
694 because we don't need to leave the 288-byte ABI gap at the
695 top of the kernel stack. */
696 addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
697
698 /*
699 * PMU interrupts in radix may come in here. They will use r1, not
700 * PACAKSAVE, so this stack switch will not cause a problem. They
701 * will store to the process stack, which may then be migrated to
702 * another CPU. However the rq lock release on this CPU paired with
703 * the rq lock acquire on the new CPU before the stack becomes
704 * active on the new CPU, will order those stores.
705 */
706 mr r1,r8 /* start using new stack pointer */
707 std r7,PACAKSAVE(r13)
708
709 ld r6,_CCR(r1)
710 mtcrf 0xFF,r6
711
712 /* r3-r13 are destroyed -- Cort */
713 REST_8GPRS(14, r1)
714 REST_10GPRS(22, r1)
715
716 /* convert old thread to its task_struct for return value */
717 addi r3,r3,-THREAD
718 ld r7,_NIP(r1) /* Return to _switch caller in new task */
719 mtlr r7
720 addi r1,r1,SWITCH_FRAME_SIZE
721 blr
722
723 .align 7
724 _GLOBAL(ret_from_except)
725 ld r11,_TRAP(r1)
726 andi. r0,r11,1
727 bne ret_from_except_lite
728 REST_NVGPRS(r1)
729
730 _GLOBAL(ret_from_except_lite)
731 /*
732 * Disable interrupts so that current_thread_info()->flags
733 * can't change between when we test it and when we return
734 * from the interrupt.
735 */
736 #ifdef CONFIG_PPC_BOOK3E
737 wrteei 0
738 #else
739 li r10,MSR_RI
740 mtmsrd r10,1 /* Update machine state */
741 #endif /* CONFIG_PPC_BOOK3E */
742
743 ld r9, PACA_THREAD_INFO(r13)
744 ld r3,_MSR(r1)
745 #ifdef CONFIG_PPC_BOOK3E
746 ld r10,PACACURRENT(r13)
747 #endif /* CONFIG_PPC_BOOK3E */
748 ld r4,TI_FLAGS(r9)
749 andi. r3,r3,MSR_PR
750 beq resume_kernel
751 #ifdef CONFIG_PPC_BOOK3E
752 lwz r3,(THREAD+THREAD_DBCR0)(r10)
753 #endif /* CONFIG_PPC_BOOK3E */
754
755 /* Check current_thread_info()->flags */
756 andi. r0,r4,_TIF_USER_WORK_MASK
757 bne 1f
758 #ifdef CONFIG_PPC_BOOK3E
759 /*
760 * Check to see if the dbcr0 register is set up to debug.
761 * Use the internal debug mode bit to do this.
762 */
763 andis. r0,r3,DBCR0_IDM@h
764 beq restore
765 mfmsr r0
766 rlwinm r0,r0,0,~MSR_DE /* Clear MSR.DE */
767 mtmsr r0
768 mtspr SPRN_DBCR0,r3
769 li r10, -1
770 mtspr SPRN_DBSR,r10
771 b restore
772 #else
773 addi r3,r1,STACK_FRAME_OVERHEAD
774 bl restore_math
775 b restore
776 #endif
777 1: andi. r0,r4,_TIF_NEED_RESCHED
778 beq 2f
779 bl restore_interrupts
780 SCHEDULE_USER
781 b ret_from_except_lite
782 2:
783 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
784 andi. r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
785 bne 3f /* only restore TM if nothing else to do */
786 addi r3,r1,STACK_FRAME_OVERHEAD
787 bl restore_tm_state
788 b restore
789 3:
790 #endif
791 bl save_nvgprs
792 /*
793 * Use a non volatile GPR to save and restore our thread_info flags
794 * across the call to restore_interrupts.
795 */
796 mr r30,r4
797 bl restore_interrupts
798 mr r4,r30
799 addi r3,r1,STACK_FRAME_OVERHEAD
800 bl do_notify_resume
801 b ret_from_except
802
803 resume_kernel:
804 /* Check current_thread_info()->flags for _TIF_EMULATE_STACK_STORE */
805 andis. r8,r4,_TIF_EMULATE_STACK_STORE@h
806 beq+ 1f
807
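	/*
	 * A "stdu r1,-N(r1)" to the stack was emulated (for example for a
	 * kprobe on a function prologue) and its memory store was deferred.
	 * Rebuild the exception frame lower down, at the new r1 the stdu
	 * established, copy the current frame into it, and then complete
	 * the deferred store of the old stack pointer at 0(new r1).
	 */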
808 addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
809
810 ld r3,GPR1(r1)
811 subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
812 mr r4,r1 /* src: current exception frame */
813 mr r1,r3 /* Reroute the trampoline frame to r1 */
814
815 /* Copy from the original to the trampoline. */
816 li r5,INT_FRAME_SIZE/8 /* size: INT_FRAME_SIZE */
817 li r6,0 /* start offset: 0 */
818 mtctr r5
819 2: ldx r0,r6,r4
820 stdx r0,r6,r3
821 addi r6,r6,8
822 bdnz 2b
823
824 /* Do real store operation to complete stdu */
825 ld r5,GPR1(r1)
826 std r8,0(r5)
827
828 /* Clear _TIF_EMULATE_STACK_STORE flag */
829 lis r11,_TIF_EMULATE_STACK_STORE@h
830 addi r5,r9,TI_FLAGS
831 0: ldarx r4,0,r5
832 andc r4,r4,r11
833 stdcx. r4,0,r5
834 bne- 0b
835 1:
836
837 #ifdef CONFIG_PREEMPT
838 /* Check if we need to preempt */
839 andi. r0,r4,_TIF_NEED_RESCHED
840 beq+ restore
841 /* Check that preempt_count() == 0 and interrupts are enabled */
842 lwz r8,TI_PREEMPT(r9)
843 cmpwi cr0,r8,0
844 bne restore
845 ld r0,SOFTE(r1)
846 andi. r0,r0,IRQS_DISABLED
847 bne restore
848
849 /*
850 * Here we are preempting the current task. We want to make
851 * sure we are soft-disabled first and reconcile irq state.
852 */
853 RECONCILE_IRQ_STATE(r3,r4)
854 1: bl preempt_schedule_irq
855
856 /* Re-test flags and eventually loop */
857 ld r9, PACA_THREAD_INFO(r13)
858 ld r4,TI_FLAGS(r9)
859 andi. r0,r4,_TIF_NEED_RESCHED
860 bne 1b
861
862 /*
863 * arch_local_irq_restore() from preempt_schedule_irq above may
864 * enable hard interrupts, but we really should disable interrupts
865 * when we return from the interrupt, so that we don't get
866 * interrupted after loading SRR0/1.
867 */
868 #ifdef CONFIG_PPC_BOOK3E
869 wrteei 0
870 #else
871 li r10,MSR_RI
872 mtmsrd r10,1 /* Update machine state */
873 #endif /* CONFIG_PPC_BOOK3E */
874 #endif /* CONFIG_PREEMPT */
875
876 .globl fast_exc_return_irq
877 fast_exc_return_irq:
878 restore:
879 /*
880 * This is the main kernel exit path. First we check if we
881 * are about to re-enable interrupts
882 */
883 ld r5,SOFTE(r1)
884 lbz r6,PACAIRQSOFTMASK(r13)
885 andi. r5,r5,IRQS_DISABLED
886 bne .Lrestore_irq_off
887
888 /* We are enabling; were we already enabled? If so, just return. */
889 andi. r6,r6,IRQS_DISABLED
890 beq cr0,.Ldo_restore
891
892 /*
893 * We are about to soft-enable interrupts (we are hard disabled
894 * at this point). We check if there's anything that needs to
895 * be replayed first.
896 */
897 lbz r0,PACAIRQHAPPENED(r13)
898 cmpwi cr0,r0,0
899 bne- .Lrestore_check_irq_replay
900
901 /*
902 * Get here when nothing happened while soft-disabled, just
903 * soft-enable and move on. We will hard-enable as a side
904 * effect of rfi
905 */
906 .Lrestore_no_replay:
907 TRACE_ENABLE_INTS
908 li r0,IRQS_ENABLED
909 stb r0,PACAIRQSOFTMASK(r13);
910
911 /*
912 * Final return path. BookE is handled in a different file
913 */
914 .Ldo_restore:
915 #ifdef CONFIG_PPC_BOOK3E
916 b exception_return_book3e
917 #else
918 /*
919 * Clear the reservation. If we know the CPU tracks the address of
920 * the reservation then we can potentially save some cycles and use
921 * a larx. On POWER6 and POWER7 this is significantly faster.
922 */
923 BEGIN_FTR_SECTION
924 stdcx. r0,0,r1 /* to clear the reservation */
925 FTR_SECTION_ELSE
926 ldarx r4,0,r1
927 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
928
929 /*
930 * Some code paths, such as load_up_fpu or altivec, return directly
931 * here. They run entirely hard disabled and do not alter the
932 * interrupt state. They also don't use lwarx/stwcx. and thus
933 * are known not to leave dangling reservations.
934 */
935 .globl fast_exception_return
936 fast_exception_return:
937 ld r3,_MSR(r1)
938 ld r4,_CTR(r1)
939 ld r0,_LINK(r1)
940 mtctr r4
941 mtlr r0
942 ld r4,_XER(r1)
943 mtspr SPRN_XER,r4
944
945 REST_8GPRS(5, r1)
946
947 andi. r0,r3,MSR_RI
948 beq- .Lunrecov_restore
949
950 /*
951 * Clear RI before restoring r13. If we are returning to
952 * userspace and we take an exception after restoring r13,
953 * we end up corrupting the userspace r13 value.
954 */
955 li r4,0
956 mtmsrd r4,1
957
958 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
959 /* TM debug */
960 std r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */
961 #endif
962 /*
963 * r13 is our per-cpu area; only restore it if we are returning to
964 * userspace, as the value stored in the stack frame may belong to
965 * another CPU.
966 */
967 andi. r0,r3,MSR_PR
968 beq 1f
969 BEGIN_FTR_SECTION
970 /* Restore PPR */
971 ld r2,_PPR(r1)
972 mtspr SPRN_PPR,r2
973 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
974 ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
975 REST_GPR(13, r1)
976
977 mtspr SPRN_SRR1,r3
978
979 ld r2,_CCR(r1)
980 mtcrf 0xFF,r2
981 ld r2,_NIP(r1)
982 mtspr SPRN_SRR0,r2
983
984 ld r0,GPR0(r1)
985 ld r2,GPR2(r1)
986 ld r3,GPR3(r1)
987 ld r4,GPR4(r1)
988 ld r1,GPR1(r1)
989 RFI_TO_USER
990 b . /* prevent speculative execution */
991
992 1: mtspr SPRN_SRR1,r3
993
994 ld r2,_CCR(r1)
995 mtcrf 0xFF,r2
996 ld r2,_NIP(r1)
997 mtspr SPRN_SRR0,r2
998
999 /*
1000 * Leaving a stale exception_marker on the stack can confuse
1001 * the reliable stack unwinder later on. Clear it.
1002 */
1003 li r2,0
1004 std r2,STACK_FRAME_OVERHEAD-16(r1)
1005
1006 ld r0,GPR0(r1)
1007 ld r2,GPR2(r1)
1008 ld r3,GPR3(r1)
1009 ld r4,GPR4(r1)
1010 ld r1,GPR1(r1)
1011 RFI_TO_KERNEL
1012 b . /* prevent speculative execution */
1013
1014 #endif /* CONFIG_PPC_BOOK3E */
1015
1016 /*
1017 * We are returning to a context with interrupts soft disabled.
1018 *
1019 * However, we may also be about to hard-enable, so we need to
1020 * make sure that in this case we also clear PACA_IRQ_HARD_DIS,
1021 * or that bit can get out of sync and bad things will happen.
1022 */
1023 .Lrestore_irq_off:
1024 ld r3,_MSR(r1)
1025 lbz r7,PACAIRQHAPPENED(r13)
1026 andi. r0,r3,MSR_EE
1027 beq 1f
1028 rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS
1029 stb r7,PACAIRQHAPPENED(r13)
1030 1:
1031 #if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
1032 /* The interrupt handler should not have left interrupts soft-enabled. */
1033 lbz r7,PACAIRQSOFTMASK(r13)
1034 1: tdeqi r7,IRQS_ENABLED
1035 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
1036 #endif
1037 b .Ldo_restore
1038
1039 /*
1040 * Something did happen, check if a re-emit is needed
1041 * (this also clears paca->irq_happened)
1042 */
1043 .Lrestore_check_irq_replay:
1044 /* XXX: We could implement a fast path here where we check
1045 * for irq_happened being just 0x01, in which case we can
1046 * clear it and return. That means that we would potentially
1047 * miss a decrementer having wrapped all the way around.
1048 *
1049 * Still, this might be useful for things like hash_page
1050 */
1051 bl __check_irq_replay
1052 cmpwi cr0,r3,0
1053 beq .Lrestore_no_replay
1054
1055 /*
1056 * We need to re-emit an interrupt. We do so by re-using our
1057 * existing exception frame. We first change the trap value,
1058 * but we need to ensure we preserve the low nibble of it
1059 */
1060 ld r4,_TRAP(r1)
1061 clrldi r4,r4,60
1062 or r4,r4,r3
1063 std r4,_TRAP(r1)
1064
1065 /*
1066 * PACA_IRQ_HARD_DIS won't always be set here, so set it now
1067 * to reconcile the IRQ state. Tracing is already accounted for.
1068 */
1069 lbz r4,PACAIRQHAPPENED(r13)
1070 ori r4,r4,PACA_IRQ_HARD_DIS
1071 stb r4,PACAIRQHAPPENED(r13)
1072
1073 /*
1074 * Then find the right handler and call it. Interrupts are
1075 * still soft-disabled and we keep them that way.
1076 */
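	/*
	 * In C terms, roughly (illustrative only, mirroring the branches
	 * below):
	 *
	 *	switch (r3) {
	 *	case 0x500: do_IRQ(regs); break;
	 *	case 0xf00: performance_monitor_exception(regs); break;
	 *	case 0xe60: handle_hmi_exception(regs); break;
	 *	case 0x900: timer_interrupt(regs); break;
	 *	case 0xa00: doorbell_exception(regs); break;  (0x280 on Book3E)
	 *	}
	 */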
1077 cmpwi cr0,r3,0x500
1078 bne 1f
1079 addi r3,r1,STACK_FRAME_OVERHEAD;
1080 bl do_IRQ
1081 b ret_from_except
1082 1: cmpwi cr0,r3,0xf00
1083 bne 1f
1084 addi r3,r1,STACK_FRAME_OVERHEAD;
1085 bl performance_monitor_exception
1086 b ret_from_except
1087 1: cmpwi cr0,r3,0xe60
1088 bne 1f
1089 addi r3,r1,STACK_FRAME_OVERHEAD;
1090 bl handle_hmi_exception
1091 b ret_from_except
1092 1: cmpwi cr0,r3,0x900
1093 bne 1f
1094 addi r3,r1,STACK_FRAME_OVERHEAD;
1095 bl timer_interrupt
1096 b ret_from_except
1097 #ifdef CONFIG_PPC_DOORBELL
1098 1:
1099 #ifdef CONFIG_PPC_BOOK3E
1100 cmpwi cr0,r3,0x280
1101 #else
1102 cmpwi cr0,r3,0xa00
1103 #endif /* CONFIG_PPC_BOOK3E */
1104 bne 1f
1105 addi r3,r1,STACK_FRAME_OVERHEAD;
1106 bl doorbell_exception
1107 #endif /* CONFIG_PPC_DOORBELL */
1108 1: b ret_from_except /* What else to do here ? */
1109
1110 .Lunrecov_restore:
1111 addi r3,r1,STACK_FRAME_OVERHEAD
1112 bl unrecoverable_exception
1113 b .Lunrecov_restore
1114
1115 _ASM_NOKPROBE_SYMBOL(ret_from_except);
1116 _ASM_NOKPROBE_SYMBOL(ret_from_except_lite);
1117 _ASM_NOKPROBE_SYMBOL(resume_kernel);
1118 _ASM_NOKPROBE_SYMBOL(fast_exc_return_irq);
1119 _ASM_NOKPROBE_SYMBOL(restore);
1120 _ASM_NOKPROBE_SYMBOL(fast_exception_return);
1121
1122
1123 #ifdef CONFIG_PPC_RTAS
1124 /*
1125 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1126 * called with the MMU off.
1127 *
1128 * In addition, we need to be in 32b mode, at least for now.
1129 *
1130 * Note: r3 is an input parameter to rtas, so don't trash it...
1131 */
1132 _GLOBAL(enter_rtas)
1133 mflr r0
1134 std r0,16(r1)
1135 stdu r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space. */
1136
1137 /* Because RTAS is running in 32b mode, it clobbers the high order half
1138 * of all registers that it saves. We therefore save those registers
1139 * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
1140 */
1141 SAVE_GPR(2, r1) /* Save the TOC */
1142 SAVE_GPR(13, r1) /* Save paca */
1143 SAVE_8GPRS(14, r1) /* Save the non-volatiles */
1144 SAVE_10GPRS(22, r1) /* ditto */
1145
1146 mfcr r4
1147 std r4,_CCR(r1)
1148 mfctr r5
1149 std r5,_CTR(r1)
1150 mfspr r6,SPRN_XER
1151 std r6,_XER(r1)
1152 mfdar r7
1153 std r7,_DAR(r1)
1154 mfdsisr r8
1155 std r8,_DSISR(r1)
1156
1157 /* Temporary workaround to clear CR until RTAS can be modified to
1158 * ignore all bits.
1159 */
1160 li r0,0
1161 mtcr r0
1162
1163 #ifdef CONFIG_BUG
1164 /* There is no way it is acceptable to get here with interrupts enabled;
1165 * check that with the asm equivalent of WARN_ON.
1166 */
1167 lbz r0,PACAIRQSOFTMASK(r13)
1168 1: tdeqi r0,IRQS_ENABLED
1169 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
1170 #endif
1171
1172 /* Hard-disable interrupts */
1173 mfmsr r6
1174 rldicl r7,r6,48,1
1175 rotldi r7,r7,16
1176 mtmsrd r7,1
1177
1178 /* Unfortunately, the stack pointer and the MSR are also clobbered,
1179 * so they are saved in the PACA which allows us to restore
1180 * our original state after RTAS returns.
1181 */
1182 std r1,PACAR1(r13)
1183 std r6,PACASAVEDMSR(r13)
1184
1185 /* Setup our real return addr */
1186 LOAD_REG_ADDR(r4,rtas_return_loc)
1187 clrldi r4,r4,2 /* convert to realmode address */
1188 mtlr r4
1189
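	/*
	 * Build two MSR values: r0 is the current MSR with EE, SE, BE and
	 * RI cleared (used by the mtmsrd in __enter_rtas below), and r6 is
	 * that value with SF, IR/DR, FP/FE0/FE1, RI and LE cleared as
	 * well, i.e. a 32-bit, big-endian, real-mode MSR for RTAS itself,
	 * which goes into SRR1.
	 */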
1190 li r0,0
1191 ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
1192 andc r0,r6,r0
1193
1194 li r9,1
1195 rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
1196 ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
1197 andc r6,r0,r9
1198
1199 __enter_rtas:
1200 sync /* disable interrupts so SRR0/1 */
1201 mtmsrd r0 /* don't get trashed */
1202
1203 LOAD_REG_ADDR(r4, rtas)
1204 ld r5,RTASENTRY(r4) /* get the rtas->entry value */
1205 ld r4,RTASBASE(r4) /* get the rtas->base value */
1206
1207 mtspr SPRN_SRR0,r5
1208 mtspr SPRN_SRR1,r6
1209 RFI_TO_KERNEL
1210 b . /* prevent speculative execution */
1211
1212 rtas_return_loc:
1213 FIXUP_ENDIAN
1214
1215 /*
1216 * Clear RI and set SF before anything.
1217 */
1218 mfmsr r6
1219 li r0,MSR_RI
1220 andc r6,r6,r0
1221 sldi r0,r0,(MSR_SF_LG - MSR_RI_LG)
1222 or r6,r6,r0
1223 sync
1224 mtmsrd r6
1225
1226 /* relocation is off at this point */
1227 GET_PACA(r4)
1228 clrldi r4,r4,2 /* convert to realmode address */
1229
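	/*
	 * "bcl 20,31,$+4" is the branch-and-link-to-next-instruction idiom:
	 * it puts the address of the following instruction into LR (this
	 * form is intended not to disturb return-address prediction), which
	 * is then used to load the rtas_restore_regs pointer stored at the
	 * "1:" literal below, PC-relatively, while relocation is off.
	 */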
1230 bcl 20,31,$+4
1231 0: mflr r3
1232 ld r3,(1f-0b)(r3) /* get &rtas_restore_regs */
1233
1234 ld r1,PACAR1(r4) /* Restore our SP */
1235 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
1236
1237 mtspr SPRN_SRR0,r3
1238 mtspr SPRN_SRR1,r4
1239 RFI_TO_KERNEL
1240 b . /* prevent speculative execution */
1241 _ASM_NOKPROBE_SYMBOL(__enter_rtas)
1242 _ASM_NOKPROBE_SYMBOL(rtas_return_loc)
1243
1244 .align 3
1245 1: .8byte rtas_restore_regs
1246
1247 rtas_restore_regs:
1248 /* relocation is on at this point */
1249 REST_GPR(2, r1) /* Restore the TOC */
1250 REST_GPR(13, r1) /* Restore paca */
1251 REST_8GPRS(14, r1) /* Restore the non-volatiles */
1252 REST_10GPRS(22, r1) /* ditto */
1253
1254 GET_PACA(r13)
1255
1256 ld r4,_CCR(r1)
1257 mtcr r4
1258 ld r5,_CTR(r1)
1259 mtctr r5
1260 ld r6,_XER(r1)
1261 mtspr SPRN_XER,r6
1262 ld r7,_DAR(r1)
1263 mtdar r7
1264 ld r8,_DSISR(r1)
1265 mtdsisr r8
1266
1267 addi r1,r1,SWITCH_FRAME_SIZE /* Unstack our frame */
1268 ld r0,16(r1) /* get return address */
1269
1270 mtlr r0
1271 blr /* return to caller */
1272
1273 #endif /* CONFIG_PPC_RTAS */
1274
1275 _GLOBAL(enter_prom)
1276 mflr r0
1277 std r0,16(r1)
1278 stdu r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space */
1279
1280 /* Because PROM is running in 32b mode, it clobbers the high order half
1281 * of all registers that it saves. We therefore save those registers
1282 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
1283 */
1284 SAVE_GPR(2, r1)
1285 SAVE_GPR(13, r1)
1286 SAVE_8GPRS(14, r1)
1287 SAVE_10GPRS(22, r1)
1288 mfcr r10
1289 mfmsr r11
1290 std r10,_CCR(r1)
1291 std r11,_MSR(r1)
1292
1293 /* Put PROM address in SRR0 */
1294 mtsrr0 r4
1295
1296 /* Setup our trampoline return addr in LR */
1297 bcl 20,31,$+4
1298 0: mflr r4
1299 addi r4,r4,(1f - 0b)
1300 mtlr r4
1301
1302 /* Prepare a 32-bit mode big endian MSR
1303 */
1304 #ifdef CONFIG_PPC_BOOK3E
1305 rlwinm r11,r11,0,1,31
1306 mtsrr1 r11
1307 rfi
1308 #else /* CONFIG_PPC_BOOK3E */
1309 LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
1310 andc r11,r11,r12
1311 mtsrr1 r11
1312 RFI_TO_KERNEL
1313 #endif /* CONFIG_PPC_BOOK3E */
1314
1315 1: /* Return from OF */
1316 FIXUP_ENDIAN
1317
1318 /* Just make sure that r1's top 32 bits didn't get
1319 * corrupted by OF
1320 */
1321 rldicl r1,r1,0,32
1322
1323 /* Restore the MSR (back to 64 bits) */
1324 ld r0,_MSR(r1)
1325 MTMSRD(r0)
1326 isync
1327
1328 /* Restore other registers */
1329 REST_GPR(2, r1)
1330 REST_GPR(13, r1)
1331 REST_8GPRS(14, r1)
1332 REST_10GPRS(22, r1)
1333 ld r4,_CCR(r1)
1334 mtcr r4
1335
1336 addi r1,r1,SWITCH_FRAME_SIZE
1337 ld r0,16(r1)
1338 mtlr r0
1339 blr