/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the SLB and STAB
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 */

#include <asm/exception-64s.h>
#include <asm/ptrace.h>
/*
 * We lay out physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
 * 0x6000 - 0x6fff : Initial (CPU0) segment table
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -        : Early init and support code
 */
/*
 * This is the start of the interrupt handlers for pSeries.
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
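/*
 * For example, a "b data_access_pSeries" inside this section encodes a
 * PC-relative offset and is therefore still correct after the copy down
 * to 0x100.  Reaching code that lives outside the section (such as
 * .slb_miss_realmode) instead goes through an absolute address built
 * with LOAD_HANDLER, as done further below.
 */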
	.globl __start_interrupts

	.globl system_reset_pSeries;
#ifdef CONFIG_PPC_P7_NAP
	/* Running native on arch 2.06 or later, check if we are
	 * waking up from nap. We only handle no state loss and
	 * supervisor state loss. We do -not- handle hypervisor
	 * state loss at this time.
	 */
	rlwinm	r13,r13,47-31,30,31
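	/*
	 * Roughly what the rotate/mask above does: it leaves SRR1 bits
	 * 46:47 in the low two bits of r13.  On a wakeup from a
	 * power-saving mode these bits encode how much state was lost;
	 * a value of 0 indicates this was not a power-save wakeup at all.
	 */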
	b	.power7_wakeup_noloss

	/* Total loss of HV state is fatal, we could try to use the
	 * PIR to locate a PACA, then use an emergency stack etc...
	 * but for now, let's just stay stuck here.
	 */
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE_206)
#endif /* CONFIG_PPC_P7_NAP */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
machine_check_pSeries_1:
	/* This is moved out of line as it can be patched by FW, but
	 * some code path might still want to branch into the original
	 * vector.
	 */
	b	machine_check_pSeries
	.globl data_access_pSeries
#ifndef CONFIG_POWER4_ONLY
	b	data_access_check_stab
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
	std	r3,PACA_EXSLB+EX_R3(r13)
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to .slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
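	/*
	 * Sketch of the dispatch this sets up: LOAD_HANDLER() adds the
	 * handler's offset from _stext to the kernel base loaded above;
	 * the result is then branched to indirectly, e.g.
	 *	mtctr	r10
	 *	bctr
	 * so the branch lands correctly wherever the relocatable kernel
	 * was loaded.
	 */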
	STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)

	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	/* We open code these as we can't have a ". = x" (even with
	 * x = ".") within a feature section.
	 */
	.globl hardware_interrupt_pSeries;
	.globl hardware_interrupt_hv;
hardware_interrupt_pSeries:
hardware_interrupt_hv:
	_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
				    EXC_HV, SOFTEN_TEST_HV)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
	_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
				    EXC_STD, SOFTEN_TEST_PR)
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE_206)
	STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)

	STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)

	STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)

	MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
	MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer)

	STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)

	STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
	.globl system_call_pSeries
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	std	r9,PACA_EXGEN+EX_R9(r13)
	std	r10,PACA_EXGEN+EX_R10(r13)
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, system_call_entry)
	b	.	/* prevent speculative execution */
	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)

/* Fast LE/BE switch system call */
1:	mfspr	r12,SPRN_SRR1
	rfid		/* return to userspace */
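	/*
	 * Sketch of the step this path relies on: before the rfid the full
	 * handler toggles MSR_LE in the SRR1 value read above, roughly
	 *	xori	r12,r12,MSR_LE
	 *	mtspr	SPRN_SRR1,r12
	 * so the return to userspace flips the endian mode.
	 */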
	STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
	/* At 0xe??? we have a bunch of hypervisor exceptions, we branch
	 * out of line to handle them.
	 */
	b	emulation_assist_hv
	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary.
	 */
performance_monitor_pSeries_1:
	b	performance_monitor_pSeries

altivec_unavailable_pSeries_1:
	b	altivec_unavailable_pSeries

vsx_unavailable_pSeries_1:
	b	vsx_unavailable_pSeries
#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
#endif /* CONFIG_CBE_RAS */
/*** Out of line interrupts support ***/

	/* moved from 0x200 */
machine_check_pSeries:
	.globl machine_check_fwnmi
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common,
				 EXC_STD, KVMTEST, 0x200)
	KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
#ifndef CONFIG_POWER4_ONLY
	/* moved from 0x300 */
data_access_check_stab:
	std	r9,PACA_EXSLB+EX_R9(r13)
	std	r10,PACA_EXSLB+EX_R10(r13)
	rlwimi	r10,r9,16,0x20
#ifdef CONFIG_KVM_BOOK3S_PR
	lbz	r9,HSTATE_IN_GUEST(r13)
	rlwimi	r10,r9,8,0x300
#endif
	beq	do_stab_bolted_pSeries
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	b	data_access_not_stab
do_stab_bolted_pSeries:
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	std	r10,PACA_EXSLB+EX_R13(r13)
	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
#endif /* CONFIG_POWER4_ONLY */
	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x300)
	KVM_HANDLER_PR_SKIP(PACA_EXSLB, EXC_STD, 0x380)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
	KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
	/* moved from 0xe00 */
	STD_EXCEPTION_HV(., 0xe02, h_data_storage)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
	STD_EXCEPTION_HV(., 0xe22, h_instr_storage)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
	STD_EXCEPTION_HV(., 0xe42, emulation_assist)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
	STD_EXCEPTION_HV(., 0xe62, hmi_exception) /* need to flush cache ? */
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)

	/* moved from 0xf00 */
	STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
	STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
	STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
/*
 * An interrupt came in while soft-disabled; clear EE in SRR1,
 * clear paca->hard_enabled and return.
 */
	stb	r10,PACAHARDIRQEN(r13)
	ld	r9,PACA_EXGEN+EX_R9(r13)
	rldicl	r10,r10,48,1		/* clear MSR_EE */
	ld	r10,PACA_EXGEN+EX_R10(r13)

	stb	r10,PACAHARDIRQEN(r13)
	ld	r9,PACA_EXGEN+EX_R9(r13)
	rldicl	r10,r10,48,1		/* clear MSR_EE */
	ld	r10,PACA_EXGEN+EX_R10(r13)
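	/*
	 * Worked example of the EE-clearing trick used in both paths above:
	 * MSR_EE is bit 48 (0x8000).  rldicl rN,rN,48,1 rotates the saved
	 * MSR (from SRR1/HSRR1) left by 48 so that EE lands in bit 0, where
	 * the mask (mb=1) clears it; the full handler then rotates left by a
	 * further 16 (rotldi) to bring the register back to its original
	 * alignment with only EE cleared, before writing it back.
	 */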
#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option.  Share common code.
 */
	.globl system_reset_fwnmi
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
#endif /* CONFIG_PPC_PSERIES */
/*
 * This is used for when the SLB miss handler has to go virtual,
 * which doesn't happen at the moment but will once we re-implement
 * dynamic VSIDs for shared page tables.
 */
slb_miss_user_pseries:
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	ld	r11,PACA_EXSLB+EX_R9(r13)
	ld	r12,PACA_EXSLB+EX_R3(r13)
	std	r10,PACA_EXGEN+EX_R13(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R3(r13)
	mfspr	r11,SRR0		/* save SRR0 */
	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mfspr	r12,SRR1		/* and SRR1 */
	b	.			/* prevent spec. execution */
#endif /* __DISABLED__ */
/* KVM's trampoline code needs to be close to the interrupt handlers */
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#ifdef CONFIG_KVM_BOOK3S_PR
#include "../kvm/book3s_rmhandlers.S"
#else
#include "../kvm/book3s_hv_rmhandlers.S"
#endif
#endif

	.globl __end_interrupts
/*
 * Code from here down to __end_handlers is invoked from the
 * exception prologs above.  Because the prologs assemble the
 * addresses of these handlers using the LOAD_HANDLER macro,
 * which uses an addi instruction, these handlers must be in
 * the first 32k of the kernel image.
 */
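/*
 * For illustration, a sketch of the mechanism (see LOAD_HANDLER in
 * asm/exception-64s.h for the actual definition): the prologs do roughly
 *
 *	ld	r10,PACAKBASE(r13)		; runtime kernel base
 *	addi	r10,r10,(handler_label - _stext)
 *
 * and addi only takes a 16-bit signed immediate, hence the requirement
 * that handler_label - _stext fits within the first 32k of the image.
 */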
/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.globl machine_check_common
machine_check_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
	STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
	STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */
/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
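	/*
	 * Explanatory note: PACAEMERGSP points at the top of this CPU's
	 * emergency stack, so the subi above carves out one interrupt frame
	 * plus 64 bytes of slack before the registers are saved into it.
	 */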
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	lhz	r12,PACA_TRAP_SAVE(r13)
	addi	r11,r1,INT_FRAME_SIZE
	ld	r11,exception_marker@toc(r2)
	std	r11,STACK_FRAME_OVERHEAD-16(r1)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.globl data_access_common
	std	r10,PACA_EXGEN+EX_DAR(r13)
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	b	.do_hash_page		/* Try to handle as hpte fault */
	.globl h_data_storage_common
h_data_storage_common:
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_HDSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unknown_exception

	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	b	.do_hash_page		/* Try to handle as hpte fault */

	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)
/*
 * Here is the common SLB miss-user handler that is used when going to
 * virtual mode for SLB misses; it is currently not used.
 */
	.globl slb_miss_user_common
slb_miss_user_common:
	std	r3,PACA_EXGEN+EX_DAR(r13)
	stw	r9,PACA_EXGEN+EX_CCR(r13)
	std	r10,PACA_EXGEN+EX_LR(r13)
	std	r11,PACA_EXGEN+EX_SRR0(r13)
	bl	.slb_allocate_user

	ld	r10,PACA_EXGEN+EX_LR(r13)
	ld	r3,PACA_EXGEN+EX_R3(r13)
	lwz	r9,PACA_EXGEN+EX_CCR(r13)
	ld	r11,PACA_EXGEN+EX_SRR0(r13)

	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
	beq-	unrecov_user_slb

	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */

	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)

	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
	ld	r4,PACA_EXGEN+EX_DAR(r13)

	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
#endif /* __DISABLED__ */
/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contains the saved SRR1, SRR0 is still ready for return,
 * r3 has the faulting address,
 * r9 - r13 are saved in paca->exslb,
 * r3 is saved in paca->slb_r3.
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(slb_miss_realmode)
#ifdef CONFIG_RELOCATABLE
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	.slb_allocate_realmode

	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
#ifdef CONFIG_PPC_ISERIES
	ld	r11,PACALPPACAPTR(r13)
	ld	r11,LPPACASRR0(r11)		/* get SRR0 value */
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */

	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */

	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
#ifdef CONFIG_PPC_ISERIES
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	b	.	/* prevent speculative execution */
#ifdef CONFIG_PPC_ISERIES
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10,unrecov_slb)

	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	.globl hardware_interrupt_common
	.globl hardware_interrupt_entry
hardware_interrupt_common:
	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
hardware_interrupt_entry:
	bl	.ppc64_runlatch_on
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	b	.ret_from_except_lite

#ifdef CONFIG_PPC_970_NAP
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	.globl alignment_common
	std	r10,PACA_EXGEN+EX_DAR(r13)
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.alignment_exception

	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.program_check_exception

	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	1f			/* if from user, just load it up */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_fp_unavailable_exception
	b	fast_exception_return
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
	b	fast_exception_return
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.altivec_unavailable_exception

	.globl vsx_unavailable_common
vsx_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.vsx_unavailable_exception
	.globl __end_handlers
/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
fast_exc_return_irq:			/* restores irq state too */
	TRACE_AND_RESTORE_IRQ(r3);
	rldicl	r4,r12,49,63		/* get MSR_EE to LSB */
	stb	r4,PACAHARDIRQEN(r13)	/* restore paca->hard_enabled */
	.globl fast_exception_return
fast_exception_return:
	andi.	r3,r12,MSR_RI		/* check if RI is set */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	ACCOUNT_CPU_USER_EXIT(r3, r4)
	rldicl	r10,r10,48,1		/* clear EE */
	rldicr	r10,r10,16,61		/* clear RI (LE is 0 already) */
	b	.	/* prevent speculative execution */

1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
_STATIC(do_hash_page)
	andis.	r0,r4,0xa410		/* weird error? */
	bne-	handle_page_fault	/* if not, try to insert a HPTE */
	andis.	r0,r4,DSISR_DABRMATCH@h
	bne-	handle_dabr_fault

	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
	bne-	do_ste_alloc		/* If so handle it */
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)

	clrrdi	r11,r1,THREAD_SHIFT
	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
	bne	77f			/* then don't call hash_page now */
	/*
	 * On iSeries, we soft-disable interrupts here, then
	 * hard-enable interrupts so that the hash_page code can spin on
	 * the hash_table_lock without problems on a shared processor.
	 */

	/*
	 * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
	 * and will clobber volatile registers when irq tracing is enabled
	 * so we need to reload them. It may be possible to be smarter here
	 * and move the irq tracing elsewhere but let's keep it simple for
	 * now.
	 */
#ifdef CONFIG_TRACE_IRQFLAGS
#endif /* CONFIG_TRACE_IRQFLAGS */
	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel). We assume
	 * kernel addresses always have the high bit set.
	 */
	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori	r4,r4,1			/* add _PAGE_PRESENT */
	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
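	/*
	 * Worked reading of the bit-fiddling above: the rotldi puts the
	 * address's top bit in the MSR_PR position, so the orc produces a
	 * bit that is set when either MSR_PR was set or the top bit of the
	 * address is clear (a user address); the rlwimi drops that bit into
	 * _PAGE_USER (0x0002).  The first rlwinm turns the DSISR store bit
	 * into _PAGE_RW, the ori adds _PAGE_PRESENT, and the last rlwimi
	 * sets _PAGE_EXEC when the trap number is 0x400 (an ISI).
	 */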
	/*
	 * r3 contains the faulting address,
	 * r4 contains the required access permissions,
	 * r5 contains the trap number.
	 *
	 * At return, r3 = 0 for success.
	 */
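	/*
	 * In C terms the call below is roughly (a sketch; the authoritative
	 * prototype lives with the hash MMU code under arch/powerpc/mm):
	 *	int hash_page(unsigned long ea, unsigned long access,
	 *		      unsigned long trap);
	 * with ea in r3, the _PAGE_* access mask built above in r4, the trap
	 * number in r5, and the result coming back in r3.
	 */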
	bl	.hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if hash_page succeeded */
	/*
	 * If we had interrupts soft-enabled at the point where the
	 * DSI/ISI occurred, an interrupt may have come in during hash_page.
	 * We jump to ret_from_except_lite rather than fast_exception_return
	 * because ret_from_except_lite will check for and handle pending
	 * interrupts if necessary.
	 */
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)

	/*
	 * Here we have interrupts hard-disabled, so it is sufficient
	 * to restore paca->{soft,hard}_enable and get out.
	 */
	beq	fast_exc_return_irq	/* Return from exception on success */
END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
	/* For a hash failure, we don't bother re-enabling interrupts */

	/*
	 * hash_page couldn't handle it, set soft interrupt enable back
	 * to what it was before the trap.  Note that .arch_local_irq_restore
	 * handles any interrupts pending at this point.
	 */
	TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
	bl	.arch_local_irq_restore
	/* We have a data breakpoint exception - handle it */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	b	.ret_from_except_lite

	/* Here we have a page fault that hash_page can't handle. */
	addi	r3,r1,STACK_FRAME_OVERHEAD

	addi	r3,r1,STACK_FRAME_OVERHEAD

13:	b	.ret_from_except_lite
	/* We have a page fault that hash_page could handle but HV refused
	 * the PTE insertion.
	 */
	addi	r3,r1,STACK_FRAME_OVERHEAD
/*
 * We come here as a result of a DSI at a point where we don't want
 * to call hash_page, such as when we are accessing memory (possibly
 * user memory) inside a PMU interrupt that occurred while interrupts
 * were soft-disabled.  We want to invoke the exception handler for
 * the access, or panic if there isn't a handler.
 */
	addi	r3,r1,STACK_FRAME_OVERHEAD

	/* here we have a segment miss */
	bl	.ste_allocate		/* try to insert stab entry */
	bne-	handle_page_fault
	b	fast_exception_return
/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 */
_GLOBAL(do_stab_bolted)
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */
	/* Hash to the primary group */
	ld	r10,PACASTABVIRT(r13)
	rldimi	r10,r11,7,52	/* r10 = first ste of the group */
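	/*
	 * Reading of the rldimi above: the segment table page is 4k and a
	 * group is 8 STEs of 16 bytes (128 bytes), so there are 32 groups;
	 * the low 5 bits of the ESID, shifted left by 7, give the group's
	 * byte offset within the page.
	 */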
	/* Calculate VSID */
	/* This is a kernel address, so protovsid = ESID */
	ASM_VSID_SCRAMBLE(r11, r9, 256M)
	rldic	r9,r11,12,16	/* r9 = vsid << 12 */

	/* Search the primary group for a free entry */
1:	ld	r11,0(r10)	/* Test valid bit of the current ste */

	/* Stick to searching only the primary group for now. */
	/* At least for now, we use a very simple random castout scheme */
	/* Use the TB as a random number ; OR in 1 to avoid entry 0 */
	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */

	/* r10 currently points to an ste one past the group of interest */
	/* make it point to the randomly selected entry */
	or	r10,r10,r11	/* r10 is the entry to invalidate */

	isync			/* mark the entry invalid */
	rldicl	r11,r11,56,1	/* clear the valid bit */

	clrrdi	r11,r11,28	/* Get the esid part of the ste */

2:	std	r9,8(r10)	/* Store the vsid part of the ste */

	mfspr	r11,SPRN_DAR		/* Get the new esid */
	clrrdi	r11,r11,28		/* Permits a full 32b of ESID */
	ori	r11,r11,0x90		/* Turn on valid and kp */
	std	r11,0(r10)		/* Put new entry back into the stab */
	/* All done -- return from exception. */

	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */

	andi.	r10,r12,MSR_RI
	mtcrf	0x80,r9			/* restore CR */

	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	b	.	/* prevent speculative execution */
#ifdef CONFIG_PPC_PSERIES
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	.globl fwnmi_data_area
#endif /* CONFIG_PPC_PSERIES */
/* iSeries does not use the FWNMI stuff, so it is safe to put
 * this here, even if we later allow kernels that will boot on
 * both pSeries and iSeries */
#ifdef CONFIG_PPC_ISERIES
	.quad	HvEsidsToMap		/* xNumberEsids */
	.quad	HvRangesToMap		/* xNumberRanges */
	.quad	STAB0_PAGE		/* xSegmentTableOffs */
	.zero	40			/* xRsvd */
	/* xEsids (HvEsidsToMap entries of 2 quads) */
	.quad	PAGE_OFFSET_ESID	/* xKernelEsid */
	.quad	PAGE_OFFSET_VSID	/* xKernelVsid */
	.quad	VMALLOC_START_ESID	/* xKernelEsid */
	.quad	VMALLOC_START_VSID	/* xKernelVsid */
	/* xRanges (HvRangesToMap entries of 3 quads) */
	.quad	HvPagesToMap		/* xPages */
	.quad	0			/* xOffset */
	.quad	PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT)	/* xVPN */
#endif /* CONFIG_PPC_ISERIES */
#ifdef CONFIG_PPC_PSERIES
#endif /* CONFIG_PPC_PSERIES */
/*
 * Space for CPU0's segment table.
 *
 * On iSeries, the hypervisor must fill in at least one entry before
 * we get control (with relocate on). The address is given to the hv
 * as a page number (see xLparMap above), so this must be at a
 * fixed address (the linker can't compute (u64)&initial_stab >>
 * PAGE_SHIFT).
 */
	. = STAB0_OFFSET	/* 0x6000 */