1/*
2 * This file contains the 64-bit "server" PowerPC variant
3 * of the low level exception handling including exception
4 * vectors, exception return, part of the slb and stab
5 * handling and other fixed offset specific things.
6 *
7 * This file is meant to be #included from head_64.S due to
25985edc 8 * position dependent assembly.
9 *
10 * Most of this originates from head_64.S and thus has the same
11 * copyright history.
12 *
13 */
14
7230c564 15#include <asm/hw_irq.h>
8aa34ab8 16#include <asm/exception-64s.h>
46f52210 17#include <asm/ptrace.h>
8aa34ab8 18
19/*
20 * We lay out physical memory as follows:
21 * 0x0000 - 0x00ff : Secondary processor spin code
22 * 0x0100 - 0x17ff : pSeries Interrupt prologs
23 * 0x1800 - 0x4000 : interrupt support, common interrupt prologs
24 * 0x4000 - 0x5fff : pSeries interrupts with IR=1,DR=1
25 * 0x6000 - 0x6fff : more interrupt support including for IR=1,DR=1
0ebc4cda 26 * 0x7000 - 0x7fff : FWNMI data area
27 * 0x8000 - 0x8fff : Initial (CPU0) segment table
28 * 0x9000 - : Early init and support code
0ebc4cda 29 */
30 /* Syscall routine is used twice, in reloc-off and reloc-on paths */
31#define SYSCALL_PSERIES_1 \
32BEGIN_FTR_SECTION \
33 cmpdi r0,0x1ebe ; \
34 beq- 1f ; \
35END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
36 mr r9,r13 ; \
37 GET_PACA(r13) ; \
38 mfspr r11,SPRN_SRR0 ; \
390:
40
41#define SYSCALL_PSERIES_2_RFID \
42 mfspr r12,SPRN_SRR1 ; \
43 ld r10,PACAKBASE(r13) ; \
44 LOAD_HANDLER(r10, system_call_entry) ; \
45 mtspr SPRN_SRR0,r10 ; \
46 ld r10,PACAKMSR(r13) ; \
47 mtspr SPRN_SRR1,r10 ; \
48 rfid ; \
49 b . ; /* prevent speculative execution */
50
51#define SYSCALL_PSERIES_3 \
52 /* Fast LE/BE switch system call */ \
531: mfspr r12,SPRN_SRR1 ; \
54 xori r12,r12,MSR_LE ; \
55 mtspr SPRN_SRR1,r12 ; \
56 rfid ; /* return to userspace */ \
57 b . ; \
582: mfspr r12,SPRN_SRR1 ; \
59 andi. r12,r12,MSR_PR ; \
60 bne 0b ; \
61 mtspr SPRN_SRR0,r3 ; \
62 mtspr SPRN_SRR1,r4 ; \
63 mtspr SPRN_SDR1,r5 ; \
64 rfid ; \
65 b . ; /* prevent speculative execution */
66
67#if defined(CONFIG_RELOCATABLE)
68 /*
69 * We can't branch directly; in the direct case we use LR
70 * and system_call_entry restores LR. (We thus need to move
71 * LR to r10 in the RFID case too.)
72 */
73#define SYSCALL_PSERIES_2_DIRECT \
74 mflr r10 ; \
75 ld r12,PACAKBASE(r13) ; \
76 LOAD_HANDLER(r12, system_call_entry_direct) ; \
77 mtlr r12 ; \
78 mfspr r12,SPRN_SRR1 ; \
79 /* Re-use of r13... No spare regs to do this */ \
80 li r13,MSR_RI ; \
81 mtmsrd r13,1 ; \
82 GET_PACA(r13) ; /* get r13 back */ \
83 blr ;
84#else
85 /* We can branch directly */
86#define SYSCALL_PSERIES_2_DIRECT \
87 mfspr r12,SPRN_SRR1 ; \
88 li r10,MSR_RI ; \
89 mtmsrd r10,1 ; /* Set RI (EE=0) */ \
90 b system_call_entry_direct ;
91#endif
0ebc4cda 92
93/*
94 * This is the start of the interrupt handlers for pSeries
95 * This code runs with relocation off.
96 * Code from here to __end_interrupts gets copied down to real
97 * address 0x100 when we are running a relocatable kernel.
98 * Therefore any relative branches in this section must only
99 * branch to labels in this section.
100 */
101 . = 0x100
102 .globl __start_interrupts
103__start_interrupts:
104
105 .globl system_reset_pSeries;
106system_reset_pSeries:
107 HMT_MEDIUM;
108 SET_SCRATCH0(r13)
109#ifdef CONFIG_PPC_P7_NAP
110BEGIN_FTR_SECTION
111 /* Running native on arch 2.06 or later, check if we are
112 * waking up from nap. We only handle no state loss and
113 * supervisor state loss. We do -not- handle hypervisor
114 * state loss at this time.
115 */
116 mfspr r13,SPRN_SRR1
117 rlwinm. r13,r13,47-31,30,31
118 beq 9f
119
120 /* waking up from powersave (nap) state */
121 cmpwi cr1,r13,2
122 /* Total loss of HV state is fatal. We could try to use the
123 * PIR to locate a PACA, then use an emergency stack etc...
124 * but for now, let's just stay stuck here
125 */
126 bgt cr1,.
127 GET_PACA(r13)
128
129#ifdef CONFIG_KVM_BOOK3S_64_HV
130 li r0,KVM_HWTHREAD_IN_KERNEL
131 stb r0,HSTATE_HWTHREAD_STATE(r13)
132 /* Order setting hwthread_state vs. testing hwthread_req */
133 sync
134 lbz r0,HSTATE_HWTHREAD_REQ(r13)
135 cmpwi r0,0
136 beq 1f
137 b kvm_start_guest
1381:
139#endif
140
141 beq cr1,2f
142 b .power7_wakeup_noloss
1432: b .power7_wakeup_loss
1449:
969391c5 145END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
948cf67c 146#endif /* CONFIG_PPC_P7_NAP */
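/*
 * The wakeup test above boils down to decoding the two SRR1 "wakeup
 * state loss" bits; a rough C sketch of the dispatch (the shift and the
 * handler names mirror the asm, the rest is illustrative):
 *
 *	int loss = (srr1 >> 16) & 3;
 *	if (loss == 0)
 *		;                          // not a nap wakeup: normal system reset
 *	else if (loss == 1)
 *		power7_wakeup_noloss();    // no state lost
 *	else if (loss == 2)
 *		power7_wakeup_loss();      // supervisor state lost
 *	else
 *		for (;;) ;                 // hypervisor state lost: stay stuck here
 */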
147 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
148 NOTEST, 0x100)
149
150 . = 0x200
151machine_check_pSeries_1:
152 /* This is moved out of line as it can be patched by FW, but
153 * some code path might still want to branch into the original
154 * vector
155 */
156 b machine_check_pSeries
157
158 . = 0x300
159 .globl data_access_pSeries
160data_access_pSeries:
161 HMT_MEDIUM
673b189a 162 SET_SCRATCH0(r13)
0ebc4cda 163BEGIN_FTR_SECTION
164 b data_access_check_stab
165data_access_not_stab:
166END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
b01c8b54 167 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
697d3899 168 KVMTEST, 0x300)
169
170 . = 0x380
171 .globl data_access_slb_pSeries
172data_access_slb_pSeries:
173 HMT_MEDIUM
673b189a 174 SET_SCRATCH0(r13)
697d3899 175 EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
176 std r3,PACA_EXSLB+EX_R3(r13)
177 mfspr r3,SPRN_DAR
178#ifdef __DISABLED__
179 /* Keep that around for when we re-implement dynamic VSIDs */
180 cmpdi r3,0
181 bge slb_miss_user_pseries
182#endif /* __DISABLED__ */
b01c8b54 183 mfspr r12,SPRN_SRR1
184#ifndef CONFIG_RELOCATABLE
185 b .slb_miss_realmode
186#else
187 /*
188 * We can't just use a direct branch to .slb_miss_realmode
189 * because the distance from here to there depends on where
190 * the kernel ends up being put.
191 */
192 mfctr r11
193 ld r10,PACAKBASE(r13)
194 LOAD_HANDLER(r10, .slb_miss_realmode)
195 mtctr r10
196 bctr
197#endif
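/*
 * A rough picture of the LOAD_HANDLER/mtctr/bctr sequence above, as an
 * illustrative C sketch (field and symbol names are approximate):
 *
 *	unsigned long target = local_paca->kernelbase;   // PACAKBASE
 *	target |= (slb_miss_realmode_offset & 0xffff);   // LOAD_HANDLER's ori
 *	mtctr(target);
 *	bctr();                                          // absolute branch
 *
 * i.e. an absolute branch through CTR to wherever the relocated kernel
 * actually put .slb_miss_realmode, which is also why such handlers must
 * live in the first 64k of the image (the offset has to fit in the ori).
 */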
198
b3e6b5df 199 STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)
200
201 . = 0x480
202 .globl instruction_access_slb_pSeries
203instruction_access_slb_pSeries:
204 HMT_MEDIUM
673b189a 205 SET_SCRATCH0(r13)
de56a948 206 EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
207 std r3,PACA_EXSLB+EX_R3(r13)
208 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
209#ifdef __DISABLED__
210 /* Keep that around for when we re-implement dynamic VSIDs */
211 cmpdi r3,0
212 bge slb_miss_user_pseries
213#endif /* __DISABLED__ */
b01c8b54 214 mfspr r12,SPRN_SRR1
215#ifndef CONFIG_RELOCATABLE
216 b .slb_miss_realmode
217#else
218 mfctr r11
219 ld r10,PACAKBASE(r13)
220 LOAD_HANDLER(r10, .slb_miss_realmode)
221 mtctr r10
222 bctr
223#endif
224
225 /* We open code these as we can't have a ". = x" (even with
226 * x = ".") within a feature section
227 */
a5d4f3ad 228 . = 0x500;
229 .globl hardware_interrupt_pSeries;
230 .globl hardware_interrupt_hv;
a5d4f3ad 231hardware_interrupt_pSeries:
b3e6b5df 232hardware_interrupt_hv:
a5d4f3ad 233 BEGIN_FTR_SECTION
234 _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
235 EXC_HV, SOFTEN_TEST_HV)
236 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
237 FTR_SECTION_ELSE
238 _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
9e368f29 239 EXC_STD, SOFTEN_TEST_HV_201)
de56a948 240 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
969391c5 241 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
a5d4f3ad 242
b3e6b5df 243 STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
de56a948 244 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
b01c8b54 245
b3e6b5df 246 STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
de56a948 247 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
b01c8b54 248
b3e6b5df 249 STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
de56a948 250 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
a5d4f3ad 251
b3e6b5df 252 MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
dabe859e 253 STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
a5d4f3ad 254
b3e6b5df 255 STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
de56a948 256 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
b01c8b54 257
b3e6b5df 258 STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
de56a948 259 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
260
261 . = 0xc00
262 .globl system_call_pSeries
263system_call_pSeries:
264 HMT_MEDIUM
265#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
266 SET_SCRATCH0(r13)
267 GET_PACA(r13)
268 std r9,PACA_EXGEN+EX_R9(r13)
269 std r10,PACA_EXGEN+EX_R10(r13)
270 mfcr r9
271 KVMTEST(0xc00)
272 GET_SCRATCH0(r13)
273#endif
274 SYSCALL_PSERIES_1
275 SYSCALL_PSERIES_2_RFID
276 SYSCALL_PSERIES_3
277 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
278
b3e6b5df 279 STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
de56a948 280 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
281
282 /* At 0xe??? we have a bunch of hypervisor exceptions, we branch
283 * out of line to handle them
284 */
285 . = 0xe00
e6a74c6e 286hv_exception_trampoline:
287 b h_data_storage_hv
288 . = 0xe20
289 b h_instr_storage_hv
290 . = 0xe40
291 b emulation_assist_hv
292 . = 0xe50
293 b hmi_exception_hv
294 . = 0xe60
295 b hmi_exception_hv
296 . = 0xe80
297 b h_doorbell_hv
298
299 /* We need to deal with the Altivec unavailable exception
300 * here, which is at 0xf20 and thus in the middle of the
301 * prolog code of the PerformanceMonitor one. A little
302 * trickery is therefore necessary
303 */
c86e2ead 304performance_monitor_pSeries_1:
305 . = 0xf00
306 b performance_monitor_pSeries
307
c86e2ead 308altivec_unavailable_pSeries_1:
309 . = 0xf20
310 b altivec_unavailable_pSeries
311
c86e2ead 312vsx_unavailable_pSeries_1:
313 . = 0xf40
314 b vsx_unavailable_pSeries
315
316#ifdef CONFIG_CBE_RAS
b3e6b5df 317 STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
5ccf55dd 318 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
0ebc4cda 319#endif /* CONFIG_CBE_RAS */
b01c8b54 320
b3e6b5df 321 STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
de56a948 322 KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
b01c8b54 323
b92a66a6 324 . = 0x1500
51cf2b30 325 .global denorm_exception_hv
326denorm_exception_hv:
327 HMT_MEDIUM
328 mtspr SPRN_SPRG_HSCRATCH0,r13
329 mfspr r13,SPRN_SPRG_HPACA
330 std r9,PACA_EXGEN+EX_R9(r13)
331 std r10,PACA_EXGEN+EX_R10(r13)
332 std r11,PACA_EXGEN+EX_R11(r13)
333 std r12,PACA_EXGEN+EX_R12(r13)
334 mfspr r9,SPRN_SPRG_HSCRATCH0
335 std r9,PACA_EXGEN+EX_R13(r13)
336 mfcr r9
337
338#ifdef CONFIG_PPC_DENORMALISATION
339 mfspr r10,SPRN_HSRR1
340 mfspr r11,SPRN_HSRR0 /* save HSRR0 */
341 andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */
342 addi r11,r11,-4 /* HSRR0 is next instruction */
343 bne+ denorm_assist
344#endif
345
346 EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
347 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)
348
0ebc4cda 349#ifdef CONFIG_CBE_RAS
b3e6b5df 350 STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
5ccf55dd 351 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
0ebc4cda 352#endif /* CONFIG_CBE_RAS */
b01c8b54 353
b3e6b5df 354 STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
de56a948 355 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)
b01c8b54 356
0ebc4cda 357#ifdef CONFIG_CBE_RAS
b3e6b5df 358 STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
5ccf55dd 359 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
360#else
361 . = 0x1800
362#endif /* CONFIG_CBE_RAS */
363
0ebc4cda 364
365/*** Out of line interrupts support ***/
366
faab4dd2 367 .align 7
368 /* moved from 0x200 */
369machine_check_pSeries:
370 .globl machine_check_fwnmi
371machine_check_fwnmi:
372 HMT_MEDIUM
373 SET_SCRATCH0(r13) /* save r13 */
374 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common,
375 EXC_STD, KVMTEST, 0x200)
376 KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
377
378 /* moved from 0x300 */
379data_access_check_stab:
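	/*
	 * Sort out whether this 0x300 fault is really a segment table miss
	 * on a kernel address: r10 is assembled from (DAR >> 60) plus a
	 * DSISR bit (and, for KVM PR, the in-guest flag), and only the
	 * value 0x2c, i.e. a kernel linear-map address with a segment miss
	 * while not in a guest, is sent to do_stab_bolted_pSeries; anything
	 * else is bounced back to the normal data access path.
	 */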
380 GET_PACA(r13)
381 std r9,PACA_EXSLB+EX_R9(r13)
382 std r10,PACA_EXSLB+EX_R10(r13)
383 mfspr r10,SPRN_DAR
384 mfspr r9,SPRN_DSISR
385 srdi r10,r10,60
386 rlwimi r10,r9,16,0x20
de56a948 387#ifdef CONFIG_KVM_BOOK3S_PR
3c42bf8a 388 lbz r9,HSTATE_IN_GUEST(r13)
389 rlwimi r10,r9,8,0x300
390#endif
391 mfcr r9
392 cmpwi r10,0x2c
393 beq do_stab_bolted_pSeries
394 mtcrf 0x80,r9
395 ld r9,PACA_EXSLB+EX_R9(r13)
396 ld r10,PACA_EXSLB+EX_R10(r13)
397 b data_access_not_stab
398do_stab_bolted_pSeries:
399 std r11,PACA_EXSLB+EX_R11(r13)
400 std r12,PACA_EXSLB+EX_R12(r13)
401 GET_SCRATCH0(r10)
402 std r10,PACA_EXSLB+EX_R13(r13)
403 EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
b01c8b54 404
405 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
406 KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
407 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
408 KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
409 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
410 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
411
412#ifdef CONFIG_PPC_DENORMALISATION
413denorm_assist:
414BEGIN_FTR_SECTION
415/*
416 * To denormalise we need to move a copy of the register to itself.
417 * For POWER6 do that here for all FP regs.
418 */
419 mfmsr r10
420 ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
421 xori r10,r10,(MSR_FE0|MSR_FE1)
422 mtmsrd r10
423 sync
424 fmr 0,0
425 fmr 1,1
426 fmr 2,2
427 fmr 3,3
428 fmr 4,4
429 fmr 5,5
430 fmr 6,6
431 fmr 7,7
432 fmr 8,8
433 fmr 9,9
434 fmr 10,10
435 fmr 11,11
436 fmr 12,12
437 fmr 13,13
438 fmr 14,14
439 fmr 15,15
440 fmr 16,16
441 fmr 17,17
442 fmr 18,18
443 fmr 19,19
444 fmr 20,20
445 fmr 21,21
446 fmr 22,22
447 fmr 23,23
448 fmr 24,24
449 fmr 25,25
450 fmr 26,26
451 fmr 27,27
452 fmr 28,28
453 fmr 29,29
454 fmr 30,30
455 fmr 31,31
456FTR_SECTION_ELSE
457/*
458 * To denormalise we need to move a copy of the register to itself.
459 * For POWER7 do that here for the first 32 VSX registers only.
460 */
461 mfmsr r10
462 oris r10,r10,MSR_VSX@h
463 mtmsrd r10
464 sync
465 XVCPSGNDP(0,0,0)
466 XVCPSGNDP(1,1,1)
467 XVCPSGNDP(2,2,2)
468 XVCPSGNDP(3,3,3)
469 XVCPSGNDP(4,4,4)
470 XVCPSGNDP(5,5,5)
471 XVCPSGNDP(6,6,6)
472 XVCPSGNDP(7,7,7)
473 XVCPSGNDP(8,8,8)
474 XVCPSGNDP(9,9,9)
475 XVCPSGNDP(10,10,10)
476 XVCPSGNDP(11,11,11)
477 XVCPSGNDP(12,12,12)
478 XVCPSGNDP(13,13,13)
479 XVCPSGNDP(14,14,14)
480 XVCPSGNDP(15,15,15)
481 XVCPSGNDP(16,16,16)
482 XVCPSGNDP(17,17,17)
483 XVCPSGNDP(18,18,18)
484 XVCPSGNDP(19,19,19)
485 XVCPSGNDP(20,20,20)
486 XVCPSGNDP(21,21,21)
487 XVCPSGNDP(22,22,22)
488 XVCPSGNDP(23,23,23)
489 XVCPSGNDP(24,24,24)
490 XVCPSGNDP(25,25,25)
491 XVCPSGNDP(26,26,26)
492 XVCPSGNDP(27,27,27)
493 XVCPSGNDP(28,28,28)
494 XVCPSGNDP(29,29,29)
495 XVCPSGNDP(30,30,30)
496 XVCPSGNDP(31,31,31)
497ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
498 mtspr SPRN_HSRR0,r11
499 mtcrf 0x80,r9
500 ld r9,PACA_EXGEN+EX_R9(r13)
501 ld r10,PACA_EXGEN+EX_R10(r13)
502 ld r11,PACA_EXGEN+EX_R11(r13)
503 ld r12,PACA_EXGEN+EX_R12(r13)
504 ld r13,PACA_EXGEN+EX_R13(r13)
505 HRFID
506 b .
507#endif
508
b01c8b54 509 .align 7
b3e6b5df 510 /* moved from 0xe00 */
511 STD_EXCEPTION_HV(., 0xe02, h_data_storage)
512 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
513 STD_EXCEPTION_HV(., 0xe22, h_instr_storage)
514 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
515 STD_EXCEPTION_HV(., 0xe42, emulation_assist)
516 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
517 STD_EXCEPTION_HV(., 0xe62, hmi_exception) /* need to flush cache ? */
518 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
519 MASKABLE_EXCEPTION_HV(., 0xe82, h_doorbell)
520 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe82)
521
522 /* moved from 0xf00 */
b3e6b5df 523 STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
de56a948 524 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
b3e6b5df 525 STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
de56a948 526 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
b3e6b5df 527 STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
de56a948 528 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
529
530/*
531 * An interrupt came in while soft-disabled. We set paca->irq_happened,
532 * then, if it was a decrementer interrupt, we bump the dec to max and
533 * return, else we hard disable and return. This is called with
534 * r10 containing the value to OR to the paca field.
0ebc4cda 535 */
536#define MASKED_INTERRUPT(_H) \
537masked_##_H##interrupt: \
538 std r11,PACA_EXGEN+EX_R11(r13); \
539 lbz r11,PACAIRQHAPPENED(r13); \
540 or r11,r11,r10; \
541 stb r11,PACAIRQHAPPENED(r13); \
542 andi. r10,r10,PACA_IRQ_DEC; \
543 beq 1f; \
544 lis r10,0x7fff; \
545 ori r10,r10,0xffff; \
546 mtspr SPRN_DEC,r10; \
547 b 2f; \
5481: mfspr r10,SPRN_##_H##SRR1; \
549 rldicl r10,r10,48,1; /* clear MSR_EE */ \
550 rotldi r10,r10,16; \
551 mtspr SPRN_##_H##SRR1,r10; \
5522: mtcrf 0x80,r9; \
553 ld r9,PACA_EXGEN+EX_R9(r13); \
554 ld r10,PACA_EXGEN+EX_R10(r13); \
555 ld r11,PACA_EXGEN+EX_R11(r13); \
556 GET_SCRATCH0(r13); \
557 ##_H##rfid; \
0ebc4cda 558 b .
559
560 MASKED_INTERRUPT()
561 MASKED_INTERRUPT(H)
0ebc4cda 562
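/*
 * In C-like terms the bookkeeping done by MASKED_INTERRUPT is roughly
 * (an illustrative sketch, not the kernel's C code):
 *
 *	local_paca->irq_happened |= r10;        // remember it for later replay
 *	if (r10 & PACA_IRQ_DEC)
 *		mtspr(SPRN_DEC, 0x7fffffff);    // push the decrementer far out
 *	else
 *		srr1 &= ~MSR_EE;                // hard-disable on return
 *	// then restore the scratch registers and rfid/hrfid back
 */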
563/*
564 * Called from arch_local_irq_enable when an interrupt needs
565 * to be resent. r3 contains 0x500 or 0x900 to indicate which
566 * kind of interrupt. MSR:EE is already off. We generate a
567 * stackframe as if a real interrupt had happened.
568 *
569 * Note: While MSR:EE is off, we need to make sure that _MSR
570 * in the generated frame has EE set to 1 or the exception
571 * handler will not properly re-enable interrupts.
572 */
573_GLOBAL(__replay_interrupt)
574 /* We are going to jump to the exception common code which
575 * will retrieve various register values from the PACA which
576 * we don't give a damn about, so we don't bother storing them.
577 */
578 mfmsr r12
579 mflr r11
580 mfcr r9
581 ori r12,r12,MSR_EE
582 andi. r3,r3,0x0800
583 bne decrementer_common
584 b hardware_interrupt_common
a5d4f3ad 585
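/*
 * The andi. against 0x800 above just distinguishes the two vectors this
 * is called with: 0x900 has that bit set (decrementer), 0x500 does not
 * (external interrupt).  The caller's side looks roughly like this
 * illustrative sketch built on paca->irq_happened:
 *
 *	if (happened & PACA_IRQ_DEC)
 *		__replay_interrupt(0x900);      // lands in decrementer_common
 *	else if (happened & PACA_IRQ_EE)
 *		__replay_interrupt(0x500);      // lands in hardware_interrupt_common
 */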
586#ifdef CONFIG_PPC_PSERIES
587/*
588 * Vectors for the FWNMI option. Share common code.
589 */
590 .globl system_reset_fwnmi
591 .align 7
592system_reset_fwnmi:
593 HMT_MEDIUM
673b189a 594 SET_SCRATCH0(r13) /* save r13 */
595 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
596 NOTEST, 0x100)
597
598#endif /* CONFIG_PPC_PSERIES */
599
600#ifdef __DISABLED__
601/*
602 * This is used for when the SLB miss handler has to go virtual,
603 * which doesn't happen for now anymore but will once we re-implement
604 * dynamic VSIDs for shared page tables
605 */
606slb_miss_user_pseries:
607 std r10,PACA_EXGEN+EX_R10(r13)
608 std r11,PACA_EXGEN+EX_R11(r13)
609 std r12,PACA_EXGEN+EX_R12(r13)
673b189a 610 GET_SCRATCH0(r10)
611 ld r11,PACA_EXSLB+EX_R9(r13)
612 ld r12,PACA_EXSLB+EX_R3(r13)
613 std r10,PACA_EXGEN+EX_R13(r13)
614 std r11,PACA_EXGEN+EX_R9(r13)
615 std r12,PACA_EXGEN+EX_R3(r13)
616 clrrdi r12,r13,32
617 mfmsr r10
618 mfspr r11,SRR0 /* save SRR0 */
619 ori r12,r12,slb_miss_user_common@l /* virt addr of handler */
620 ori r10,r10,MSR_IR|MSR_DR|MSR_RI
621 mtspr SRR0,r12
622 mfspr r12,SRR1 /* and SRR1 */
623 mtspr SRR1,r10
624 rfid
625 b . /* prevent spec. execution */
626#endif /* __DISABLED__ */
627
628/*
629 * Code from here down to __end_handlers is invoked from the
630 * exception prologs above. Because the prologs assemble the
631 * addresses of these handlers using the LOAD_HANDLER macro,
632 * which uses an ori instruction, these handlers must be in
633 * the first 64k of the kernel image.
634 */
635
636/*** Common interrupt handlers ***/
637
638 STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
639
640 /*
641 * Machine check is different because we use a different
642 * save area: PACA_EXMC instead of PACA_EXGEN.
643 */
644 .align 7
645 .globl machine_check_common
646machine_check_common:
647 EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
648 FINISH_NAP
649 DISABLE_INTS
650 bl .save_nvgprs
651 addi r3,r1,STACK_FRAME_OVERHEAD
652 bl .machine_check_exception
653 b .ret_from_except
654
655 STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
656 STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
dabe859e 657 STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
658 STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
659 STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
660 STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
661 STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
662 STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
663 STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
664#ifdef CONFIG_PPC_DOORBELL
665 STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception)
666#else
667 STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .unknown_exception)
668#endif
7450f6f0 669 STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
0ebc4cda 670 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
b92a66a6 671 STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception)
672#ifdef CONFIG_ALTIVEC
673 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
674#else
675 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
676#endif
677#ifdef CONFIG_CBE_RAS
678 STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
679 STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
680 STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
681#endif /* CONFIG_CBE_RAS */
682
683 /*
684 * Relocation-on interrupts: A subset of the interrupts can be delivered
685 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
686 * it. Addresses are the same as the original interrupt addresses, but
687 * offset by 0xc000000000004000.
688 * It's impossible to receive interrupts below 0x300 via this mechanism.
689 * KVM: None of these traps are from the guest; anything that escalated
690 * to HV=1 from HV=0 is delivered via real mode handlers.
691 */
692
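/*
 * In other words, for these vectors the address the CPU enters is just
 * the usual offset plus the kernel linear-map base, e.g. (illustrative):
 *
 *	handler_ea = 0xc000000000004000ul + vec;    // vec = 0x300, 0x400, ...
 *
 * so 0x4300 below is the relocation-on flavour of the 0x300 data access
 * interrupt, 0x4380 of 0x380, and so on.
 */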
693 /*
694 * This uses the standard macro, since the original 0x300 vector
695 * only has extra guff for STAB-based processors -- which never
696 * come here.
697 */
698 STD_RELON_EXCEPTION_PSERIES(0x4300, 0x300, data_access)
699 . = 0x4380
700 .globl data_access_slb_relon_pSeries
701data_access_slb_relon_pSeries:
702 HMT_MEDIUM
703 SET_SCRATCH0(r13)
704 EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
705 std r3,PACA_EXSLB+EX_R3(r13)
706 mfspr r3,SPRN_DAR
707 mfspr r12,SPRN_SRR1
708#ifndef CONFIG_RELOCATABLE
709 b .slb_miss_realmode
710#else
711 /*
712 * We can't just use a direct branch to .slb_miss_realmode
713 * because the distance from here to there depends on where
714 * the kernel ends up being put.
715 */
716 mfctr r11
717 ld r10,PACAKBASE(r13)
718 LOAD_HANDLER(r10, .slb_miss_realmode)
719 mtctr r10
720 bctr
721#endif
722
723 STD_RELON_EXCEPTION_PSERIES(0x4400, 0x400, instruction_access)
724 . = 0x4480
725 .globl instruction_access_slb_relon_pSeries
726instruction_access_slb_relon_pSeries:
727 HMT_MEDIUM
728 SET_SCRATCH0(r13)
729 EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
730 std r3,PACA_EXSLB+EX_R3(r13)
731 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
732 mfspr r12,SPRN_SRR1
733#ifndef CONFIG_RELOCATABLE
734 b .slb_miss_realmode
735#else
736 mfctr r11
737 ld r10,PACAKBASE(r13)
738 LOAD_HANDLER(r10, .slb_miss_realmode)
739 mtctr r10
740 bctr
741#endif
742
743 . = 0x4500
744 .globl hardware_interrupt_relon_pSeries;
745 .globl hardware_interrupt_relon_hv;
746hardware_interrupt_relon_pSeries:
747hardware_interrupt_relon_hv:
748 BEGIN_FTR_SECTION
749 _MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
750 FTR_SECTION_ELSE
751 _MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
752 ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_206)
753 STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
754 STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
755 STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
756 MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer)
757 STD_RELON_EXCEPTION_HV(0x4980, 0x982, hdecrementer)
758 STD_RELON_EXCEPTION_PSERIES(0x4b00, 0xb00, trap_0b)
759
760 . = 0x4c00
761 .globl system_call_relon_pSeries
762system_call_relon_pSeries:
763 HMT_MEDIUM
764 SYSCALL_PSERIES_1
765 SYSCALL_PSERIES_2_DIRECT
766 SYSCALL_PSERIES_3
767
768 STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)
769
770 . = 0x4e00
771 b h_data_storage_relon_hv
772
773 . = 0x4e20
774 b h_instr_storage_relon_hv
775
776 . = 0x4e40
777 b emulation_assist_relon_hv
778
779 . = 0x4e50
780 b hmi_exception_relon_hv
781
782 . = 0x4e60
783 b hmi_exception_relon_hv
784
785 . = 0x4e80
786 b h_doorbell_relon_hv
787
788performance_monitor_relon_pSeries_1:
789 . = 0x4f00
790 b performance_monitor_relon_pSeries
791
792altivec_unavailable_relon_pSeries_1:
793 . = 0x4f20
794 b altivec_unavailable_relon_pSeries
795
796vsx_unavailable_relon_pSeries_1:
797 . = 0x4f40
798 b vsx_unavailable_relon_pSeries
799
800#ifdef CONFIG_CBE_RAS
801 STD_RELON_EXCEPTION_HV(0x5200, 0x1202, cbe_system_error)
802#endif /* CONFIG_CBE_RAS */
803 STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
804#ifdef CONFIG_PPC_DENORMALISATION
805 . = 0x5500
806 b denorm_exception_hv
807#endif
808#ifdef CONFIG_CBE_RAS
809 STD_RELON_EXCEPTION_HV(0x5600, 0x1602, cbe_maintenance)
810#else
811#ifdef CONFIG_HVC_SCOM
812 STD_RELON_EXCEPTION_HV(0x5600, 0x1600, maintence_interrupt)
813 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1600)
814#endif /* CONFIG_HVC_SCOM */
815#endif /* CONFIG_CBE_RAS */
816 STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)
817#ifdef CONFIG_CBE_RAS
818 STD_RELON_EXCEPTION_HV(0x5800, 0x1802, cbe_thermal)
819#endif /* CONFIG_CBE_RAS */
820
821 /* Other future vectors */
822 .align 7
823 .globl __end_interrupts
824__end_interrupts:
825
0ebc4cda 826 .align 7
827system_call_entry_direct:
828#if defined(CONFIG_RELOCATABLE)
829 /* The first level prologue may have used LR to get here, saving
830 * orig in r10. To save hacking/ifdeffing common code, restore here.
831 */
832 mtlr r10
833#endif
834system_call_entry:
835 b system_call_common
836
837ppc64_runlatch_on_trampoline:
838 b .__ppc64_runlatch_on
839
840/*
841 * Here we have detected that the kernel stack pointer is bad.
842 * R9 contains the saved CR, r13 points to the paca,
843 * r10 contains the (bad) kernel stack pointer,
844 * r11 and r12 contain the saved SRR0 and SRR1.
845 * We switch to using an emergency stack, save the registers there,
846 * and call kernel_bad_stack(), which panics.
847 */
848bad_stack:
849 ld r1,PACAEMERGSP(r13)
850 subi r1,r1,64+INT_FRAME_SIZE
851 std r9,_CCR(r1)
852 std r10,GPR1(r1)
853 std r11,_NIP(r1)
854 std r12,_MSR(r1)
855 mfspr r11,SPRN_DAR
856 mfspr r12,SPRN_DSISR
857 std r11,_DAR(r1)
858 std r12,_DSISR(r1)
859 mflr r10
860 mfctr r11
861 mfxer r12
862 std r10,_LINK(r1)
863 std r11,_CTR(r1)
864 std r12,_XER(r1)
865 SAVE_GPR(0,r1)
866 SAVE_GPR(2,r1)
867 ld r10,EX_R3(r3)
868 std r10,GPR3(r1)
869 SAVE_GPR(4,r1)
870 SAVE_4GPRS(5,r1)
871 ld r9,EX_R9(r3)
872 ld r10,EX_R10(r3)
873 SAVE_2GPRS(9,r1)
874 ld r9,EX_R11(r3)
875 ld r10,EX_R12(r3)
876 ld r11,EX_R13(r3)
877 std r9,GPR11(r1)
878 std r10,GPR12(r1)
879 std r11,GPR13(r1)
880BEGIN_FTR_SECTION
881 ld r10,EX_CFAR(r3)
882 std r10,ORIG_GPR3(r1)
883END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1977b502 884 SAVE_8GPRS(14,r1)
885 SAVE_10GPRS(22,r1)
886 lhz r12,PACA_TRAP_SAVE(r13)
887 std r12,_TRAP(r1)
888 addi r11,r1,INT_FRAME_SIZE
889 std r11,0(r1)
890 li r12,0
891 std r12,0(r11)
892 ld r2,PACATOC(r13)
893 ld r11,exception_marker@toc(r2)
894 std r12,RESULT(r1)
895 std r11,STACK_FRAME_OVERHEAD-16(r1)
8961: addi r3,r1,STACK_FRAME_OVERHEAD
897 bl .kernel_bad_stack
898 b 1b
899
900/*
901 * Here r13 points to the paca, r9 contains the saved CR,
902 * SRR0 and SRR1 are saved in r11 and r12,
903 * r9 - r13 are saved in paca->exgen.
904 */
905 .align 7
906 .globl data_access_common
907data_access_common:
908 mfspr r10,SPRN_DAR
909 std r10,PACA_EXGEN+EX_DAR(r13)
910 mfspr r10,SPRN_DSISR
911 stw r10,PACA_EXGEN+EX_DSISR(r13)
912 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
913 DISABLE_INTS
914 ld r12,_MSR(r1)
915 ld r3,PACA_EXGEN+EX_DAR(r13)
916 lwz r4,PACA_EXGEN+EX_DSISR(r13)
917 li r5,0x300
278a6cdc 918 b .do_hash_page /* Try to handle as hpte fault */
0ebc4cda 919
b3e6b5df 920 .align 7
278a6cdc 921 .globl h_data_storage_common
b3e6b5df 922h_data_storage_common:
923 mfspr r10,SPRN_HDAR
924 std r10,PACA_EXGEN+EX_DAR(r13)
925 mfspr r10,SPRN_HDSISR
926 stw r10,PACA_EXGEN+EX_DSISR(r13)
927 EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
928 bl .save_nvgprs
a546498f 929 DISABLE_INTS
930 addi r3,r1,STACK_FRAME_OVERHEAD
931 bl .unknown_exception
932 b .ret_from_except
b3e6b5df 933
934 .align 7
935 .globl instruction_access_common
936instruction_access_common:
937 EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
938 DISABLE_INTS
939 ld r12,_MSR(r1)
940 ld r3,_NIP(r1)
941 andis. r4,r12,0x5820
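	/*
	 * For instruction faults the "DSISR"-style information lives in
	 * SRR1, so the andis. above gathers the ISI status bits into r4,
	 * where do_hash_page expects the DSISR-equivalent to be.
	 */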
942 li r5,0x400
943 b .do_hash_page /* Try to handle as hpte fault */
944
278a6cdc 945 STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)
b3e6b5df 946
947/*
948 * Here is the common user-SLB-miss handler that is used when going to
949 * virtual mode for SLB misses; it is currently not used.
950 */
951#ifdef __DISABLED__
952 .align 7
953 .globl slb_miss_user_common
954slb_miss_user_common:
955 mflr r10
956 std r3,PACA_EXGEN+EX_DAR(r13)
957 stw r9,PACA_EXGEN+EX_CCR(r13)
958 std r10,PACA_EXGEN+EX_LR(r13)
959 std r11,PACA_EXGEN+EX_SRR0(r13)
960 bl .slb_allocate_user
961
962 ld r10,PACA_EXGEN+EX_LR(r13)
963 ld r3,PACA_EXGEN+EX_R3(r13)
964 lwz r9,PACA_EXGEN+EX_CCR(r13)
965 ld r11,PACA_EXGEN+EX_SRR0(r13)
966 mtlr r10
967 beq- slb_miss_fault
968
969 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
970 beq- unrecov_user_slb
971 mfmsr r10
972
973.machine push
974.machine "power4"
975 mtcrf 0x80,r9
976.machine pop
977
978 clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */
979 mtmsrd r10,1
980
981 mtspr SRR0,r11
982 mtspr SRR1,r12
983
984 ld r9,PACA_EXGEN+EX_R9(r13)
985 ld r10,PACA_EXGEN+EX_R10(r13)
986 ld r11,PACA_EXGEN+EX_R11(r13)
987 ld r12,PACA_EXGEN+EX_R12(r13)
988 ld r13,PACA_EXGEN+EX_R13(r13)
989 rfid
990 b .
991
992slb_miss_fault:
993 EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
994 ld r4,PACA_EXGEN+EX_DAR(r13)
995 li r5,0
996 std r4,_DAR(r1)
997 std r5,_DSISR(r1)
998 b handle_page_fault
999
1000unrecov_user_slb:
1001 EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
1002 DISABLE_INTS
1003 bl .save_nvgprs
10041: addi r3,r1,STACK_FRAME_OVERHEAD
1005 bl .unrecoverable_exception
1006 b 1b
1007
1008#endif /* __DISABLED__ */
1009
1010
1011/*
1012 * r13 points to the PACA, r9 contains the saved CR,
1013 * r12 contains the saved SRR1, SRR0 is still ready for return
1014 * r3 has the faulting address
1015 * r9 - r13 are saved in paca->exslb.
1016 * r3 is saved in paca->slb_r3
1017 * We assume we aren't going to take any exceptions during this procedure.
1018 */
1019_GLOBAL(slb_miss_realmode)
1020 mflr r10
1021#ifdef CONFIG_RELOCATABLE
1022 mtctr r11
1023#endif
1024
1025 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1026 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
1027
1028 bl .slb_allocate_realmode
1029
1030 /* All done -- return from exception. */
1031
1032 ld r10,PACA_EXSLB+EX_LR(r13)
1033 ld r3,PACA_EXSLB+EX_R3(r13)
1034 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1035
1036 mtlr r10
1037
1038 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
1039 beq- 2f
1040
1041.machine push
1042.machine "power4"
1043 mtcrf 0x80,r9
1044 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
1045.machine pop
1046
1047 ld r9,PACA_EXSLB+EX_R9(r13)
1048 ld r10,PACA_EXSLB+EX_R10(r13)
1049 ld r11,PACA_EXSLB+EX_R11(r13)
1050 ld r12,PACA_EXSLB+EX_R12(r13)
1051 ld r13,PACA_EXSLB+EX_R13(r13)
1052 rfid
1053 b . /* prevent speculative execution */
1054
4f8cf36f 10552: mfspr r11,SPRN_SRR0
1056 ld r10,PACAKBASE(r13)
1057 LOAD_HANDLER(r10,unrecov_slb)
1058 mtspr SPRN_SRR0,r10
1059 ld r10,PACAKMSR(r13)
1060 mtspr SPRN_SRR1,r10
1061 rfid
1062 b .
1063
1064unrecov_slb:
1065 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1066 DISABLE_INTS
1067 bl .save_nvgprs
10681: addi r3,r1,STACK_FRAME_OVERHEAD
1069 bl .unrecoverable_exception
1070 b 1b
1071
1072
1073#ifdef CONFIG_PPC_970_NAP
1074power4_fixup_nap:
1075 andc r9,r9,r10
1076 std r9,TI_LOCAL_FLAGS(r11)
1077 ld r10,_LINK(r1) /* make idle task do the */
1078 std r10,_NIP(r1) /* equivalent of a blr */
1079 blr
1080#endif
1081
1082 .align 7
1083 .globl alignment_common
1084alignment_common:
1085 mfspr r10,SPRN_DAR
1086 std r10,PACA_EXGEN+EX_DAR(r13)
1087 mfspr r10,SPRN_DSISR
1088 stw r10,PACA_EXGEN+EX_DSISR(r13)
1089 EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
1090 ld r3,PACA_EXGEN+EX_DAR(r13)
1091 lwz r4,PACA_EXGEN+EX_DSISR(r13)
1092 std r3,_DAR(r1)
1093 std r4,_DSISR(r1)
1094 bl .save_nvgprs
a3512b2d 1095 DISABLE_INTS
0ebc4cda 1096 addi r3,r1,STACK_FRAME_OVERHEAD
1097 bl .alignment_exception
1098 b .ret_from_except
1099
1100 .align 7
1101 .globl program_check_common
1102program_check_common:
1103 EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
1104 bl .save_nvgprs
54321242 1105 DISABLE_INTS
922b9f86 1106 addi r3,r1,STACK_FRAME_OVERHEAD
1107 bl .program_check_exception
1108 b .ret_from_except
1109
1110 .align 7
1111 .globl fp_unavailable_common
1112fp_unavailable_common:
1113 EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
1114 bne 1f /* if from user, just load it up */
1115 bl .save_nvgprs
9f2f79e3 1116 DISABLE_INTS
0ebc4cda 1117 addi r3,r1,STACK_FRAME_OVERHEAD
1118 bl .kernel_fp_unavailable_exception
1119 BUG_OPCODE
11201: bl .load_up_fpu
1121 b fast_exception_return
1122
1123 .align 7
1124 .globl altivec_unavailable_common
1125altivec_unavailable_common:
1126 EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
1127#ifdef CONFIG_ALTIVEC
1128BEGIN_FTR_SECTION
1129 beq 1f
1130 bl .load_up_altivec
1131 b fast_exception_return
11321:
1133END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1134#endif
1135 bl .save_nvgprs
9f2f79e3 1136 DISABLE_INTS
0ebc4cda 1137 addi r3,r1,STACK_FRAME_OVERHEAD
1138 bl .altivec_unavailable_exception
1139 b .ret_from_except
1140
1141 .align 7
1142 .globl vsx_unavailable_common
1143vsx_unavailable_common:
1144 EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
1145#ifdef CONFIG_VSX
1146BEGIN_FTR_SECTION
1147 beq 1f
1148 b .load_up_vsx
11491:
1150END_FTR_SECTION_IFSET(CPU_FTR_VSX)
1151#endif
1152 bl .save_nvgprs
9f2f79e3 1153 DISABLE_INTS
0ebc4cda 1154 addi r3,r1,STACK_FRAME_OVERHEAD
1155 bl .vsx_unavailable_exception
1156 b .ret_from_except
1157
1158 .align 7
1159 .globl __end_handlers
1160__end_handlers:
1161
1162/*
1163 * Hash table stuff
1164 */
1165 .align 7
1166_STATIC(do_hash_page)
1167 std r3,_DAR(r1)
1168 std r4,_DSISR(r1)
1169
9c7cc234 1170 andis. r0,r4,0xa410 /* weird error? */
0ebc4cda 1171 bne- handle_page_fault /* if not, try to insert a HPTE */
1172 andis. r0,r4,DSISR_DABRMATCH@h
1173 bne- handle_dabr_fault
1174
1175BEGIN_FTR_SECTION
1176 andis. r0,r4,0x0020 /* Is it a segment table fault? */
1177 bne- do_ste_alloc /* If so handle it */
44ae3ab3 1178END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
0ebc4cda 1179
9778b696 1180 CURRENT_THREAD_INFO(r11, r1)
1181 lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
1182 andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
1183 bne 77f /* then don't call hash_page now */
1184 /*
1185 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
1186 * accessing a userspace segment (even from the kernel). We assume
1187 * kernel addresses always have the high bit set.
1188 */
1189 rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
1190 rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
1191 orc r0,r12,r0 /* MSR_PR | ~high_bit */
1192 rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
1193 ori r4,r4,1 /* add _PAGE_PRESENT */
1194 rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */
1195
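	/*
	 * The rlwinm/rlwimi/ori sequence above builds the access mask that
	 * hash_page wants; in rough C (names follow the comments above and
	 * are otherwise illustrative):
	 *
	 *	access  = _PAGE_PRESENT;
	 *	if (dsisr & DSISR_STORE)                        access |= _PAGE_RW;
	 *	if ((msr & MSR_PR) || !(ea & (1ul << 63)))      access |= _PAGE_USER;
	 *	if (trap == 0x400)                              access |= _PAGE_EXEC;
	 */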
1196 /*
1197 * r3 contains the faulting address
1198 * r4 contains the required access permissions
1199 * r5 contains the trap number
1200 *
7230c564 1201 * at return r3 = 0 for success, 1 for page fault, negative for error
1202 */
1203 bl .hash_page /* build HPTE if possible */
1204 cmpdi r3,0 /* see if hash_page succeeded */
1205
7230c564 1206 /* Success */
0ebc4cda 1207 beq fast_exc_return_irq /* Return from exception on success */
0ebc4cda 1208
1209 /* Error */
1210 blt- 13f
9c7cc234 1211
1212/* Here we have a page fault that hash_page can't handle. */
1213handle_page_fault:
121411: ld r4,_DAR(r1)
1215 ld r5,_DSISR(r1)
1216 addi r3,r1,STACK_FRAME_OVERHEAD
1217 bl .do_page_fault
1218 cmpdi r3,0
a546498f 1219 beq+ 12f
1220 bl .save_nvgprs
1221 mr r5,r3
1222 addi r3,r1,STACK_FRAME_OVERHEAD
1223 lwz r4,_DAR(r1)
1224 bl .bad_page_fault
1225 b .ret_from_except
1226
1227/* We have a data breakpoint exception - handle it */
1228handle_dabr_fault:
1229 bl .save_nvgprs
1230 ld r4,_DAR(r1)
1231 ld r5,_DSISR(r1)
1232 addi r3,r1,STACK_FRAME_OVERHEAD
1233 bl .do_dabr
123412: b .ret_from_except_lite
1235
1236
1237/* We have a page fault that hash_page could handle but HV refused
1238 * the PTE insertion
1239 */
a546498f 124013: bl .save_nvgprs
1241 mr r5,r3
1242 addi r3,r1,STACK_FRAME_OVERHEAD
1243 ld r4,_DAR(r1)
1244 bl .low_hash_fault
1245 b .ret_from_except
1246
1247/*
1248 * We come here as a result of a DSI at a point where we don't want
1249 * to call hash_page, such as when we are accessing memory (possibly
1250 * user memory) inside a PMU interrupt that occurred while interrupts
1251 * were soft-disabled. We want to invoke the exception handler for
1252 * the access, or panic if there isn't a handler.
1253 */
125477: bl .save_nvgprs
1255 mr r4,r3
1256 addi r3,r1,STACK_FRAME_OVERHEAD
1257 li r5,SIGSEGV
1258 bl .bad_page_fault
1259 b .ret_from_except
1260
1261 /* here we have a segment miss */
1262do_ste_alloc:
1263 bl .ste_allocate /* try to insert stab entry */
1264 cmpdi r3,0
1265 bne- handle_page_fault
1266 b fast_exception_return
1267
1268/*
1269 * r13 points to the PACA, r9 contains the saved CR,
1270 * r11 and r12 contain the saved SRR0 and SRR1.
1271 * r9 - r13 are saved in paca->exslb.
1272 * We assume we aren't going to take any exceptions during this procedure.
1273 * We assume (DAR >> 60) == 0xc.
1274 */
1275 .align 7
1276_GLOBAL(do_stab_bolted)
1277 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1278 std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
1279
1280 /* Hash to the primary group */
1281 ld r10,PACASTABVIRT(r13)
1282 mfspr r11,SPRN_DAR
1283 srdi r11,r11,28
1284 rldimi r10,r11,7,52 /* r10 = first ste of the group */
1285
1286 /* Calculate VSID */
1287 /* This is a kernel address, so protovsid = ESID | 1 << 37 */
1288 li r9,0x1
1289 rldimi r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
1290 ASM_VSID_SCRAMBLE(r11, r9, 256M)
1291 rldic r9,r11,12,16 /* r9 = vsid << 12 */
1292
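	/*
	 * ASM_VSID_SCRAMBLE is the usual protovsid -> VSID hash; a rough C
	 * sketch (multiplier/modulus names are illustrative):
	 *
	 *	proto = (dar >> 28) | (1ul << (CONTEXT_BITS + USER_ESID_BITS));
	 *	vsid  = (proto * VSID_MULTIPLIER_256M) % VSID_MODULUS_256M;
	 *
	 * and the rldic that follows just positions the VSID within the
	 * STE's VSID doubleword (vsid << 12).
	 */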
1293 /* Search the primary group for a free entry */
12941: ld r11,0(r10) /* Test valid bit of the current ste */
1295 andi. r11,r11,0x80
1296 beq 2f
1297 addi r10,r10,16
1298 andi. r11,r10,0x70
1299 bne 1b
1300
1301 /* Stick to searching only the primary group for now. */
1302 /* At least for now, we use a very simple random castout scheme */
1303 /* Use the TB as a random number ; OR in 1 to avoid entry 0 */
1304 mftb r11
1305 rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */
1306 ori r11,r11,0x10
1307
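	/*
	 * i.e. pick the victim pseudo-randomly from the timebase, roughly
	 * victim_off = ((mftb() << 4) & 0x70) | 0x10, which is an offset of
	 * 16 to 112 bytes into the 128-byte group (STEs are 16 bytes) and
	 * never entry 0.
	 */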
1308 /* r10 currently points to an ste one past the group of interest */
1309 /* make it point to the randomly selected entry */
1310 subi r10,r10,128
1311 or r10,r10,r11 /* r10 is the entry to invalidate */
1312
1313 isync /* mark the entry invalid */
1314 ld r11,0(r10)
1315 rldicl r11,r11,56,1 /* clear the valid bit */
1316 rotldi r11,r11,8
1317 std r11,0(r10)
1318 sync
1319
1320 clrrdi r11,r11,28 /* Get the esid part of the ste */
1321 slbie r11
1322
13232: std r9,8(r10) /* Store the vsid part of the ste */
1324 eieio
1325
1326 mfspr r11,SPRN_DAR /* Get the new esid */
1327 clrrdi r11,r11,28 /* Permits a full 32b of ESID */
1328 ori r11,r11,0x90 /* Turn on valid and kp */
1329 std r11,0(r10) /* Put new entry back into the stab */
1330
1331 sync
1332
1333 /* All done -- return from exception. */
1334 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1335 ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
1336
1337 andi. r10,r12,MSR_RI
1338 beq- unrecov_slb
1339
1340 mtcrf 0x80,r9 /* restore CR */
1341
1342 mfmsr r10
1343 clrrdi r10,r10,2
1344 mtmsrd r10,1
1345
1346 mtspr SPRN_SRR0,r11
1347 mtspr SPRN_SRR1,r12
1348 ld r9,PACA_EXSLB+EX_R9(r13)
1349 ld r10,PACA_EXSLB+EX_R10(r13)
1350 ld r11,PACA_EXSLB+EX_R11(r13)
1351 ld r12,PACA_EXSLB+EX_R12(r13)
1352 ld r13,PACA_EXSLB+EX_R13(r13)
1353 rfid
1354 b . /* prevent speculative execution */
1355
1356
1357 /* Equivalents to the above handlers for relocation-on interrupt vectors */
1358 STD_RELON_EXCEPTION_HV(., 0xe00, h_data_storage)
1359 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe00)
1360 STD_RELON_EXCEPTION_HV(., 0xe20, h_instr_storage)
1361 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe20)
1362 STD_RELON_EXCEPTION_HV(., 0xe40, emulation_assist)
1363 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe40)
1364 STD_RELON_EXCEPTION_HV(., 0xe60, hmi_exception)
1365 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe60)
1366 MASKABLE_RELON_EXCEPTION_HV(., 0xe80, h_doorbell)
1367 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe80)
1368
1369 STD_RELON_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
1370 STD_RELON_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
1371 STD_RELON_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
1372
ed79ba9e 1373#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
1374/*
1375 * Data area reserved for FWNMI option.
1376 * This address (0x7000) is fixed by the RPA.
1377 */
1378 . = 0x7000
1379 .globl fwnmi_data_area
1380fwnmi_data_area:
0ebc4cda 1381
1382 /* pseries and powernv need to keep the whole page from
1383 * 0x7000 to 0x8000 free for use by the firmware
1384 */
278a6cdc 1385 . = 0x8000
ed79ba9e 1386#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
84493804 1387
1388/* Space for CPU0's segment table */
1389 .balign 4096
1390 .globl initial_stab
1391initial_stab:
1392 .space 4096
4f8cf36f 1393
1394#ifdef CONFIG_PPC_POWERNV
1395_GLOBAL(opal_mc_secondary_handler)
1396 HMT_MEDIUM
1397 SET_SCRATCH0(r13)
1398 GET_PACA(r13)
1399 clrldi r3,r3,2
1400 tovirt(r3,r3)
1401 std r3,PACA_OPAL_MC_EVT(r13)
1402 ld r13,OPAL_MC_SRR0(r3)
1403 mtspr SPRN_SRR0,r13
1404 ld r13,OPAL_MC_SRR1(r3)
1405 mtspr SPRN_SRR1,r13
1406 ld r3,OPAL_MC_GPR3(r3)
1407 GET_SCRATCH0(r13)
1408 b machine_check_pSeries
1409#endif /* CONFIG_PPC_POWERNV */