1/*
2 * This file contains the 64-bit "server" PowerPC variant
3 * of the low level exception handling including exception
4 * vectors, exception return, part of the slb and stab
5 * handling and other fixed offset specific things.
6 *
7 * This file is meant to be #included from head_64.S due to
25985edc 8 * position dependent assembly.
9 *
10 * Most of this originates from head_64.S and thus has the same
11 * copyright history.
12 *
13 */
14
7230c564 15#include <asm/hw_irq.h>
8aa34ab8 16#include <asm/exception-64s.h>
46f52210 17#include <asm/ptrace.h>
8aa34ab8 18
19/*
20 * We layout physical memory as follows:
21 * 0x0000 - 0x00ff : Secondary processor spin code
22 * 0x0100 - 0x17ff : pSeries Interrupt prologs
 23 * 0x1800 - 0x4000 : interrupt support, common interrupt prologs
24 * 0x4000 - 0x5fff : pSeries interrupts with IR=1,DR=1
25 * 0x6000 - 0x6fff : more interrupt support including for IR=1,DR=1
0ebc4cda 26 * 0x7000 - 0x7fff : FWNMI data area
27 * 0x8000 - 0x8fff : Initial (CPU0) segment table
28 * 0x9000 - : Early init and support code
0ebc4cda 29 */
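/*
 * The 0x4000 - 0x5fff range above holds the relocation-on (IR=1, DR=1)
 * variants of the vectors below; see the comment above the relocation-on
 * handlers further down for how they are delivered.
 */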
30 /* Syscall routine is used twice, in reloc-off and reloc-on paths */
31#define SYSCALL_PSERIES_1 \
32BEGIN_FTR_SECTION \
33 cmpdi r0,0x1ebe ; \
34 beq- 1f ; \
35END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
36 mr r9,r13 ; \
37 GET_PACA(r13) ; \
38 mfspr r11,SPRN_SRR0 ; \
390:
40
41#define SYSCALL_PSERIES_2_RFID \
42 mfspr r12,SPRN_SRR1 ; \
43 ld r10,PACAKBASE(r13) ; \
44 LOAD_HANDLER(r10, system_call_entry) ; \
45 mtspr SPRN_SRR0,r10 ; \
46 ld r10,PACAKMSR(r13) ; \
47 mtspr SPRN_SRR1,r10 ; \
48 rfid ; \
49 b . ; /* prevent speculative execution */
50
51#define SYSCALL_PSERIES_3 \
52 /* Fast LE/BE switch system call */ \
531: mfspr r12,SPRN_SRR1 ; \
54 xori r12,r12,MSR_LE ; \
55 mtspr SPRN_SRR1,r12 ; \
56 rfid ; /* return to userspace */ \
57 b . ; \
582: mfspr r12,SPRN_SRR1 ; \
59 andi. r12,r12,MSR_PR ; \
60 bne 0b ; \
61 mtspr SPRN_SRR0,r3 ; \
62 mtspr SPRN_SRR1,r4 ; \
63 mtspr SPRN_SDR1,r5 ; \
64 rfid ; \
65 b . ; /* prevent speculative execution */
66
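/*
 * The 0x1ebe value tested in SYSCALL_PSERIES_1 selects the fast LE/BE
 * switch "system call" available when CPU_FTR_REAL_LE is set: it branches
 * to label 1 in SYSCALL_PSERIES_3, which just flips MSR_LE in SRR1 and
 * returns with rfid instead of entering the normal system call path.
 */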
67#if defined(CONFIG_RELOCATABLE)
68 /*
69 * We can't branch directly; in the direct case we use LR
70 * and system_call_entry restores LR. (We thus need to move
71 * LR to r10 in the RFID case too.)
72 */
73#define SYSCALL_PSERIES_2_DIRECT \
74 mflr r10 ; \
75 ld r12,PACAKBASE(r13) ; \
76 LOAD_HANDLER(r12, system_call_entry_direct) ; \
77 mtlr r12 ; \
78 mfspr r12,SPRN_SRR1 ; \
79 /* Re-use of r13... No spare regs to do this */ \
80 li r13,MSR_RI ; \
81 mtmsrd r13,1 ; \
82 GET_PACA(r13) ; /* get r13 back */ \
83 blr ;
84#else
85 /* We can branch directly */
86#define SYSCALL_PSERIES_2_DIRECT \
87 mfspr r12,SPRN_SRR1 ; \
88 li r10,MSR_RI ; \
89 mtmsrd r10,1 ; /* Set RI (EE=0) */ \
90 b system_call_entry_direct ;
91#endif
0ebc4cda 92
93/*
94 * This is the start of the interrupt handlers for pSeries
95 * This code runs with relocation off.
96 * Code from here to __end_interrupts gets copied down to real
97 * address 0x100 when we are running a relocatable kernel.
98 * Therefore any relative branches in this section must only
99 * branch to labels in this section.
100 */
101 . = 0x100
102 .globl __start_interrupts
103__start_interrupts:
104
105 .globl system_reset_pSeries;
106system_reset_pSeries:
107 HMT_MEDIUM;
108 SET_SCRATCH0(r13)
109#ifdef CONFIG_PPC_P7_NAP
110BEGIN_FTR_SECTION
111 /* Running native on arch 2.06 or later, check if we are
112 * waking up from nap. We only handle no state loss and
113 * supervisor state loss. We do -not- handle hypervisor
114 * state loss at this time.
115 */
116 mfspr r13,SPRN_SRR1
117 rlwinm. r13,r13,47-31,30,31
118 beq 9f
119
120 /* waking up from powersave (nap) state */
121 cmpwi cr1,r13,2
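/*
 * r13 now holds the SRR1 wake-state field extracted above: 1 means no
 * state was lost (power7_wakeup_noloss), 2 means state was lost
 * (power7_wakeup_loss), and 3 means hypervisor state was lost, which is
 * treated as fatal below.
 */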
122 /* Total loss of HV state is fatal, we could try to use the
123 * PIR to locate a PACA, then use an emergency stack etc...
124 * but for now, let's just stay stuck here
125 */
126 bgt cr1,.
127 GET_PACA(r13)
128
129#ifdef CONFIG_KVM_BOOK3S_64_HV
130 li r0,KVM_HWTHREAD_IN_KERNEL
131 stb r0,HSTATE_HWTHREAD_STATE(r13)
132 /* Order setting hwthread_state vs. testing hwthread_req */
133 sync
134 lbz r0,HSTATE_HWTHREAD_REQ(r13)
135 cmpwi r0,0
136 beq 1f
137 b kvm_start_guest
1381:
139#endif
140
141 beq cr1,2f
142 b .power7_wakeup_noloss
1432: b .power7_wakeup_loss
1449:
969391c5 145END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
948cf67c 146#endif /* CONFIG_PPC_P7_NAP */
147 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
148 NOTEST, 0x100)
149
150 . = 0x200
151machine_check_pSeries_1:
152 /* This is moved out of line as it can be patched by FW, but
153 * some code path might still want to branch into the original
154 * vector
155 */
156 b machine_check_pSeries
157
158 . = 0x300
159 .globl data_access_pSeries
160data_access_pSeries:
161 HMT_MEDIUM
673b189a 162 SET_SCRATCH0(r13)
0ebc4cda 163BEGIN_FTR_SECTION
164 b data_access_check_stab
165data_access_not_stab:
166END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
b01c8b54 167 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
697d3899 168 KVMTEST, 0x300)
169
170 . = 0x380
171 .globl data_access_slb_pSeries
172data_access_slb_pSeries:
173 HMT_MEDIUM
673b189a 174 SET_SCRATCH0(r13)
697d3899 175 EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
176 std r3,PACA_EXSLB+EX_R3(r13)
177 mfspr r3,SPRN_DAR
178#ifdef __DISABLED__
179 /* Keep that around for when we re-implement dynamic VSIDs */
180 cmpdi r3,0
181 bge slb_miss_user_pseries
182#endif /* __DISABLED__ */
b01c8b54 183 mfspr r12,SPRN_SRR1
184#ifndef CONFIG_RELOCATABLE
185 b .slb_miss_realmode
186#else
187 /*
188 * We can't just use a direct branch to .slb_miss_realmode
189 * because the distance from here to there depends on where
190 * the kernel ends up being put.
191 */
192 mfctr r11
193 ld r10,PACAKBASE(r13)
194 LOAD_HANDLER(r10, .slb_miss_realmode)
195 mtctr r10
196 bctr
197#endif
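/*
 * In the relocatable case above we go through CTR: the old CTR value is
 * stashed in r11 (and restored by .slb_miss_realmode), while
 * LOAD_HANDLER() forms the handler's address from the kernel base kept
 * in PACAKBASE, so the branch works wherever the relocatable kernel was
 * loaded.
 */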
198
b3e6b5df 199 STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)
200
201 . = 0x480
202 .globl instruction_access_slb_pSeries
203instruction_access_slb_pSeries:
204 HMT_MEDIUM
673b189a 205 SET_SCRATCH0(r13)
de56a948 206 EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
207 std r3,PACA_EXSLB+EX_R3(r13)
208 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
209#ifdef __DISABLED__
210 /* Keep that around for when we re-implement dynamic VSIDs */
211 cmpdi r3,0
212 bge slb_miss_user_pseries
213#endif /* __DISABLED__ */
b01c8b54 214 mfspr r12,SPRN_SRR1
215#ifndef CONFIG_RELOCATABLE
216 b .slb_miss_realmode
217#else
218 mfctr r11
219 ld r10,PACAKBASE(r13)
220 LOAD_HANDLER(r10, .slb_miss_realmode)
221 mtctr r10
222 bctr
223#endif
224
225 /* We open code these as we can't have a ". = x" (even with
 226 * x = ".") within a feature section
227 */
a5d4f3ad 228 . = 0x500;
229 .globl hardware_interrupt_pSeries;
230 .globl hardware_interrupt_hv;
a5d4f3ad 231hardware_interrupt_pSeries:
b3e6b5df 232hardware_interrupt_hv:
a5d4f3ad 233 BEGIN_FTR_SECTION
234 _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
235 EXC_HV, SOFTEN_TEST_HV)
236 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
237 FTR_SECTION_ELSE
238 _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
9e368f29 239 EXC_STD, SOFTEN_TEST_HV_201)
de56a948 240 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
969391c5 241 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
a5d4f3ad 242
b3e6b5df 243 STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
de56a948 244 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
b01c8b54 245
b3e6b5df 246 STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
de56a948 247 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
b01c8b54 248
b3e6b5df 249 STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
de56a948 250 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
a5d4f3ad 251
b3e6b5df 252 MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
dabe859e 253 STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
a5d4f3ad 254
b3e6b5df 255 STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
de56a948 256 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
b01c8b54 257
b3e6b5df 258 STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
de56a948 259 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
260
261 . = 0xc00
262 .globl system_call_pSeries
263system_call_pSeries:
264 HMT_MEDIUM
265#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
266 SET_SCRATCH0(r13)
267 GET_PACA(r13)
268 std r9,PACA_EXGEN+EX_R9(r13)
269 std r10,PACA_EXGEN+EX_R10(r13)
270 mfcr r9
271 KVMTEST(0xc00)
272 GET_SCRATCH0(r13)
273#endif
274 SYSCALL_PSERIES_1
275 SYSCALL_PSERIES_2_RFID
276 SYSCALL_PSERIES_3
277 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
278
b3e6b5df 279 STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
de56a948 280 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
281
 282 /* At 0xe??? we have a bunch of hypervisor exceptions; we branch
283 * out of line to handle them
284 */
285 . = 0xe00
e6a74c6e 286hv_exception_trampoline:
287 b h_data_storage_hv
288 . = 0xe20
289 b h_instr_storage_hv
290 . = 0xe40
291 b emulation_assist_hv
292 . = 0xe50
293 b hmi_exception_hv
294 . = 0xe60
295 b hmi_exception_hv
296
297 /* We need to deal with the Altivec unavailable exception
298 * here which is at 0xf20, thus in the middle of the
299 * prolog code of the PerformanceMonitor one. A little
300 * trickery is thus necessary
301 */
c86e2ead 302performance_monitor_pSeries_1:
303 . = 0xf00
304 b performance_monitor_pSeries
305
c86e2ead 306altivec_unavailable_pSeries_1:
307 . = 0xf20
308 b altivec_unavailable_pSeries
309
c86e2ead 310vsx_unavailable_pSeries_1:
311 . = 0xf40
312 b vsx_unavailable_pSeries
313
314#ifdef CONFIG_CBE_RAS
b3e6b5df 315 STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
5ccf55dd 316 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
0ebc4cda 317#endif /* CONFIG_CBE_RAS */
b01c8b54 318
b3e6b5df 319 STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
de56a948 320 KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
b01c8b54 321
b92a66a6 322 . = 0x1500
51cf2b30 323 .global denorm_exception_hv
324denorm_exception_hv:
325 HMT_MEDIUM
326 mtspr SPRN_SPRG_HSCRATCH0,r13
327 mfspr r13,SPRN_SPRG_HPACA
328 std r9,PACA_EXGEN+EX_R9(r13)
329 std r10,PACA_EXGEN+EX_R10(r13)
330 std r11,PACA_EXGEN+EX_R11(r13)
331 std r12,PACA_EXGEN+EX_R12(r13)
332 mfspr r9,SPRN_SPRG_HSCRATCH0
333 std r9,PACA_EXGEN+EX_R13(r13)
334 mfcr r9
335
336#ifdef CONFIG_PPC_DENORMALISATION
337 mfspr r10,SPRN_HSRR1
338 mfspr r11,SPRN_HSRR0 /* save HSRR0 */
339 andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */
340 addi r11,r11,-4 /* HSRR0 is next instruction */
341 bne+ denorm_assist
342#endif
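/*
 * When HSRR1 flags a denormalised operand, HSRR0 (saved in r11) has been
 * wound back to the faulting instruction and denorm_assist below
 * "touches" the FP/VSX registers before returning with HRFID, so the
 * instruction is re-executed; otherwise we fall through to the normal
 * HV exception prolog.
 */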
343
344 EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
345 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)
346
0ebc4cda 347#ifdef CONFIG_CBE_RAS
b3e6b5df 348 STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
5ccf55dd 349 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
0ebc4cda 350#endif /* CONFIG_CBE_RAS */
b01c8b54 351
b3e6b5df 352 STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
de56a948 353 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)
b01c8b54 354
0ebc4cda 355#ifdef CONFIG_CBE_RAS
b3e6b5df 356 STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
5ccf55dd 357 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
358#else
359 . = 0x1800
360#endif /* CONFIG_CBE_RAS */
361
0ebc4cda 362
363/*** Out of line interrupts support ***/
364
faab4dd2 365 .align 7
366 /* moved from 0x200 */
367machine_check_pSeries:
368 .globl machine_check_fwnmi
369machine_check_fwnmi:
370 HMT_MEDIUM
371 SET_SCRATCH0(r13) /* save r13 */
372 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common,
373 EXC_STD, KVMTEST, 0x200)
374 KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
375
376 /* moved from 0x300 */
377data_access_check_stab:
378 GET_PACA(r13)
379 std r9,PACA_EXSLB+EX_R9(r13)
380 std r10,PACA_EXSLB+EX_R10(r13)
381 mfspr r10,SPRN_DAR
382 mfspr r9,SPRN_DSISR
383 srdi r10,r10,60
384 rlwimi r10,r9,16,0x20
de56a948 385#ifdef CONFIG_KVM_BOOK3S_PR
3c42bf8a 386 lbz r9,HSTATE_IN_GUEST(r13)
387 rlwimi r10,r9,8,0x300
388#endif
389 mfcr r9
390 cmpwi r10,0x2c
391 beq do_stab_bolted_pSeries
392 mtcrf 0x80,r9
393 ld r9,PACA_EXSLB+EX_R9(r13)
394 ld r10,PACA_EXSLB+EX_R10(r13)
395 b data_access_not_stab
396do_stab_bolted_pSeries:
397 std r11,PACA_EXSLB+EX_R11(r13)
398 std r12,PACA_EXSLB+EX_R12(r13)
399 GET_SCRATCH0(r10)
400 std r10,PACA_EXSLB+EX_R13(r13)
401 EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
b01c8b54 402
403 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
404 KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
405 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
406 KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
407 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
408 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
409
410#ifdef CONFIG_PPC_DENORMALISATION
411denorm_assist:
412BEGIN_FTR_SECTION
413/*
414 * To denormalise we need to move a copy of the register to itself.
415 * For POWER6 do that here for all FP regs.
416 */
417 mfmsr r10
418 ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
419 xori r10,r10,(MSR_FE0|MSR_FE1)
420 mtmsrd r10
421 sync
422 fmr 0,0
423 fmr 1,1
424 fmr 2,2
425 fmr 3,3
426 fmr 4,4
427 fmr 5,5
428 fmr 6,6
429 fmr 7,7
430 fmr 8,8
431 fmr 9,9
432 fmr 10,10
433 fmr 11,11
434 fmr 12,12
435 fmr 13,13
436 fmr 14,14
437 fmr 15,15
438 fmr 16,16
439 fmr 17,17
440 fmr 18,18
441 fmr 19,19
442 fmr 20,20
443 fmr 21,21
444 fmr 22,22
445 fmr 23,23
446 fmr 24,24
447 fmr 25,25
448 fmr 26,26
449 fmr 27,27
450 fmr 28,28
451 fmr 29,29
452 fmr 30,30
453 fmr 31,31
454FTR_SECTION_ELSE
455/*
456 * To denormalise we need to move a copy of the register to itself.
457 * For POWER7 do that here for the first 32 VSX registers only.
458 */
459 mfmsr r10
460 oris r10,r10,MSR_VSX@h
461 mtmsrd r10
462 sync
463 XVCPSGNDP(0,0,0)
464 XVCPSGNDP(1,1,1)
465 XVCPSGNDP(2,2,2)
466 XVCPSGNDP(3,3,3)
467 XVCPSGNDP(4,4,4)
468 XVCPSGNDP(5,5,5)
469 XVCPSGNDP(6,6,6)
470 XVCPSGNDP(7,7,7)
471 XVCPSGNDP(8,8,8)
472 XVCPSGNDP(9,9,9)
473 XVCPSGNDP(10,10,10)
474 XVCPSGNDP(11,11,11)
475 XVCPSGNDP(12,12,12)
476 XVCPSGNDP(13,13,13)
477 XVCPSGNDP(14,14,14)
478 XVCPSGNDP(15,15,15)
479 XVCPSGNDP(16,16,16)
480 XVCPSGNDP(17,17,17)
481 XVCPSGNDP(18,18,18)
482 XVCPSGNDP(19,19,19)
483 XVCPSGNDP(20,20,20)
484 XVCPSGNDP(21,21,21)
485 XVCPSGNDP(22,22,22)
486 XVCPSGNDP(23,23,23)
487 XVCPSGNDP(24,24,24)
488 XVCPSGNDP(25,25,25)
489 XVCPSGNDP(26,26,26)
490 XVCPSGNDP(27,27,27)
491 XVCPSGNDP(28,28,28)
492 XVCPSGNDP(29,29,29)
493 XVCPSGNDP(30,30,30)
494 XVCPSGNDP(31,31,31)
495ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
496 mtspr SPRN_HSRR0,r11
497 mtcrf 0x80,r9
498 ld r9,PACA_EXGEN+EX_R9(r13)
499 ld r10,PACA_EXGEN+EX_R10(r13)
500 ld r11,PACA_EXGEN+EX_R11(r13)
501 ld r12,PACA_EXGEN+EX_R12(r13)
502 ld r13,PACA_EXGEN+EX_R13(r13)
503 HRFID
504 b .
505#endif
506
b01c8b54 507 .align 7
b3e6b5df 508 /* moved from 0xe00 */
509 STD_EXCEPTION_HV(., 0xe02, h_data_storage)
510 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
511 STD_EXCEPTION_HV(., 0xe22, h_instr_storage)
512 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
513 STD_EXCEPTION_HV(., 0xe42, emulation_assist)
514 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
515 STD_EXCEPTION_HV(., 0xe62, hmi_exception) /* need to flush cache ? */
516 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
517
518 /* moved from 0xf00 */
b3e6b5df 519 STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
de56a948 520 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
b3e6b5df 521 STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
de56a948 522 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
b3e6b5df 523 STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
de56a948 524 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
525
526/*
527 * An interrupt came in while soft-disabled. We set paca->irq_happened,
 528 * then, if it was a decrementer interrupt, we bump the dec to max
 529 * and return, else we hard disable and return. This is called with
530 * r10 containing the value to OR to the paca field.
0ebc4cda 531 */
532#define MASKED_INTERRUPT(_H) \
533masked_##_H##interrupt: \
534 std r11,PACA_EXGEN+EX_R11(r13); \
535 lbz r11,PACAIRQHAPPENED(r13); \
536 or r11,r11,r10; \
537 stb r11,PACAIRQHAPPENED(r13); \
538 andi. r10,r10,PACA_IRQ_DEC; \
539 beq 1f; \
540 lis r10,0x7fff; \
541 ori r10,r10,0xffff; \
542 mtspr SPRN_DEC,r10; \
543 b 2f; \
5441: mfspr r10,SPRN_##_H##SRR1; \
545 rldicl r10,r10,48,1; /* clear MSR_EE */ \
546 rotldi r10,r10,16; \
547 mtspr SPRN_##_H##SRR1,r10; \
5482: mtcrf 0x80,r9; \
549 ld r9,PACA_EXGEN+EX_R9(r13); \
550 ld r10,PACA_EXGEN+EX_R10(r13); \
551 ld r11,PACA_EXGEN+EX_R11(r13); \
552 GET_SCRATCH0(r13); \
553 ##_H##rfid; \
0ebc4cda 554 b .
555
556 MASKED_INTERRUPT()
557 MASKED_INTERRUPT(H)
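/*
 * The two instantiations above generate masked_interrupt and
 * masked_Hinterrupt, which differ only in using SRR1/rfid versus
 * HSRR1/hrfid for the masked return.
 */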
0ebc4cda 558
559/*
560 * Called from arch_local_irq_enable when an interrupt needs
561 * to be resent. r3 contains 0x500 or 0x900 to indicate which
562 * kind of interrupt. MSR:EE is already off. We generate a
 563 * stack frame as if a real interrupt had happened.
564 *
565 * Note: While MSR:EE is off, we need to make sure that _MSR
566 * in the generated frame has EE set to 1 or the exception
 567 * handler will not properly re-enable interrupts.
568 */
569_GLOBAL(__replay_interrupt)
570 /* We are going to jump to the exception common code which
571 * will retrieve various register values from the PACA which
572 * we don't give a damn about, so we don't bother storing them.
573 */
574 mfmsr r12
575 mflr r11
576 mfcr r9
577 ori r12,r12,MSR_EE
578 andi. r3,r3,0x0800
579 bne decrementer_common
580 b hardware_interrupt_common
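/*
 * The andi. against 0x0800 above distinguishes the two callers: 0x900
 * (decrementer) has that bit set and goes to decrementer_common, while
 * 0x500 (external interrupt) falls through to hardware_interrupt_common.
 */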
a5d4f3ad 581
582#ifdef CONFIG_PPC_PSERIES
583/*
584 * Vectors for the FWNMI option. Share common code.
585 */
586 .globl system_reset_fwnmi
587 .align 7
588system_reset_fwnmi:
589 HMT_MEDIUM
673b189a 590 SET_SCRATCH0(r13) /* save r13 */
591 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
592 NOTEST, 0x100)
593
594#endif /* CONFIG_PPC_PSERIES */
595
596#ifdef __DISABLED__
597/*
598 * This is used for when the SLB miss handler has to go virtual,
 599 * which no longer happens, but will once we re-implement
600 * dynamic VSIDs for shared page tables
601 */
602slb_miss_user_pseries:
603 std r10,PACA_EXGEN+EX_R10(r13)
604 std r11,PACA_EXGEN+EX_R11(r13)
605 std r12,PACA_EXGEN+EX_R12(r13)
673b189a 606 GET_SCRATCH0(r10)
607 ld r11,PACA_EXSLB+EX_R9(r13)
608 ld r12,PACA_EXSLB+EX_R3(r13)
609 std r10,PACA_EXGEN+EX_R13(r13)
610 std r11,PACA_EXGEN+EX_R9(r13)
611 std r12,PACA_EXGEN+EX_R3(r13)
612 clrrdi r12,r13,32
613 mfmsr r10
614 mfspr r11,SRR0 /* save SRR0 */
615 ori r12,r12,slb_miss_user_common@l /* virt addr of handler */
616 ori r10,r10,MSR_IR|MSR_DR|MSR_RI
617 mtspr SRR0,r12
618 mfspr r12,SRR1 /* and SRR1 */
619 mtspr SRR1,r10
620 rfid
621 b . /* prevent spec. execution */
622#endif /* __DISABLED__ */
623
624/*
625 * Code from here down to __end_handlers is invoked from the
626 * exception prologs above. Because the prologs assemble the
627 * addresses of these handlers using the LOAD_HANDLER macro,
628 * which uses an ori instruction, these handlers must be in
629 * the first 64k of the kernel image.
630 */
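/*
 * (LOAD_HANDLER() ORs the handler's offset from the kernel base held in
 * PACAKBASE into the low 16 bits of the register, which is where the
 * 64k restriction above comes from.)
 */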
631
632/*** Common interrupt handlers ***/
633
634 STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
635
636 /*
637 * Machine check is different because we use a different
638 * save area: PACA_EXMC instead of PACA_EXGEN.
639 */
640 .align 7
641 .globl machine_check_common
642machine_check_common:
643 EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
644 FINISH_NAP
645 DISABLE_INTS
646 bl .save_nvgprs
647 addi r3,r1,STACK_FRAME_OVERHEAD
648 bl .machine_check_exception
649 b .ret_from_except
650
651 STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
652 STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
dabe859e 653 STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
654 STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
655 STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
656 STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
657 STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
658 STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
659 STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
7450f6f0 660 STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
0ebc4cda 661 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
b92a66a6 662 STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception)
663#ifdef CONFIG_ALTIVEC
664 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
665#else
666 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
667#endif
668#ifdef CONFIG_CBE_RAS
669 STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
670 STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
671 STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
672#endif /* CONFIG_CBE_RAS */
673
674 /*
675 * Relocation-on interrupts: A subset of the interrupts can be delivered
676 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
677 * it. Addresses are the same as the original interrupt addresses, but
678 * offset by 0xc000000000004000.
679 * It's impossible to receive interrupts below 0x300 via this mechanism.
 680 * KVM: None of these traps are from the guest; anything that escalated
681 * to HV=1 from HV=0 is delivered via real mode handlers.
682 */
683
684 /*
685 * This uses the standard macro, since the original 0x300 vector
686 * only has extra guff for STAB-based processors -- which never
687 * come here.
688 */
689 STD_RELON_EXCEPTION_PSERIES(0x4300, 0x300, data_access)
690 . = 0x4380
691 .globl data_access_slb_relon_pSeries
692data_access_slb_relon_pSeries:
693 HMT_MEDIUM
694 SET_SCRATCH0(r13)
695 EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
696 std r3,PACA_EXSLB+EX_R3(r13)
697 mfspr r3,SPRN_DAR
698 mfspr r12,SPRN_SRR1
699#ifndef CONFIG_RELOCATABLE
700 b .slb_miss_realmode
701#else
702 /*
703 * We can't just use a direct branch to .slb_miss_realmode
704 * because the distance from here to there depends on where
705 * the kernel ends up being put.
706 */
707 mfctr r11
708 ld r10,PACAKBASE(r13)
709 LOAD_HANDLER(r10, .slb_miss_realmode)
710 mtctr r10
711 bctr
712#endif
713
714 STD_RELON_EXCEPTION_PSERIES(0x4400, 0x400, instruction_access)
715 . = 0x4480
716 .globl instruction_access_slb_relon_pSeries
717instruction_access_slb_relon_pSeries:
718 HMT_MEDIUM
719 SET_SCRATCH0(r13)
720 EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
721 std r3,PACA_EXSLB+EX_R3(r13)
722 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
723 mfspr r12,SPRN_SRR1
724#ifndef CONFIG_RELOCATABLE
725 b .slb_miss_realmode
726#else
727 mfctr r11
728 ld r10,PACAKBASE(r13)
729 LOAD_HANDLER(r10, .slb_miss_realmode)
730 mtctr r10
731 bctr
732#endif
733
734 . = 0x4500
735 .globl hardware_interrupt_relon_pSeries;
736 .globl hardware_interrupt_relon_hv;
737hardware_interrupt_relon_pSeries:
738hardware_interrupt_relon_hv:
739 BEGIN_FTR_SECTION
740 _MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
741 FTR_SECTION_ELSE
742 _MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
743 ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_206)
744 STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
745 STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
746 STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
747 MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer)
748 STD_RELON_EXCEPTION_HV(0x4980, 0x982, hdecrementer)
749 STD_RELON_EXCEPTION_PSERIES(0x4b00, 0xb00, trap_0b)
750
751 . = 0x4c00
752 .globl system_call_relon_pSeries
753system_call_relon_pSeries:
754 HMT_MEDIUM
755 SYSCALL_PSERIES_1
756 SYSCALL_PSERIES_2_DIRECT
757 SYSCALL_PSERIES_3
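/*
 * Same sequence as the 0xc00 vector above, except that we are already
 * running with relocation on, so SYSCALL_PSERIES_2_DIRECT reaches
 * system_call_entry_direct with a plain branch (or via LR when
 * CONFIG_RELOCATABLE) instead of rfid.
 */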
758
759 STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)
760
761 . = 0x4e00
762 b h_data_storage_relon_hv
763
764 . = 0x4e20
765 b h_instr_storage_relon_hv
766
767 . = 0x4e40
768 b emulation_assist_relon_hv
769
770 . = 0x4e50
771 b hmi_exception_relon_hv
772
773 . = 0x4e60
774 b hmi_exception_relon_hv
775
776 /* For when we support the doorbell interrupt:
777 STD_RELON_EXCEPTION_HYPERVISOR(0x4e80, 0xe80, doorbell_hyper)
778 */
779
780performance_monitor_relon_pSeries_1:
781 . = 0x4f00
782 b performance_monitor_relon_pSeries
783
784altivec_unavailable_relon_pSeries_1:
785 . = 0x4f20
786 b altivec_unavailable_relon_pSeries
787
788vsx_unavailable_relon_pSeries_1:
789 . = 0x4f40
790 b vsx_unavailable_relon_pSeries
791
792#ifdef CONFIG_CBE_RAS
793 STD_RELON_EXCEPTION_HV(0x5200, 0x1202, cbe_system_error)
794#endif /* CONFIG_CBE_RAS */
795 STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
796#ifdef CONFIG_PPC_DENORMALISATION
797 . = 0x5500
798 b denorm_exception_hv
799#endif
800#ifdef CONFIG_CBE_RAS
801 STD_RELON_EXCEPTION_HV(0x5600, 0x1602, cbe_maintenance)
802#else
803#ifdef CONFIG_HVC_SCOM
804 STD_RELON_EXCEPTION_HV(0x5600, 0x1600, maintence_interrupt)
805 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1600)
806#endif /* CONFIG_HVC_SCOM */
807#endif /* CONFIG_CBE_RAS */
808 STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)
809#ifdef CONFIG_CBE_RAS
810 STD_RELON_EXCEPTION_HV(0x5800, 0x1802, cbe_thermal)
811#endif /* CONFIG_CBE_RAS */
812
813 /* Other future vectors */
814 .align 7
815 .globl __end_interrupts
816__end_interrupts:
817
0ebc4cda 818 .align 7
819system_call_entry_direct:
820#if defined(CONFIG_RELOCATABLE)
821 /* The first level prologue may have used LR to get here, saving
822 * orig in r10. To save hacking/ifdeffing common code, restore here.
823 */
824 mtlr r10
825#endif
826system_call_entry:
827 b system_call_common
828
829ppc64_runlatch_on_trampoline:
830 b .__ppc64_runlatch_on
831
832/*
833 * Here we have detected that the kernel stack pointer is bad.
834 * R9 contains the saved CR, r13 points to the paca,
835 * r10 contains the (bad) kernel stack pointer,
836 * r11 and r12 contain the saved SRR0 and SRR1.
837 * We switch to using an emergency stack, save the registers there,
838 * and call kernel_bad_stack(), which panics.
839 */
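/*
 * On entry r3 points to the PACA exception save area that was in use;
 * the original r3 and r9-r13 (and CFAR, when available) are recovered
 * from it below.
 */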
840bad_stack:
841 ld r1,PACAEMERGSP(r13)
842 subi r1,r1,64+INT_FRAME_SIZE
843 std r9,_CCR(r1)
844 std r10,GPR1(r1)
845 std r11,_NIP(r1)
846 std r12,_MSR(r1)
847 mfspr r11,SPRN_DAR
848 mfspr r12,SPRN_DSISR
849 std r11,_DAR(r1)
850 std r12,_DSISR(r1)
851 mflr r10
852 mfctr r11
853 mfxer r12
854 std r10,_LINK(r1)
855 std r11,_CTR(r1)
856 std r12,_XER(r1)
857 SAVE_GPR(0,r1)
858 SAVE_GPR(2,r1)
859 ld r10,EX_R3(r3)
860 std r10,GPR3(r1)
861 SAVE_GPR(4,r1)
862 SAVE_4GPRS(5,r1)
863 ld r9,EX_R9(r3)
864 ld r10,EX_R10(r3)
865 SAVE_2GPRS(9,r1)
866 ld r9,EX_R11(r3)
867 ld r10,EX_R12(r3)
868 ld r11,EX_R13(r3)
869 std r9,GPR11(r1)
870 std r10,GPR12(r1)
871 std r11,GPR13(r1)
872BEGIN_FTR_SECTION
873 ld r10,EX_CFAR(r3)
874 std r10,ORIG_GPR3(r1)
875END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1977b502 876 SAVE_8GPRS(14,r1)
877 SAVE_10GPRS(22,r1)
878 lhz r12,PACA_TRAP_SAVE(r13)
879 std r12,_TRAP(r1)
880 addi r11,r1,INT_FRAME_SIZE
881 std r11,0(r1)
882 li r12,0
883 std r12,0(r11)
884 ld r2,PACATOC(r13)
885 ld r11,exception_marker@toc(r2)
886 std r12,RESULT(r1)
887 std r11,STACK_FRAME_OVERHEAD-16(r1)
8881: addi r3,r1,STACK_FRAME_OVERHEAD
889 bl .kernel_bad_stack
890 b 1b
891
892/*
893 * Here r13 points to the paca, r9 contains the saved CR,
894 * SRR0 and SRR1 are saved in r11 and r12,
895 * r9 - r13 are saved in paca->exgen.
896 */
897 .align 7
898 .globl data_access_common
899data_access_common:
900 mfspr r10,SPRN_DAR
901 std r10,PACA_EXGEN+EX_DAR(r13)
902 mfspr r10,SPRN_DSISR
903 stw r10,PACA_EXGEN+EX_DSISR(r13)
904 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
905 DISABLE_INTS
906 ld r12,_MSR(r1)
907 ld r3,PACA_EXGEN+EX_DAR(r13)
908 lwz r4,PACA_EXGEN+EX_DSISR(r13)
909 li r5,0x300
278a6cdc 910 b .do_hash_page /* Try to handle as hpte fault */
0ebc4cda 911
b3e6b5df 912 .align 7
278a6cdc 913 .globl h_data_storage_common
b3e6b5df 914h_data_storage_common:
915 mfspr r10,SPRN_HDAR
916 std r10,PACA_EXGEN+EX_DAR(r13)
917 mfspr r10,SPRN_HDSISR
918 stw r10,PACA_EXGEN+EX_DSISR(r13)
919 EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
920 bl .save_nvgprs
a546498f 921 DISABLE_INTS
922 addi r3,r1,STACK_FRAME_OVERHEAD
923 bl .unknown_exception
924 b .ret_from_except
b3e6b5df 925
926 .align 7
927 .globl instruction_access_common
928instruction_access_common:
929 EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
930 DISABLE_INTS
931 ld r12,_MSR(r1)
932 ld r3,_NIP(r1)
933 andis. r4,r12,0x5820
934 li r5,0x400
935 b .do_hash_page /* Try to handle as hpte fault */
936
278a6cdc 937 STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)
b3e6b5df 938
939/*
 940 * Here is the common user-SLB-miss handler used when going to virtual
 941 * mode for SLB misses; it is currently not used
942 */
943#ifdef __DISABLED__
944 .align 7
945 .globl slb_miss_user_common
946slb_miss_user_common:
947 mflr r10
948 std r3,PACA_EXGEN+EX_DAR(r13)
949 stw r9,PACA_EXGEN+EX_CCR(r13)
950 std r10,PACA_EXGEN+EX_LR(r13)
951 std r11,PACA_EXGEN+EX_SRR0(r13)
952 bl .slb_allocate_user
953
954 ld r10,PACA_EXGEN+EX_LR(r13)
955 ld r3,PACA_EXGEN+EX_R3(r13)
956 lwz r9,PACA_EXGEN+EX_CCR(r13)
957 ld r11,PACA_EXGEN+EX_SRR0(r13)
958 mtlr r10
959 beq- slb_miss_fault
960
961 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
962 beq- unrecov_user_slb
963 mfmsr r10
964
965.machine push
966.machine "power4"
967 mtcrf 0x80,r9
968.machine pop
969
970 clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */
971 mtmsrd r10,1
972
973 mtspr SRR0,r11
974 mtspr SRR1,r12
975
976 ld r9,PACA_EXGEN+EX_R9(r13)
977 ld r10,PACA_EXGEN+EX_R10(r13)
978 ld r11,PACA_EXGEN+EX_R11(r13)
979 ld r12,PACA_EXGEN+EX_R12(r13)
980 ld r13,PACA_EXGEN+EX_R13(r13)
981 rfid
982 b .
983
984slb_miss_fault:
985 EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
986 ld r4,PACA_EXGEN+EX_DAR(r13)
987 li r5,0
988 std r4,_DAR(r1)
989 std r5,_DSISR(r1)
990 b handle_page_fault
991
992unrecov_user_slb:
993 EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
994 DISABLE_INTS
995 bl .save_nvgprs
9961: addi r3,r1,STACK_FRAME_OVERHEAD
997 bl .unrecoverable_exception
998 b 1b
999
1000#endif /* __DISABLED__ */
1001
1002
1003/*
1004 * r13 points to the PACA, r9 contains the saved CR,
 1005 * r12 contains the saved SRR1, SRR0 is still ready for return
1006 * r3 has the faulting address
1007 * r9 - r13 are saved in paca->exslb.
1008 * r3 is saved in paca->slb_r3
1009 * We assume we aren't going to take any exceptions during this procedure.
1010 */
1011_GLOBAL(slb_miss_realmode)
1012 mflr r10
1013#ifdef CONFIG_RELOCATABLE
1014 mtctr r11
1015#endif
1016
1017 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1018 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
1019
1020 bl .slb_allocate_realmode
1021
1022 /* All done -- return from exception. */
1023
1024 ld r10,PACA_EXSLB+EX_LR(r13)
1025 ld r3,PACA_EXSLB+EX_R3(r13)
1026 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1027
1028 mtlr r10
1029
1030 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
1031 beq- 2f
1032
1033.machine push
1034.machine "power4"
1035 mtcrf 0x80,r9
1036 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
1037.machine pop
1038
1039 ld r9,PACA_EXSLB+EX_R9(r13)
1040 ld r10,PACA_EXSLB+EX_R10(r13)
1041 ld r11,PACA_EXSLB+EX_R11(r13)
1042 ld r12,PACA_EXSLB+EX_R12(r13)
1043 ld r13,PACA_EXSLB+EX_R13(r13)
1044 rfid
1045 b . /* prevent speculative execution */
1046
4f8cf36f 10472: mfspr r11,SPRN_SRR0
1048 ld r10,PACAKBASE(r13)
1049 LOAD_HANDLER(r10,unrecov_slb)
1050 mtspr SPRN_SRR0,r10
1051 ld r10,PACAKMSR(r13)
1052 mtspr SPRN_SRR1,r10
1053 rfid
1054 b .
1055
1056unrecov_slb:
1057 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1058 DISABLE_INTS
1059 bl .save_nvgprs
10601: addi r3,r1,STACK_FRAME_OVERHEAD
1061 bl .unrecoverable_exception
1062 b 1b
1063
1064
1065#ifdef CONFIG_PPC_970_NAP
1066power4_fixup_nap:
1067 andc r9,r9,r10
1068 std r9,TI_LOCAL_FLAGS(r11)
1069 ld r10,_LINK(r1) /* make idle task do the */
1070 std r10,_NIP(r1) /* equivalent of a blr */
1071 blr
1072#endif
1073
1074 .align 7
1075 .globl alignment_common
1076alignment_common:
1077 mfspr r10,SPRN_DAR
1078 std r10,PACA_EXGEN+EX_DAR(r13)
1079 mfspr r10,SPRN_DSISR
1080 stw r10,PACA_EXGEN+EX_DSISR(r13)
1081 EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
1082 ld r3,PACA_EXGEN+EX_DAR(r13)
1083 lwz r4,PACA_EXGEN+EX_DSISR(r13)
1084 std r3,_DAR(r1)
1085 std r4,_DSISR(r1)
1086 bl .save_nvgprs
a3512b2d 1087 DISABLE_INTS
0ebc4cda 1088 addi r3,r1,STACK_FRAME_OVERHEAD
1089 bl .alignment_exception
1090 b .ret_from_except
1091
1092 .align 7
1093 .globl program_check_common
1094program_check_common:
1095 EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
1096 bl .save_nvgprs
54321242 1097 DISABLE_INTS
922b9f86 1098 addi r3,r1,STACK_FRAME_OVERHEAD
1099 bl .program_check_exception
1100 b .ret_from_except
1101
1102 .align 7
1103 .globl fp_unavailable_common
1104fp_unavailable_common:
1105 EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
1106 bne 1f /* if from user, just load it up */
1107 bl .save_nvgprs
9f2f79e3 1108 DISABLE_INTS
0ebc4cda 1109 addi r3,r1,STACK_FRAME_OVERHEAD
1110 bl .kernel_fp_unavailable_exception
1111 BUG_OPCODE
11121: bl .load_up_fpu
1113 b fast_exception_return
1114
1115 .align 7
1116 .globl altivec_unavailable_common
1117altivec_unavailable_common:
1118 EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
1119#ifdef CONFIG_ALTIVEC
1120BEGIN_FTR_SECTION
1121 beq 1f
1122 bl .load_up_altivec
1123 b fast_exception_return
11241:
1125END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1126#endif
1127 bl .save_nvgprs
9f2f79e3 1128 DISABLE_INTS
0ebc4cda 1129 addi r3,r1,STACK_FRAME_OVERHEAD
1130 bl .altivec_unavailable_exception
1131 b .ret_from_except
1132
1133 .align 7
1134 .globl vsx_unavailable_common
1135vsx_unavailable_common:
1136 EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
1137#ifdef CONFIG_VSX
1138BEGIN_FTR_SECTION
1139 beq 1f
1140 b .load_up_vsx
11411:
1142END_FTR_SECTION_IFSET(CPU_FTR_VSX)
1143#endif
1144 bl .save_nvgprs
9f2f79e3 1145 DISABLE_INTS
0ebc4cda 1146 addi r3,r1,STACK_FRAME_OVERHEAD
1147 bl .vsx_unavailable_exception
1148 b .ret_from_except
1149
1150 .align 7
1151 .globl __end_handlers
1152__end_handlers:
1153
1154/*
1155 * Hash table stuff
1156 */
1157 .align 7
1158_STATIC(do_hash_page)
1159 std r3,_DAR(r1)
1160 std r4,_DSISR(r1)
1161
9c7cc234 1162 andis. r0,r4,0xa410 /* weird error? */
0ebc4cda 1163 bne- handle_page_fault /* if not, try to insert a HPTE */
1164 andis. r0,r4,DSISR_DABRMATCH@h
1165 bne- handle_dabr_fault
1166
1167BEGIN_FTR_SECTION
1168 andis. r0,r4,0x0020 /* Is it a segment table fault? */
1169 bne- do_ste_alloc /* If so handle it */
44ae3ab3 1170END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
0ebc4cda 1171
9778b696 1172 CURRENT_THREAD_INFO(r11, r1)
1173 lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
1174 andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
1175 bne 77f /* then don't call hash_page now */
1176 /*
1177 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
1178 * accessing a userspace segment (even from the kernel). We assume
1179 * kernel addresses always have the high bit set.
1180 */
1181 rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
1182 rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
1183 orc r0,r12,r0 /* MSR_PR | ~high_bit */
1184 rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
1185 ori r4,r4,1 /* add _PAGE_PRESENT */
1186 rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */
1187
1188 /*
1189 * r3 contains the faulting address
1190 * r4 contains the required access permissions
1191 * r5 contains the trap number
1192 *
7230c564 1193 * at return r3 = 0 for success, 1 for page fault, negative for error
1194 */
1195 bl .hash_page /* build HPTE if possible */
1196 cmpdi r3,0 /* see if hash_page succeeded */
1197
7230c564 1198 /* Success */
0ebc4cda 1199 beq fast_exc_return_irq /* Return from exception on success */
0ebc4cda 1200
1201 /* Error */
1202 blt- 13f
9c7cc234 1203
1204/* Here we have a page fault that hash_page can't handle. */
1205handle_page_fault:
120611: ld r4,_DAR(r1)
1207 ld r5,_DSISR(r1)
1208 addi r3,r1,STACK_FRAME_OVERHEAD
1209 bl .do_page_fault
1210 cmpdi r3,0
a546498f 1211 beq+ 12f
1212 bl .save_nvgprs
1213 mr r5,r3
1214 addi r3,r1,STACK_FRAME_OVERHEAD
1215 lwz r4,_DAR(r1)
1216 bl .bad_page_fault
1217 b .ret_from_except
1218
1219/* We have a data breakpoint exception - handle it */
1220handle_dabr_fault:
1221 bl .save_nvgprs
1222 ld r4,_DAR(r1)
1223 ld r5,_DSISR(r1)
1224 addi r3,r1,STACK_FRAME_OVERHEAD
1225 bl .do_dabr
122612: b .ret_from_except_lite
1227
1228
1229/* We have a page fault that hash_page could handle but HV refused
1230 * the PTE insertion
1231 */
a546498f 123213: bl .save_nvgprs
1233 mr r5,r3
1234 addi r3,r1,STACK_FRAME_OVERHEAD
1235 ld r4,_DAR(r1)
1236 bl .low_hash_fault
1237 b .ret_from_except
1238
1239/*
1240 * We come here as a result of a DSI at a point where we don't want
1241 * to call hash_page, such as when we are accessing memory (possibly
1242 * user memory) inside a PMU interrupt that occurred while interrupts
1243 * were soft-disabled. We want to invoke the exception handler for
1244 * the access, or panic if there isn't a handler.
1245 */
124677: bl .save_nvgprs
1247 mr r4,r3
1248 addi r3,r1,STACK_FRAME_OVERHEAD
1249 li r5,SIGSEGV
1250 bl .bad_page_fault
1251 b .ret_from_except
1252
1253 /* here we have a segment miss */
1254do_ste_alloc:
1255 bl .ste_allocate /* try to insert stab entry */
1256 cmpdi r3,0
1257 bne- handle_page_fault
1258 b fast_exception_return
1259
1260/*
1261 * r13 points to the PACA, r9 contains the saved CR,
1262 * r11 and r12 contain the saved SRR0 and SRR1.
1263 * r9 - r13 are saved in paca->exslb.
1264 * We assume we aren't going to take any exceptions during this procedure.
1265 * We assume (DAR >> 60) == 0xc.
1266 */
1267 .align 7
1268_GLOBAL(do_stab_bolted)
1269 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1270 std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
1271
1272 /* Hash to the primary group */
1273 ld r10,PACASTABVIRT(r13)
1274 mfspr r11,SPRN_DAR
1275 srdi r11,r11,28
1276 rldimi r10,r11,7,52 /* r10 = first ste of the group */
1277
1278 /* Calculate VSID */
1279 /* This is a kernel address, so protovsid = ESID | 1 << 37 */
1280 li r9,0x1
1281 rldimi r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
1282 ASM_VSID_SCRAMBLE(r11, r9, 256M)
1283 rldic r9,r11,12,16 /* r9 = vsid << 12 */
1284
1285 /* Search the primary group for a free entry */
12861: ld r11,0(r10) /* Test valid bit of the current ste */
1287 andi. r11,r11,0x80
1288 beq 2f
1289 addi r10,r10,16
1290 andi. r11,r10,0x70
1291 bne 1b
1292
 1293 /* Stick to only searching the primary group for now. */
1294 /* At least for now, we use a very simple random castout scheme */
 1295 /* Use the TB as a random number; OR in 1 to avoid entry 0 */
1296 mftb r11
1297 rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */
1298 ori r11,r11,0x10
1299
1300 /* r10 currently points to an ste one past the group of interest */
1301 /* make it point to the randomly selected entry */
1302 subi r10,r10,128
1303 or r10,r10,r11 /* r10 is the entry to invalidate */
1304
1305 isync /* mark the entry invalid */
1306 ld r11,0(r10)
1307 rldicl r11,r11,56,1 /* clear the valid bit */
1308 rotldi r11,r11,8
1309 std r11,0(r10)
1310 sync
1311
1312 clrrdi r11,r11,28 /* Get the esid part of the ste */
1313 slbie r11
1314
13152: std r9,8(r10) /* Store the vsid part of the ste */
1316 eieio
1317
1318 mfspr r11,SPRN_DAR /* Get the new esid */
1319 clrrdi r11,r11,28 /* Permits a full 32b of ESID */
1320 ori r11,r11,0x90 /* Turn on valid and kp */
1321 std r11,0(r10) /* Put new entry back into the stab */
1322
1323 sync
1324
1325 /* All done -- return from exception. */
1326 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1327 ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
1328
1329 andi. r10,r12,MSR_RI
1330 beq- unrecov_slb
1331
1332 mtcrf 0x80,r9 /* restore CR */
1333
1334 mfmsr r10
1335 clrrdi r10,r10,2
1336 mtmsrd r10,1
1337
1338 mtspr SPRN_SRR0,r11
1339 mtspr SPRN_SRR1,r12
1340 ld r9,PACA_EXSLB+EX_R9(r13)
1341 ld r10,PACA_EXSLB+EX_R10(r13)
1342 ld r11,PACA_EXSLB+EX_R11(r13)
1343 ld r12,PACA_EXSLB+EX_R12(r13)
1344 ld r13,PACA_EXSLB+EX_R13(r13)
1345 rfid
1346 b . /* prevent speculative execution */
1347
1348
1349 /* Equivalents to the above handlers for relocation-on interrupt vectors */
1350 STD_RELON_EXCEPTION_HV(., 0xe00, h_data_storage)
1351 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe00)
1352 STD_RELON_EXCEPTION_HV(., 0xe20, h_instr_storage)
1353 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe20)
1354 STD_RELON_EXCEPTION_HV(., 0xe40, emulation_assist)
1355 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe40)
1356 STD_RELON_EXCEPTION_HV(., 0xe60, hmi_exception)
1357 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe60)
1358
1359 STD_RELON_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
1360 STD_RELON_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
1361 STD_RELON_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
1362
ed79ba9e 1363#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
1364/*
1365 * Data area reserved for FWNMI option.
1366 * This address (0x7000) is fixed by the RPA.
1367 */
 1368 . = 0x7000
1369 .globl fwnmi_data_area
1370fwnmi_data_area:
0ebc4cda 1371
1372 /* pseries and powernv need to keep the whole page from
1373 * 0x7000 to 0x8000 free for use by the firmware
1374 */
278a6cdc 1375 . = 0x8000
ed79ba9e 1376#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
84493804 1377
1378/* Space for CPU0's segment table */
1379 .balign 4096
1380 .globl initial_stab
1381initial_stab:
1382 .space 4096
4f8cf36f 1383
1384#ifdef CONFIG_PPC_POWERNV
1385_GLOBAL(opal_mc_secondary_handler)
1386 HMT_MEDIUM
1387 SET_SCRATCH0(r13)
1388 GET_PACA(r13)
1389 clrldi r3,r3,2
1390 tovirt(r3,r3)
1391 std r3,PACA_OPAL_MC_EVT(r13)
1392 ld r13,OPAL_MC_SRR0(r3)
1393 mtspr SPRN_SRR0,r13
1394 ld r13,OPAL_MC_SRR1(r3)
1395 mtspr SPRN_SRR1,r13
1396 ld r3,OPAL_MC_GPR3(r3)
1397 GET_SCRATCH0(r13)
1398 b machine_check_pSeries
1399#endif /* CONFIG_PPC_POWERNV */