git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - arch/powerpc/kernel/exceptions-64s.S
powerpc: Make load_handler handle up to 64k offset
/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>

/*
 * We lay out physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x5fff : interrupt support common interrupt prologs
 * 0x6000 - 0x6fff : Initial (CPU0) segment table
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -        : Early init and support code
 */

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	.globl system_reset_pSeries;
system_reset_pSeries:
	HMT_MEDIUM;
	SET_SCRATCH0(r13)
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
	/* Running native on arch 2.06 or later, check if we are
	 * waking up from nap. We only handle no state loss and
	 * supervisor state loss. We do -not- handle hypervisor
	 * state loss at this time.
	 */
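	/* The rlwinm/cmpwi tests below decode the SRR1 wake-reason field
	 * into the low bits of r13: 0 means we did not wake from nap,
	 * 1 means no state loss, 2 means (supervisor) state loss and is
	 * sent to power7_wakeup_loss, and anything greater indicates
	 * hypervisor state loss, which is treated as fatal here.
	 */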
	mfspr	r13,SPRN_SRR1
	rlwinm.	r13,r13,47-31,30,31
	beq	9f

	/* waking up from powersave (nap) state */
	cmpwi	cr1,r13,2
	/* Total loss of HV state is fatal, we could try to use the
	 * PIR to locate a PACA, then use an emergency stack etc...
	 * but for now, let's just stay stuck here
	 */
	bgt	cr1,.
	GET_PACA(r13)

#ifdef CONFIG_KVM_BOOK3S_64_HV
	li	r0,KVM_HWTHREAD_IN_KERNEL
	stb	r0,HSTATE_HWTHREAD_STATE(r13)
	/* Order setting hwthread_state vs. testing hwthread_req */
	sync
	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r0,0
	beq	1f
	b	kvm_start_guest
1:
#endif

	beq	cr1,2f
	b	.power7_wakeup_noloss
2:	b	.power7_wakeup_loss
9:
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif /* CONFIG_PPC_P7_NAP */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)

	. = 0x200
machine_check_pSeries_1:
	/* This is moved out of line as it can be patched by FW, but
	 * some code path might still want to branch into the original
	 * vector
	 */
	b	machine_check_pSeries

	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM
	SET_SCRATCH0(r13)
BEGIN_FTR_SECTION
	b	data_access_check_stab
data_access_not_stab:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
				 KVMTEST, 0x300)

	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	HMT_MEDIUM
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to .slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif
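	/* The mfctr/LOAD_HANDLER/bctr sequence above (and again at 0x480
	 * below) builds an absolute target from PACAKBASE so the branch
	 * works wherever a relocatable kernel was loaded; the CTR value
	 * saved in r11 is restored by slb_miss_realmode when
	 * CONFIG_RELOCATABLE is set.
	 */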

	STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)

	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	/* We open code these as we can't have a ". = x" (even with
	 * x = ".") within a feature section
	 */
	. = 0x500;
	.globl hardware_interrupt_pSeries;
	.globl hardware_interrupt_hv;
hardware_interrupt_pSeries:
hardware_interrupt_hv:
	BEGIN_FTR_SECTION
		_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
					    EXC_HV, SOFTEN_TEST_HV)
		KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
	FTR_SECTION_ELSE
		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
					    EXC_STD, SOFTEN_TEST_HV_201)
		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)

	STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)

	STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)

	STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)

	MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
	STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)

	STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)

	STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)

	. = 0xc00
	.globl system_call_pSeries
system_call_pSeries:
	HMT_MEDIUM
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9,PACA_EXGEN+EX_R9(r13)
	std	r10,PACA_EXGEN+EX_R10(r13)
	mfcr	r9
	KVMTEST(0xc00)
	GET_SCRATCH0(r13)
#endif
BEGIN_FTR_SECTION
	cmpdi	r0,0x1ebe
	beq-	1f
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
	mr	r9,r13
	GET_PACA(r13)
	mfspr	r11,SPRN_SRR0
	mfspr	r12,SPRN_SRR1
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, system_call_entry)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	rfid
	b	.	/* prevent speculative execution */
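	/* The rfid above leaves real mode: SRR0 was loaded with the
	 * kernel-virtual address of system_call_entry (PACAKBASE plus the
	 * handler offset from LOAD_HANDLER) and SRR1 with PACAKMSR, the
	 * MSR value the kernel runs with, so execution resumes in the
	 * common system call path with translation on.
	 */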

	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)

/* Fast LE/BE switch system call */
1:	mfspr	r12,SPRN_SRR1
	xori	r12,r12,MSR_LE
	mtspr	SPRN_SRR1,r12
	rfid		/* return to userspace */
	b	.
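	/* This path is only reached when the CPU_FTR_REAL_LE section above
	 * matched and r0 held the magic value 0x1ebe: the caller's
	 * endianness is switched simply by toggling MSR_LE in SRR1 and
	 * returning, without entering the normal system call path.
	 */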

	STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)

	/* At 0xe??? we have a bunch of hypervisor exceptions, we branch
	 * out of line to handle them
	 */
	. = 0xe00
hv_exception_trampoline:
	b	h_data_storage_hv
	. = 0xe20
	b	h_instr_storage_hv
	. = 0xe40
	b	emulation_assist_hv
	. = 0xe50
	b	hmi_exception_hv
	. = 0xe60
	b	hmi_exception_hv

	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary
	 */
performance_monitor_pSeries_1:
	. = 0xf00
	b	performance_monitor_pSeries

altivec_unavailable_pSeries_1:
	. = 0xf20
	b	altivec_unavailable_pSeries

vsx_unavailable_pSeries_1:
	. = 0xf40
	b	vsx_unavailable_pSeries

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)

	. = 0x1500
	.global denorm_exception_hv
denorm_exception_hv:
	HMT_MEDIUM
	mtspr	SPRN_SPRG_HSCRATCH0,r13
	mfspr	r13,SPRN_SPRG_HPACA
	std	r9,PACA_EXGEN+EX_R9(r13)
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	mfspr	r9,SPRN_SPRG_HSCRATCH0
	std	r9,PACA_EXGEN+EX_R13(r13)
	mfcr	r9

#ifdef CONFIG_PPC_DENORMALISATION
	mfspr	r10,SPRN_HSRR1
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	andis.	r10,r10,(HSRR1_DENORM)@h /* denorm? */
	addi	r11,r11,-4		/* HSRR0 is next instruction */
	bne+	denorm_assist
#endif

	EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
#else
	. = 0x1800
#endif /* CONFIG_CBE_RAS */


/*** Out of line interrupts support ***/

	.align	7
	/* moved from 0x200 */
machine_check_pSeries:
	.globl machine_check_fwnmi
machine_check_fwnmi:
	HMT_MEDIUM
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common,
				 EXC_STD, KVMTEST, 0x200)
	KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)

	/* moved from 0x300 */
data_access_check_stab:
	GET_PACA(r13)
	std	r9,PACA_EXSLB+EX_R9(r13)
	std	r10,PACA_EXSLB+EX_R10(r13)
	mfspr	r10,SPRN_DAR
	mfspr	r9,SPRN_DSISR
	srdi	r10,r10,60
	rlwimi	r10,r9,16,0x20
#ifdef CONFIG_KVM_BOOK3S_PR
	lbz	r9,HSTATE_IN_GUEST(r13)
	rlwimi	r10,r9,8,0x300
#endif
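	/* At this point r10 is a small selector: the top four bits of the
	 * faulting address (DAR >> 60), with 0x20 merged in when DSISR
	 * flagged a segment table miss and, under KVM-PR, the in-guest
	 * state folded into bits 0x300.  The compare against 0x2c below
	 * therefore matches a segment miss on a 0xc... kernel address
	 * outside a guest, the only case sent to the bolted stab handler.
	 */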
	mfcr	r9
	cmpwi	r10,0x2c
	beq	do_stab_bolted_pSeries
	mtcrf	0x80,r9
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	b	data_access_not_stab
do_stab_bolted_pSeries:
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	GET_SCRATCH0(r10)
	std	r10,PACA_EXSLB+EX_R13(r13)
	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)

	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
	KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
	KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)

#ifdef CONFIG_PPC_DENORMALISATION
denorm_assist:
BEGIN_FTR_SECTION
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER6 do that here for all FP regs.
 */
	mfmsr	r10
	ori	r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
	xori	r10,r10,(MSR_FE0|MSR_FE1)
	mtmsrd	r10
	sync
	fmr	0,0
	fmr	1,1
	fmr	2,2
	fmr	3,3
	fmr	4,4
	fmr	5,5
	fmr	6,6
	fmr	7,7
	fmr	8,8
	fmr	9,9
	fmr	10,10
	fmr	11,11
	fmr	12,12
	fmr	13,13
	fmr	14,14
	fmr	15,15
	fmr	16,16
	fmr	17,17
	fmr	18,18
	fmr	19,19
	fmr	20,20
	fmr	21,21
	fmr	22,22
	fmr	23,23
	fmr	24,24
	fmr	25,25
	fmr	26,26
	fmr	27,27
	fmr	28,28
	fmr	29,29
	fmr	30,30
	fmr	31,31
FTR_SECTION_ELSE
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER7 do that here for the first 32 VSX registers only.
 */
	mfmsr	r10
	oris	r10,r10,MSR_VSX@h
	mtmsrd	r10
	sync
	XVCPSGNDP(0,0,0)
	XVCPSGNDP(1,1,1)
	XVCPSGNDP(2,2,2)
	XVCPSGNDP(3,3,3)
	XVCPSGNDP(4,4,4)
	XVCPSGNDP(5,5,5)
	XVCPSGNDP(6,6,6)
	XVCPSGNDP(7,7,7)
	XVCPSGNDP(8,8,8)
	XVCPSGNDP(9,9,9)
	XVCPSGNDP(10,10,10)
	XVCPSGNDP(11,11,11)
	XVCPSGNDP(12,12,12)
	XVCPSGNDP(13,13,13)
	XVCPSGNDP(14,14,14)
	XVCPSGNDP(15,15,15)
	XVCPSGNDP(16,16,16)
	XVCPSGNDP(17,17,17)
	XVCPSGNDP(18,18,18)
	XVCPSGNDP(19,19,19)
	XVCPSGNDP(20,20,20)
	XVCPSGNDP(21,21,21)
	XVCPSGNDP(22,22,22)
	XVCPSGNDP(23,23,23)
	XVCPSGNDP(24,24,24)
	XVCPSGNDP(25,25,25)
	XVCPSGNDP(26,26,26)
	XVCPSGNDP(27,27,27)
	XVCPSGNDP(28,28,28)
	XVCPSGNDP(29,29,29)
	XVCPSGNDP(30,30,30)
	XVCPSGNDP(31,31,31)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
	mtspr	SPRN_HSRR0,r11
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	HRFID
	b	.
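	/* denorm_assist returns with HRFID to the address saved in r11
	 * above (HSRR0 minus 4), i.e. the faulting floating point
	 * instruction is re-executed once every FP/VSX register has been
	 * rewritten by the fmr/XVCPSGNDP sequence.
	 */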
#endif

	.align	7
	/* moved from 0xe00 */
	STD_EXCEPTION_HV(., 0xe02, h_data_storage)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
	STD_EXCEPTION_HV(., 0xe22, h_instr_storage)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
	STD_EXCEPTION_HV(., 0xe42, emulation_assist)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
	STD_EXCEPTION_HV(., 0xe62, hmi_exception) /* need to flush cache ? */
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)

	/* moved from 0xf00 */
	STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
	STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
	STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)

/*
 * An interrupt came in while soft-disabled. We set paca->irq_happened,
 * then, if it was a decrementer interrupt, we bump the dec to max and
 * return, else we hard disable and return. This is called with
 * r10 containing the value to OR to the paca field.
 */
#define MASKED_INTERRUPT(_H)				\
masked_##_H##interrupt:					\
	std	r11,PACA_EXGEN+EX_R11(r13);		\
	lbz	r11,PACAIRQHAPPENED(r13);		\
	or	r11,r11,r10;				\
	stb	r11,PACAIRQHAPPENED(r13);		\
	andi.	r10,r10,PACA_IRQ_DEC;			\
	beq	1f;					\
	lis	r10,0x7fff;				\
	ori	r10,r10,0xffff;				\
	mtspr	SPRN_DEC,r10;				\
	b	2f;					\
1:	mfspr	r10,SPRN_##_H##SRR1;			\
	rldicl	r10,r10,48,1; /* clear MSR_EE */	\
	rotldi	r10,r10,16;				\
	mtspr	SPRN_##_H##SRR1,r10;			\
2:	mtcrf	0x80,r9;				\
	ld	r9,PACA_EXGEN+EX_R9(r13);		\
	ld	r10,PACA_EXGEN+EX_R10(r13);		\
	ld	r11,PACA_EXGEN+EX_R11(r13);		\
	GET_SCRATCH0(r13);				\
	##_H##rfid;					\
	b	.
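	/* Two details of the macro above are easy to miss: the decrementer
	 * is reloaded with 0x7fffffff, the largest positive 32-bit value,
	 * so it will not fire again for a long time, and the rldicl/rotldi
	 * pair clears MSR_EE in the saved SRR1/HSRR1 by rotating the EE
	 * bit up to the top, masking it off, then rotating the value back
	 * into place.
	 */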

	MASKED_INTERRUPT()
	MASKED_INTERRUPT(H)

/*
 * Called from arch_local_irq_enable when an interrupt needs
 * to be resent. r3 contains 0x500 or 0x900 to indicate which
 * kind of interrupt. MSR:EE is already off. We generate a
 * stackframe as if a real interrupt had happened.
 *
 * Note: While MSR:EE is off, we need to make sure that _MSR
 * in the generated frame has EE set to 1 or the exception
 * handler will not properly re-enable interrupts.
 */
_GLOBAL(__replay_interrupt)
	/* We are going to jump to the exception common code which
	 * will retrieve various register values from the PACA which
	 * we don't give a damn about, so we don't bother storing them.
	 */
	mfmsr	r12
	mflr	r11
	mfcr	r9
	ori	r12,r12,MSR_EE
	andi.	r3,r3,0x0800
	bne	decrementer_common
	b	hardware_interrupt_common
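	/* The dispatch above keys off bit 0x800 of the vector number in
	 * r3: 0x900 has it set, so a pending decrementer is replayed via
	 * decrementer_common, while 0x500 falls through to
	 * hardware_interrupt_common.
	 */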

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option. Share common code.
 */
	.globl system_reset_fwnmi
	.align 7
system_reset_fwnmi:
	HMT_MEDIUM
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)

#endif /* CONFIG_PPC_PSERIES */

#ifdef __DISABLED__
/*
 * This is used when the SLB miss handler has to go virtual, which
 * currently never happens, but it will be needed again once we
 * re-implement dynamic VSIDs for shared page tables
 */
slb_miss_user_pseries:
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	GET_SCRATCH0(r10)
	ld	r11,PACA_EXSLB+EX_R9(r13)
	ld	r12,PACA_EXSLB+EX_R3(r13)
	std	r10,PACA_EXGEN+EX_R13(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R3(r13)
	clrrdi	r12,r13,32
	mfmsr	r10
	mfspr	r11,SRR0			/* save SRR0 */
	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mtspr	SRR0,r12
	mfspr	r12,SRR1			/* and SRR1 */
	mtspr	SRR1,r10
	rfid
	b	.				/* prevent spec. execution */
#endif /* __DISABLED__ */

	.align	7
	.globl	__end_interrupts
__end_interrupts:

/*
 * Code from here down to __end_handlers is invoked from the
 * exception prologs above.  Because the prologs assemble the
 * addresses of these handlers using the LOAD_HANDLER macro,
 * which uses an ori instruction, these handlers must be in
 * the first 64k of the kernel image.
 */
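/* Put differently: the prologs load PACAKBASE (the virtual base of the
 * kernel image) and LOAD_HANDLER then ors in the handler's offset with a
 * single ori, roughly reg |= (handler - start of image), an offset that
 * must fit in 16 bits; hence the 64k limit on where these handlers may
 * be placed.
 */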

/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	FINISH_NAP
	DISABLE_INTS
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
	STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
	STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
	STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
	STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */

	.align	7
system_call_entry:
	b	system_call_common

ppc64_runlatch_on_trampoline:
	b	.__ppc64_runlatch_on

/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	ld	r10,EX_R3(r3)
	std	r10,GPR3(r1)
	SAVE_GPR(4,r1)
	SAVE_4GPRS(5,r1)
	ld	r9,EX_R9(r3)
	ld	r10,EX_R10(r3)
	SAVE_2GPRS(9,r1)
	ld	r9,EX_R11(r3)
	ld	r10,EX_R12(r3)
	ld	r11,EX_R13(r3)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)
BEGIN_FTR_SECTION
	ld	r10,EX_CFAR(r3)
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	SAVE_8GPRS(14,r1)
	SAVE_10GPRS(22,r1)
	lhz	r12,PACA_TRAP_SAVE(r13)
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
	ld	r11,exception_marker@toc(r2)
	std	r12,RESULT(r1)
	std	r11,STACK_FRAME_OVERHEAD-16(r1)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b

/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	DISABLE_INTS
	ld	r12,_MSR(r1)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl h_data_storage_common
h_data_storage_common:
	mfspr	r10,SPRN_HDAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_HDSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unknown_exception
	b	.ret_from_except

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	DISABLE_INTS
	ld	r12,_MSR(r1)
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */

	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)

/*
 * Here is the common SLB miss user handler that is used when going to
 * virtual mode for SLB misses; it is currently not used
 */
#ifdef __DISABLED__
	.align	7
	.globl	slb_miss_user_common
slb_miss_user_common:
	mflr	r10
	std	r3,PACA_EXGEN+EX_DAR(r13)
	stw	r9,PACA_EXGEN+EX_CCR(r13)
	std	r10,PACA_EXGEN+EX_LR(r13)
	std	r11,PACA_EXGEN+EX_SRR0(r13)
	bl	.slb_allocate_user

	ld	r10,PACA_EXGEN+EX_LR(r13)
	ld	r3,PACA_EXGEN+EX_R3(r13)
	lwz	r9,PACA_EXGEN+EX_CCR(r13)
	ld	r11,PACA_EXGEN+EX_SRR0(r13)
	mtlr	r10
	beq-	slb_miss_fault

	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
	beq-	unrecov_user_slb
	mfmsr	r10

.machine push
.machine "power4"
	mtcrf	0x80,r9
.machine pop

	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
	mtmsrd	r10,1

	mtspr	SRR0,r11
	mtspr	SRR1,r12

	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.

slb_miss_fault:
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
	ld	r4,PACA_EXGEN+EX_DAR(r13)
	li	r5,0
	std	r4,_DAR(r1)
	std	r5,_DSISR(r1)
	b	handle_page_fault

unrecov_user_slb:
	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

#endif /* __DISABLED__ */


/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contains the saved SRR1, SRR0 is still ready for return
 * r3 has the faulting address
 * r9 - r13 are saved in paca->exslb.
 * r3 is saved in paca->slb_r3
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(slb_miss_realmode)
	mflr	r10
#ifdef CONFIG_RELOCATABLE
	mtctr	r11
#endif

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	.slb_allocate_realmode

	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */

	mtlr	r10

	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	2f

.machine push
.machine "power4"
	mtcrf	0x80,r9
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
.machine pop

	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

2:	mfspr	r11,SPRN_SRR0
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10,unrecov_slb)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	rfid
	b	.

unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b


#ifdef CONFIG_PPC_970_NAP
power4_fixup_nap:
	andc	r9,r9,r10
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	blr
#endif

	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.alignment_exception
	b	.ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.program_check_exception
	b	.ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	1f			/* if from user, just load it up */
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE
1:	bl	.load_up_fpu
	b	fast_exception_return

	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f
	bl	.load_up_altivec
	b	fast_exception_return
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.altivec_unavailable_exception
	b	.ret_from_except

	.align	7
	.globl vsx_unavailable_common
vsx_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	beq	1f
	b	.load_up_vsx
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	bl	.save_nvgprs
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.vsx_unavailable_exception
	b	.ret_from_except

	.align	7
	.globl	__end_handlers
__end_handlers:

/*
 * Hash table stuff
 */
	.align	7
_STATIC(do_hash_page)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)

	andis.	r0,r4,0xa410		/* weird error? */
	bne-	handle_page_fault	/* if not, try to insert a HPTE */
	andis.	r0,r4,DSISR_DABRMATCH@h
	bne-	handle_dabr_fault

BEGIN_FTR_SECTION
	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
	bne-	do_ste_alloc		/* If so handle it */
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)

	CURRENT_THREAD_INFO(r11, r1)
	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
	bne	77f			/* then don't call hash_page now */
	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel). We assume
	 * kernel addresses always have the high bit set.
	 */
	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori	r4,r4,1			/* add _PAGE_PRESENT */
	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
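	/*
	 * After the bit gymnastics above r4 holds the access mask passed
	 * to hash_page: _PAGE_RW when DSISR flagged a store, _PAGE_USER
	 * when MSR_PR was set or the address has the high bit clear,
	 * _PAGE_PRESENT always, and _PAGE_EXEC when the trap number in r5
	 * is 0x400 (an instruction access fault).
	 */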

	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 * r5 contains the trap number
	 *
	 * at return r3 = 0 for success, 1 for page fault, negative for error
	 */
	bl	.hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if hash_page succeeded */

	/* Success */
	beq	fast_exc_return_irq	/* Return from exception on success */

	/* Error */
	blt-	13f
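	/* A positive return value (page fault) falls through to
	 * handle_page_fault below; a negative return takes the
	 * low_hash_fault path at label 13 further down.
	 */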

/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_page_fault
	cmpdi	r3,0
	beq+	12f
	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.bad_page_fault
	b	.ret_from_except

/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	bl	.save_nvgprs
	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_dabr
12:	b	.ret_from_except_lite


/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
13:	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r4,_DAR(r1)
	bl	.low_hash_fault
	b	.ret_from_except

/*
 * We come here as a result of a DSI at a point where we don't want
 * to call hash_page, such as when we are accessing memory (possibly
 * user memory) inside a PMU interrupt that occurred while interrupts
 * were soft-disabled. We want to invoke the exception handler for
 * the access, or panic if there isn't a handler.
 */
77:	bl	.save_nvgprs
	mr	r4,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r5,SIGSEGV
	bl	.bad_page_fault
	b	.ret_from_except

	/* here we have a segment miss */
do_ste_alloc:
	bl	.ste_allocate		/* try to insert stab entry */
	cmpdi	r3,0
	bne-	handle_page_fault
	b	fast_exception_return

/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 */
	.align	7
_GLOBAL(do_stab_bolted)
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */

	/* Hash to the primary group */
	ld	r10,PACASTABVIRT(r13)
	mfspr	r11,SPRN_DAR
	srdi	r11,r11,28
	rldimi	r10,r11,7,52	/* r10 = first ste of the group */

	/* Calculate VSID */
	/* This is a kernel address, so protovsid = ESID | 1 << 37 */
	li	r9,0x1
	rldimi	r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
	ASM_VSID_SCRAMBLE(r11, r9, 256M)
	rldic	r9,r11,12,16	/* r9 = vsid << 12 */
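	/* ASM_VSID_SCRAMBLE turns the protovsid built above into the
	 * virtual segment id, and the rldic shifts that VSID into the
	 * position it occupies in the second doubleword of a segment
	 * table entry, which is stored at offset 8 of the STE below.
	 */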

	/* Search the primary group for a free entry */
1:	ld	r11,0(r10)	/* Test valid bit of the current ste */
	andi.	r11,r11,0x80
	beq	2f
	addi	r10,r10,16
	andi.	r11,r10,0x70
	bne	1b

	/* Stick to only searching the primary group for now. */
	/* At least for now, we use a very simple random castout scheme */
	/* Use the TB as a random number ; OR in 1 to avoid entry 0 */
	mftb	r11
	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
	ori	r11,r11,0x10

	/* r10 currently points to an ste one past the group of interest */
	/* make it point to the randomly selected entry */
	subi	r10,r10,128
	or	r10,r10,r11	/* r10 is the entry to invalidate */

	isync			/* mark the entry invalid */
	ld	r11,0(r10)
	rldicl	r11,r11,56,1	/* clear the valid bit */
	rotldi	r11,r11,8
	std	r11,0(r10)
	sync

	clrrdi	r11,r11,28	/* Get the esid part of the ste */
	slbie	r11

2:	std	r9,8(r10)	/* Store the vsid part of the ste */
	eieio

	mfspr	r11,SPRN_DAR		/* Get the new esid */
	clrrdi	r11,r11,28		/* Permits a full 32b of ESID */
	ori	r11,r11,0x90		/* Turn on valid and kp */
	std	r11,0(r10)		/* Put new entry back into the stab */

	sync

	/* All done -- return from exception. */
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */

	andi.	r10,r12,MSR_RI
	beq-	unrecov_slb

	mtcrf	0x80,r9			/* restore CR */

	mfmsr	r10
	clrrdi	r10,r10,2
	mtmsrd	r10,1

	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	. = 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:

	/* pseries and powernv need to keep the whole page from
	 * 0x7000 to 0x8000 free for use by the firmware
	 */
	. = 0x8000
#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */

/* Space for CPU0's segment table */
	.balign	4096
	.globl initial_stab
initial_stab:
	.space	4096

#ifdef CONFIG_PPC_POWERNV
_GLOBAL(opal_mc_secondary_handler)
	HMT_MEDIUM
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	clrldi	r3,r3,2
	tovirt(r3,r3)
	std	r3,PACA_OPAL_MC_EVT(r13)
	ld	r13,OPAL_MC_SRR0(r3)
	mtspr	SPRN_SRR0,r13
	ld	r13,OPAL_MC_SRR1(r3)
	mtspr	SPRN_SRR1,r13
	ld	r3,OPAL_MC_GPR3(r3)
	GET_SCRATCH0(r13)
	b	machine_check_pSeries
#endif /* CONFIG_PPC_POWERNV */