]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blob - arch/powerpc/kernel/exceptions-64s.S
powerpc: Remove Cell-specific relocation-on interrupt vector code
[mirror_ubuntu-zesty-kernel.git] / arch / powerpc / kernel / exceptions-64s.S
1 /*
2 * This file contains the 64-bit "server" PowerPC variant
3 * of the low level exception handling including exception
4 * vectors, exception return, part of the slb and stab
5 * handling and other fixed offset specific things.
6 *
7 * This file is meant to be #included from head_64.S due to
8 * position dependent assembly.
9 *
10 * Most of this originates from head_64.S and thus has the same
11 * copyright history.
12 *
13 */
14
15 #include <asm/hw_irq.h>
16 #include <asm/exception-64s.h>
17 #include <asm/ptrace.h>
18
19 /*
20 * We layout physical memory as follows:
21 * 0x0000 - 0x00ff : Secondary processor spin code
22 * 0x0100 - 0x17ff : pSeries Interrupt prologs
23 * 0x1800 - 0x4000 : interrupt support common interrupt prologs
24 * 0x4000 - 0x5fff : pSeries interrupts with IR=1,DR=1
25 * 0x6000 - 0x6fff : more interrupt support including for IR=1,DR=1
26 * 0x7000 - 0x7fff : FWNMI data area
27 * 0x8000 - 0x8fff : Initial (CPU0) segment table
28 * 0x9000 - : Early init and support code
29 */
30 /* Syscall routine is used twice, in reloc-off and reloc-on paths */
/*
 * SYSCALL_PSERIES_1: common front end of the 0xc00 system-call vector.
 * If r0 == 0x1ebe (and the CPU has CPU_FTR_REAL_LE) branch to the fast
 * endian-switch path at label 1 in SYSCALL_PSERIES_3; otherwise stash
 * the user r13 in r9, load the PACA into r13 and pick up SRR0 in r11.
 */
31 #define SYSCALL_PSERIES_1 \
32 BEGIN_FTR_SECTION \
33 cmpdi r0,0x1ebe ; \
34 beq- 1f ; \
35 END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
36 mr r9,r13 ; \
37 GET_PACA(r13) ; \
38 mfspr r11,SPRN_SRR0 ; \
39 0:
40
/*
 * SYSCALL_PSERIES_2_RFID: reloc-off tail — build SRR0/SRR1 from the
 * kernel base and PACAKMSR, then rfid into system_call_entry.
 */
41 #define SYSCALL_PSERIES_2_RFID \
42 mfspr r12,SPRN_SRR1 ; \
43 ld r10,PACAKBASE(r13) ; \
44 LOAD_HANDLER(r10, system_call_entry) ; \
45 mtspr SPRN_SRR0,r10 ; \
46 ld r10,PACAKMSR(r13) ; \
47 mtspr SPRN_SRR1,r10 ; \
48 rfid ; \
49 b . ; /* prevent speculative execution */
50
/*
 * SYSCALL_PSERIES_3: the fast LE/BE switch (label 1) flips MSR_LE in
 * SRR1 and returns straight to userspace. Label 2 refuses the request
 * when taken from privileged state (MSR_PR set -> back to 0b).
 */
51 #define SYSCALL_PSERIES_3 \
52 /* Fast LE/BE switch system call */ \
53 1: mfspr r12,SPRN_SRR1 ; \
54 xori r12,r12,MSR_LE ; \
55 mtspr SPRN_SRR1,r12 ; \
56 rfid ; /* return to userspace */ \
57 b . ; \
58 2: mfspr r12,SPRN_SRR1 ; \
59 andi. r12,r12,MSR_PR ; \
60 bne 0b ; \
61 mtspr SPRN_SRR0,r3 ; \
62 mtspr SPRN_SRR1,r4 ; \
63 mtspr SPRN_SDR1,r5 ; \
64 rfid ; \
65 b . ; /* prevent speculative execution */
66
67 #if defined(CONFIG_RELOCATABLE)
68 /*
69 * We can't branch directly; in the direct case we use LR
70 * and system_call_entry restores LR. (We thus need to move
71 * LR to r10 in the RFID case too.)
72 */
/*
 * Relocatable kernels compute the handler address via LOAD_HANDLER and
 * blr to it; r13 is briefly reused to set MSR_RI, then reloaded.
 */
73 #define SYSCALL_PSERIES_2_DIRECT \
74 mflr r10 ; \
75 ld r12,PACAKBASE(r13) ; \
76 LOAD_HANDLER(r12, system_call_entry_direct) ; \
77 mtlr r12 ; \
78 mfspr r12,SPRN_SRR1 ; \
79 /* Re-use of r13... No spare regs to do this */ \
80 li r13,MSR_RI ; \
81 mtmsrd r13,1 ; \
82 GET_PACA(r13) ; /* get r13 back */ \
83 blr ;
84 #else
85 /* We can branch directly */
86 #define SYSCALL_PSERIES_2_DIRECT \
87 mfspr r12,SPRN_SRR1 ; \
88 li r10,MSR_RI ; \
89 mtmsrd r10,1 ; /* Set RI (EE=0) */ \
90 b system_call_entry_direct ;
91 #endif
92
93 /*
94 * This is the start of the interrupt handlers for pSeries
95 * This code runs with relocation off.
96 * Code from here to __end_interrupts gets copied down to real
97 * address 0x100 when we are running a relocatable kernel.
98 * Therefore any relative branches in this section must only
99 * branch to labels in this section.
100 */
101 . = 0x100
102 .globl __start_interrupts
103 __start_interrupts:
104
/* 0x100: System Reset. On POWER7 this is also the nap-wakeup entry. */
105 .globl system_reset_pSeries;
106 system_reset_pSeries:
107 HMT_MEDIUM_PPR_DISCARD
108 SET_SCRATCH0(r13)
109 #ifdef CONFIG_PPC_P7_NAP
110 BEGIN_FTR_SECTION
111 /* Running native on arch 2.06 or later, check if we are
112 * waking up from nap. We only handle no state loss and
113 * supervisor state loss. We do -not- handle hypervisor
114 * state loss at this time.
115 */
116 mfspr r13,SPRN_SRR1
/* Extract the SRR1 wakeup-state field; zero means this was not a
 * powersave wakeup, so fall through to the normal reset prolog at 9f. */
117 rlwinm. r13,r13,47-31,30,31
118 beq 9f
119
120 /* waking up from powersave (nap) state */
121 cmpwi cr1,r13,2
122 /* Total loss of HV state is fatal, we could try to use the
123 * PIR to locate a PACA, then use an emergency stack etc...
124 * but for now, let's just stay stuck here
125 */
126 bgt cr1,.
127 GET_PACA(r13)
128
129 #ifdef CONFIG_KVM_BOOK3S_64_HV
/* If KVM requested this hardware thread, divert it into the guest
 * entry path instead of resuming the host. */
130 li r0,KVM_HWTHREAD_IN_KERNEL
131 stb r0,HSTATE_HWTHREAD_STATE(r13)
132 /* Order setting hwthread_state vs. testing hwthread_req */
133 sync
134 lbz r0,HSTATE_HWTHREAD_REQ(r13)
135 cmpwi r0,0
136 beq 1f
137 b kvm_start_guest
138 1:
139 #endif
140
/* cr1 still holds the state-loss comparison from above:
 * equal (state == 2) -> state loss path, otherwise no-loss path. */
141 beq cr1,2f
142 b .power7_wakeup_noloss
143 2: b .power7_wakeup_loss
144 9:
145 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
146 #endif /* CONFIG_PPC_P7_NAP */
147 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
148 NOTEST, 0x100)
149
/* 0x200: Machine Check. Real work lives out of line (see
 * machine_check_pSeries below) so firmware can patch this vector. */
150 . = 0x200
151 machine_check_pSeries_1:
152 /* This is moved out of line as it can be patched by FW, but
153 * some code path might still want to branch into the original
154 * vector
155 */
156 b machine_check_pSeries
157
/* 0x300: Data Storage Interrupt (DSI). On STAB-based CPUs (MMU_FTR_SLB
 * clear) first detour through data_access_check_stab out of line. */
158 . = 0x300
159 .globl data_access_pSeries
160 data_access_pSeries:
161 HMT_MEDIUM_PPR_DISCARD
162 SET_SCRATCH0(r13)
163 BEGIN_FTR_SECTION
164 b data_access_check_stab
165 data_access_not_stab:
166 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
167 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
168 KVMTEST, 0x300)
169
/* 0x380: Data SLB miss. r3 = faulting DAR, r12 = SRR1; reach
 * .slb_miss_realmode directly or via ctr for relocatable kernels. */
170 . = 0x380
171 .globl data_access_slb_pSeries
172 data_access_slb_pSeries:
173 HMT_MEDIUM_PPR_DISCARD
174 SET_SCRATCH0(r13)
175 EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
176 std r3,PACA_EXSLB+EX_R3(r13)
177 mfspr r3,SPRN_DAR
178 #ifdef __DISABLED__
179 /* Keep that around for when we re-implement dynamic VSIDs */
180 cmpdi r3,0
181 bge slb_miss_user_pseries
182 #endif /* __DISABLED__ */
183 mfspr r12,SPRN_SRR1
184 #ifndef CONFIG_RELOCATABLE
185 b .slb_miss_realmode
186 #else
187 /*
188 * We can't just use a direct branch to .slb_miss_realmode
189 * because the distance from here to there depends on where
190 * the kernel ends up being put.
191 */
192 mfctr r11
193 ld r10,PACAKBASE(r13)
194 LOAD_HANDLER(r10, .slb_miss_realmode)
195 mtctr r10
196 bctr
197 #endif
198
/* 0x400: Instruction Storage Interrupt. */
199 STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)
200
/* 0x480: Instruction SLB miss — same shape as 0x380 but the faulting
 * address comes from SRR0 instead of DAR. */
201 . = 0x480
202 .globl instruction_access_slb_pSeries
203 instruction_access_slb_pSeries:
204 HMT_MEDIUM_PPR_DISCARD
205 SET_SCRATCH0(r13)
206 EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
207 std r3,PACA_EXSLB+EX_R3(r13)
208 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
209 #ifdef __DISABLED__
210 /* Keep that around for when we re-implement dynamic VSIDs */
211 cmpdi r3,0
212 bge slb_miss_user_pseries
213 #endif /* __DISABLED__ */
214 mfspr r12,SPRN_SRR1
215 #ifndef CONFIG_RELOCATABLE
216 b .slb_miss_realmode
217 #else
218 mfctr r11
219 ld r10,PACAKBASE(r13)
220 LOAD_HANDLER(r10, .slb_miss_realmode)
221 mtctr r10
222 bctr
223 #endif
224
225 /* We open code these as we can't have a ". = x" (even with
226 * x = "." within a feature section
227 */
/* 0x500: External interrupt. HV-capable arch 2.06 CPUs take it at
 * 0x502 in HV mode; older CPUs use the standard 0x500 flavour. */
228 . = 0x500;
229 .globl hardware_interrupt_pSeries;
230 .globl hardware_interrupt_hv;
231 hardware_interrupt_pSeries:
232 hardware_interrupt_hv:
233 BEGIN_FTR_SECTION
234 _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
235 EXC_HV, SOFTEN_TEST_HV)
236 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
237 FTR_SECTION_ELSE
238 _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
239 EXC_STD, SOFTEN_TEST_HV_201)
240 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
241 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
242
/* 0x600-0xb00: standard vectors, each followed by its KVM test stub. */
243 STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
244 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
245
246 STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
247 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
248
249 STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
250 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
251
252 MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
253 STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
254
255 MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
256 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
257
258 STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
259 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
260
/* 0xc00: System Call. Optionally run the KVM test first (needs r9/r10
 * saved and CR in r9), then the shared SYSCALL_PSERIES_* sequence. */
261 . = 0xc00
262 .globl system_call_pSeries
263 system_call_pSeries:
264 HMT_MEDIUM
265 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
266 SET_SCRATCH0(r13)
267 GET_PACA(r13)
268 std r9,PACA_EXGEN+EX_R9(r13)
269 std r10,PACA_EXGEN+EX_R10(r13)
270 mfcr r9
271 KVMTEST(0xc00)
272 GET_SCRATCH0(r13)
273 #endif
274 SYSCALL_PSERIES_1
275 SYSCALL_PSERIES_2_RFID
276 SYSCALL_PSERIES_3
277 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
278
279 STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
280 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
281
282 /* At 0xe??? we have a bunch of hypervisor exceptions, we branch
283 * out of line to handle them
284 */
285 . = 0xe00
286 hv_exception_trampoline:
287 b h_data_storage_hv
288 . = 0xe20
289 b h_instr_storage_hv
290 . = 0xe40
291 b emulation_assist_hv
292 . = 0xe50
293 b hmi_exception_hv
294 . = 0xe60
295 b hmi_exception_hv
296 . = 0xe80
297 b h_doorbell_hv
298
299 /* We need to deal with the Altivec unavailable exception
300 * here which is at 0xf20, thus in the middle of the
301 * prolog code of the PerformanceMonitor one. A little
302 * trickery is thus necessary
303 */
304 performance_monitor_pSeries_1:
305 . = 0xf00
306 b performance_monitor_pSeries
307
308 altivec_unavailable_pSeries_1:
309 . = 0xf20
310 b altivec_unavailable_pSeries
311
312 vsx_unavailable_pSeries_1:
313 . = 0xf40
314 b vsx_unavailable_pSeries
315
316 #ifdef CONFIG_CBE_RAS
317 STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
318 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
319 #endif /* CONFIG_CBE_RAS */
320
321 STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
322 KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
323
/* 0x1500: HV facility / denorm exception. Hand-rolled prolog so the
 * denormalisation fixup can be taken before the generic path. */
324 . = 0x1500
325 .global denorm_exception_hv
326 denorm_exception_hv:
327 HMT_MEDIUM_PPR_DISCARD
328 mtspr SPRN_SPRG_HSCRATCH0,r13
329 mfspr r13,SPRN_SPRG_HPACA
330 std r9,PACA_EXGEN+EX_R9(r13)
331 HMT_MEDIUM_PPR_SAVE(PACA_EXGEN, r9)
332 std r10,PACA_EXGEN+EX_R10(r13)
333 std r11,PACA_EXGEN+EX_R11(r13)
334 std r12,PACA_EXGEN+EX_R12(r13)
335 mfspr r9,SPRN_SPRG_HSCRATCH0
336 std r9,PACA_EXGEN+EX_R13(r13)
337 mfcr r9
338
339 #ifdef CONFIG_PPC_DENORMALISATION
340 mfspr r10,SPRN_HSRR1
341 mfspr r11,SPRN_HSRR0 /* save HSRR0 */
342 andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */
/* Back HSRR0 up by one instruction so denorm_assist re-runs the
 * faulting op after the registers are denormalised. */
343 addi r11,r11,-4 /* HSRR0 is next instruction */
344 bne+ denorm_assist
345 #endif
346
347 EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
348 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)
349
350 #ifdef CONFIG_CBE_RAS
351 STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
352 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
353 #endif /* CONFIG_CBE_RAS */
354
355 STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
356 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)
357
/* Without CBE RAS, still advance the location counter past 0x1800 so
 * the vector region keeps its fixed size. */
358 #ifdef CONFIG_CBE_RAS
359 STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
360 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
361 #else
362 . = 0x1800
363 #endif /* CONFIG_CBE_RAS */
364
365
366 /*** Out of line interrupts support ***/
367
368 .align 7
369 /* moved from 0x200 */
370 machine_check_pSeries:
371 .globl machine_check_fwnmi
372 machine_check_fwnmi:
373 HMT_MEDIUM_PPR_DISCARD
374 SET_SCRATCH0(r13) /* save r13 */
375 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common,
376 EXC_STD, KVMTEST, 0x200)
377 KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
378
379 /* moved from 0x300 */
/* Decide whether this DSI is a segment-table (STAB) miss: build a
 * selector in r10 from DAR[60:63] and DSISR, compare against 0x2c
 * (kernel-segment STAB miss) and either go to do_stab_bolted_pSeries
 * or restore r9/r10 and resume the normal 0x300 path. */
380 data_access_check_stab:
381 GET_PACA(r13)
382 std r9,PACA_EXSLB+EX_R9(r13)
383 std r10,PACA_EXSLB+EX_R10(r13)
384 mfspr r10,SPRN_DAR
385 mfspr r9,SPRN_DSISR
386 srdi r10,r10,60
387 rlwimi r10,r9,16,0x20
388 #ifdef CONFIG_KVM_BOOK3S_PR
/* Fold HSTATE_IN_GUEST into the selector so guest faults never match. */
389 lbz r9,HSTATE_IN_GUEST(r13)
390 rlwimi r10,r9,8,0x300
391 #endif
392 mfcr r9
393 cmpwi r10,0x2c
394 beq do_stab_bolted_pSeries
395 mtcrf 0x80,r9
396 ld r9,PACA_EXSLB+EX_R9(r13)
397 ld r10,PACA_EXSLB+EX_R10(r13)
398 b data_access_not_stab
399 do_stab_bolted_pSeries:
400 std r11,PACA_EXSLB+EX_R11(r13)
401 std r12,PACA_EXSLB+EX_R12(r13)
402 GET_SCRATCH0(r10)
403 std r10,PACA_EXSLB+EX_R13(r13)
404 EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
405
/* KVM test stubs for vectors whose prologs live above. */
406 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
407 KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
408 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
409 KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
410 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
411 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
412
413 #ifdef CONFIG_PPC_DENORMALISATION
/*
 * denorm_assist: fix up denormalised FP/VSX register state by moving
 * each register onto itself, then hrfid back to the interrupted
 * instruction (HSRR0 was already rewound by 4 at the 0x1500 vector).
 * On entry: r9 = saved CR, r10/r11/r12 free, r11 = resume address,
 * r9-r13 saved in PACA_EXGEN.
 */
414 denorm_assist:
415 BEGIN_FTR_SECTION
416 /*
417 * To denormalise we need to move a copy of the register to itself.
418 * For POWER6 do that here for all FP regs.
419 */
/* Enable FP with exceptions disabled (FE0=FE1=0) before touching FPRs. */
420 mfmsr r10
421 ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
422 xori r10,r10,(MSR_FE0|MSR_FE1)
423 mtmsrd r10
424 sync
425 fmr 0,0
426 fmr 1,1
427 fmr 2,2
428 fmr 3,3
429 fmr 4,4
430 fmr 5,5
431 fmr 6,6
432 fmr 7,7
433 fmr 8,8
434 fmr 9,9
435 fmr 10,10
436 fmr 11,11
437 fmr 12,12
438 fmr 13,13
439 fmr 14,14
440 fmr 15,15
441 fmr 16,16
442 fmr 17,17
443 fmr 18,18
444 fmr 19,19
445 fmr 20,20
446 fmr 21,21
447 fmr 22,22
448 fmr 23,23
449 fmr 24,24
450 fmr 25,25
451 fmr 26,26
452 fmr 27,27
453 fmr 28,28
454 fmr 29,29
455 fmr 30,30
456 fmr 31,31
457 FTR_SECTION_ELSE
458 /*
459 * To denormalise we need to move a copy of the register to itself.
460 * For POWER7 do that here for the first 32 VSX registers only.
461 */
462 mfmsr r10
463 oris r10,r10,MSR_VSX@h
464 mtmsrd r10
465 sync
466 XVCPSGNDP(0,0,0)
467 XVCPSGNDP(1,1,1)
468 XVCPSGNDP(2,2,2)
469 XVCPSGNDP(3,3,3)
470 XVCPSGNDP(4,4,4)
471 XVCPSGNDP(5,5,5)
472 XVCPSGNDP(6,6,6)
473 XVCPSGNDP(7,7,7)
474 XVCPSGNDP(8,8,8)
475 XVCPSGNDP(9,9,9)
476 XVCPSGNDP(10,10,10)
477 XVCPSGNDP(11,11,11)
478 XVCPSGNDP(12,12,12)
479 XVCPSGNDP(13,13,13)
480 XVCPSGNDP(14,14,14)
481 XVCPSGNDP(15,15,15)
482 XVCPSGNDP(16,16,16)
483 XVCPSGNDP(17,17,17)
484 XVCPSGNDP(18,18,18)
485 XVCPSGNDP(19,19,19)
486 XVCPSGNDP(20,20,20)
487 XVCPSGNDP(21,21,21)
488 XVCPSGNDP(22,22,22)
489 XVCPSGNDP(23,23,23)
490 XVCPSGNDP(24,24,24)
491 XVCPSGNDP(25,25,25)
492 XVCPSGNDP(26,26,26)
493 XVCPSGNDP(27,27,27)
494 XVCPSGNDP(28,28,28)
495 XVCPSGNDP(29,29,29)
496 XVCPSGNDP(30,30,30)
497 XVCPSGNDP(31,31,31)
498 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
/* Restore everything the 0x1500 prolog saved and retry the faulting
 * instruction via hrfid. */
499 mtspr SPRN_HSRR0,r11
500 mtcrf 0x80,r9
501 ld r9,PACA_EXGEN+EX_R9(r13)
502 RESTORE_PPR_PACA(PACA_EXGEN, r10)
503 ld r10,PACA_EXGEN+EX_R10(r13)
504 ld r11,PACA_EXGEN+EX_R11(r13)
505 ld r12,PACA_EXGEN+EX_R12(r13)
506 ld r13,PACA_EXGEN+EX_R13(r13)
507 HRFID
508 b .
509 #endif
510
.align 7
/* Out-of-line bodies for the trampolines at 0xe00-0xe80 above; the
 * "." first argument places each handler at the current location. */
512 /* moved from 0xe00 */
513 STD_EXCEPTION_HV(., 0xe02, h_data_storage)
514 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
515 STD_EXCEPTION_HV(., 0xe22, h_instr_storage)
516 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
517 STD_EXCEPTION_HV(., 0xe42, emulation_assist)
518 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
519 STD_EXCEPTION_HV(., 0xe62, hmi_exception) /* need to flush cache ? */
520 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
521 MASKABLE_EXCEPTION_HV(., 0xe82, h_doorbell)
522 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe82)
523
524 /* moved from 0xf00 */
525 STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
526 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
527 STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
528 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
529 STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
530 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
531
532 /*
533 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
534 * - If it was a decrementer interrupt, we bump the dec to max and and return.
535 * - If it was a doorbell we return immediately since doorbells are edge
536 * triggered and won't automatically refire.
537 * - else we hard disable and return.
538 * This is called with r10 containing the value to OR to the paca field.
539 */
/* Instantiated twice below: the standard (SRR0/SRR1, rfid) flavour and
 * the hypervisor (HSRR0/HSRR1, hrfid) flavour via the _H parameter. */
540 #define MASKED_INTERRUPT(_H) \
541 masked_##_H##interrupt: \
542 std r11,PACA_EXGEN+EX_R11(r13); \
543 lbz r11,PACAIRQHAPPENED(r13); \
544 or r11,r11,r10; \
545 stb r11,PACAIRQHAPPENED(r13); \
546 cmpwi r10,PACA_IRQ_DEC; \
547 bne 1f; \
548 lis r10,0x7fff; \
549 ori r10,r10,0xffff; \
550 mtspr SPRN_DEC,r10; \
551 b 2f; \
552 1: cmpwi r10,PACA_IRQ_DBELL; \
553 beq 2f; \
554 mfspr r10,SPRN_##_H##SRR1; \
555 rldicl r10,r10,48,1; /* clear MSR_EE */ \
556 rotldi r10,r10,16; \
557 mtspr SPRN_##_H##SRR1,r10; \
558 2: mtcrf 0x80,r9; \
559 ld r9,PACA_EXGEN+EX_R9(r13); \
560 ld r10,PACA_EXGEN+EX_R10(r13); \
561 ld r11,PACA_EXGEN+EX_R11(r13); \
562 GET_SCRATCH0(r13); \
563 ##_H##rfid; \
564 b .
565
566 MASKED_INTERRUPT()
567 MASKED_INTERRUPT(H)
568
569 /*
570 * Called from arch_local_irq_enable when an interrupt needs
571 * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
572 * which kind of interrupt. MSR:EE is already off. We generate a
573 * stackframe like if a real interrupt had happened.
574 *
575 * Note: While MSR:EE is off, we need to make sure that _MSR
576 * in the generated frame has EE set to 1 or the exception
577 * handler will not properly re-enable them.
578 */
579 _GLOBAL(__replay_interrupt)
580 /* We are going to jump to the exception common code which
581 * will retrieve various register values from the PACA which
582 * we don't give a damn about, so we don't bother storing them.
583 */
/* Fake the register state a real prolog would leave: r12 = MSR (with
 * EE forced on, see note above), r11 = return address, r9 = CR. */
584 mfmsr r12
585 mflr r11
586 mfcr r9
587 ori r12,r12,MSR_EE
588 cmpwi r3,0x900
589 beq decrementer_common
590 cmpwi r3,0x500
591 beq hardware_interrupt_common
/* Doorbells: HV CPUs replay 0xe80, others the 0xa00 super doorbell. */
592 BEGIN_FTR_SECTION
593 cmpwi r3,0xe80
594 beq h_doorbell_common
595 FTR_SECTION_ELSE
596 cmpwi r3,0xa00
597 beq doorbell_super_common
598 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
599 blr
600
601 #ifdef CONFIG_PPC_PSERIES
602 /*
603 * Vectors for the FWNMI option. Share common code.
604 */
605 .globl system_reset_fwnmi
606 .align 7
607 system_reset_fwnmi:
608 HMT_MEDIUM_PPR_DISCARD
609 SET_SCRATCH0(r13) /* save r13 */
610 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
611 NOTEST, 0x100)
612
613 #endif /* CONFIG_PPC_PSERIES */
614
615 #ifdef __DISABLED__
616 /*
617 * This is used for when the SLB miss handler has to go virtual,
618 * which doesn't happen for now anymore but will once we re-implement
619 * dynamic VSIDs for shared page tables
620 */
/* NOTE(review): compiled out (__DISABLED__); kept only as a template
 * for a future virtual-mode user SLB miss path. It moves the saved
 * state from PACA_EXSLB to PACA_EXGEN, then rfids to the virtual
 * handler with IR/DR/RI enabled. */
621 slb_miss_user_pseries:
622 std r10,PACA_EXGEN+EX_R10(r13)
623 std r11,PACA_EXGEN+EX_R11(r13)
624 std r12,PACA_EXGEN+EX_R12(r13)
625 GET_SCRATCH0(r10)
626 ld r11,PACA_EXSLB+EX_R9(r13)
627 ld r12,PACA_EXSLB+EX_R3(r13)
628 std r10,PACA_EXGEN+EX_R13(r13)
629 std r11,PACA_EXGEN+EX_R9(r13)
630 std r12,PACA_EXGEN+EX_R3(r13)
631 clrrdi r12,r13,32
632 mfmsr r10
633 mfspr r11,SRR0 /* save SRR0 */
634 ori r12,r12,slb_miss_user_common@l /* virt addr of handler */
635 ori r10,r10,MSR_IR|MSR_DR|MSR_RI
636 mtspr SRR0,r12
637 mfspr r12,SRR1 /* and SRR1 */
638 mtspr SRR1,r10
639 rfid
640 b . /* prevent spec. execution */
641 #endif /* __DISABLED__ */
642
643 /*
644 * Code from here down to __end_handlers is invoked from the
645 * exception prologs above. Because the prologs assemble the
646 * addresses of these handlers using the LOAD_HANDLER macro,
647 * which uses an ori instruction, these handlers must be in
648 * the first 64k of the kernel image.
649 */
650
651 /*** Common interrupt handlers ***/
652
653 STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
654
655 /*
656 * Machine check is different because we use a different
657 * save area: PACA_EXMC instead of PACA_EXGEN.
658 */
659 .align 7
660 .globl machine_check_common
661 machine_check_common:
662 EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
663 FINISH_NAP
664 DISABLE_INTS
665 bl .save_nvgprs
666 addi r3,r1,STACK_FRAME_OVERHEAD
667 bl .machine_check_exception
668 b .ret_from_except
669
/* Table of common handlers: trap number, frame name, C entry point.
 * _ASYNC variants are for interrupts that may arrive asynchronously. */
670 STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
671 STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
672 STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
673 #ifdef CONFIG_PPC_DOORBELL
674 STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .doorbell_exception)
675 #else
676 STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .unknown_exception)
677 #endif
678 STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
679 STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
680 STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
681 STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
682 STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
683 #ifdef CONFIG_PPC_DOORBELL
684 STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception)
685 #else
686 STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .unknown_exception)
687 #endif
688 STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
689 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
690 STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception)
691 #ifdef CONFIG_ALTIVEC
692 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
693 #else
694 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
695 #endif
696 #ifdef CONFIG_CBE_RAS
697 STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
698 STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
699 STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
700 #endif /* CONFIG_CBE_RAS */
701
702 /*
703 * Relocation-on interrupts: A subset of the interrupts can be delivered
704 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
705 * it. Addresses are the same as the original interrupt addresses, but
706 * offset by 0xc000000000004000.
707 * It's impossible to receive interrupts below 0x300 via this mechanism.
708 * KVM: None of these traps are from the guest ; anything that escalated
709 * to HV=1 from HV=0 is delivered via real mode handlers.
710 */
711
712 /*
713 * This uses the standard macro, since the original 0x300 vector
714 * only has extra guff for STAB-based processors -- which never
715 * come here.
716 */
717 STD_RELON_EXCEPTION_PSERIES(0x4300, 0x300, data_access)
/* 0x4380: relocation-on data SLB miss — mirrors the real-mode 0x380
 * handler but uses NOTEST (no KVM test, per the comment above). */
718 . = 0x4380
719 .globl data_access_slb_relon_pSeries
720 data_access_slb_relon_pSeries:
721 HMT_MEDIUM_PPR_DISCARD
722 SET_SCRATCH0(r13)
723 EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
724 std r3,PACA_EXSLB+EX_R3(r13)
725 mfspr r3,SPRN_DAR
726 mfspr r12,SPRN_SRR1
727 #ifndef CONFIG_RELOCATABLE
728 b .slb_miss_realmode
729 #else
730 /*
731 * We can't just use a direct branch to .slb_miss_realmode
732 * because the distance from here to there depends on where
733 * the kernel ends up being put.
734 */
735 mfctr r11
736 ld r10,PACAKBASE(r13)
737 LOAD_HANDLER(r10, .slb_miss_realmode)
738 mtctr r10
739 bctr
740 #endif
741
742 STD_RELON_EXCEPTION_PSERIES(0x4400, 0x400, instruction_access)
/* 0x4480: relocation-on instruction SLB miss; faulting address is SRR0. */
743 . = 0x4480
744 .globl instruction_access_slb_relon_pSeries
745 instruction_access_slb_relon_pSeries:
746 HMT_MEDIUM_PPR_DISCARD
747 SET_SCRATCH0(r13)
748 EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
749 std r3,PACA_EXSLB+EX_R3(r13)
750 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
751 mfspr r12,SPRN_SRR1
752 #ifndef CONFIG_RELOCATABLE
753 b .slb_miss_realmode
754 #else
755 mfctr r11
756 ld r10,PACAKBASE(r13)
757 LOAD_HANDLER(r10, .slb_miss_realmode)
758 mtctr r10
759 bctr
760 #endif
761
/* 0x4500-0x4b00: relocation-on twins of the real-mode vectors. */
762 . = 0x4500
763 .globl hardware_interrupt_relon_pSeries;
764 .globl hardware_interrupt_relon_hv;
765 hardware_interrupt_relon_pSeries:
766 hardware_interrupt_relon_hv:
767 BEGIN_FTR_SECTION
768 _MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
769 FTR_SECTION_ELSE
770 _MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
771 ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_206)
772 STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
773 STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
774 STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
775 MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer)
776 STD_RELON_EXCEPTION_HV(0x4980, 0x982, hdecrementer)
777 MASKABLE_RELON_EXCEPTION_PSERIES(0x4a00, 0xa00, doorbell_super)
778 STD_RELON_EXCEPTION_PSERIES(0x4b00, 0xb00, trap_0b)
779
/* 0x4c00: relocation-on system call; uses the direct-branch tail
 * (SYSCALL_PSERIES_2_DIRECT) instead of the rfid one. */
780 . = 0x4c00
781 .globl system_call_relon_pSeries
782 system_call_relon_pSeries:
783 HMT_MEDIUM
784 SYSCALL_PSERIES_1
785 SYSCALL_PSERIES_2_DIRECT
786 SYSCALL_PSERIES_3
787
788 STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)
789
/* 0x4e00-0x4e80: trampolines to the relocation-on HV handlers. */
790 . = 0x4e00
791 b h_data_storage_relon_hv
792
793 . = 0x4e20
794 b h_instr_storage_relon_hv
795
796 . = 0x4e40
797 b emulation_assist_relon_hv
798
799 . = 0x4e50
800 b hmi_exception_relon_hv
801
802 . = 0x4e60
803 b hmi_exception_relon_hv
804
805 . = 0x4e80
806 b h_doorbell_relon_hv
807
808 performance_monitor_relon_pSeries_1:
809 . = 0x4f00
810 b performance_monitor_relon_pSeries
811
812 altivec_unavailable_relon_pSeries_1:
813 . = 0x4f20
814 b altivec_unavailable_relon_pSeries
815
816 vsx_unavailable_relon_pSeries_1:
817 . = 0x4f40
818 b vsx_unavailable_relon_pSeries
819
820 STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
821 #ifdef CONFIG_PPC_DENORMALISATION
/* The relocation-on denorm vector just reuses the real-mode handler. */
822 . = 0x5500
823 b denorm_exception_hv
824 #endif
825 #ifdef CONFIG_HVC_SCOM
826 STD_RELON_EXCEPTION_HV(0x5600, 0x1600, maintence_interrupt)
827 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1600)
828 #endif /* CONFIG_HVC_SCOM */
829 STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)
830
831 /* Other future vectors */
832 .align 7
833 .globl __end_interrupts
834 __end_interrupts:
835
.align 7
/* Landing points for the SYSCALL_PSERIES_2_* tails; both funnel into
 * the generic system_call_common. */
837 system_call_entry_direct:
838 #if defined(CONFIG_RELOCATABLE)
839 /* The first level prologue may have used LR to get here, saving
840 * orig in r10. To save hacking/ifdeffing common code, restore here.
841 */
842 mtlr r10
843 #endif
844 system_call_entry:
845 b system_call_common
846
847 ppc64_runlatch_on_trampoline:
848 b .__ppc64_runlatch_on
849
850 /*
851 * Here we have detected that the kernel stack pointer is bad.
852 * R9 contains the saved CR, r13 points to the paca,
853 * r10 contains the (bad) kernel stack pointer,
854 * r11 and r12 contain the saved SRR0 and SRR1.
855 * We switch to using an emergency stack, save the registers there,
856 * and call kernel_bad_stack(), which panics.
857 */
858 bad_stack:
/* Build a full pt_regs frame on the per-CPU emergency stack. */
859 ld r1,PACAEMERGSP(r13)
860 subi r1,r1,64+INT_FRAME_SIZE
861 std r9,_CCR(r1)
862 std r10,GPR1(r1)
863 std r11,_NIP(r1)
864 std r12,_MSR(r1)
865 mfspr r11,SPRN_DAR
866 mfspr r12,SPRN_DSISR
867 std r11,_DAR(r1)
868 std r12,_DSISR(r1)
869 mflr r10
870 mfctr r11
871 mfxer r12
872 std r10,_LINK(r1)
873 std r11,_CTR(r1)
874 std r12,_XER(r1)
875 SAVE_GPR(0,r1)
876 SAVE_GPR(2,r1)
/* r3 points at the PACA exception save area the prolog used; recover
 * the r3/r9-r13 values that were stashed there. */
877 ld r10,EX_R3(r3)
878 std r10,GPR3(r1)
879 SAVE_GPR(4,r1)
880 SAVE_4GPRS(5,r1)
881 ld r9,EX_R9(r3)
882 ld r10,EX_R10(r3)
883 SAVE_2GPRS(9,r1)
884 ld r9,EX_R11(r3)
885 ld r10,EX_R12(r3)
886 ld r11,EX_R13(r3)
887 std r9,GPR11(r1)
888 std r10,GPR12(r1)
889 std r11,GPR13(r1)
890 BEGIN_FTR_SECTION
891 ld r10,EX_CFAR(r3)
892 std r10,ORIG_GPR3(r1)
893 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
894 SAVE_8GPRS(14,r1)
895 SAVE_10GPRS(22,r1)
896 lhz r12,PACA_TRAP_SAVE(r13)
897 std r12,_TRAP(r1)
/* Terminate the stack-frame back-chain and set the frame marker. */
898 addi r11,r1,INT_FRAME_SIZE
899 std r11,0(r1)
900 li r12,0
901 std r12,0(r11)
902 ld r2,PACATOC(r13)
903 ld r11,exception_marker@toc(r2)
904 std r12,RESULT(r1)
905 std r11,STACK_FRAME_OVERHEAD-16(r1)
906 1: addi r3,r1,STACK_FRAME_OVERHEAD
907 bl .kernel_bad_stack
908 b 1b
909
910 /*
911 * Here r13 points to the paca, r9 contains the saved CR,
912 * SRR0 and SRR1 are saved in r11 and r12,
913 * r9 - r13 are saved in paca->exgen.
914 */
915 .align 7
916 .globl data_access_common
917 data_access_common:
/* Capture DAR/DSISR before the common prolog can clobber them, then
 * hand off to the hash-fault path with r3 = address, r4 = DSISR,
 * r5 = trap number. */
918 mfspr r10,SPRN_DAR
919 std r10,PACA_EXGEN+EX_DAR(r13)
920 mfspr r10,SPRN_DSISR
921 stw r10,PACA_EXGEN+EX_DSISR(r13)
922 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
923 DISABLE_INTS
924 ld r12,_MSR(r1)
925 ld r3,PACA_EXGEN+EX_DAR(r13)
926 lwz r4,PACA_EXGEN+EX_DSISR(r13)
927 li r5,0x300
928 b .do_hash_page /* Try to handle as hpte fault */
929
930 .align 7
931 .globl h_data_storage_common
932 h_data_storage_common:
/* HV flavour: HDAR/HDSISR instead of DAR/DSISR; currently reported
 * as an unknown exception. */
933 mfspr r10,SPRN_HDAR
934 std r10,PACA_EXGEN+EX_DAR(r13)
935 mfspr r10,SPRN_HDSISR
936 stw r10,PACA_EXGEN+EX_DSISR(r13)
937 EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
938 bl .save_nvgprs
939 DISABLE_INTS
940 addi r3,r1,STACK_FRAME_OVERHEAD
941 bl .unknown_exception
942 b .ret_from_except
943
944 .align 7
945 .globl instruction_access_common
946 instruction_access_common:
947 EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
948 DISABLE_INTS
949 ld r12,_MSR(r1)
/* For ISIs the faulting address is the saved NIP and the fault bits
 * live in SRR1 (masked via andis.). */
950 ld r3,_NIP(r1)
951 andis. r4,r12,0x5820
952 li r5,0x400
953 b .do_hash_page /* Try to handle as hpte fault */
954
955 STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)
956
957 /*
958 * Here is the common SLB miss user that is used when going to virtual
959 * mode for SLB misses, that is currently not used
960 */
961 #ifdef __DISABLED__
/* NOTE(review): compiled out (__DISABLED__) — virtual-mode companion
 * of slb_miss_user_pseries above; preserved for a future dynamic-VSID
 * implementation. */
962 .align 7
963 .globl slb_miss_user_common
964 slb_miss_user_common:
965 mflr r10
966 std r3,PACA_EXGEN+EX_DAR(r13)
967 stw r9,PACA_EXGEN+EX_CCR(r13)
968 std r10,PACA_EXGEN+EX_LR(r13)
969 std r11,PACA_EXGEN+EX_SRR0(r13)
970 bl .slb_allocate_user
971
972 ld r10,PACA_EXGEN+EX_LR(r13)
973 ld r3,PACA_EXGEN+EX_R3(r13)
974 lwz r9,PACA_EXGEN+EX_CCR(r13)
975 ld r11,PACA_EXGEN+EX_SRR0(r13)
976 mtlr r10
977 beq- slb_miss_fault
978
979 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
980 beq- unrecov_user_slb
981 mfmsr r10
982
983 .machine push
984 .machine "power4"
985 mtcrf 0x80,r9
986 .machine pop
987
988 clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */
989 mtmsrd r10,1
990
991 mtspr SRR0,r11
992 mtspr SRR1,r12
993
994 ld r9,PACA_EXGEN+EX_R9(r13)
995 ld r10,PACA_EXGEN+EX_R10(r13)
996 ld r11,PACA_EXGEN+EX_R11(r13)
997 ld r12,PACA_EXGEN+EX_R12(r13)
998 ld r13,PACA_EXGEN+EX_R13(r13)
999 rfid
1000 b .
1001
1002 slb_miss_fault:
1003 EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
1004 ld r4,PACA_EXGEN+EX_DAR(r13)
1005 li r5,0
1006 std r4,_DAR(r1)
1007 std r5,_DSISR(r1)
1008 b handle_page_fault
1009
1010 unrecov_user_slb:
1011 EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
1012 DISABLE_INTS
1013 bl .save_nvgprs
1014 1: addi r3,r1,STACK_FRAME_OVERHEAD
1015 bl .unrecoverable_exception
1016 b 1b
1017
1018 #endif /* __DISABLED__ */
1019
1020
1021 /*
1022 * r13 points to the PACA, r9 contains the saved CR,
1023 * r12 contain the saved SRR1, SRR0 is still ready for return
1024 * r3 has the faulting address
1025 * r9 - r13 are saved in paca->exslb.
1026 * r3 is saved in paca->slb_r3
1027 * We assume we aren't going to take any exceptions during this procedure.
1028 */
1029 _GLOBAL(slb_miss_realmode)
1030 mflr r10
1031 #ifdef CONFIG_RELOCATABLE
/* Relocatable entry arrived via bctr; r11 holds the caller's CTR,
 * which must be restored before we clobber it. */
1032 mtctr r11
1033 #endif
1034
1035 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1036 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
1037
1038 bl .slb_allocate_realmode
1039
1040 /* All done -- return from exception. */
1041
1042 ld r10,PACA_EXSLB+EX_LR(r13)
1043 ld r3,PACA_EXSLB+EX_R3(r13)
1044 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1045
1046 mtlr r10
1047
1048 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
1049 beq- 2f
1050
1051 .machine push
1052 .machine "power4"
1053 mtcrf 0x80,r9
1054 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
1055 .machine pop
1056
1057 RESTORE_PPR_PACA(PACA_EXSLB, r9)
1058 ld r9,PACA_EXSLB+EX_R9(r13)
1059 ld r10,PACA_EXSLB+EX_R10(r13)
1060 ld r11,PACA_EXSLB+EX_R11(r13)
1061 ld r12,PACA_EXSLB+EX_R12(r13)
1062 ld r13,PACA_EXSLB+EX_R13(r13)
1063 rfid
1064 b . /* prevent speculative execution */
1065
/* Unrecoverable (RI was clear): redirect SRR0/SRR1 to unrecov_slb. */
1066 2: mfspr r11,SPRN_SRR0
1067 ld r10,PACAKBASE(r13)
1068 LOAD_HANDLER(r10,unrecov_slb)
1069 mtspr SPRN_SRR0,r10
1070 ld r10,PACAKMSR(r13)
1071 mtspr SPRN_SRR1,r10
1072 rfid
1073 b .
1074
1075 unrecov_slb:
1076 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1077 DISABLE_INTS
1078 bl .save_nvgprs
1079 1: addi r3,r1,STACK_FRAME_OVERHEAD
1080 bl .unrecoverable_exception
1081 b 1b
1082
1083
1084 #ifdef CONFIG_PPC_970_NAP
/* Rewrite the saved NIP to the saved LR so the idle task effectively
 * returns (acts like a blr) when the exception exits. */
1085 power4_fixup_nap:
1086 andc r9,r9,r10
1087 std r9,TI_LOCAL_FLAGS(r11)
1088 ld r10,_LINK(r1) /* make idle task do the */
1089 std r10,_NIP(r1) /* equivalent of a blr */
1090 blr
1091 #endif
1092
1093 .align 7
1094 .globl alignment_common
1095 alignment_common: /* 0x600 alignment interrupt, common (virtual-mode) handler */
1096 mfspr r10,SPRN_DAR /* capture DAR/DSISR into the PACA before the prolog */
1097 std r10,PACA_EXGEN+EX_DAR(r13)
1098 mfspr r10,SPRN_DSISR
1099 stw r10,PACA_EXGEN+EX_DSISR(r13) /* DSISR is 32-bit, hence stw/lwz */
1100 EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
1101 ld r3,PACA_EXGEN+EX_DAR(r13)
1102 lwz r4,PACA_EXGEN+EX_DSISR(r13)
1103 std r3,_DAR(r1) /* publish fault address/status in pt_regs */
1104 std r4,_DSISR(r1)
1105 bl .save_nvgprs
1106 DISABLE_INTS
1107 addi r3,r1,STACK_FRAME_OVERHEAD /* r3 = pt_regs */
1108 bl .alignment_exception
1109 b .ret_from_except
1110
1111 .align 7
1112 .globl program_check_common
1113 program_check_common: /* 0x700 program-check interrupt, common handler */
1114 EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
1115 bl .save_nvgprs /* full GPR set needed: handler may generate a signal */
1116 DISABLE_INTS
1117 addi r3,r1,STACK_FRAME_OVERHEAD /* r3 = pt_regs */
1118 bl .program_check_exception
1119 b .ret_from_except
1120
1121 .align 7
1122 .globl fp_unavailable_common
1123 fp_unavailable_common: /* 0x800 FP-unavailable interrupt, common handler */
1124 EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
1125 bne 1f /* if from user, just load it up */
1126 bl .save_nvgprs /* from kernel: FP use is a bug, report it */
1127 DISABLE_INTS
1128 addi r3,r1,STACK_FRAME_OVERHEAD /* r3 = pt_regs */
1129 bl .kernel_fp_unavailable_exception
1130 BUG_OPCODE /* handler should not return */
1131 1: bl .load_up_fpu /* restore/enable the FP state for userspace */
1132 b fast_exception_return
1133
1134 .align 7
1135 .globl altivec_unavailable_common
1136 altivec_unavailable_common: /* 0xf20 AltiVec-unavailable interrupt, common handler */
1137 EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
1138 #ifdef CONFIG_ALTIVEC
1139 BEGIN_FTR_SECTION
1140 beq 1f /* from kernel: fall through to the exception report */
1141 bl .load_up_altivec /* from user: restore/enable VMX state */
1142 b fast_exception_return
1143 1:
1144 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1145 #endif
1146 bl .save_nvgprs /* no AltiVec (or kernel use): raise the exception */
1147 DISABLE_INTS
1148 addi r3,r1,STACK_FRAME_OVERHEAD /* r3 = pt_regs */
1149 bl .altivec_unavailable_exception
1150 b .ret_from_except
1151
1152 .align 7
1153 .globl vsx_unavailable_common
1154 vsx_unavailable_common: /* 0xf40 VSX-unavailable interrupt, common handler */
1155 EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
1156 #ifdef CONFIG_VSX
1157 BEGIN_FTR_SECTION
1158 beq 1f /* from kernel: fall through to the exception report */
1159 b .load_up_vsx /* NOTE(review): tail branch, not bl -- load_up_vsx presumably completes the exception return itself; confirm */
1160 1:
1161 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
1162 #endif
1163 bl .save_nvgprs /* no VSX (or kernel use): raise the exception */
1164 DISABLE_INTS
1165 addi r3,r1,STACK_FRAME_OVERHEAD /* r3 = pt_regs */
1166 bl .vsx_unavailable_exception
1167 b .ret_from_except
1168
1169 .align 7
1170 .globl __end_handlers
1171 __end_handlers:
1172
1173 /* Equivalents to the above handlers for relocation-on interrupt vectors */
1174 STD_RELON_EXCEPTION_HV(., 0xe00, h_data_storage) /* HV data storage */
1175 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe00)
1176 STD_RELON_EXCEPTION_HV(., 0xe20, h_instr_storage) /* HV instruction storage */
1177 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe20)
1178 STD_RELON_EXCEPTION_HV(., 0xe40, emulation_assist)
1179 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe40)
1180 STD_RELON_EXCEPTION_HV(., 0xe60, hmi_exception) /* hypervisor maintenance */
1181 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe60)
1182 MASKABLE_RELON_EXCEPTION_HV(., 0xe80, h_doorbell) /* maskable: honors soft-disable */
1183 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe80)
1184
1185 STD_RELON_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
1186 STD_RELON_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
1187 STD_RELON_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
1188
1189 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
1190 /*
1191 * Data area reserved for FWNMI option.
1192 * This address (0x7000) is fixed by the RPA.
1193 */
1194 . = 0x7000 /* advance the location counter to the architected address */
1195 .globl fwnmi_data_area
1196 fwnmi_data_area:
1197
1198 /* pseries and powernv need to keep the whole page from
1199 * 0x7000 to 0x8000 free for use by the firmware
1200 */
1201 . = 0x8000 /* skip the rest of the firmware page */
1202 #endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
1203
1204 /* Space for CPU0's segment table */
1205 .balign 4096 /* the stab must be page-aligned */
1206 .globl initial_stab
1207 initial_stab:
1208 .space 4096 /* reserve one zero-filled 4K page */
1209
1210 #ifdef CONFIG_PPC_POWERNV
1211 _GLOBAL(opal_mc_secondary_handler) /* OPAL machine-check entry: r3 = event struct (tagged real address) */
1212 HMT_MEDIUM_PPR_DISCARD
1213 SET_SCRATCH0(r13) /* stash caller's r13 so it can be restored below */
1214 GET_PACA(r13)
1215 clrldi r3,r3,2 /* NOTE(review): clears top 2 bits of the event pointer -- presumably strips a real-mode address tag; confirm against OPAL */
1216 tovirt(r3,r3) /* convert to a kernel virtual address */
1217 std r3,PACA_OPAL_MC_EVT(r13) /* record the event for the C handler */
1218 ld r13,OPAL_MC_SRR0(r3) /* reload interrupted SRR0/SRR1 from the event */
1219 mtspr SPRN_SRR0,r13
1220 ld r13,OPAL_MC_SRR1(r3)
1221 mtspr SPRN_SRR1,r13
1222 ld r3,OPAL_MC_GPR3(r3) /* restore the interrupted context's r3 */
1223 GET_SCRATCH0(r13) /* restore caller's r13 */
1224 b machine_check_pSeries /* continue via the normal MCE path */
1225 #endif /* CONFIG_PPC_POWERNV */
1226
1227
1228 /*
1229 * Hash table stuff
1230 */
1231 .align 7
1232 _STATIC(do_hash_page) /* in: r3 = fault address (DAR), r4 = DSISR bits, r5 = trap number, r12 = interrupted MSR */
1233 std r3,_DAR(r1)
1234 std r4,_DSISR(r1)
1235
1236 andis. r0,r4,0xa410 /* any "weird" error bits set in DSISR? */
1237 bne- handle_page_fault /* yes: skip HPTE insertion, go straight to the fault path */
1238 andis. r0,r4,DSISR_DABRMATCH@h
1239 bne- handle_dabr_fault /* data breakpoint hit */
1240
1241 BEGIN_FTR_SECTION
1242 andis. r0,r4,0x0020 /* Is it a segment table fault? */
1243 bne- do_ste_alloc /* If so handle it */
1244 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
1245
1246 CURRENT_THREAD_INFO(r11, r1)
1247 lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
1248 andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
1249 bne 77f /* then don't call hash_page now */
1250 /*
1251 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
1252 * accessing a userspace segment (even from the kernel). We assume
1253 * kernel addresses always have the high bit set.
1254 */
1255 rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
1256 rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
1257 orc r0,r12,r0 /* MSR_PR | ~high_bit */
1258 rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
1259 ori r4,r4,1 /* add _PAGE_PRESENT */
1260 rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */
1261
1262 /*
1263 * r3 contains the faulting address
1264 * r4 contains the required access permissions
1265 * r5 contains the trap number
1266 *
1267 * at return r3 = 0 for success, 1 for page fault, negative for error
1268 */
1269 bl .hash_page /* build HPTE if possible */
1270 cmpdi r3,0 /* see if hash_page succeeded */
1271
1272 /* Success */
1273 beq fast_exc_return_irq /* Return from exception on success */
1274
1275 /* Error */
1276 blt- 13f /* negative: HV refused the insert (low_hash_fault) */
1277 /* r3 > 0 (page fault): fall through into handle_page_fault */
1278 /* Here we have a page fault that hash_page can't handle. */
1279 handle_page_fault:
1280 11: ld r4,_DAR(r1) /* r4 = fault address */
1281 ld r5,_DSISR(r1) /* r5 = fault status */
1282 addi r3,r1,STACK_FRAME_OVERHEAD /* r3 = pt_regs */
1283 bl .do_page_fault
1284 cmpdi r3,0 /* 0 => fault resolved */
1285 beq+ 12f /* common case: resume via ret_from_except_lite (label 12) */
1286 bl .save_nvgprs /* failure: full register set for the signal/oops path */
1287 mr r5,r3 /* r5 = do_page_fault() result -- presumably a signal code; TODO confirm */
1288 addi r3,r1,STACK_FRAME_OVERHEAD
1289 lwz r4,_DAR(r1)
1290 bl .bad_page_fault
1291 b .ret_from_except
1292
1293 /* We have a data breakpoint exception - handle it */
1294 handle_dabr_fault:
1295 bl .save_nvgprs /* breakpoint handler may deliver a signal */
1296 ld r4,_DAR(r1) /* r4 = breakpoint address */
1297 ld r5,_DSISR(r1)
1298 addi r3,r1,STACK_FRAME_OVERHEAD /* r3 = pt_regs */
1299 bl .do_break
1300 12: b .ret_from_except_lite /* shared resume point (also used by handle_page_fault) */
1301
1302
1303 /* We have a page fault that hash_page could handle but HV refused
1304 * the PTE insertion
1305 */
1306 13: bl .save_nvgprs
1307 mr r5,r3 /* r5 = negative hash_page() error code */
1308 addi r3,r1,STACK_FRAME_OVERHEAD /* r3 = pt_regs */
1309 ld r4,_DAR(r1) /* r4 = fault address */
1310 bl .low_hash_fault
1311 b .ret_from_except
1312
1313 /*
1314 * We come here as a result of a DSI at a point where we don't want
1315 * to call hash_page, such as when we are accessing memory (possibly
1316 * user memory) inside a PMU interrupt that occurred while interrupts
1317 * were soft-disabled. We want to invoke the exception handler for
1318 * the access, or panic if there isn't a handler.
1319 */
1320 77: bl .save_nvgprs
1321 mr r4,r3 /* r4 = fault address (hash_page args still live here) */
1322 addi r3,r1,STACK_FRAME_OVERHEAD /* r3 = pt_regs */
1323 li r5,SIGSEGV /* treat an unhandled access as SIGSEGV */
1324 bl .bad_page_fault
1325 b .ret_from_except
1326
1327 /* here we have a segment miss */
1328 do_ste_alloc:
1329 bl .ste_allocate /* try to insert stab entry */
1330 cmpdi r3,0 /* nonzero => could not allocate */
1331 bne- handle_page_fault
1332 b fast_exception_return
1333
1334 /*
1335 * Bolted segment-table miss handler (runs in real mode).
1336 * r13 points to the PACA, r9 contains the saved CR,
1337 * r11 and r12 contain the saved SRR0 and SRR1.
1338 * r9 - r13 are saved in paca->exslb.
1339 * We assume we aren't going to take any exceptions during this procedure.
1340 * We assume (DAR >> 60) == 0xc.
1341 */
1342 .align 7
1343 _GLOBAL(do_stab_bolted)
1344 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1345 std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
1346
1347 /* Hash to the primary group */
1348 ld r10,PACASTABVIRT(r13) /* base of this CPU's segment table */
1349 mfspr r11,SPRN_DAR
1350 srdi r11,r11,28 /* r11 = ESID of the faulting address */
1351 rldimi r10,r11,7,52 /* r10 = first ste of the group */
1352
1353 /* Calculate VSID */
1354 /* This is a kernel address, so protovsid = ESID | 1 << 37 */
1355 li r9,0x1
1356 rldimi r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
1357 ASM_VSID_SCRAMBLE(r11, r9, 256M)
1358 rldic r9,r11,12,16 /* r9 = vsid << 12 */
1359
1360 /* Search the primary group for a free entry */
1361 1: ld r11,0(r10) /* Test valid bit of the current ste */
1362 andi. r11,r11,0x80
1363 beq 2f /* free slot found */
1364 addi r10,r10,16 /* each ste is 16 bytes */
1365 andi. r11,r10,0x70 /* still inside the 8-entry group? */
1366 bne 1b
1367
1368 /* Stick for only searching the primary group for now. */
1369 /* At least for now, we use a very simple random castout scheme */
1370 /* Use the TB as a random number ; OR in 1 to avoid entry 0 */
1371 mftb r11
1372 rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */
1373 ori r11,r11,0x10 /* never evict entry 0 (bolted) */
1374
1375 /* r10 currently points to an ste one past the group of interest */
1376 /* make it point to the randomly selected entry */
1377 subi r10,r10,128 /* back to the start of the group (8 * 16 bytes) */
1378 or r10,r10,r11 /* r10 is the entry to invalidate */
1379
1380 isync /* mark the entry invalid */
1381 ld r11,0(r10)
1382 rldicl r11,r11,56,1 /* clear the valid bit */
1383 rotldi r11,r11,8
1384 std r11,0(r10)
1385 sync /* make the invalidation visible before slbie */
1386
1387 clrrdi r11,r11,28 /* Get the esid part of the ste */
1388 slbie r11 /* flush any cached translation for the old esid */
1389
1390 2: std r9,8(r10) /* Store the vsid part of the ste */
1391 eieio /* order vsid store before the valid-bit store below */
1392
1393 mfspr r11,SPRN_DAR /* Get the new esid */
1394 clrrdi r11,r11,28 /* Permits a full 32b of ESID */
1395 ori r11,r11,0x90 /* Turn on valid and kp */
1396 std r11,0(r10) /* Put new entry back into the stab */
1397
1398 sync /* ensure the new entry is globally visible */
1399
1400 /* All done -- return from exception. */
1401 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1402 ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
1403
1404 andi. r10,r12,MSR_RI /* interrupted state recoverable? */
1405 beq- unrecov_slb
1406
1407 mtcrf 0x80,r9 /* restore CR */
1408
1409 mfmsr r10
1410 clrrdi r10,r10,2 /* clear low MSR bits incl. RI */
1411 mtmsrd r10,1 /* L=1: updates only EE/RI, so SRR0/SRR1 below can't be clobbered */
1412
1413 mtspr SPRN_SRR0,r11
1414 mtspr SPRN_SRR1,r12
1415 ld r9,PACA_EXSLB+EX_R9(r13)
1416 ld r10,PACA_EXSLB+EX_R10(r13)
1417 ld r11,PACA_EXSLB+EX_R11(r13)
1418 ld r12,PACA_EXSLB+EX_R12(r13)
1419 ld r13,PACA_EXSLB+EX_R13(r13) /* r13 (PACA pointer) restored last */
1420 rfid
1421 b . /* prevent speculative execution */