]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame_incremental - arch/powerpc/kernel/exceptions-64s.S
powerpc: Make load_handler handle up to 64k offset
[mirror_ubuntu-zesty-kernel.git] / arch / powerpc / kernel / exceptions-64s.S
... / ...
CommitLineData
1/*
2 * This file contains the 64-bit "server" PowerPC variant
3 * of the low level exception handling including exception
4 * vectors, exception return, part of the slb and stab
5 * handling and other fixed offset specific things.
6 *
7 * This file is meant to be #included from head_64.S due to
8 * position dependent assembly.
9 *
10 * Most of this originates from head_64.S and thus has the same
11 * copyright history.
12 *
13 */
14
15#include <asm/hw_irq.h>
16#include <asm/exception-64s.h>
17#include <asm/ptrace.h>
18
19/*
20 * We layout physical memory as follows:
21 * 0x0000 - 0x00ff : Secondary processor spin code
22 * 0x0100 - 0x2fff : pSeries Interrupt prologs
23 * 0x3000 - 0x5fff : interrupt support common interrupt prologs
24 * 0x6000 - 0x6fff : Initial (CPU0) segment table
25 * 0x7000 - 0x7fff : FWNMI data area
26 * 0x8000 - : Early init and support code
27 */
28
29/*
30 * This is the start of the interrupt handlers for pSeries
31 * This code runs with relocation off.
32 * Code from here to __end_interrupts gets copied down to real
33 * address 0x100 when we are running a relocatable kernel.
34 * Therefore any relative branches in this section must only
35 * branch to labels in this section.
36 */
37 . = 0x100
38 .globl __start_interrupts
39__start_interrupts:
40
41 .globl system_reset_pSeries;
42system_reset_pSeries:
43 HMT_MEDIUM;
44 SET_SCRATCH0(r13)
45#ifdef CONFIG_PPC_P7_NAP
46BEGIN_FTR_SECTION
47 /* Running native on arch 2.06 or later, check if we are
48 * waking up from nap. We only handle no state loss and
49 * supervisor state loss. We do -not- handle hypervisor
50 * state loss at this time.
51 */
52 mfspr r13,SPRN_SRR1
53 rlwinm. r13,r13,47-31,30,31
54 beq 9f
55
56 /* waking up from powersave (nap) state */
57 cmpwi cr1,r13,2
58 /* Total loss of HV state is fatal, we could try to use the
59 * PIR to locate a PACA, then use an emergency stack etc...
60 * but for now, let's just stay stuck here
61 */
62 bgt cr1,.
63 GET_PACA(r13)
64
65#ifdef CONFIG_KVM_BOOK3S_64_HV
66 li r0,KVM_HWTHREAD_IN_KERNEL
67 stb r0,HSTATE_HWTHREAD_STATE(r13)
68 /* Order setting hwthread_state vs. testing hwthread_req */
69 sync
70 lbz r0,HSTATE_HWTHREAD_REQ(r13)
71 cmpwi r0,0
72 beq 1f
73 b kvm_start_guest
741:
75#endif
76
77 beq cr1,2f
78 b .power7_wakeup_noloss
792: b .power7_wakeup_loss
809:
81END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
82#endif /* CONFIG_PPC_P7_NAP */
83 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
84 NOTEST, 0x100)
85
86 . = 0x200
87machine_check_pSeries_1:
88 /* This is moved out of line as it can be patched by FW, but
89 * some code path might still want to branch into the original
90 * vector
91 */
92 b machine_check_pSeries
93
94 . = 0x300
95 .globl data_access_pSeries
96data_access_pSeries:
97 HMT_MEDIUM
98 SET_SCRATCH0(r13)
99BEGIN_FTR_SECTION
100 b data_access_check_stab
101data_access_not_stab:
102END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
103 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
104 KVMTEST, 0x300)
105
106 . = 0x380
107 .globl data_access_slb_pSeries
108data_access_slb_pSeries:
109 HMT_MEDIUM
110 SET_SCRATCH0(r13)
111 EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
112 std r3,PACA_EXSLB+EX_R3(r13)
113 mfspr r3,SPRN_DAR
114#ifdef __DISABLED__
115 /* Keep that around for when we re-implement dynamic VSIDs */
116 cmpdi r3,0
117 bge slb_miss_user_pseries
118#endif /* __DISABLED__ */
119 mfspr r12,SPRN_SRR1
120#ifndef CONFIG_RELOCATABLE
121 b .slb_miss_realmode
122#else
123 /*
124 * We can't just use a direct branch to .slb_miss_realmode
125 * because the distance from here to there depends on where
126 * the kernel ends up being put.
127 */
128 mfctr r11
129 ld r10,PACAKBASE(r13)
130 LOAD_HANDLER(r10, .slb_miss_realmode)
131 mtctr r10
132 bctr
133#endif
134
135 STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)
136
137 . = 0x480
138 .globl instruction_access_slb_pSeries
139instruction_access_slb_pSeries:
140 HMT_MEDIUM
141 SET_SCRATCH0(r13)
142 EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
143 std r3,PACA_EXSLB+EX_R3(r13)
144 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
145#ifdef __DISABLED__
146 /* Keep that around for when we re-implement dynamic VSIDs */
147 cmpdi r3,0
148 bge slb_miss_user_pseries
149#endif /* __DISABLED__ */
150 mfspr r12,SPRN_SRR1
151#ifndef CONFIG_RELOCATABLE
152 b .slb_miss_realmode
153#else
154 mfctr r11
155 ld r10,PACAKBASE(r13)
156 LOAD_HANDLER(r10, .slb_miss_realmode)
157 mtctr r10
158 bctr
159#endif
160
161 /* We open code these as we can't have a ". = x" (even with
162 * x = "." within a feature section
163 */
164 . = 0x500;
165 .globl hardware_interrupt_pSeries;
166 .globl hardware_interrupt_hv;
167hardware_interrupt_pSeries:
168hardware_interrupt_hv:
169 BEGIN_FTR_SECTION
170 _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
171 EXC_HV, SOFTEN_TEST_HV)
172 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
173 FTR_SECTION_ELSE
174 _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
175 EXC_STD, SOFTEN_TEST_HV_201)
176 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
177 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
178
179 STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
180 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
181
182 STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
183 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
184
185 STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
186 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
187
188 MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
189 STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
190
191 STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
192 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
193
194 STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
195 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
196
197 . = 0xc00
198 .globl system_call_pSeries
199system_call_pSeries:
200 HMT_MEDIUM
201#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
202 SET_SCRATCH0(r13)
203 GET_PACA(r13)
204 std r9,PACA_EXGEN+EX_R9(r13)
205 std r10,PACA_EXGEN+EX_R10(r13)
206 mfcr r9
207 KVMTEST(0xc00)
208 GET_SCRATCH0(r13)
209#endif
210BEGIN_FTR_SECTION
211 cmpdi r0,0x1ebe
212 beq- 1f
213END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
214 mr r9,r13
215 GET_PACA(r13)
216 mfspr r11,SPRN_SRR0
217 mfspr r12,SPRN_SRR1
218 ld r10,PACAKBASE(r13)
219 LOAD_HANDLER(r10, system_call_entry)
220 mtspr SPRN_SRR0,r10
221 ld r10,PACAKMSR(r13)
222 mtspr SPRN_SRR1,r10
223 rfid
224 b . /* prevent speculative execution */
225
226 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
227
228/* Fast LE/BE switch system call */
2291: mfspr r12,SPRN_SRR1
230 xori r12,r12,MSR_LE
231 mtspr SPRN_SRR1,r12
232 rfid /* return to userspace */
233 b .
234
235 STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
236 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
237
238 /* At 0xe??? we have a bunch of hypervisor exceptions, we branch
239 * out of line to handle them
240 */
241 . = 0xe00
242hv_exception_trampoline:
243 b h_data_storage_hv
244 . = 0xe20
245 b h_instr_storage_hv
246 . = 0xe40
247 b emulation_assist_hv
248 . = 0xe50
249 b hmi_exception_hv
250 . = 0xe60
251 b hmi_exception_hv
252
253 /* We need to deal with the Altivec unavailable exception
254 * here which is at 0xf20, thus in the middle of the
255 * prolog code of the PerformanceMonitor one. A little
256 * trickery is thus necessary
257 */
258performance_monitor_pSeries_1:
259 . = 0xf00
260 b performance_monitor_pSeries
261
262altivec_unavailable_pSeries_1:
263 . = 0xf20
264 b altivec_unavailable_pSeries
265
266vsx_unavailable_pSeries_1:
267 . = 0xf40
268 b vsx_unavailable_pSeries
269
270#ifdef CONFIG_CBE_RAS
271 STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
272 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
273#endif /* CONFIG_CBE_RAS */
274
275 STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
276 KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
277
278 . = 0x1500
279 .global denorm_exception_hv
280denorm_exception_hv:
281 HMT_MEDIUM
282 mtspr SPRN_SPRG_HSCRATCH0,r13
283 mfspr r13,SPRN_SPRG_HPACA
284 std r9,PACA_EXGEN+EX_R9(r13)
285 std r10,PACA_EXGEN+EX_R10(r13)
286 std r11,PACA_EXGEN+EX_R11(r13)
287 std r12,PACA_EXGEN+EX_R12(r13)
288 mfspr r9,SPRN_SPRG_HSCRATCH0
289 std r9,PACA_EXGEN+EX_R13(r13)
290 mfcr r9
291
292#ifdef CONFIG_PPC_DENORMALISATION
293 mfspr r10,SPRN_HSRR1
294 mfspr r11,SPRN_HSRR0 /* save HSRR0 */
295 andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */
296 addi r11,r11,-4 /* HSRR0 is next instruction */
297 bne+ denorm_assist
298#endif
299
300 EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
301 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)
302
303#ifdef CONFIG_CBE_RAS
304 STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
305 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
306#endif /* CONFIG_CBE_RAS */
307
308 STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
309 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)
310
311#ifdef CONFIG_CBE_RAS
312 STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
313 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
314#else
315 . = 0x1800
316#endif /* CONFIG_CBE_RAS */
317
318
319/*** Out of line interrupts support ***/
320
321 .align 7
322 /* moved from 0x200 */
323machine_check_pSeries:
324 .globl machine_check_fwnmi
325machine_check_fwnmi:
326 HMT_MEDIUM
327 SET_SCRATCH0(r13) /* save r13 */
328 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common,
329 EXC_STD, KVMTEST, 0x200)
330 KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
331
332 /* moved from 0x300 */
333data_access_check_stab:
334 GET_PACA(r13)
335 std r9,PACA_EXSLB+EX_R9(r13)
336 std r10,PACA_EXSLB+EX_R10(r13)
337 mfspr r10,SPRN_DAR
338 mfspr r9,SPRN_DSISR
339 srdi r10,r10,60
340 rlwimi r10,r9,16,0x20
341#ifdef CONFIG_KVM_BOOK3S_PR
342 lbz r9,HSTATE_IN_GUEST(r13)
343 rlwimi r10,r9,8,0x300
344#endif
345 mfcr r9
346 cmpwi r10,0x2c
347 beq do_stab_bolted_pSeries
348 mtcrf 0x80,r9
349 ld r9,PACA_EXSLB+EX_R9(r13)
350 ld r10,PACA_EXSLB+EX_R10(r13)
351 b data_access_not_stab
352do_stab_bolted_pSeries:
353 std r11,PACA_EXSLB+EX_R11(r13)
354 std r12,PACA_EXSLB+EX_R12(r13)
355 GET_SCRATCH0(r10)
356 std r10,PACA_EXSLB+EX_R13(r13)
357 EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
358
359 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
360 KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
361 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
362 KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
363 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
364 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
365
366#ifdef CONFIG_PPC_DENORMALISATION
367denorm_assist:
368BEGIN_FTR_SECTION
369/*
370 * To denormalise we need to move a copy of the register to itself.
371 * For POWER6 do that here for all FP regs.
372 */
373 mfmsr r10
374 ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
375 xori r10,r10,(MSR_FE0|MSR_FE1)
376 mtmsrd r10
377 sync
378 fmr 0,0
379 fmr 1,1
380 fmr 2,2
381 fmr 3,3
382 fmr 4,4
383 fmr 5,5
384 fmr 6,6
385 fmr 7,7
386 fmr 8,8
387 fmr 9,9
388 fmr 10,10
389 fmr 11,11
390 fmr 12,12
391 fmr 13,13
392 fmr 14,14
393 fmr 15,15
394 fmr 16,16
395 fmr 17,17
396 fmr 18,18
397 fmr 19,19
398 fmr 20,20
399 fmr 21,21
400 fmr 22,22
401 fmr 23,23
402 fmr 24,24
403 fmr 25,25
404 fmr 26,26
405 fmr 27,27
406 fmr 28,28
407 fmr 29,29
408 fmr 30,30
409 fmr 31,31
410FTR_SECTION_ELSE
411/*
412 * To denormalise we need to move a copy of the register to itself.
413 * For POWER7 do that here for the first 32 VSX registers only.
414 */
415 mfmsr r10
416 oris r10,r10,MSR_VSX@h
417 mtmsrd r10
418 sync
419 XVCPSGNDP(0,0,0)
420 XVCPSGNDP(1,1,1)
421 XVCPSGNDP(2,2,2)
422 XVCPSGNDP(3,3,3)
423 XVCPSGNDP(4,4,4)
424 XVCPSGNDP(5,5,5)
425 XVCPSGNDP(6,6,6)
426 XVCPSGNDP(7,7,7)
427 XVCPSGNDP(8,8,8)
428 XVCPSGNDP(9,9,9)
429 XVCPSGNDP(10,10,10)
430 XVCPSGNDP(11,11,11)
431 XVCPSGNDP(12,12,12)
432 XVCPSGNDP(13,13,13)
433 XVCPSGNDP(14,14,14)
434 XVCPSGNDP(15,15,15)
435 XVCPSGNDP(16,16,16)
436 XVCPSGNDP(17,17,17)
437 XVCPSGNDP(18,18,18)
438 XVCPSGNDP(19,19,19)
439 XVCPSGNDP(20,20,20)
440 XVCPSGNDP(21,21,21)
441 XVCPSGNDP(22,22,22)
442 XVCPSGNDP(23,23,23)
443 XVCPSGNDP(24,24,24)
444 XVCPSGNDP(25,25,25)
445 XVCPSGNDP(26,26,26)
446 XVCPSGNDP(27,27,27)
447 XVCPSGNDP(28,28,28)
448 XVCPSGNDP(29,29,29)
449 XVCPSGNDP(30,30,30)
450 XVCPSGNDP(31,31,31)
451ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
452 mtspr SPRN_HSRR0,r11
453 mtcrf 0x80,r9
454 ld r9,PACA_EXGEN+EX_R9(r13)
455 ld r10,PACA_EXGEN+EX_R10(r13)
456 ld r11,PACA_EXGEN+EX_R11(r13)
457 ld r12,PACA_EXGEN+EX_R12(r13)
458 ld r13,PACA_EXGEN+EX_R13(r13)
459 HRFID
460 b .
461#endif
462
463 .align 7
464 /* moved from 0xe00 */
465 STD_EXCEPTION_HV(., 0xe02, h_data_storage)
466 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
467 STD_EXCEPTION_HV(., 0xe22, h_instr_storage)
468 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
469 STD_EXCEPTION_HV(., 0xe42, emulation_assist)
470 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
471 STD_EXCEPTION_HV(., 0xe62, hmi_exception) /* need to flush cache ? */
472 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
473
474 /* moved from 0xf00 */
475 STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
476 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
477 STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
478 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
479 STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
480 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
481
482/*
483 * An interrupt came in while soft-disabled. We set paca->irq_happened,
484 * then, if it was a decrementer interrupt, we bump the dec to max and
485 * and return, else we hard disable and return. This is called with
486 * r10 containing the value to OR to the paca field.
487 */
488#define MASKED_INTERRUPT(_H) \
489masked_##_H##interrupt: \
490 std r11,PACA_EXGEN+EX_R11(r13); \
491 lbz r11,PACAIRQHAPPENED(r13); \
492 or r11,r11,r10; \
493 stb r11,PACAIRQHAPPENED(r13); \
494 andi. r10,r10,PACA_IRQ_DEC; \
495 beq 1f; \
496 lis r10,0x7fff; \
497 ori r10,r10,0xffff; \
498 mtspr SPRN_DEC,r10; \
499 b 2f; \
5001: mfspr r10,SPRN_##_H##SRR1; \
501 rldicl r10,r10,48,1; /* clear MSR_EE */ \
502 rotldi r10,r10,16; \
503 mtspr SPRN_##_H##SRR1,r10; \
5042: mtcrf 0x80,r9; \
505 ld r9,PACA_EXGEN+EX_R9(r13); \
506 ld r10,PACA_EXGEN+EX_R10(r13); \
507 ld r11,PACA_EXGEN+EX_R11(r13); \
508 GET_SCRATCH0(r13); \
509 ##_H##rfid; \
510 b .
511
512 MASKED_INTERRUPT()
513 MASKED_INTERRUPT(H)
514
515/*
516 * Called from arch_local_irq_enable when an interrupt needs
517 * to be resent. r3 contains 0x500 or 0x900 to indicate which
518 * kind of interrupt. MSR:EE is already off. We generate a
519 * stackframe like if a real interrupt had happened.
520 *
521 * Note: While MSR:EE is off, we need to make sure that _MSR
522 * in the generated frame has EE set to 1 or the exception
523 * handler will not properly re-enable them.
524 */
525_GLOBAL(__replay_interrupt)
526 /* We are going to jump to the exception common code which
527 * will retrieve various register values from the PACA which
528 * we don't give a damn about, so we don't bother storing them.
529 */
530 mfmsr r12
531 mflr r11
532 mfcr r9
533 ori r12,r12,MSR_EE
534 andi. r3,r3,0x0800
535 bne decrementer_common
536 b hardware_interrupt_common
537
538#ifdef CONFIG_PPC_PSERIES
539/*
540 * Vectors for the FWNMI option. Share common code.
541 */
542 .globl system_reset_fwnmi
543 .align 7
544system_reset_fwnmi:
545 HMT_MEDIUM
546 SET_SCRATCH0(r13) /* save r13 */
547 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
548 NOTEST, 0x100)
549
550#endif /* CONFIG_PPC_PSERIES */
551
552#ifdef __DISABLED__
553/*
554 * This is used for when the SLB miss handler has to go virtual,
555 * which doesn't happen for now anymore but will once we re-implement
556 * dynamic VSIDs for shared page tables
557 */
558slb_miss_user_pseries:
559 std r10,PACA_EXGEN+EX_R10(r13)
560 std r11,PACA_EXGEN+EX_R11(r13)
561 std r12,PACA_EXGEN+EX_R12(r13)
562 GET_SCRATCH0(r10)
563 ld r11,PACA_EXSLB+EX_R9(r13)
564 ld r12,PACA_EXSLB+EX_R3(r13)
565 std r10,PACA_EXGEN+EX_R13(r13)
566 std r11,PACA_EXGEN+EX_R9(r13)
567 std r12,PACA_EXGEN+EX_R3(r13)
568 clrrdi r12,r13,32
569 mfmsr r10
570 mfspr r11,SRR0 /* save SRR0 */
571 ori r12,r12,slb_miss_user_common@l /* virt addr of handler */
572 ori r10,r10,MSR_IR|MSR_DR|MSR_RI
573 mtspr SRR0,r12
574 mfspr r12,SRR1 /* and SRR1 */
575 mtspr SRR1,r10
576 rfid
577 b . /* prevent spec. execution */
578#endif /* __DISABLED__ */
579
580 .align 7
581 .globl __end_interrupts
582__end_interrupts:
583
584/*
585 * Code from here down to __end_handlers is invoked from the
586 * exception prologs above. Because the prologs assemble the
587 * addresses of these handlers using the LOAD_HANDLER macro,
588 * which uses an ori instruction, these handlers must be in
589 * the first 64k of the kernel image.
590 */
591
592/*** Common interrupt handlers ***/
593
594 STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
595
596 /*
597 * Machine check is different because we use a different
598 * save area: PACA_EXMC instead of PACA_EXGEN.
599 */
600 .align 7
601 .globl machine_check_common
602machine_check_common:
603 EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
604 FINISH_NAP
605 DISABLE_INTS
606 bl .save_nvgprs
607 addi r3,r1,STACK_FRAME_OVERHEAD
608 bl .machine_check_exception
609 b .ret_from_except
610
611 STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
612 STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
613 STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
614 STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
615 STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
616 STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
617 STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
618 STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
619 STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
620 STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
621 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
622 STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception)
623#ifdef CONFIG_ALTIVEC
624 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
625#else
626 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
627#endif
628#ifdef CONFIG_CBE_RAS
629 STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
630 STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
631 STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
632#endif /* CONFIG_CBE_RAS */
633
634 .align 7
635system_call_entry:
636 b system_call_common
637
638ppc64_runlatch_on_trampoline:
639 b .__ppc64_runlatch_on
640
641/*
642 * Here we have detected that the kernel stack pointer is bad.
643 * R9 contains the saved CR, r13 points to the paca,
644 * r10 contains the (bad) kernel stack pointer,
645 * r11 and r12 contain the saved SRR0 and SRR1.
646 * We switch to using an emergency stack, save the registers there,
647 * and call kernel_bad_stack(), which panics.
648 */
649bad_stack:
650 ld r1,PACAEMERGSP(r13)
651 subi r1,r1,64+INT_FRAME_SIZE
652 std r9,_CCR(r1)
653 std r10,GPR1(r1)
654 std r11,_NIP(r1)
655 std r12,_MSR(r1)
656 mfspr r11,SPRN_DAR
657 mfspr r12,SPRN_DSISR
658 std r11,_DAR(r1)
659 std r12,_DSISR(r1)
660 mflr r10
661 mfctr r11
662 mfxer r12
663 std r10,_LINK(r1)
664 std r11,_CTR(r1)
665 std r12,_XER(r1)
666 SAVE_GPR(0,r1)
667 SAVE_GPR(2,r1)
668 ld r10,EX_R3(r3)
669 std r10,GPR3(r1)
670 SAVE_GPR(4,r1)
671 SAVE_4GPRS(5,r1)
672 ld r9,EX_R9(r3)
673 ld r10,EX_R10(r3)
674 SAVE_2GPRS(9,r1)
675 ld r9,EX_R11(r3)
676 ld r10,EX_R12(r3)
677 ld r11,EX_R13(r3)
678 std r9,GPR11(r1)
679 std r10,GPR12(r1)
680 std r11,GPR13(r1)
681BEGIN_FTR_SECTION
682 ld r10,EX_CFAR(r3)
683 std r10,ORIG_GPR3(r1)
684END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
685 SAVE_8GPRS(14,r1)
686 SAVE_10GPRS(22,r1)
687 lhz r12,PACA_TRAP_SAVE(r13)
688 std r12,_TRAP(r1)
689 addi r11,r1,INT_FRAME_SIZE
690 std r11,0(r1)
691 li r12,0
692 std r12,0(r11)
693 ld r2,PACATOC(r13)
694 ld r11,exception_marker@toc(r2)
695 std r12,RESULT(r1)
696 std r11,STACK_FRAME_OVERHEAD-16(r1)
6971: addi r3,r1,STACK_FRAME_OVERHEAD
698 bl .kernel_bad_stack
699 b 1b
700
701/*
702 * Here r13 points to the paca, r9 contains the saved CR,
703 * SRR0 and SRR1 are saved in r11 and r12,
704 * r9 - r13 are saved in paca->exgen.
705 */
706 .align 7
707 .globl data_access_common
708data_access_common:
709 mfspr r10,SPRN_DAR
710 std r10,PACA_EXGEN+EX_DAR(r13)
711 mfspr r10,SPRN_DSISR
712 stw r10,PACA_EXGEN+EX_DSISR(r13)
713 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
714 DISABLE_INTS
715 ld r12,_MSR(r1)
716 ld r3,PACA_EXGEN+EX_DAR(r13)
717 lwz r4,PACA_EXGEN+EX_DSISR(r13)
718 li r5,0x300
719 b .do_hash_page /* Try to handle as hpte fault */
720
721 .align 7
722 .globl h_data_storage_common
723h_data_storage_common:
724 mfspr r10,SPRN_HDAR
725 std r10,PACA_EXGEN+EX_DAR(r13)
726 mfspr r10,SPRN_HDSISR
727 stw r10,PACA_EXGEN+EX_DSISR(r13)
728 EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
729 bl .save_nvgprs
730 DISABLE_INTS
731 addi r3,r1,STACK_FRAME_OVERHEAD
732 bl .unknown_exception
733 b .ret_from_except
734
735 .align 7
736 .globl instruction_access_common
737instruction_access_common:
738 EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
739 DISABLE_INTS
740 ld r12,_MSR(r1)
741 ld r3,_NIP(r1)
742 andis. r4,r12,0x5820
743 li r5,0x400
744 b .do_hash_page /* Try to handle as hpte fault */
745
746 STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)
747
748/*
749 * Here is the common SLB miss user that is used when going to virtual
750 * mode for SLB misses, that is currently not used
751 */
752#ifdef __DISABLED__
753 .align 7
754 .globl slb_miss_user_common
755slb_miss_user_common:
756 mflr r10
757 std r3,PACA_EXGEN+EX_DAR(r13)
758 stw r9,PACA_EXGEN+EX_CCR(r13)
759 std r10,PACA_EXGEN+EX_LR(r13)
760 std r11,PACA_EXGEN+EX_SRR0(r13)
761 bl .slb_allocate_user
762
763 ld r10,PACA_EXGEN+EX_LR(r13)
764 ld r3,PACA_EXGEN+EX_R3(r13)
765 lwz r9,PACA_EXGEN+EX_CCR(r13)
766 ld r11,PACA_EXGEN+EX_SRR0(r13)
767 mtlr r10
768 beq- slb_miss_fault
769
770 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
771 beq- unrecov_user_slb
772 mfmsr r10
773
774.machine push
775.machine "power4"
776 mtcrf 0x80,r9
777.machine pop
778
779 clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */
780 mtmsrd r10,1
781
782 mtspr SRR0,r11
783 mtspr SRR1,r12
784
785 ld r9,PACA_EXGEN+EX_R9(r13)
786 ld r10,PACA_EXGEN+EX_R10(r13)
787 ld r11,PACA_EXGEN+EX_R11(r13)
788 ld r12,PACA_EXGEN+EX_R12(r13)
789 ld r13,PACA_EXGEN+EX_R13(r13)
790 rfid
791 b .
792
793slb_miss_fault:
794 EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
795 ld r4,PACA_EXGEN+EX_DAR(r13)
796 li r5,0
797 std r4,_DAR(r1)
798 std r5,_DSISR(r1)
799 b handle_page_fault
800
801unrecov_user_slb:
802 EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
803 DISABLE_INTS
804 bl .save_nvgprs
8051: addi r3,r1,STACK_FRAME_OVERHEAD
806 bl .unrecoverable_exception
807 b 1b
808
809#endif /* __DISABLED__ */
810
811
812/*
813 * r13 points to the PACA, r9 contains the saved CR,
814 * r12 contain the saved SRR1, SRR0 is still ready for return
815 * r3 has the faulting address
816 * r9 - r13 are saved in paca->exslb.
817 * r3 is saved in paca->slb_r3
818 * We assume we aren't going to take any exceptions during this procedure.
819 */
820_GLOBAL(slb_miss_realmode)
821 mflr r10
822#ifdef CONFIG_RELOCATABLE
823 mtctr r11
824#endif
825
826 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
827 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
828
829 bl .slb_allocate_realmode
830
831 /* All done -- return from exception. */
832
833 ld r10,PACA_EXSLB+EX_LR(r13)
834 ld r3,PACA_EXSLB+EX_R3(r13)
835 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
836
837 mtlr r10
838
839 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
840 beq- 2f
841
842.machine push
843.machine "power4"
844 mtcrf 0x80,r9
845 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
846.machine pop
847
848 ld r9,PACA_EXSLB+EX_R9(r13)
849 ld r10,PACA_EXSLB+EX_R10(r13)
850 ld r11,PACA_EXSLB+EX_R11(r13)
851 ld r12,PACA_EXSLB+EX_R12(r13)
852 ld r13,PACA_EXSLB+EX_R13(r13)
853 rfid
854 b . /* prevent speculative execution */
855
8562: mfspr r11,SPRN_SRR0
857 ld r10,PACAKBASE(r13)
858 LOAD_HANDLER(r10,unrecov_slb)
859 mtspr SPRN_SRR0,r10
860 ld r10,PACAKMSR(r13)
861 mtspr SPRN_SRR1,r10
862 rfid
863 b .
864
865unrecov_slb:
866 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
867 DISABLE_INTS
868 bl .save_nvgprs
8691: addi r3,r1,STACK_FRAME_OVERHEAD
870 bl .unrecoverable_exception
871 b 1b
872
873
874#ifdef CONFIG_PPC_970_NAP
875power4_fixup_nap:
876 andc r9,r9,r10
877 std r9,TI_LOCAL_FLAGS(r11)
878 ld r10,_LINK(r1) /* make idle task do the */
879 std r10,_NIP(r1) /* equivalent of a blr */
880 blr
881#endif
882
883 .align 7
884 .globl alignment_common
885alignment_common:
886 mfspr r10,SPRN_DAR
887 std r10,PACA_EXGEN+EX_DAR(r13)
888 mfspr r10,SPRN_DSISR
889 stw r10,PACA_EXGEN+EX_DSISR(r13)
890 EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
891 ld r3,PACA_EXGEN+EX_DAR(r13)
892 lwz r4,PACA_EXGEN+EX_DSISR(r13)
893 std r3,_DAR(r1)
894 std r4,_DSISR(r1)
895 bl .save_nvgprs
896 DISABLE_INTS
897 addi r3,r1,STACK_FRAME_OVERHEAD
898 bl .alignment_exception
899 b .ret_from_except
900
901 .align 7
902 .globl program_check_common
903program_check_common:
904 EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
905 bl .save_nvgprs
906 DISABLE_INTS
907 addi r3,r1,STACK_FRAME_OVERHEAD
908 bl .program_check_exception
909 b .ret_from_except
910
911 .align 7
912 .globl fp_unavailable_common
913fp_unavailable_common:
914 EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
915 bne 1f /* if from user, just load it up */
916 bl .save_nvgprs
917 DISABLE_INTS
918 addi r3,r1,STACK_FRAME_OVERHEAD
919 bl .kernel_fp_unavailable_exception
920 BUG_OPCODE
9211: bl .load_up_fpu
922 b fast_exception_return
923
924 .align 7
925 .globl altivec_unavailable_common
926altivec_unavailable_common:
927 EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
928#ifdef CONFIG_ALTIVEC
929BEGIN_FTR_SECTION
930 beq 1f
931 bl .load_up_altivec
932 b fast_exception_return
9331:
934END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
935#endif
936 bl .save_nvgprs
937 DISABLE_INTS
938 addi r3,r1,STACK_FRAME_OVERHEAD
939 bl .altivec_unavailable_exception
940 b .ret_from_except
941
942 .align 7
943 .globl vsx_unavailable_common
944vsx_unavailable_common:
945 EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
946#ifdef CONFIG_VSX
947BEGIN_FTR_SECTION
948 beq 1f
949 b .load_up_vsx
9501:
951END_FTR_SECTION_IFSET(CPU_FTR_VSX)
952#endif
953 bl .save_nvgprs
954 DISABLE_INTS
955 addi r3,r1,STACK_FRAME_OVERHEAD
956 bl .vsx_unavailable_exception
957 b .ret_from_except
958
959 .align 7
960 .globl __end_handlers
961__end_handlers:
962
963/*
964 * Hash table stuff
965 */
966 .align 7
967_STATIC(do_hash_page)
968 std r3,_DAR(r1)
969 std r4,_DSISR(r1)
970
971 andis. r0,r4,0xa410 /* weird error? */
972 bne- handle_page_fault /* if not, try to insert a HPTE */
973 andis. r0,r4,DSISR_DABRMATCH@h
974 bne- handle_dabr_fault
975
976BEGIN_FTR_SECTION
977 andis. r0,r4,0x0020 /* Is it a segment table fault? */
978 bne- do_ste_alloc /* If so handle it */
979END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
980
981 CURRENT_THREAD_INFO(r11, r1)
982 lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
983 andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
984 bne 77f /* then don't call hash_page now */
985 /*
986 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
987 * accessing a userspace segment (even from the kernel). We assume
988 * kernel addresses always have the high bit set.
989 */
990 rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
991 rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
992 orc r0,r12,r0 /* MSR_PR | ~high_bit */
993 rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
994 ori r4,r4,1 /* add _PAGE_PRESENT */
995 rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */
996
997 /*
998 * r3 contains the faulting address
999 * r4 contains the required access permissions
1000 * r5 contains the trap number
1001 *
1002 * at return r3 = 0 for success, 1 for page fault, negative for error
1003 */
1004 bl .hash_page /* build HPTE if possible */
1005 cmpdi r3,0 /* see if hash_page succeeded */
1006
1007 /* Success */
1008 beq fast_exc_return_irq /* Return from exception on success */
1009
1010 /* Error */
1011 blt- 13f
1012
1013/* Here we have a page fault that hash_page can't handle. */
1014handle_page_fault:
101511: ld r4,_DAR(r1)
1016 ld r5,_DSISR(r1)
1017 addi r3,r1,STACK_FRAME_OVERHEAD
1018 bl .do_page_fault
1019 cmpdi r3,0
1020 beq+ 12f
1021 bl .save_nvgprs
1022 mr r5,r3
1023 addi r3,r1,STACK_FRAME_OVERHEAD
1024 lwz r4,_DAR(r1)
1025 bl .bad_page_fault
1026 b .ret_from_except
1027
1028/* We have a data breakpoint exception - handle it */
1029handle_dabr_fault:
1030 bl .save_nvgprs
1031 ld r4,_DAR(r1)
1032 ld r5,_DSISR(r1)
1033 addi r3,r1,STACK_FRAME_OVERHEAD
1034 bl .do_dabr
103512: b .ret_from_except_lite
1036
1037
1038/* We have a page fault that hash_page could handle but HV refused
1039 * the PTE insertion
1040 */
104113: bl .save_nvgprs
1042 mr r5,r3
1043 addi r3,r1,STACK_FRAME_OVERHEAD
1044 ld r4,_DAR(r1)
1045 bl .low_hash_fault
1046 b .ret_from_except
1047
1048/*
1049 * We come here as a result of a DSI at a point where we don't want
1050 * to call hash_page, such as when we are accessing memory (possibly
1051 * user memory) inside a PMU interrupt that occurred while interrupts
1052 * were soft-disabled. We want to invoke the exception handler for
1053 * the access, or panic if there isn't a handler.
1054 */
105577: bl .save_nvgprs
1056 mr r4,r3
1057 addi r3,r1,STACK_FRAME_OVERHEAD
1058 li r5,SIGSEGV
1059 bl .bad_page_fault
1060 b .ret_from_except
1061
1062 /* here we have a segment miss */
1063do_ste_alloc:
1064 bl .ste_allocate /* try to insert stab entry */
1065 cmpdi r3,0
1066 bne- handle_page_fault
1067 b fast_exception_return
1068
1069/*
1070 * r13 points to the PACA, r9 contains the saved CR,
1071 * r11 and r12 contain the saved SRR0 and SRR1.
1072 * r9 - r13 are saved in paca->exslb.
1073 * We assume we aren't going to take any exceptions during this procedure.
1074 * We assume (DAR >> 60) == 0xc.
1075 */
1076 .align 7
1077_GLOBAL(do_stab_bolted)
1078 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1079 std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
1080
1081 /* Hash to the primary group */
1082 ld r10,PACASTABVIRT(r13)
1083 mfspr r11,SPRN_DAR
1084 srdi r11,r11,28
1085 rldimi r10,r11,7,52 /* r10 = first ste of the group */
1086
1087 /* Calculate VSID */
1088 /* This is a kernel address, so protovsid = ESID | 1 << 37 */
1089 li r9,0x1
1090 rldimi r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
1091 ASM_VSID_SCRAMBLE(r11, r9, 256M)
1092 rldic r9,r11,12,16 /* r9 = vsid << 12 */
1093
1094 /* Search the primary group for a free entry */
10951: ld r11,0(r10) /* Test valid bit of the current ste */
1096 andi. r11,r11,0x80
1097 beq 2f
1098 addi r10,r10,16
1099 andi. r11,r10,0x70
1100 bne 1b
1101
1102 /* Stick for only searching the primary group for now. */
1103 /* At least for now, we use a very simple random castout scheme */
1104 /* Use the TB as a random number ; OR in 1 to avoid entry 0 */
1105 mftb r11
1106 rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */
1107 ori r11,r11,0x10
1108
1109 /* r10 currently points to an ste one past the group of interest */
1110 /* make it point to the randomly selected entry */
1111 subi r10,r10,128
1112 or r10,r10,r11 /* r10 is the entry to invalidate */
1113
1114 isync /* mark the entry invalid */
1115 ld r11,0(r10)
1116 rldicl r11,r11,56,1 /* clear the valid bit */
1117 rotldi r11,r11,8
1118 std r11,0(r10)
1119 sync
1120
1121 clrrdi r11,r11,28 /* Get the esid part of the ste */
1122 slbie r11
1123
11242: std r9,8(r10) /* Store the vsid part of the ste */
1125 eieio
1126
1127 mfspr r11,SPRN_DAR /* Get the new esid */
1128 clrrdi r11,r11,28 /* Permits a full 32b of ESID */
1129 ori r11,r11,0x90 /* Turn on valid and kp */
1130 std r11,0(r10) /* Put new entry back into the stab */
1131
1132 sync
1133
1134 /* All done -- return from exception. */
1135 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1136 ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
1137
1138 andi. r10,r12,MSR_RI
1139 beq- unrecov_slb
1140
1141 mtcrf 0x80,r9 /* restore CR */
1142
1143 mfmsr r10
1144 clrrdi r10,r10,2
1145 mtmsrd r10,1
1146
1147 mtspr SPRN_SRR0,r11
1148 mtspr SPRN_SRR1,r12
1149 ld r9,PACA_EXSLB+EX_R9(r13)
1150 ld r10,PACA_EXSLB+EX_R10(r13)
1151 ld r11,PACA_EXSLB+EX_R11(r13)
1152 ld r12,PACA_EXSLB+EX_R12(r13)
1153 ld r13,PACA_EXSLB+EX_R13(r13)
1154 rfid
1155 b . /* prevent speculative execution */
1156
1157#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
1158/*
1159 * Data area reserved for FWNMI option.
1160 * This address (0x7000) is fixed by the RPA.
1161 */
1162 .= 0x7000
1163 .globl fwnmi_data_area
1164fwnmi_data_area:
1165
1166 /* pseries and powernv need to keep the whole page from
1167 * 0x7000 to 0x8000 free for use by the firmware
1168 */
1169 . = 0x8000
1170#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
1171
1172/* Space for CPU0's segment table */
1173 .balign 4096
1174 .globl initial_stab
1175initial_stab:
1176 .space 4096
1177
1178#ifdef CONFIG_PPC_POWERNV
1179_GLOBAL(opal_mc_secondary_handler)
1180 HMT_MEDIUM
1181 SET_SCRATCH0(r13)
1182 GET_PACA(r13)
1183 clrldi r3,r3,2
1184 tovirt(r3,r3)
1185 std r3,PACA_OPAL_MC_EVT(r13)
1186 ld r13,OPAL_MC_SRR0(r3)
1187 mtspr SPRN_SRR0,r13
1188 ld r13,OPAL_MC_SRR1(r3)
1189 mtspr SPRN_SRR1,r13
1190 ld r3,OPAL_MC_GPR3(r3)
1191 GET_SCRATCH0(r13)
1192 b machine_check_pSeries
1193#endif /* CONFIG_PPC_POWERNV */