/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the SLB and STAB
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/exception-64s.h>

/*
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
 * 0x6000 - 0x6fff : Initial (CPU0) segment table
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -        : Early init and support code
 */

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	STD_EXCEPTION_PSERIES(0x100, system_reset)

	. = 0x200
_machine_check_pSeries:
	HMT_MEDIUM
	DO_KVM	0x200
	mtspr	SPRN_SPRG_SCRATCH0,r13		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM
	DO_KVM	0x300
	mtspr	SPRN_SPRG_SCRATCH0,r13
BEGIN_FTR_SECTION
	mfspr	r13,SPRN_SPRG_PACA
	std	r9,PACA_EXSLB+EX_R9(r13)
	std	r10,PACA_EXSLB+EX_R10(r13)
	mfspr	r10,SPRN_DAR
	mfspr	r9,SPRN_DSISR
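	/*
	 * Fold (DAR >> 60) with DSISR bit 0x00200000 (segment table
	 * miss, the same bit do_hash_page tests) so that r10 == 0x2c
	 * exactly when a kernel (0xc...) address missed the segment
	 * table; that case goes to do_stab_bolted, which runs without
	 * touching the kernel stack.
	 */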
	srdi	r10,r10,60
	rlwimi	r10,r9,16,0x20
	mfcr	r9
	cmpwi	r10,0x2c
	beq	do_stab_bolted_pSeries
	ld	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	ld	r11,PACA_EXSLB+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	mfspr	r12,SPRN_SPRG_SCRATCH0
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R13(r13)
	EXCEPTION_PROLOG_PSERIES_1(data_access_common)
FTR_SECTION_ELSE
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_SLB)

	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	HMT_MEDIUM
	DO_KVM	0x380
	mtspr	SPRN_SPRG_SCRATCH0,r13
	mfspr	r13,SPRN_SPRG_PACA	/* get paca address into r13 */
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	mfcr	r9
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG_SCRATCH0
	std	r10,PACA_EXSLB+EX_R13(r13)
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to .slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	STD_EXCEPTION_PSERIES(0x400, instruction_access)

	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM
	DO_KVM	0x480
	mtspr	SPRN_SPRG_SCRATCH0,r13
	mfspr	r13,SPRN_SPRG_PACA	/* get paca address into r13 */
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	mfcr	r9
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG_SCRATCH0
	std	r10,PACA_EXSLB+EX_R13(r13)
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_PSERIES(0x600, alignment)
	STD_EXCEPTION_PSERIES(0x700, program_check)
	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
	MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
	STD_EXCEPTION_PSERIES(0xb00, trap_0b)

	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	HMT_MEDIUM
	DO_KVM	0xc00
BEGIN_FTR_SECTION
	cmpdi	r0,0x1ebe
	beq-	1f
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
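	/*
	 * Stash user state where system_call_common expects to find it:
	 * the old r13 in r9, SRR0 (user NIP) in r11 and SRR1 in r12,
	 * then rfid to the handler with the kernel MSR from the paca.
	 */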
	mr	r9,r13
	mfspr	r13,SPRN_SPRG_PACA
	mfspr	r11,SPRN_SRR0
	ld	r12,PACAKBASE(r13)
	ld	r10,PACAKMSR(r13)
	LOAD_HANDLER(r12, system_call_entry)
	mtspr	SPRN_SRR0,r12
	mfspr	r12,SPRN_SRR1
	mtspr	SPRN_SRR1,r10
	rfid
	b	.	/* prevent speculative execution */

/* Fast LE/BE switch system call */
1:	mfspr	r12,SPRN_SRR1
	xori	r12,r12,MSR_LE
	mtspr	SPRN_SRR1,r12
	rfid		/* return to userspace */
	b	.

	STD_EXCEPTION_PSERIES(0xd00, single_step)
	STD_EXCEPTION_PSERIES(0xe00, trap_0e)

	/* We need to deal with the AltiVec unavailable exception
	 * here, which is at 0xf20 and thus in the middle of the
	 * prolog code of the performance monitor one. A little
	 * trickery is thus necessary: each vector just branches
	 * to its real handler out of line.
	 */
performance_monitor_pSeries_1:
	. = 0xf00
	DO_KVM	0xf00
	b	performance_monitor_pSeries

altivec_unavailable_pSeries_1:
	. = 0xf20
	DO_KVM	0xf20
	b	altivec_unavailable_pSeries

vsx_unavailable_pSeries_1:
	. = 0xf40
	DO_KVM	0xf40
	b	vsx_unavailable_pSeries

#ifdef CONFIG_CBE_RAS
	HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
#endif /* CONFIG_CBE_RAS */
	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
#ifdef CONFIG_CBE_RAS
	HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
#endif /* CONFIG_CBE_RAS */
	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
#ifdef CONFIG_CBE_RAS
	HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
#endif /* CONFIG_CBE_RAS */

	. = 0x3000

/*** pSeries interrupt support ***/

	/* moved from 0xf00 */
	STD_EXCEPTION_PSERIES(., performance_monitor)
	STD_EXCEPTION_PSERIES(., altivec_unavailable)
	STD_EXCEPTION_PSERIES(., vsx_unavailable)

/*
 * An interrupt came in while soft-disabled; clear EE in SRR1,
 * clear paca->hard_enabled and return.
 */
masked_interrupt:
	stb	r10,PACAHARDIRQEN(r13)
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	mfspr	r10,SPRN_SRR1
	rldicl	r10,r10,48,1		/* clear MSR_EE */
	rotldi	r10,r10,16
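	/*
	 * (The rotate pair above brings MSR_EE, IBM bit 48, down to
	 * bit 0 where the rldicl mask clears it, then rotates the MSR
	 * back into place, all without needing a scratch mask register.)
	 */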
	mtspr	SPRN_SRR1,r10
	ld	r10,PACA_EXGEN+EX_R10(r13)
	mfspr	r13,SPRN_SPRG_SCRATCH0
	rfid
	b	.

	.align	7
do_stab_bolted_pSeries:
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG_SCRATCH0
	std	r10,PACA_EXSLB+EX_R13(r13)
	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted)

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option.  Share common code.
 */
	.globl system_reset_fwnmi
	.align 7
system_reset_fwnmi:
	HMT_MEDIUM
	mtspr	SPRN_SPRG_SCRATCH0,r13		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)

	.globl machine_check_fwnmi
	.align 7
machine_check_fwnmi:
	HMT_MEDIUM
	mtspr	SPRN_SPRG_SCRATCH0,r13		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

#endif /* CONFIG_PPC_PSERIES */

#ifdef __DISABLED__
/*
 * This is used when the SLB miss handler has to go virtual,
 * which doesn't happen for now but will once we re-implement
 * dynamic VSIDs for shared page tables
 */
slb_miss_user_pseries:
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	mfspr	r10,SPRG_SCRATCH0
	ld	r11,PACA_EXSLB+EX_R9(r13)
	ld	r12,PACA_EXSLB+EX_R3(r13)
	std	r10,PACA_EXGEN+EX_R13(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R3(r13)
	clrrdi	r12,r13,32
	mfmsr	r10
	mfspr	r11,SRR0			/* save SRR0 */
	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mtspr	SRR0,r12
	mfspr	r12,SRR1			/* and SRR1 */
	mtspr	SRR1,r10
	rfid
	b	.				/* prevent spec. execution */
#endif /* __DISABLED__ */

	.align	7
	.globl	__end_interrupts
__end_interrupts:

/*
 * Code from here down to __end_handlers is invoked from the
 * exception prologs above.  Because the prologs assemble the
 * addresses of these handlers using the LOAD_HANDLER macro,
 * which uses an addi instruction, these handlers must be in
 * the first 32k of the kernel image.
 */
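/*
 * (LOAD_HANDLER is roughly "addi reg,reg,(label)-_stext"; addi takes
 * a signed 16-bit immediate, hence the 32k limit.)
 */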

/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

/*
 * Machine check is different because we use a different
 * save area: PACA_EXMC instead of PACA_EXGEN.
 */
	.align	7
	.globl machine_check_common
machine_check_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	FINISH_NAP
	DISABLE_INTS
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */

	.align	7
system_call_entry:
	b	system_call_common

/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	SAVE_4GPRS(3,r1)
	SAVE_2GPRS(7,r1)
	SAVE_10GPRS(12,r1)
	SAVE_10GPRS(22,r1)
	lhz	r12,PACA_TRAP_SAVE(r13)
	std	r12,_TRAP(r1)
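	/* back-chain 0(r1) to an empty frame above and null-terminate
	 * that frame so stack walkers stop here */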
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b

/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820
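	/* (for an ISI the fault reasons live in SRR1; the 0x5820 mask
	 *  picks out the bits that mirror the DSISR error bits, so r4
	 *  can be passed to .do_hash_page like a DSISR value) */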
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */

/*
 * Here is the common SLB miss handler that is used when going to virtual
 * mode for SLB misses. It is currently not used.
 */
#ifdef __DISABLED__
	.align	7
	.globl	slb_miss_user_common
slb_miss_user_common:
	mflr	r10
	std	r3,PACA_EXGEN+EX_DAR(r13)
	stw	r9,PACA_EXGEN+EX_CCR(r13)
	std	r10,PACA_EXGEN+EX_LR(r13)
	std	r11,PACA_EXGEN+EX_SRR0(r13)
	bl	.slb_allocate_user

	ld	r10,PACA_EXGEN+EX_LR(r13)
	ld	r3,PACA_EXGEN+EX_R3(r13)
	lwz	r9,PACA_EXGEN+EX_CCR(r13)
	ld	r11,PACA_EXGEN+EX_SRR0(r13)
	mtlr	r10
	beq-	slb_miss_fault

	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
	beq-	unrecov_user_slb
	mfmsr	r10

.machine push
.machine "power4"
	mtcrf	0x80,r9
.machine pop

	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
	mtmsrd	r10,1

	mtspr	SRR0,r11
	mtspr	SRR1,r12

	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.

slb_miss_fault:
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
	ld	r4,PACA_EXGEN+EX_DAR(r13)
	li	r5,0
	std	r4,_DAR(r1)
	std	r5,_DSISR(r1)
	b	handle_page_fault

unrecov_user_slb:
	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

#endif /* __DISABLED__ */


/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contains the saved SRR1, SRR0 is still ready for return,
 * r3 has the faulting address,
 * r9 - r13 are saved in paca->exslb,
 * r3 is saved in paca->slb_r3.
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(slb_miss_realmode)
	mflr	r10
#ifdef CONFIG_RELOCATABLE
	mtctr	r11
#endif

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	.slb_allocate_realmode

	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	ld	r11,PACALPPACAPTR(r13)
	ld	r11,LPPACASRR0(r11)		/* get SRR0 value */
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */

	mtlr	r10

	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	2f

.machine	push
.machine	"power4"
	mtcrf	0x80,r9
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
.machine	pop

#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

2:
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	b	unrecov_slb
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	mfspr	r11,SPRN_SRR0
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10,unrecov_slb)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	rfid
	b	.

unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

	.align	7
	.globl hardware_interrupt_common
	.globl hardware_interrupt_entry
hardware_interrupt_common:
	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
	FINISH_NAP
hardware_interrupt_entry:
	DISABLE_INTS
BEGIN_FTR_SECTION
	bl	.ppc64_runlatch_on
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite

#ifdef CONFIG_PPC_970_NAP
power4_fixup_nap:
	andc	r9,r9,r10
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	blr
#endif

	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.alignment_exception
	b	.ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.program_check_exception
	b	.ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	1f			/* if from user, just load it up */
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE
1:	bl	.load_up_fpu
	b	fast_exception_return

	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f
	bl	.load_up_altivec
	b	fast_exception_return
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.altivec_unavailable_exception
	b	.ret_from_except

	.align	7
	.globl vsx_unavailable_common
vsx_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	bne	.load_up_vsx
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.vsx_unavailable_exception
	b	.ret_from_except

	.align	7
	.globl	__end_handlers
__end_handlers:

/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
fast_exc_return_irq:			/* restores irq state too */
	ld	r3,SOFTE(r1)
	TRACE_AND_RESTORE_IRQ(r3);
	ld	r12,_MSR(r1)
	rldicl	r4,r12,49,63		/* get MSR_EE to LSB */
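	/* (rotating MSR left by 49 moves EE, IBM bit 48, to bit 63,
	 *  the LSB; the mask keeps only that bit) */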
	stb	r4,PACAHARDIRQEN(r13)	/* restore paca->hard_enabled */
	b	1f

	.globl	fast_exception_return
fast_exception_return:
	ld	r12,_MSR(r1)
1:	ld	r11,_NIP(r1)
	andi.	r3,r12,MSR_RI		/* check if RI is set */
	beq-	unrecov_fer

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	andi.	r3,r12,MSR_PR
	beq	2f
	ACCOUNT_CPU_USER_EXIT(r3, r4)
2:
#endif

	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtxer	r6
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)

	mfmsr	r10
	rldicl	r10,r10,48,1		/* clear EE */
	rldicr	r10,r10,16,61		/* clear RI (LE is 0 already) */
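	/* (the two rotates above net out to a full 64-bit rotation of
	 *  the MSR while clearing EE, IBM bit 48, and RI, bit 62) */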
	mtmsrd	r10,1

	mtspr	SPRN_SRR1,r12
	mtspr	SPRN_SRR0,r11
	REST_4GPRS(10, r1)
	ld	r1,GPR1(r1)
	rfid
	b	.	/* prevent speculative execution */

unrecov_fer:
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b


/*
 * Hash table stuff
 */
	.align	7
_STATIC(do_hash_page)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)

	andis.	r0,r4,0xa450		/* weird error? */
	bne-	handle_page_fault	/* if so, don't try to insert a HPTE */
BEGIN_FTR_SECTION
	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
	bne-	do_ste_alloc		/* If so handle it */
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)

	clrrdi	r11,r1,THREAD_SHIFT
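	/* (thread_info sits at the base of the kernel stack, so
	 *  clearing the low THREAD_SHIFT bits of r1 yields it) */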
	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
	bne	77f			/* then don't call hash_page now */

	/*
	 * On iSeries, we soft-disable interrupts here, then
	 * hard-enable interrupts so that the hash_page code can spin on
	 * the hash_table_lock without problems on a shared processor.
	 */
	DISABLE_INTS

	/*
	 * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
	 * and will clobber volatile registers when irq tracing is enabled
	 * so we need to reload them. It may be possible to be smarter here
	 * and move the irq tracing elsewhere but let's keep it simple for
	 * now
	 */
#ifdef CONFIG_TRACE_IRQFLAGS
	ld	r3,_DAR(r1)
	ld	r4,_DSISR(r1)
	ld	r5,_TRAP(r1)
	ld	r12,_MSR(r1)
	clrrdi	r5,r5,4
#endif /* CONFIG_TRACE_IRQFLAGS */
	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel). We assume
	 * kernel addresses always have the high bit set.
	 */
	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori	r4,r4,1			/* add _PAGE_PRESENT */
	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
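	/*
	 * (Summary of the bit gymnastics above: DSISR's store bit becomes
	 * _PAGE_RW, "MSR_PR set or address high bit clear" becomes
	 * _PAGE_USER, _PAGE_PRESENT is always requested, and an 0x400
	 * trap, i.e. an ISI, adds _PAGE_EXEC.)
	 */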

	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 * r5 contains the trap number
	 *
	 * at return r3 = 0 for success
	 */
	bl	.hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if hash_page succeeded */

BEGIN_FW_FTR_SECTION
	/*
	 * If we had interrupts soft-enabled at the point where the
	 * DSI/ISI occurred, and an interrupt came in during hash_page,
	 * handle it now.
	 * We jump to ret_from_except_lite rather than fast_exception_return
	 * because ret_from_except_lite will check for and handle pending
	 * interrupts if necessary.
	 */
	beq	13f
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)

BEGIN_FW_FTR_SECTION
	/*
	 * Here we have interrupts hard-disabled, so it is sufficient
	 * to restore paca->{soft,hard}_enable and get out.
	 */
	beq	fast_exc_return_irq	/* Return from exception on success */
END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)

	/* For a hash failure, we don't bother re-enabling interrupts */
	ble-	12f

	/*
	 * hash_page couldn't handle it, set soft interrupt enable back
	 * to what it was before the trap.  Note that .raw_local_irq_restore
	 * handles any interrupts pending at this point.
	 */
	ld	r3,SOFTE(r1)
	TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
	bl	.raw_local_irq_restore
	b	11f

/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
	ENABLE_INTS
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_page_fault
	cmpdi	r3,0
	beq+	13f
	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.bad_page_fault
	b	.ret_from_except

13:	b	.ret_from_except_lite

/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
12:	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r4,_DAR(r1)
	bl	.low_hash_fault
	b	.ret_from_except

/*
 * We come here as a result of a DSI at a point where we don't want
 * to call hash_page, such as when we are accessing memory (possibly
 * user memory) inside a PMU interrupt that occurred while interrupts
 * were soft-disabled.  We want to invoke the exception handler for
 * the access, or panic if there isn't a handler.
 */
77:	bl	.save_nvgprs
	mr	r4,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r5,SIGSEGV
	bl	.bad_page_fault
	b	.ret_from_except

/* here we have a segment miss */
do_ste_alloc:
	bl	.ste_allocate		/* try to insert stab entry */
	cmpdi	r3,0
	bne-	handle_page_fault
	b	fast_exception_return

/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 */
	.align	7
_GLOBAL(do_stab_bolted)
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */

	/* Hash to the primary group */
	ld	r10,PACASTABVIRT(r13)
	mfspr	r11,SPRN_DAR
	srdi	r11,r11,28
	rldimi	r10,r11,7,52	/* r10 = first ste of the group */

	/* Calculate VSID */
	/* This is a kernel address, so protovsid = ESID */
	ASM_VSID_SCRAMBLE(r11, r9, 256M)
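	/* (ASM_VSID_SCRAMBLE roughly computes protovsid * VSID_MULTIPLIER
	 *  modulo VSID_MODULUS, 2^36 - 1 for 256M segments; see
	 *  asm/mmu-hash64.h for the actual macro) */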
	rldic	r9,r11,12,16	/* r9 = vsid << 12 */

	/* Search the primary group for a free entry */
1:	ld	r11,0(r10)	/* Test valid bit of the current ste */
	andi.	r11,r11,0x80
	beq	2f
	addi	r10,r10,16
	andi.	r11,r10,0x70
	bne	1b
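	/* (each STE is 16 bytes and a group of 8 is 128-byte aligned,
	 *  so the low 0x70 address bits return to zero once we have
	 *  walked past the last entry of the group) */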

	/* Stick to searching only the primary group for now. */
	/* At least for now, we use a very simple random castout scheme */
	/* Use the TB as a random number ; OR in 1 to avoid entry 0 */
	mftb	r11
	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
	ori	r11,r11,0x10

	/* r10 currently points to an ste one past the group of interest */
	/* make it point to the randomly selected entry */
	subi	r10,r10,128
	or	r10,r10,r11	/* r10 is the entry to invalidate */

	isync			/* mark the entry invalid */
	ld	r11,0(r10)
	rldicl	r11,r11,56,1	/* clear the valid bit */
	rotldi	r11,r11,8
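	/* (same rotate-mask-rotate trick as in masked_interrupt: the
	 *  valid bit, 0x80, is rotated down to bit 0, cleared by the
	 *  rldicl mask, and the dword rotated back into place) */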
	std	r11,0(r10)
	sync

	clrrdi	r11,r11,28	/* Get the esid part of the ste */
	slbie	r11

2:	std	r9,8(r10)	/* Store the vsid part of the ste */
	eieio

	mfspr	r11,SPRN_DAR	/* Get the new esid */
	clrrdi	r11,r11,28	/* Permits a full 32b of ESID */
	ori	r11,r11,0x90	/* Turn on valid and kp */
	std	r11,0(r10)	/* Put new entry back into the stab */

	sync

	/* All done -- return from exception. */
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */

	andi.	r10,r12,MSR_RI
	beq-	unrecov_slb

	mtcrf	0x80,r9			/* restore CR */

	mfmsr	r10
	clrrdi	r10,r10,2
	mtmsrd	r10,1

	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

/*
 * Space for CPU0's segment table.
 *
 * On iSeries, the hypervisor must fill in at least one entry before
 * we get control (with relocate on).  The address is given to the hv
 * as a page number (see xLparMap below), so this must be at a
 * fixed address (the linker can't compute (u64)&initial_stab >>
 * PAGE_SHIFT).
 */
	. = STAB0_OFFSET	/* 0x6000 */
	.globl initial_stab
initial_stab:
	.space	4096

#ifdef CONFIG_PPC_PSERIES
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	. = 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:
#endif /* CONFIG_PPC_PSERIES */

	/* iSeries does not use the FWNMI stuff, so it is safe to put
	 * this here, even if we later allow kernels that will boot on
	 * both pSeries and iSeries */
#ifdef CONFIG_PPC_ISERIES
	. = LPARMAP_PHYS
	.globl xLparMap
xLparMap:
	.quad	HvEsidsToMap		/* xNumberEsids */
	.quad	HvRangesToMap		/* xNumberRanges */
	.quad	STAB0_PAGE		/* xSegmentTableOffs */
	.zero	40			/* xRsvd */
	/* xEsids (HvEsidsToMap entries of 2 quads) */
	.quad	PAGE_OFFSET_ESID	/* xKernelEsid */
	.quad	PAGE_OFFSET_VSID	/* xKernelVsid */
	.quad	VMALLOC_START_ESID	/* xKernelEsid */
	.quad	VMALLOC_START_VSID	/* xKernelVsid */
	/* xRanges (HvRangesToMap entries of 3 quads) */
	.quad	HvPagesToMap		/* xPages */
	.quad	0			/* xOffset */
	.quad	PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT)	/* xVPN */

#endif /* CONFIG_PPC_ISERIES */

#ifdef CONFIG_PPC_PSERIES
	. = 0x8000
#endif /* CONFIG_PPC_PSERIES */