/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low-level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position-dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/exception-64s.h>
#include <asm/ptrace.h>

/*
 * We lay out physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
 * 0x6000 - 0x6fff : Initial (CPU0) segment table
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -        : Early init and support code
 */

/*
 * This is the start of the interrupt handlers for pSeries.
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	.globl system_reset_pSeries;
system_reset_pSeries:
	HMT_MEDIUM;
	SET_SCRATCH0(r13)
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
	/* Running native on arch 2.06 or later, check if we are
	 * waking up from nap. We only handle no state loss and
	 * supervisor state loss. We do -not- handle hypervisor
	 * state loss at this time.
	 */
	mfspr	r13,SPRN_SRR1
	rlwinm	r13,r13,47-31,30,31
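	/*
	 * r13 now holds SRR1 bits 46:47, which encode how much state
	 * was lost in nap, matching the checks below: 1 = no state
	 * lost, 2 = supervisor state lost, 3 = hypervisor state also
	 * lost, which is treated as fatal.
	 */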
	cmpwi	cr0,r13,1
	bne	1f
	b	.power7_wakeup_noloss
1:	cmpwi	cr0,r13,2
	bne	1f
	b	.power7_wakeup_loss
	/* Total loss of HV state is fatal; we could try to use the
	 * PIR to locate a PACA, then use an emergency stack etc...
	 * but for now, let's just stay stuck here.
	 */
1:	cmpwi	cr0,r13,3
	beq	.
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE_206)
#endif /* CONFIG_PPC_P7_NAP */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)

	. = 0x200
machine_check_pSeries_1:
	/* This is moved out of line as it can be patched by FW, but
	 * some code path might still want to branch into the original
	 * vector
	 */
	b	machine_check_pSeries

	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM
	SET_SCRATCH0(r13)
#ifndef CONFIG_POWER4_ONLY
BEGIN_FTR_SECTION
	b	data_access_check_stab
data_access_not_stab:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
#endif
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
				 KVMTEST_PR, 0x300)

	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	HMT_MEDIUM
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to .slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)

	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	mfspr	r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
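	/*
	 * Same trick as at 0x380 above: with a relocatable kernel the
	 * handler may be out of range of a relative branch, so compute
	 * its address from PACAKBASE and branch via CTR.
	 */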
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

/* We open code these as we can't have a ". = x" (even with
 * x = ".") within a feature section.
 */
	. = 0x500;
	.globl hardware_interrupt_pSeries;
	.globl hardware_interrupt_hv;
hardware_interrupt_pSeries:
hardware_interrupt_hv:
	BEGIN_FTR_SECTION
		_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
					    EXC_HV, SOFTEN_TEST_HV)
		KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
	FTR_SECTION_ELSE
		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
					    EXC_STD, SOFTEN_TEST_PR)
		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE_206)
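	/*
	 * On a CPU with CPU_FTR_HVMODE_206, external interrupts are
	 * taken with HSRR0/HSRR1 and must use the EXC_HV flavour (the
	 * 0x502 trap number distinguishes it); otherwise the standard
	 * SRR0/SRR1 path at 0x500 is used. Both variants are checked
	 * against the soft-disable state via the SOFTEN_TEST.
	 */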

	STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)

	STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)

	STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)

	MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
	MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer)

	STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)

	STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)

	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	HMT_MEDIUM
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9,PACA_EXGEN+EX_R9(r13)
	std	r10,PACA_EXGEN+EX_R10(r13)
	mfcr	r9
	KVMTEST(0xc00)
	GET_SCRATCH0(r13)
#endif
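	/*
	 * KVMTEST branches to the KVM_HANDLER for 0xc00 (emitted below)
	 * if this system call interrupt actually came from a guest;
	 * r9/r10 were stashed in the PACA above because the test
	 * clobbers them. If we weren't in a guest, we fall through to
	 * the host system call path.
	 */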
BEGIN_FTR_SECTION
	cmpdi	r0,0x1ebe
	beq-	1f
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
	mr	r9,r13
	GET_PACA(r13)
	mfspr	r11,SPRN_SRR0
	mfspr	r12,SPRN_SRR1
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, system_call_entry)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	rfid
	b	.	/* prevent speculative execution */

	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)

	/* Fast LE/BE switch system call */
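	/*
	 * On CPUs with CPU_FTR_REAL_LE, a system call with r0 = 0x1ebe
	 * (checked above) lands here: it just flips MSR_LE in the saved
	 * SRR1 and returns, switching the endianness user space runs in
	 * without going through the full system call path.
	 */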
1:	mfspr	r12,SPRN_SRR1
	xori	r12,r12,MSR_LE
	mtspr	SPRN_SRR1,r12
	rfid		/* return to userspace */
	b	.

	STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)

	/* At 0xe??? we have a bunch of hypervisor exceptions, so we
	 * branch out of line to handle them.
	 */
	. = 0xe00
	b	h_data_storage_hv
	. = 0xe20
	b	h_instr_storage_hv
	. = 0xe40
	b	emulation_assist_hv
	. = 0xe50
	b	hmi_exception_hv
	. = 0xe60
	b	hmi_exception_hv

	/* We need to deal with the Altivec unavailable exception here,
	 * which is at 0xf20 and thus in the middle of the prolog code
	 * of the performance monitor one. A little trickery is thus
	 * necessary.
	 */
performance_monitor_pSeries_1:
	. = 0xf00
	b	performance_monitor_pSeries

altivec_unavailable_pSeries_1:
	. = 0xf20
	b	altivec_unavailable_pSeries

vsx_unavailable_pSeries_1:
	. = 0xf40
	b	vsx_unavailable_pSeries

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
#endif /* CONFIG_CBE_RAS */

	STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
#endif /* CONFIG_CBE_RAS */

	. = 0x3000

/*** Out of line interrupts support ***/

	/* moved from 0x200 */
machine_check_pSeries:
	.globl machine_check_fwnmi
machine_check_fwnmi:
	HMT_MEDIUM
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common,
				 EXC_STD, KVMTEST, 0x200)
	KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)

#ifndef CONFIG_POWER4_ONLY
	/* moved from 0x300 */
data_access_check_stab:
	GET_PACA(r13)
	std	r9,PACA_EXSLB+EX_R9(r13)
	std	r10,PACA_EXSLB+EX_R10(r13)
	mfspr	r10,SPRN_DAR
	mfspr	r9,SPRN_DSISR
	srdi	r10,r10,60
	rlwimi	r10,r9,16,0x20
#ifdef CONFIG_KVM_BOOK3S_PR
	lbz	r9,HSTATE_IN_GUEST(r13)
	rlwimi	r10,r9,8,0x300
#endif
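	/*
	 * r10 now combines the top nibble of the faulting address
	 * (DAR >> 60) with a DSISR fault bit shifted into 0x20, so
	 * 0x2c means a segment table miss on a kernel (0xC...) address,
	 * which the bolted handler below services. With KVM PR, a
	 * non-zero HSTATE_IN_GUEST is folded in as well, making the
	 * comparison fail so we take the ordinary path instead.
	 */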
	mfcr	r9
	cmpwi	r10,0x2c
	beq	do_stab_bolted_pSeries
	mtcrf	0x80,r9
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	b	data_access_not_stab
do_stab_bolted_pSeries:
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	GET_SCRATCH0(r10)
	std	r10,PACA_EXSLB+EX_R13(r13)
	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
#endif /* CONFIG_POWER4_ONLY */

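	/*
	 * Out-of-line KVM handlers matching the KVMTEST checks embedded
	 * in the exception prologs above (0x300, 0x380, 0x400, 0x480,
	 * 0x900 and the 0x982 HV decrementer).
	 */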
	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x300)
	KVM_HANDLER_PR_SKIP(PACA_EXSLB, EXC_STD, 0x380)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
	KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)

	.align	7
	/* moved from 0xe00 */
	STD_EXCEPTION_HV(., 0xe02, h_data_storage)
	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
	STD_EXCEPTION_HV(., 0xe22, h_instr_storage)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
	STD_EXCEPTION_HV(., 0xe42, emulation_assist)
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
	STD_EXCEPTION_HV(., 0xe62, hmi_exception) /* need to flush cache ? */
	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)

	/* moved from 0xf00 */
	STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
	STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
	STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)

/*
 * An interrupt came in while soft-disabled; clear EE in SRR1,
 * clear paca->hard_enabled and return.
 */
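/*
 * The EE clearing below uses a rotate trick: MSR_EE is bit 48, so
 * rotating left by 48 brings it to bit 0, the rldicl mask (keep bits
 * 1-63) clears it, and a further rotate by 16 (48 + 16 = 64) puts the
 * register back in its original layout.
 */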
masked_interrupt:
	stb	r10,PACAHARDIRQEN(r13)
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	mfspr	r10,SPRN_SRR1
	rldicl	r10,r10,48,1		/* clear MSR_EE */
	rotldi	r10,r10,16
	mtspr	SPRN_SRR1,r10
	ld	r10,PACA_EXGEN+EX_R10(r13)
	GET_SCRATCH0(r13)
	rfid
	b	.

masked_Hinterrupt:
	stb	r10,PACAHARDIRQEN(r13)
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	mfspr	r10,SPRN_HSRR1
	rldicl	r10,r10,48,1		/* clear MSR_EE */
	rotldi	r10,r10,16
	mtspr	SPRN_HSRR1,r10
	ld	r10,PACA_EXGEN+EX_R10(r13)
	GET_SCRATCH0(r13)
	hrfid
	b	.

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option. Share common code.
 */
	.globl system_reset_fwnmi
	.align 7
system_reset_fwnmi:
	HMT_MEDIUM
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
				 NOTEST, 0x100)

#endif /* CONFIG_PPC_PSERIES */

#ifdef __DISABLED__
/*
 * This is used when the SLB miss handler has to go virtual,
 * which doesn't happen at the moment but will again once we
 * re-implement dynamic VSIDs for shared page tables.
 */
slb_miss_user_pseries:
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	GET_SCRATCH0(r10)
	ld	r11,PACA_EXSLB+EX_R9(r13)
	ld	r12,PACA_EXSLB+EX_R3(r13)
	std	r10,PACA_EXGEN+EX_R13(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R3(r13)
	clrrdi	r12,r13,32
	mfmsr	r10
	mfspr	r11,SPRN_SRR0		/* save SRR0 */
	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mtspr	SPRN_SRR0,r12
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	mtspr	SPRN_SRR1,r10
	rfid
	b	.			/* prevent spec. execution */
#endif /* __DISABLED__ */

/* KVM's trampoline code needs to be close to the interrupt handlers */

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#ifdef CONFIG_KVM_BOOK3S_PR
#include "../kvm/book3s_rmhandlers.S"
#else
#include "../kvm/book3s_hv_rmhandlers.S"
#endif
#endif

	.align	7
	.globl	__end_interrupts
__end_interrupts:

/*
 * Code from here down to __end_handlers is invoked from the
 * exception prologs above. Because the prologs assemble the
 * addresses of these handlers using the LOAD_HANDLER macro,
 * which uses an addi instruction, these handlers must be in
 * the first 32k of the kernel image.
 */

/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

/*
 * Machine check is different because we use a different
 * save area: PACA_EXMC instead of PACA_EXGEN.
 */
	.align	7
	.globl machine_check_common
machine_check_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	FINISH_NAP
	DISABLE_INTS
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
	STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
	STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */

	.align	7
system_call_entry:
	b	system_call_common

/*
 * Here we have detected that the kernel stack pointer is bad.
 * r9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	ld	r10,EX_R3(r3)
	std	r10,GPR3(r1)
	SAVE_GPR(4,r1)
	SAVE_4GPRS(5,r1)
	ld	r9,EX_R9(r3)
	ld	r10,EX_R10(r3)
	SAVE_2GPRS(9,r1)
	ld	r9,EX_R11(r3)
	ld	r10,EX_R12(r3)
	ld	r11,EX_R13(r3)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)
BEGIN_FTR_SECTION
	ld	r10,EX_CFAR(r3)
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	SAVE_8GPRS(14,r1)
	SAVE_10GPRS(22,r1)
	lhz	r12,PACA_TRAP_SAVE(r13)
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
	ld	r11,exception_marker@toc(r2)
	std	r12,RESULT(r1)
	std	r11,STACK_FRAME_OVERHEAD-16(r1)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b

/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl	h_data_storage_common
h_data_storage_common:
	mfspr	r10,SPRN_HDAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_HDSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unknown_exception
	b	.ret_from_except

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
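	/*
	 * For an ISI the faulting address is the saved NIP and the
	 * fault status lives in SRR1 rather than DSISR; the andis.
	 * below masks out the relevant SRR1 status bits so that
	 * .do_hash_page can treat r4 like a DSISR value.
	 */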
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */

	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)

/*
 * Here is the common SLB miss handler that is used when going to
 * virtual mode for SLB misses; it is currently unused.
 */
#ifdef __DISABLED__
	.align	7
	.globl	slb_miss_user_common
slb_miss_user_common:
	mflr	r10
	std	r3,PACA_EXGEN+EX_DAR(r13)
	stw	r9,PACA_EXGEN+EX_CCR(r13)
	std	r10,PACA_EXGEN+EX_LR(r13)
	std	r11,PACA_EXGEN+EX_SRR0(r13)
	bl	.slb_allocate_user

	ld	r10,PACA_EXGEN+EX_LR(r13)
	ld	r3,PACA_EXGEN+EX_R3(r13)
	lwz	r9,PACA_EXGEN+EX_CCR(r13)
	ld	r11,PACA_EXGEN+EX_SRR0(r13)
	mtlr	r10
	beq-	slb_miss_fault

	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
	beq-	unrecov_user_slb
	mfmsr	r10

.machine push
.machine "power4"
	mtcrf	0x80,r9
.machine pop

	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
	mtmsrd	r10,1

	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.

slb_miss_fault:
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
	ld	r4,PACA_EXGEN+EX_DAR(r13)
	li	r5,0
	std	r4,_DAR(r1)
	std	r5,_DSISR(r1)
	b	handle_page_fault

unrecov_user_slb:
	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

#endif /* __DISABLED__ */


/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contains the saved SRR1, SRR0 is still ready for return,
 * r3 has the faulting address,
 * r9 - r13 are saved in paca->exslb,
 * r3 is saved in paca->slb_r3.
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(slb_miss_realmode)
	mflr	r10
#ifdef CONFIG_RELOCATABLE
	mtctr	r11
#endif

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	.slb_allocate_realmode

	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	ld	r11,PACALPPACAPTR(r13)
	ld	r11,LPPACASRR0(r11)		/* get SRR0 value */
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */

	mtlr	r10

	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	2f

.machine	push
.machine	"power4"
	mtcrf	0x80,r9
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
.machine	pop

#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

2:
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	b	unrecov_slb
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	mfspr	r11,SPRN_SRR0
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10,unrecov_slb)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	rfid
	b	.

unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

	.align	7
	.globl hardware_interrupt_common
	.globl hardware_interrupt_entry
hardware_interrupt_common:
	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
	FINISH_NAP
hardware_interrupt_entry:
	DISABLE_INTS
BEGIN_FTR_SECTION
	bl	.ppc64_runlatch_on
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite

#ifdef CONFIG_PPC_970_NAP
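	/*
	 * Reached from FINISH_NAP (used by machine_check_common and
	 * hardware_interrupt_common above) when the interrupt arrived
	 * while the CPU was napping: clear the napping flag (r10) in
	 * the thread_info local flags (r9, pointed to by r11), and
	 * rewrite the saved NIP with the saved LR so the interrupted
	 * idle loop resumes as if its nap call had simply returned.
	 */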
power4_fixup_nap:
	andc	r9,r9,r10
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	blr
#endif

	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.alignment_exception
	b	.ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.program_check_exception
	b	.ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	1f			/* if from user, just load it up */
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE
1:	bl	.load_up_fpu
	b	fast_exception_return

	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f
	bl	.load_up_altivec
	b	fast_exception_return
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.altivec_unavailable_exception
	b	.ret_from_except

	.align	7
	.globl vsx_unavailable_common
vsx_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	bne	.load_up_vsx
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.vsx_unavailable_exception
	b	.ret_from_except

	.align	7
	.globl	__end_handlers
__end_handlers:

/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
fast_exc_return_irq:			/* restores irq state too */
	ld	r3,SOFTE(r1)
	TRACE_AND_RESTORE_IRQ(r3);
	ld	r12,_MSR(r1)
	rldicl	r4,r12,49,63		/* get MSR_EE to LSB */
	stb	r4,PACAHARDIRQEN(r13)	/* restore paca->hard_enabled */
	b	1f

845
846 .globl fast_exception_return
847 fast_exception_return:
848 ld r12,_MSR(r1)
849 1: ld r11,_NIP(r1)
850 andi. r3,r12,MSR_RI /* check if RI is set */
851 beq- unrecov_fer
852
853 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
854 andi. r3,r12,MSR_PR
855 beq 2f
856 ACCOUNT_CPU_USER_EXIT(r3, r4)
857 2:
858 #endif
859
860 ld r3,_CCR(r1)
861 ld r4,_LINK(r1)
862 ld r5,_CTR(r1)
863 ld r6,_XER(r1)
864 mtcr r3
865 mtlr r4
866 mtctr r5
867 mtxer r6
868 REST_GPR(0, r1)
869 REST_8GPRS(2, r1)
870
871 mfmsr r10
872 rldicl r10,r10,48,1 /* clear EE */
873 rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */
874 mtmsrd r10,1
875
876 mtspr SPRN_SRR1,r12
877 mtspr SPRN_SRR0,r11
878 REST_4GPRS(10, r1)
879 ld r1,GPR1(r1)
880 rfid
881 b . /* prevent speculative execution */
882
883 unrecov_fer:
884 bl .save_nvgprs
885 1: addi r3,r1,STACK_FRAME_OVERHEAD
886 bl .unrecoverable_exception
887 b 1b
888
889
/*
 * Hash table stuff
 */
	.align	7
_STATIC(do_hash_page)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)

	andis.	r0,r4,0xa410		/* weird error? */
	bne-	handle_page_fault	/* if not, try to insert a HPTE */
	andis.	r0,r4,DSISR_DABRMATCH@h
	bne-	handle_dabr_fault

BEGIN_FTR_SECTION
	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
	bne-	do_ste_alloc		/* If so handle it */
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)

	clrrdi	r11,r1,THREAD_SHIFT
	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
	bne	77f			/* then don't call hash_page now */

	/*
	 * On iSeries, we soft-disable interrupts here, then
	 * hard-enable interrupts so that the hash_page code can spin on
	 * the hash_table_lock without problems on a shared processor.
	 */
	DISABLE_INTS

	/*
	 * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
	 * and will clobber volatile registers when irq tracing is enabled
	 * so we need to reload them. It may be possible to be smarter here
	 * and move the irq tracing elsewhere but let's keep it simple for
	 * now
	 */
#ifdef CONFIG_TRACE_IRQFLAGS
	ld	r3,_DAR(r1)
	ld	r4,_DSISR(r1)
	ld	r5,_TRAP(r1)
	ld	r12,_MSR(r1)
	clrrdi	r5,r5,4
#endif /* CONFIG_TRACE_IRQFLAGS */
	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel). We assume
	 * kernel addresses always have the high bit set.
	 */
	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori	r4,r4,1			/* add _PAGE_PRESENT */
	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */

	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 * r5 contains the trap number
	 *
	 * at return r3 = 0 for success
	 */
	bl	.hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if hash_page succeeded */

BEGIN_FW_FTR_SECTION
	/*
	 * If we had interrupts soft-enabled at the point where the
	 * DSI/ISI occurred, and an interrupt came in during hash_page,
	 * handle it now.
	 * We jump to ret_from_except_lite rather than fast_exception_return
	 * because ret_from_except_lite will check for and handle pending
	 * interrupts if necessary.
	 */
	beq	13f
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)

BEGIN_FW_FTR_SECTION
	/*
	 * Here we have interrupts hard-disabled, so it is sufficient
	 * to restore paca->{soft,hard}_enable and get out.
	 */
	beq	fast_exc_return_irq	/* Return from exception on success */
END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)

	/* For a hash failure, we don't bother re-enabling interrupts */
	ble-	12f

	/*
	 * hash_page couldn't handle it, set soft interrupt enable back
	 * to what it was before the trap. Note that .arch_local_irq_restore
	 * handles any interrupts pending at this point.
	 */
	ld	r3,SOFTE(r1)
	TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
	bl	.arch_local_irq_restore
	b	11f

/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	bl	.save_nvgprs
	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_dabr
	b	.ret_from_except_lite

/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
	ENABLE_INTS
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_page_fault
	cmpdi	r3,0
	beq+	13f
	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.bad_page_fault
	b	.ret_from_except

13:	b	.ret_from_except_lite

/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
12:	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r4,_DAR(r1)
	bl	.low_hash_fault
	b	.ret_from_except

/*
 * We come here as a result of a DSI at a point where we don't want
 * to call hash_page, such as when we are accessing memory (possibly
 * user memory) inside a PMU interrupt that occurred while interrupts
 * were soft-disabled. We want to invoke the exception handler for
 * the access, or panic if there isn't a handler.
 */
77:	bl	.save_nvgprs
	mr	r4,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r5,SIGSEGV
	bl	.bad_page_fault
	b	.ret_from_except

/* here we have a segment miss */
do_ste_alloc:
	bl	.ste_allocate		/* try to insert stab entry */
	cmpdi	r3,0
	bne-	handle_page_fault
	b	fast_exception_return

/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 */
	.align	7
_GLOBAL(do_stab_bolted)
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */

	/* Hash to the primary group */
	ld	r10,PACASTABVIRT(r13)
	mfspr	r11,SPRN_DAR
	srdi	r11,r11,28
	rldimi	r10,r11,7,52	/* r10 = first ste of the group */
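	/*
	 * The rldimi inserts the low 5 bits of the ESID at bit
	 * positions 52-56 of the STAB base address, i.e. group offset
	 * = (ESID & 0x1f) * 128: the 4k STAB page holds 32 groups of
	 * eight 16-byte STEs.
	 */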

	/* Calculate VSID */
	/* This is a kernel address, so protovsid = ESID */
	ASM_VSID_SCRAMBLE(r11, r9, 256M)
	rldic	r9,r11,12,16	/* r9 = vsid << 12 */

	/* Search the primary group for a free entry */
1:	ld	r11,0(r10)	/* Test valid bit of the current ste */
	andi.	r11,r11,0x80
	beq	2f
	addi	r10,r10,16
	andi.	r11,r10,0x70
	bne	1b

	/* Stick to only searching the primary group for now.		*/
	/* At least for now, we use a very simple random castout scheme */
	/* Use the TB as a random number ; OR in 1 to avoid entry 0	*/
	mftb	r11
	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
	ori	r11,r11,0x10

	/* r10 currently points to an ste one past the group of interest */
	/* make it point to the randomly selected entry			*/
	subi	r10,r10,128
	or	r10,r10,r11	/* r10 is the entry to invalidate */

	isync			/* mark the entry invalid */
	ld	r11,0(r10)
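	/*
	 * Clear the valid bit with the same rotate trick used in
	 * masked_interrupt above: the valid bit is 0x80 (bit 56), so
	 * rotate left 56 to bring it to bit 0, drop it with the rldicl
	 * mask, then rotate the remaining 8 bits back (56 + 8 = 64).
	 */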
	rldicl	r11,r11,56,1	/* clear the valid bit */
	rotldi	r11,r11,8
	std	r11,0(r10)
	sync

	clrrdi	r11,r11,28	/* Get the esid part of the ste */
	slbie	r11

2:	std	r9,8(r10)	/* Store the vsid part of the ste */
	eieio

	mfspr	r11,SPRN_DAR	/* Get the new esid */
	clrrdi	r11,r11,28	/* Permits a full 32b of ESID */
	ori	r11,r11,0x90	/* Turn on valid and kp */
	std	r11,0(r10)	/* Put new entry back into the stab */

	sync

	/* All done -- return from exception. */
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */

	andi.	r10,r12,MSR_RI
	beq-	unrecov_slb

	mtcrf	0x80,r9			/* restore CR */

	mfmsr	r10
	clrrdi	r10,r10,2
	mtmsrd	r10,1

	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

#ifdef CONFIG_PPC_PSERIES
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	. = 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:
#endif /* CONFIG_PPC_PSERIES */

	/* iSeries does not use the FWNMI stuff, so it is safe to put
	 * this here, even if we later allow kernels that will boot on
	 * both pSeries and iSeries */
#ifdef CONFIG_PPC_ISERIES
	. = LPARMAP_PHYS
	.globl xLparMap
xLparMap:
	.quad	HvEsidsToMap		/* xNumberEsids */
	.quad	HvRangesToMap		/* xNumberRanges */
	.quad	STAB0_PAGE		/* xSegmentTableOffs */
	.zero	40			/* xRsvd */
	/* xEsids (HvEsidsToMap entries of 2 quads) */
	.quad	PAGE_OFFSET_ESID	/* xKernelEsid */
	.quad	PAGE_OFFSET_VSID	/* xKernelVsid */
	.quad	VMALLOC_START_ESID	/* xKernelEsid */
	.quad	VMALLOC_START_VSID	/* xKernelVsid */
	/* xRanges (HvRangesToMap entries of 3 quads) */
	.quad	HvPagesToMap		/* xPages */
	.quad	0			/* xOffset */
	.quad	PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT)	/* xVPN */

#endif /* CONFIG_PPC_ISERIES */

#ifdef CONFIG_PPC_PSERIES
	. = 0x8000
#endif /* CONFIG_PPC_PSERIES */

/*
 * Space for CPU0's segment table.
 *
 * On iSeries, the hypervisor must fill in at least one entry before
 * we get control (with relocate on). The address is given to the hv
 * as a page number (see xLparMap above), so this must be at a
 * fixed address (the linker can't compute (u64)&initial_stab >>
 * PAGE_SHIFT).
 */
	. = STAB0_OFFSET	/* 0x8000 */
	.globl initial_stab
initial_stab:
	.space	4096