/* arch/powerpc/kernel/interrupt_64.S */
#include <asm/asm-offsets.h>
#include <asm/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
#include <asm/head-64.h>
#include <asm/hw_irq.h>
#include <asm/kup.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/ptrace.h>
#include <asm/tm.h>
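/*
 * TOC entries for the native and (with CONFIG_COMPAT) compat system call
 * tables, addressed via the kernel TOC in r2.
 */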
	.section ".toc","aw"
SYS_CALL_TABLE:
	.tc sys_call_table[TC],sys_call_table

#ifdef CONFIG_COMPAT
COMPAT_SYS_CALL_TABLE:
	.tc compat_sys_call_table[TC],compat_sys_call_table
#endif
	.previous

	.align 7
.macro DEBUG_SRR_VALID srr
#ifdef CONFIG_PPC_RFI_SRR_DEBUG
	.ifc \srr,srr
	mfspr	r11,SPRN_SRR0
	ld	r12,_NIP(r1)
100:	tdne	r11,r12
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	mfspr	r11,SPRN_SRR1
	ld	r12,_MSR(r1)
100:	tdne	r11,r12
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	.else
	mfspr	r11,SPRN_HSRR0
	ld	r12,_NIP(r1)
100:	tdne	r11,r12
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	mfspr	r11,SPRN_HSRR1
	ld	r12,_MSR(r1)
100:	tdne	r11,r12
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	.endif
#endif
.endm
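/*
 * scv (system call vectored) entry. The entry vector is expected to arrive
 * here with the PACA in r13, the scv NIA/MSR in r11/r12, and the user r13
 * in r9 (see the stores into _NIP, _MSR and GPR13 below).
 */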
#ifdef CONFIG_PPC_BOOK3S
.macro system_call_vectored name trapnr
	.globl system_call_vectored_\name
system_call_vectored_\name:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG)	/* transaction active? */
	bne	tabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	SCV_INTERRUPT_TO_KERNEL
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
	ld	r2,PACATOC(r13)
	mfcr	r12
	li	r11,0
	/* Can we avoid saving r3-r8 in the common case? */
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	/* Zero r9-r12; this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_LINK(r1)
	std	r11,_CTR(r1)

	li	r11,\trapnr
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	addi	r10,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r10)		/* "regshere" marker */

BEGIN_FTR_SECTION
	HMT_MEDIUM
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
	 * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
	 * and interrupts may be masked and pending already.
	 * system_call_exception() will call trace_hardirqs_off(), which means
	 * interrupts could already have been blocked before trace_hardirqs_off()
	 * is called, but this is the best we can do.
	 */

	/* Calling convention has r9 = orig r0, r10 = regs */
	mr	r9,r0
	bl	system_call_exception

.Lsyscall_vectored_\name\()_exit:
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r5,1 /* scv */
	bl	syscall_exit_prepare
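	/*
	 * syscall_exit_prepare() returns nonzero in r3 when the full GPR set
	 * must be restored (e.g. for ptrace or signal return); the fast path
	 * below restores only what the ABI requires.
	 */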
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
.Lsyscall_vectored_\name\()_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	syscall_vectored_\name\()_restart
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

	ld	r2,_CCR(r1)
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	cmpdi	r3,0
	bne	.Lsyscall_vectored_\name\()_restore_regs

	/* rfscv returns with LR->NIA and CTR->MSR */
	mtlr	r4
	mtctr	r5

	/*
	 * Could zero these as per ABI, but we may consider a stricter ABI
	 * which preserves these if libc implementations can benefit, so
	 * restore them for now until further measurement is done.
	 */
	ld	r0,GPR0(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	/* Zero volatile regs that may contain sensitive kernel data */
	li	r9,0
	li	r10,0
	li	r11,0
	li	r12,0
	mtspr	SPRN_XER,r0

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r13,GPR13(r1)
	ld	r1,GPR1(r1)
	RFSCV_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_vectored_\name\()_restore_regs:
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5

	ld	r3,_CTR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_XER(r1)

	REST_NVGPRS(r1)
	ld	r0,GPR0(r1)
	mtcr	r2
	mtctr	r3
	mtlr	r4
	mtspr	SPRN_XER,r5
	REST_10GPRS(2, r1)
	REST_2GPRS(12, r1)
	ld	r1,GPR1(r1)
	RFI_TO_USER
.Lsyscall_vectored_\name\()_rst_end:

syscall_vectored_\name\()_restart:
_ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	ld	r2,PACATOC(r13)
	ld	r3,RESULT(r1)
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	syscall_exit_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Lsyscall_vectored_\name\()_rst_start
1:
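	/*
	 * The exit window above (between clearing the soft mask and the return
	 * to userspace) is restartable: SOFT_MASK_TABLE marks it as soft-masked
	 * for any interrupt that lands inside it, and RESTART_TABLE routes such
	 * an interrupted exit back through the _restart entry above.
	 */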
SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)

.endm

system_call_vectored common 0x3000

/*
 * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
 * which is tested by system_call_exception when r0 is -1 (as set by vector
 * entry code).
 */
system_call_vectored sigill 0x7ff0

/*
 * Entered via kernel return set up by kernel/sstep.c, must match entry regs
 */
	.globl system_call_vectored_emulate
system_call_vectored_emulate:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_emulate)
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
	b	system_call_vectored_common
#endif /* CONFIG_PPC_BOOK3S */
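/*
 * Real-mode entry point: switch the MSR to the kernel's virtual-mode value
 * (PACAKMSR) and fall through to the common handler.
 */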
	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common_real
system_call_common_real:
_ASM_NOKPROBE_SYMBOL(system_call_common_real)
	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
	mtmsrd	r10

	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common
system_call_common:
_ASM_NOKPROBE_SYMBOL(system_call_common)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG)	/* transaction active? */
	bne	tabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
#ifdef CONFIG_PPC_FSL_BOOK3E
START_BTB_FLUSH_SECTION
	BTB_FLUSH(r10)
END_BTB_FLUSH_SECTION
#endif
	ld	r2,PACATOC(r13)
	mfcr	r12
	li	r11,0
	/* Can we avoid saving r3-r8 in the common case? */
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	/* Zero r9-r12; this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	mflr	r10

	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r12,r11,28,(63-28)
	li	r11,0xc00
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	addi	r10,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r10)		/* "regshere" marker */

#ifdef CONFIG_PPC_BOOK3S
	li	r11,1
	stb	r11,PACASRR_VALID(r13)
#endif
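	/*
	 * PACASRR_VALID, set above, records that SRR0/SRR1 still hold this
	 * context's NIP/MSR, so the exit path can avoid reloading them if
	 * nothing has clobbered the SRRs in the meantime.
	 */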
	/*
	 * We always enter the kernel from userspace with the irq soft mask
	 * enabled and nothing pending. system_call_exception() will call
	 * trace_hardirqs_off().
	 */
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
#ifdef CONFIG_PPC_BOOK3S
	li	r12,-1			/* Set MSR_EE and MSR_RI */
	mtmsrd	r12,1
#else
	wrteei	1
#endif

	/* Calling convention has r9 = orig r0, r10 = regs */
	mr	r9,r0
	bl	system_call_exception

.Lsyscall_exit:
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r5,0 /* !scv */
	bl	syscall_exit_prepare
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Lsyscall_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	syscall_restart
#endif
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

	ld	r2,_CCR(r1)
	ld	r6,_LINK(r1)
	mtlr	r6

#ifdef CONFIG_PPC_BOOK3S
	lbz	r4,PACASRR_VALID(r13)
	cmpdi	r4,0
	bne	1f
	li	r4,0
	stb	r4,PACASRR_VALID(r13)
#endif
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5
1:
	DEBUG_SRR_VALID srr

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	cmpdi	r3,0
	bne	.Lsyscall_restore_regs
	/* Zero volatile regs that may contain sensitive kernel data */
	li	r0,0
	li	r4,0
	li	r5,0
	li	r6,0
	li	r7,0
	li	r8,0
	li	r9,0
	li	r10,0
	li	r11,0
	li	r12,0
	mtctr	r0
	mtspr	SPRN_XER,r0
.Lsyscall_restore_regs_cont:

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r13,GPR13(r1)
	ld	r1,GPR1(r1)
	RFI_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_restore_regs:
	ld	r3,_CTR(r1)
	ld	r4,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r3
	mtspr	SPRN_XER,r4
	ld	r0,GPR0(r1)
	REST_8GPRS(4, r1)
	ld	r12,GPR12(r1)
	b	.Lsyscall_restore_regs_cont
.Lsyscall_rst_end:

#ifdef CONFIG_PPC_BOOK3S
syscall_restart:
_ASM_NOKPROBE_SYMBOL(syscall_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	ld	r2,PACATOC(r13)
	ld	r3,RESULT(r1)
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	syscall_exit_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Lsyscall_rst_start
1:

SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
tabort_syscall:
_ASM_NOKPROBE_SYMBOL(tabort_syscall)
	/* First we need to enable TM in the kernel */
	mfmsr	r10
	li	r9, 1
	rldimi	r10, r9, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r10, 0

	/* tabort: this dooms the transaction, nothing else */
	li	r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
	TABORT(R9)

	/*
	 * Return directly to userspace. We have corrupted user register state,
	 * but userspace will never see that register state. Execution will
	 * resume after the tbegin of the aborted transaction with the
	 * checkpointed register state.
	 */
	li	r9, MSR_RI
	andc	r10, r10, r9
	mtmsrd	r10, 1
	mtspr	SPRN_SRR0, r11
	mtspr	SPRN_SRR1, r12
	RFI_TO_USER
	b	.	/* prevent speculative execution */
#endif

/*
 * If MSR EE/RI was never enabled, IRQs were not reconciled, NVGPRs were not
 * touched, and no exit work was created, then this fast return can be used.
 */
	.balign IFETCH_ALIGN_BYTES
	.globl fast_interrupt_return_srr
fast_interrupt_return_srr:
_ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
	kuap_check_amr r3, r4
	ld	r5,_MSR(r1)
	andi.	r0,r5,MSR_PR
#ifdef CONFIG_PPC_BOOK3S
	beq	1f
	kuap_user_restore r3, r4
	b	.Lfast_user_interrupt_return_srr
1:	kuap_kernel_restore r3, r4
	andi.	r0,r5,MSR_RI
	li	r3,0 /* 0 return value, no EMULATE_STACK_STORE */
	bne+	.Lfast_kernel_interrupt_return_srr
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	.	/* should not get here */
#else
	bne	.Lfast_user_interrupt_return_srr
	b	.Lfast_kernel_interrupt_return_srr
#endif
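/*
 * The .Lfast_{user,kernel}_interrupt_return_srr targets used above are
 * defined by interrupt_return_macro, which is instantiated below for SRR
 * (ordinary interrupts) and, on Book3S, for HSRR (hypervisor interrupts).
 */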
.macro interrupt_return_macro srr
	.balign IFETCH_ALIGN_BYTES
	.globl interrupt_return_\srr
interrupt_return_\srr\():
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
	ld	r4,_MSR(r1)
	andi.	r0,r4,MSR_PR
	beq	interrupt_return_\srr\()_kernel
interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_user_prepare
	cmpdi	r3,0
	bne-	.Lrestore_nvgprs_\srr
.Lrestore_nvgprs_\srr\()_cont:
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Linterrupt_return_\srr\()_user_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	interrupt_return_\srr\()_user_restart
#endif
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

.Lfast_user_interrupt_return_\srr\():
#ifdef CONFIG_PPC_BOOK3S
	.ifc \srr,srr
	lbz	r4,PACASRR_VALID(r13)
	.else
	lbz	r4,PACAHSRR_VALID(r13)
	.endif
	cmpdi	r4,0
	li	r4,0
	bne	1f
#endif
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	.ifc \srr,srr
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACASRR_VALID(r13)
#endif
	.else
	mtspr	SPRN_HSRR0,r11
	mtspr	SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACAHSRR_VALID(r13)
#endif
	.endif
	DEBUG_SRR_VALID \srr
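	/* Debug check: the irq soft mask must be IRQS_ENABLED at this point */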
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	lbz	r4,PACAIRQSOFTMASK(r13)
	tdnei	r4,IRQS_ENABLED
#endif

BEGIN_FTR_SECTION
	ld	r10,_PPR(r1)
	mtspr	SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	li	r0,0

	REST_4GPRS(7, r1)
	REST_2GPRS(11, r1)
	REST_GPR(13, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_4GPRS(2, r1)
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	.ifc \srr,srr
	RFI_TO_USER
	.else
	HRFI_TO_USER
	.endif
	b	.	/* prevent speculative execution */
.Linterrupt_return_\srr\()_user_rst_end:

.Lrestore_nvgprs_\srr\():
	REST_NVGPRS(r1)
	b	.Lrestore_nvgprs_\srr\()_cont

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_user_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	ld	r2,PACATOC(r13)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	interrupt_exit_user_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Linterrupt_return_\srr\()_user_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
#endif

	.balign IFETCH_ALIGN_BYTES
interrupt_return_\srr\()_kernel:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_kernel_prepare

	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
.Linterrupt_return_\srr\()_kernel_rst_start:
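	/* Restore the irq soft-mask state of the interrupted kernel context */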
	ld	r11,SOFTE(r1)
	cmpwi	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bne	1f
#ifdef CONFIG_PPC_BOOK3S
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	interrupt_return_\srr\()_kernel_restart
#endif
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
1:

.Lfast_kernel_interrupt_return_\srr\():
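	/* cr1 remembers whether a stack store must be emulated (1: below) */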
	cmpdi	cr1,r3,0
#ifdef CONFIG_PPC_BOOK3S
	.ifc \srr,srr
	lbz	r4,PACASRR_VALID(r13)
	.else
	lbz	r4,PACAHSRR_VALID(r13)
	.endif
	cmpdi	r4,0
	li	r4,0
	bne	1f
#endif
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	.ifc \srr,srr
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACASRR_VALID(r13)
#endif
	.else
	mtspr	SPRN_HSRR0,r11
	mtspr	SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACAHSRR_VALID(r13)
#endif
	.endif
	DEBUG_SRR_VALID \srr

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_LINK(r1)
	ld	r4,_CTR(r1)
	ld	r5,_XER(r1)
	ld	r6,_CCR(r1)
	li	r0,0

	REST_4GPRS(7, r1)
	REST_2GPRS(11, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale exception_marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	std	r0,STACK_FRAME_OVERHEAD-16(r1)

	REST_4GPRS(2, r1)

	bne-	cr1,1f /* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	.ifc \srr,srr
	RFI_TO_KERNEL
	.else
	HRFI_TO_KERNEL
	.endif
	b	.	/* prevent speculative execution */

1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered. Use
	 * PACA_EXGEN as temporary storage to hold the store data, as
	 * interrupts are disabled here so it won't be clobbered.
	 */
	mtcr	r6
	std	r9,PACA_EXGEN+0(r13)
	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	std	r9,0(r1) /* perform store component of stdu */
	ld	r9,PACA_EXGEN+0(r13)

	.ifc \srr,srr
	RFI_TO_KERNEL
	.else
	HRFI_TO_KERNEL
	.endif
	b	.	/* prevent speculative execution */
.Linterrupt_return_\srr\()_kernel_rst_end:

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_kernel_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	ld	r2,PACATOC(r13)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	interrupt_exit_kernel_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Linterrupt_return_\srr\()_kernel_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
#endif

.endm

interrupt_return_macro srr
#ifdef CONFIG_PPC_BOOK3S
interrupt_return_macro hsrr
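/*
 * __end_soft_masked bounds the kernel text that may be treated as
 * implicitly soft-masked; addresses at or above this symbol never are.
 */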
	.globl __end_soft_masked
__end_soft_masked:
DEFINE_FIXED_SYMBOL(__end_soft_masked)
#endif /* CONFIG_PPC_BOOK3S */

#ifdef CONFIG_PPC_BOOK3S
_GLOBAL(ret_from_fork_scv)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0	/* fork() return value */
	b	.Lsyscall_vectored_common_exit
#endif

_GLOBAL(ret_from_fork)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0	/* fork() return value */
	b	.Lsyscall_exit
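/*
 * Kernel threads land here from the switch frame set up by copy_thread():
 * r14 is expected to hold the thread function and r15 its argument; ELF
 * ABIv2 additionally wants r12 = entry address at the global entry point.
 */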
_GLOBAL(ret_from_kernel_thread)
	bl	schedule_tail
	REST_NVGPRS(r1)
	mtctr	r14
	mr	r3,r15
#ifdef PPC64_ELF_ABI_v2
	mr	r12,r14
#endif
	bctrl
	li	r3,0
	b	.Lsyscall_exit