]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - arch/powerpc/kernel/interrupt_64.S
4b1ff94e67eb40cac47300884c1b670eebae288b
[mirror_ubuntu-jammy-kernel.git] / arch / powerpc / kernel / interrupt_64.S
1 #include <asm/asm-offsets.h>
2 #include <asm/bug.h>
3 #ifdef CONFIG_PPC_BOOK3S
4 #include <asm/exception-64s.h>
5 #else
6 #include <asm/exception-64e.h>
7 #endif
8 #include <asm/feature-fixups.h>
9 #include <asm/head-64.h>
10 #include <asm/hw_irq.h>
11 #include <asm/kup.h>
12 #include <asm/mmu.h>
13 #include <asm/ppc_asm.h>
14 #include <asm/ptrace.h>
15
/*
 * TOC entries so asm code can locate the syscall dispatch tables.
 * COMPAT_SYS_CALL_TABLE (32-bit compat table) only exists under
 * CONFIG_COMPAT.
 */
16 .section ".toc","aw"
17 SYS_CALL_TABLE:
18 .tc sys_call_table[TC],sys_call_table
19 
20 #ifdef CONFIG_COMPAT
21 COMPAT_SYS_CALL_TABLE:
22 .tc compat_sys_call_table[TC],compat_sys_call_table
23 #endif
24 .previous
25 
/* 2^7 = 128-byte align the code that follows. */
26 .align 7
/*
 * DEBUG_SRR_VALID srr|hsrr
 *
 * Debug check (CONFIG_PPC_RFI_SRR_DEBUG only): warn-once via a trap if
 * SRR0/SRR1 (argument "srr") or HSRR0/HSRR1 (any other argument) no
 * longer match the _NIP/_MSR values saved in the pt_regs frame at r1,
 * i.e. the interrupt-return registers were clobbered since they were
 * set up.  The low 2 bits of the saved NIP are cleared (clrrdi) before
 * the compare since instruction addresses are 4-byte aligned.
 * Clobbers: r11, r12, and traps (tdne) on mismatch.
 */
28 .macro DEBUG_SRR_VALID srr
29 #ifdef CONFIG_PPC_RFI_SRR_DEBUG
30 .ifc \srr,srr
31 mfspr r11,SPRN_SRR0
32 ld r12,_NIP(r1)
33 clrrdi r12,r12,2
34 100: tdne r11,r12
35 EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
36 mfspr r11,SPRN_SRR1
37 ld r12,_MSR(r1)
38 100: tdne r11,r12
39 EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
40 .else
41 mfspr r11,SPRN_HSRR0
42 ld r12,_NIP(r1)
43 clrrdi r12,r12,2
44 100: tdne r11,r12
45 EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
46 mfspr r11,SPRN_HSRR1
47 ld r12,_MSR(r1)
48 100: tdne r11,r12
49 EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
50 .endif
51 #endif
52 .endm
53
54 #ifdef CONFIG_PPC_BOOK3S
/*
 * system_call_vectored name trapnr
 *
 * Entry/exit path for the 'scv' (system call vectored) instruction on
 * Book3S-64.  Switches to the kernel stack saved in PACAKSAVE, builds a
 * pt_regs frame (NIP/MSR arrive in r11/r12 from the vector, user r1 is
 * kept in r10), stores \trapnr as the trap value, then calls
 * system_call_exception().  Exit either returns via rfscv with only the
 * volatile registers restored/zeroed (fast path), or restores all GPRs
 * and returns via rfid when syscall_exit_prepare() returns non-zero.
 * A restart stub re-runs the soft-mask exit window if an interrupt
 * became pending (PACAIRQHAPPENED) during it.
 */
55 .macro system_call_vectored name trapnr
56 .globl system_call_vectored_\name
57 system_call_vectored_\name:
58 _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
59 SCV_INTERRUPT_TO_KERNEL
/* r10 = user stack pointer; switch r1 to the kernel stack from the PACA. */
60 mr r10,r1
61 ld r1,PACAKSAVE(r13)
62 std r10,0(r1)
/* r11/r12 hold user NIP/MSR as set up by the scv entry vector. */
63 std r11,_NIP(r1)
64 std r12,_MSR(r1)
65 std r0,GPR0(r1)
66 std r10,GPR1(r1)
67 std r2,GPR2(r1)
68 ld r2,PACATOC(r13)
69 mfcr r12
70 li r11,0
71 /* Can we avoid saving r3-r8 in common case? */
72 std r3,GPR3(r1)
73 std r4,GPR4(r1)
74 std r5,GPR5(r1)
75 std r6,GPR6(r1)
76 std r7,GPR7(r1)
77 std r8,GPR8(r1)
78 /* Zero r9-r12, this should only be required when restoring all GPRs */
79 std r11,GPR9(r1)
80 std r11,GPR10(r1)
81 std r11,GPR11(r1)
82 std r11,GPR12(r1)
/* r9 holds the user r13 value here (swapped with the PACA on entry). */
83 std r9,GPR13(r1)
84 SAVE_NVGPRS(r1)
85 std r11,_XER(r1)
86 std r11,_LINK(r1)
87 std r11,_CTR(r1)
88
/* Record the trap number so C code can distinguish scv variants. */
89 li r11,\trapnr
90 std r11,_TRAP(r1)
91 std r12,_CCR(r1)
92 addi r10,r1,STACK_FRAME_OVERHEAD
93 ld r11,exception_marker@toc(r2)
94 std r11,-16(r10) /* "regshere" marker */
95
96 BEGIN_FTR_SECTION
97 HMT_MEDIUM
98 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
99
100 /*
101 * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
102 * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
103 * and interrupts may be masked and pending already.
104 * system_call_exception() will call trace_hardirqs_off() which means
105 * interrupts could already have been blocked before trace_hardirqs_off,
106 * but this is the best we can do.
107 */
108
109 /* Calling convention has r9 = orig r0, r10 = regs */
110 mr r9,r0
111 bl system_call_exception
112
/* Syscall exit: r3 from syscall_exit_prepare() selects fast vs full-restore return. */
113 .Lsyscall_vectored_\name\()_exit:
114 addi r4,r1,STACK_FRAME_OVERHEAD
115 li r5,1 /* scv */
116 bl syscall_exit_prepare
117 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
/* Start of the restartable exit window (covered by SOFT_MASK/RESTART tables). */
118 .Lsyscall_vectored_\name\()_rst_start:
119 lbz r11,PACAIRQHAPPENED(r13)
120 andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
121 bne- syscall_vectored_\name\()_restart
122 li r11,IRQS_ENABLED
123 stb r11,PACAIRQSOFTMASK(r13)
124 li r11,0
125 stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
126
127 ld r2,_CCR(r1)
128 ld r4,_NIP(r1)
129 ld r5,_MSR(r1)
130
131 BEGIN_FTR_SECTION
132 stdcx. r0,0,r1 /* to clear the reservation */
133 END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
134
135 BEGIN_FTR_SECTION
136 HMT_MEDIUM_LOW
137 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
138
139 cmpdi r3,0
140 bne .Lsyscall_vectored_\name\()_restore_regs
141
142 /* rfscv returns with LR->NIA and CTR->MSR */
143 mtlr r4
144 mtctr r5
145
146 /* Could zero these as per ABI, but we may consider a stricter ABI
147 * which preserves these if libc implementations can benefit, so
148 * restore them for now until further measurement is done. */
149 ld r0,GPR0(r1)
150 ld r4,GPR4(r1)
151 ld r5,GPR5(r1)
152 ld r6,GPR6(r1)
153 ld r7,GPR7(r1)
154 ld r8,GPR8(r1)
155 /* Zero volatile regs that may contain sensitive kernel data */
156 li r9,0
157 li r10,0
158 li r11,0
159 li r12,0
/* XER was saved as 0 in _XER above on the fast path; r0 reload happens after. */
160 mtspr SPRN_XER,r0
161
162 /*
163 * We don't need to restore AMR on the way back to userspace for KUAP.
164 * The value of AMR only matters while we're in the kernel.
165 */
166 mtcr r2
167 ld r2,GPR2(r1)
168 ld r3,GPR3(r1)
169 ld r13,GPR13(r1)
170 ld r1,GPR1(r1)
171 RFSCV_TO_USER
172 b . /* prevent speculative execution */
173
/* Slow path: restore every GPR and return with rfid instead of rfscv. */
174 .Lsyscall_vectored_\name\()_restore_regs:
175 mtspr SPRN_SRR0,r4
176 mtspr SPRN_SRR1,r5
177
178 ld r3,_CTR(r1)
179 ld r4,_LINK(r1)
180 ld r5,_XER(r1)
181
182 REST_NVGPRS(r1)
183 ld r0,GPR0(r1)
184 mtcr r2
185 mtctr r3
186 mtlr r4
187 mtspr SPRN_XER,r5
188 REST_10GPRS(2, r1)
189 REST_2GPRS(12, r1)
190 ld r1,GPR1(r1)
191 RFI_TO_USER
192 .Lsyscall_vectored_\name\()_rst_end:
193
/*
 * Restart stub: an interrupt became pending during the exit window, so
 * re-disable soft-irqs, call syscall_exit_restart(), and retry the window.
 */
194 syscall_vectored_\name\()_restart:
195 _ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
196 GET_PACA(r13)
197 ld r1,PACA_EXIT_SAVE_R1(r13)
198 ld r2,PACATOC(r13)
199 ld r3,RESULT(r1)
200 addi r4,r1,STACK_FRAME_OVERHEAD
201 li r11,IRQS_ALL_DISABLED
202 stb r11,PACAIRQSOFTMASK(r13)
203 bl syscall_exit_restart
204 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
205 b .Lsyscall_vectored_\name\()_rst_start
206 1:
207
208 SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
209 RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)
210
211 .endm
212
/* Normal scv entry: trap number 0x3000. */
213 system_call_vectored common 0x3000
214
215 /*
216 * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
217 * which is tested by system_call_exception when r0 is -1 (as set by vector
218 * entry code).
219 */
220 system_call_vectored sigill 0x7ff0
221
222
223 /*
224 * Entered via kernel return set up by kernel/sstep.c, must match entry regs
225 */
226 .globl system_call_vectored_emulate
227 system_call_vectored_emulate:
228 _ASM_NOKPROBE_SYMBOL(system_call_vectored_emulate)
/* Emulated scv: mark soft-masked (as the real vector would) then reuse the common path. */
229 li r10,IRQS_ALL_DISABLED
230 stb r10,PACAIRQSOFTMASK(r13)
231 b system_call_vectored_common
232 #endif /* CONFIG_PPC_BOOK3S */
233
/*
 * Real-mode 'sc' entry: load the kernel's virtual-mode MSR from the PACA
 * and switch to it, then fall through into system_call_common below.
 */
234 .balign IFETCH_ALIGN_BYTES
235 .globl system_call_common_real
236 system_call_common_real:
237 _ASM_NOKPROBE_SYMBOL(system_call_common_real)
238 ld r10,PACAKMSR(r13) /* get MSR value for kernel */
239 mtmsrd r10
240
/*
 * system_call_common: entry/exit for the 'sc' instruction.
 *
 * Mirrors the scv path above: switch to the kernel stack from PACAKSAVE,
 * build a pt_regs frame (user NIP/MSR arrive in r11/r12), store trap
 * 0xc00, enable MSR[EE]/MSR[RI], and call system_call_exception().
 * Exit (.Lsyscall_exit) returns to userspace via rfid, with a Book3S
 * restart stub (syscall_restart) covering the soft-mask exit window.
 * .Lsyscall_exit is also the return target for ret_from_fork and
 * ret_from_kernel_thread at the bottom of this file.
 */
241 .balign IFETCH_ALIGN_BYTES
242 .globl system_call_common
243 system_call_common:
244 _ASM_NOKPROBE_SYMBOL(system_call_common)
/* r10 = user stack pointer; switch to the kernel stack from the PACA. */
245 mr r10,r1
246 ld r1,PACAKSAVE(r13)
247 std r10,0(r1)
248 std r11,_NIP(r1)
249 std r12,_MSR(r1)
250 std r0,GPR0(r1)
251 std r10,GPR1(r1)
252 std r2,GPR2(r1)
253 #ifdef CONFIG_PPC_FSL_BOOK3E
/* Branch-target-buffer flush mitigation on FSL Book3E parts. */
254 START_BTB_FLUSH_SECTION
255 BTB_FLUSH(r10)
256 END_BTB_FLUSH_SECTION
257 #endif
258 ld r2,PACATOC(r13)
259 mfcr r12
260 li r11,0
261 /* Can we avoid saving r3-r8 in common case? */
262 std r3,GPR3(r1)
263 std r4,GPR4(r1)
264 std r5,GPR5(r1)
265 std r6,GPR6(r1)
266 std r7,GPR7(r1)
267 std r8,GPR8(r1)
268 /* Zero r9-r12, this should only be required when restoring all GPRs */
269 std r11,GPR9(r1)
270 std r11,GPR10(r1)
271 std r11,GPR11(r1)
272 std r11,GPR12(r1)
/* r9 holds the user r13 value here (swapped with the PACA on entry). */
273 std r9,GPR13(r1)
274 SAVE_NVGPRS(r1)
275 std r11,_XER(r1)
276 std r11,_CTR(r1)
277 mflr r10
278
279 /*
280 * This clears CR0.SO (bit 28), which is the error indication on
281 * return from this system call.
282 */
283 rldimi r12,r11,28,(63-28)
/* Trap number 0xc00 = system call interrupt. */
284 li r11,0xc00
285 std r10,_LINK(r1)
286 std r11,_TRAP(r1)
287 std r12,_CCR(r1)
288 addi r10,r1,STACK_FRAME_OVERHEAD
289 ld r11,exception_marker@toc(r2)
290 std r11,-16(r10) /* "regshere" marker */
291
292 #ifdef CONFIG_PPC_BOOK3S
/* SRR0/SRR1 still hold the user return state at this point. */
293 li r11,1
294 stb r11,PACASRR_VALID(r13)
295 #endif
296
297 /*
298 * We always enter kernel from userspace with irq soft-mask enabled and
299 * nothing pending. system_call_exception() will call
300 * trace_hardirqs_off().
301 */
302 li r11,IRQS_ALL_DISABLED
303 stb r11,PACAIRQSOFTMASK(r13)
304 #ifdef CONFIG_PPC_BOOK3S
305 li r12,-1 /* Set MSR_EE and MSR_RI */
306 mtmsrd r12,1
307 #else
308 wrteei 1
309 #endif
310
311 /* Calling convention has r9 = orig r0, r10 = regs */
312 mr r9,r0
313 bl system_call_exception
314
/* Syscall exit: r3 from syscall_exit_prepare() selects fast vs full-restore return. */
315 .Lsyscall_exit:
316 addi r4,r1,STACK_FRAME_OVERHEAD
317 li r5,0 /* !scv */
318 bl syscall_exit_prepare
319 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
320 #ifdef CONFIG_PPC_BOOK3S
/* Start of the restartable exit window (covered by SOFT_MASK/RESTART tables). */
321 .Lsyscall_rst_start:
322 lbz r11,PACAIRQHAPPENED(r13)
323 andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
324 bne- syscall_restart
325 #endif
326 li r11,IRQS_ENABLED
327 stb r11,PACAIRQSOFTMASK(r13)
328 li r11,0
329 stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
330
331 ld r2,_CCR(r1)
332 ld r6,_LINK(r1)
333 mtlr r6
334
335 #ifdef CONFIG_PPC_BOOK3S
/* Skip the SRR0/SRR1 reload if the PACA says they are still valid. */
336 lbz r4,PACASRR_VALID(r13)
337 cmpdi r4,0
338 bne 1f
339 li r4,0
340 stb r4,PACASRR_VALID(r13)
341 #endif
342 ld r4,_NIP(r1)
343 ld r5,_MSR(r1)
344 mtspr SPRN_SRR0,r4
345 mtspr SPRN_SRR1,r5
346 1:
347 DEBUG_SRR_VALID srr
348
349 BEGIN_FTR_SECTION
350 stdcx. r0,0,r1 /* to clear the reservation */
351 END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
352
353 cmpdi r3,0
354 bne .Lsyscall_restore_regs
355 /* Zero volatile regs that may contain sensitive kernel data */
356 li r0,0
357 li r4,0
358 li r5,0
359 li r6,0
360 li r7,0
361 li r8,0
362 li r9,0
363 li r10,0
364 li r11,0
365 li r12,0
366 mtctr r0
367 mtspr SPRN_XER,r0
368 .Lsyscall_restore_regs_cont:
369
370 BEGIN_FTR_SECTION
371 HMT_MEDIUM_LOW
372 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
373
374 /*
375 * We don't need to restore AMR on the way back to userspace for KUAP.
376 * The value of AMR only matters while we're in the kernel.
377 */
378 mtcr r2
379 ld r2,GPR2(r1)
380 ld r3,GPR3(r1)
381 ld r13,GPR13(r1)
382 ld r1,GPR1(r1)
383 RFI_TO_USER
384 b . /* prevent speculative execution */
385
/* Slow path: restore CTR/XER, non-volatiles and r4-r12 before returning. */
386 .Lsyscall_restore_regs:
387 ld r3,_CTR(r1)
388 ld r4,_XER(r1)
389 REST_NVGPRS(r1)
390 mtctr r3
391 mtspr SPRN_XER,r4
392 ld r0,GPR0(r1)
393 REST_8GPRS(4, r1)
394 ld r12,GPR12(r1)
395 b .Lsyscall_restore_regs_cont
396 .Lsyscall_rst_end:
397
398 #ifdef CONFIG_PPC_BOOK3S
/*
 * Restart stub: an interrupt became pending during the exit window, so
 * re-disable soft-irqs, call syscall_exit_restart(), and retry the window.
 */
399 syscall_restart:
400 _ASM_NOKPROBE_SYMBOL(syscall_restart)
401 GET_PACA(r13)
402 ld r1,PACA_EXIT_SAVE_R1(r13)
403 ld r2,PACATOC(r13)
404 ld r3,RESULT(r1)
405 addi r4,r1,STACK_FRAME_OVERHEAD
406 li r11,IRQS_ALL_DISABLED
407 stb r11,PACAIRQSOFTMASK(r13)
408 bl syscall_exit_restart
409 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
410 b .Lsyscall_rst_start
411 1:
412
413 SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
414 RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
415 #endif
416
417 /*
418 * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
419 * touched, no exit work created, then this can be used.
420 */
421 .balign IFETCH_ALIGN_BYTES
422 .globl fast_interrupt_return_srr
423 fast_interrupt_return_srr:
424 _ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
425 kuap_check_amr r3, r4
/* Saved MSR[PR] selects the user vs kernel fast-return path below. */
426 ld r5,_MSR(r1)
427 andi. r0,r5,MSR_PR
428 #ifdef CONFIG_PPC_BOOK3S
429 beq 1f
430 kuap_user_restore r3, r4
431 b .Lfast_user_interrupt_return_srr
432 1: kuap_kernel_restore r3, r4
/* Kernel return with MSR[RI] clear is unrecoverable — report and stop. */
433 andi. r0,r5,MSR_RI
434 li r3,0 /* 0 return value, no EMULATE_STACK_STORE */
435 bne+ .Lfast_kernel_interrupt_return_srr
436 addi r3,r1,STACK_FRAME_OVERHEAD
437 bl unrecoverable_exception
438 b . /* should not get here */
439 #else
440 bne .Lfast_user_interrupt_return_srr
441 b .Lfast_kernel_interrupt_return_srr
442 #endif
443
/*
 * interrupt_return_macro srr|hsrr
 *
 * Common interrupt-return path, instantiated once for SRR-based and (on
 * Book3S) once for HSRR-based interrupts.  Dispatches on the saved
 * MSR[PR] bit to a user or kernel return path.  Each path calls its C
 * prepare function, then runs a restartable exit window (covered by
 * SOFT_MASK/RESTART tables on Book3S) that drops the irq soft-mask and
 * restores registers before RFI/HRFI.  The kernel path additionally
 * supports emulating a 'stdu r1' store requested by the prepare function
 * (r3 != 0), using PACA_EXGEN as scratch.  The .Lfast_* labels are
 * entered directly from fast_interrupt_return_srr above.
 */
444 .macro interrupt_return_macro srr
445 .balign IFETCH_ALIGN_BYTES
446 .globl interrupt_return_\srr
447 interrupt_return_\srr\():
448 _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
449 ld r4,_MSR(r1)
450 andi. r0,r4,MSR_PR
451 beq interrupt_return_\srr\()_kernel
452 interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
453 _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
454 addi r3,r1,STACK_FRAME_OVERHEAD
455 bl interrupt_exit_user_prepare
/* Non-zero r3 means the non-volatile GPRs must be restored from the frame. */
456 cmpdi r3,0
457 bne- .Lrestore_nvgprs_\srr
458 .Lrestore_nvgprs_\srr\()_cont:
459 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
460 #ifdef CONFIG_PPC_BOOK3S
/* Start of the restartable user-exit window. */
461 .Linterrupt_return_\srr\()_user_rst_start:
462 lbz r11,PACAIRQHAPPENED(r13)
463 andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
464 bne- interrupt_return_\srr\()_user_restart
465 #endif
466 li r11,IRQS_ENABLED
467 stb r11,PACAIRQSOFTMASK(r13)
468 li r11,0
469 stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
470
471 .Lfast_user_interrupt_return_\srr\():
472 #ifdef CONFIG_PPC_BOOK3S
/* Skip the (H)SRR reload if the PACA says the registers are still valid. */
473 .ifc \srr,srr
474 lbz r4,PACASRR_VALID(r13)
475 .else
476 lbz r4,PACAHSRR_VALID(r13)
477 .endif
478 cmpdi r4,0
479 li r4,0
480 bne 1f
481 #endif
482 ld r11,_NIP(r1)
483 ld r12,_MSR(r1)
484 .ifc \srr,srr
485 mtspr SPRN_SRR0,r11
486 mtspr SPRN_SRR1,r12
487 1:
488 #ifdef CONFIG_PPC_BOOK3S
489 stb r4,PACASRR_VALID(r13)
490 #endif
491 .else
492 mtspr SPRN_HSRR0,r11
493 mtspr SPRN_HSRR1,r12
494 1:
495 #ifdef CONFIG_PPC_BOOK3S
496 stb r4,PACAHSRR_VALID(r13)
497 #endif
498 .endif
499 DEBUG_SRR_VALID \srr
500
501 #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
/* Debug trap if we are about to return to userspace still soft-masked. */
502 lbz r4,PACAIRQSOFTMASK(r13)
503 tdnei r4,IRQS_ENABLED
504 #endif
505
506 BEGIN_FTR_SECTION
507 ld r10,_PPR(r1)
508 mtspr SPRN_PPR,r10
509 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
510
511 BEGIN_FTR_SECTION
512 stdcx. r0,0,r1 /* to clear the reservation */
513 FTR_SECTION_ELSE
514 ldarx r0,0,r1
515 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
516
517 ld r3,_CCR(r1)
518 ld r4,_LINK(r1)
519 ld r5,_CTR(r1)
520 ld r6,_XER(r1)
521 li r0,0
522
523 REST_4GPRS(7, r1)
524 REST_2GPRS(11, r1)
525 REST_GPR(13, r1)
526
527 mtcr r3
528 mtlr r4
529 mtctr r5
530 mtspr SPRN_XER,r6
531
532 REST_4GPRS(2, r1)
533 REST_GPR(6, r1)
534 REST_GPR(0, r1)
535 REST_GPR(1, r1)
536 .ifc \srr,srr
537 RFI_TO_USER
538 .else
539 HRFI_TO_USER
540 .endif
541 b . /* prevent speculative execution */
542 .Linterrupt_return_\srr\()_user_rst_end:
543
544 .Lrestore_nvgprs_\srr\():
545 REST_NVGPRS(r1)
546 b .Lrestore_nvgprs_\srr\()_cont
547
548 #ifdef CONFIG_PPC_BOOK3S
/* User-exit restart stub: re-mask irqs and retry the exit window. */
549 interrupt_return_\srr\()_user_restart:
550 _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
551 GET_PACA(r13)
552 ld r1,PACA_EXIT_SAVE_R1(r13)
553 ld r2,PACATOC(r13)
554 addi r3,r1,STACK_FRAME_OVERHEAD
555 li r11,IRQS_ALL_DISABLED
556 stb r11,PACAIRQSOFTMASK(r13)
557 bl interrupt_exit_user_restart
558 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
559 b .Linterrupt_return_\srr\()_user_rst_start
560 1:
561
562 SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
563 RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
564 #endif
565
566 .balign IFETCH_ALIGN_BYTES
567 interrupt_return_\srr\()_kernel:
568 _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
569 addi r3,r1,STACK_FRAME_OVERHEAD
570 bl interrupt_exit_kernel_prepare
571
572 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
573 .Linterrupt_return_\srr\()_kernel_rst_start:
/* Restore the interrupted context's soft-mask state from the frame (SOFTE). */
574 ld r11,SOFTE(r1)
575 cmpwi r11,IRQS_ENABLED
576 stb r11,PACAIRQSOFTMASK(r13)
577 bne 1f
578 #ifdef CONFIG_PPC_BOOK3S
579 lbz r11,PACAIRQHAPPENED(r13)
580 andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
581 bne- interrupt_return_\srr\()_kernel_restart
582 #endif
583 li r11,0
584 stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
585 1:
586
587 .Lfast_kernel_interrupt_return_\srr\():
/* cr1 remembers whether a stack-store emulation was requested (r3 != 0). */
588 cmpdi cr1,r3,0
589 #ifdef CONFIG_PPC_BOOK3S
590 .ifc \srr,srr
591 lbz r4,PACASRR_VALID(r13)
592 .else
593 lbz r4,PACAHSRR_VALID(r13)
594 .endif
595 cmpdi r4,0
596 li r4,0
597 bne 1f
598 #endif
599 ld r11,_NIP(r1)
600 ld r12,_MSR(r1)
601 .ifc \srr,srr
602 mtspr SPRN_SRR0,r11
603 mtspr SPRN_SRR1,r12
604 1:
605 #ifdef CONFIG_PPC_BOOK3S
606 stb r4,PACASRR_VALID(r13)
607 #endif
608 .else
609 mtspr SPRN_HSRR0,r11
610 mtspr SPRN_HSRR1,r12
611 1:
612 #ifdef CONFIG_PPC_BOOK3S
613 stb r4,PACAHSRR_VALID(r13)
614 #endif
615 .endif
616 DEBUG_SRR_VALID \srr
617
618 BEGIN_FTR_SECTION
619 stdcx. r0,0,r1 /* to clear the reservation */
620 FTR_SECTION_ELSE
621 ldarx r0,0,r1
622 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
623
624 ld r3,_LINK(r1)
625 ld r4,_CTR(r1)
626 ld r5,_XER(r1)
627 ld r6,_CCR(r1)
628 li r0,0
629
630 REST_4GPRS(7, r1)
631 REST_2GPRS(11, r1)
632
633 mtlr r3
634 mtctr r4
635 mtspr SPRN_XER,r5
636
637 /*
638 * Leaving a stale exception_marker on the stack can confuse
639 * the reliable stack unwinder later on. Clear it.
640 */
641 std r0,STACK_FRAME_OVERHEAD-16(r1)
642
643 REST_4GPRS(2, r1)
644
645 bne- cr1,1f /* emulate stack store */
646 mtcr r6
647 REST_GPR(6, r1)
648 REST_GPR(0, r1)
649 REST_GPR(1, r1)
650 .ifc \srr,srr
651 RFI_TO_KERNEL
652 .else
653 HRFI_TO_KERNEL
654 .endif
655 b . /* prevent speculative execution */
656
657 1: /*
658 * Emulate stack store with update. New r1 value was already calculated
659 * and updated in our interrupt regs by emulate_loadstore, but we can't
660 * store the previous value of r1 to the stack before re-loading our
661 * registers from it, otherwise they could be clobbered. Use
662 * PACA_EXGEN as temporary storage to hold the store data, as
663 * interrupts are disabled here so it won't be clobbered.
664 */
665 mtcr r6
666 std r9,PACA_EXGEN+0(r13)
667 addi r9,r1,INT_FRAME_SIZE /* get original r1 */
668 REST_GPR(6, r1)
669 REST_GPR(0, r1)
670 REST_GPR(1, r1)
671 std r9,0(r1) /* perform store component of stdu */
672 ld r9,PACA_EXGEN+0(r13)
673
674 .ifc \srr,srr
675 RFI_TO_KERNEL
676 .else
677 HRFI_TO_KERNEL
678 .endif
679 b . /* prevent speculative execution */
680 .Linterrupt_return_\srr\()_kernel_rst_end:
681
682 #ifdef CONFIG_PPC_BOOK3S
/* Kernel-exit restart stub: re-mask irqs and retry the exit window. */
683 interrupt_return_\srr\()_kernel_restart:
684 _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
685 GET_PACA(r13)
686 ld r1,PACA_EXIT_SAVE_R1(r13)
687 ld r2,PACATOC(r13)
688 addi r3,r1,STACK_FRAME_OVERHEAD
689 li r11,IRQS_ALL_DISABLED
690 stb r11,PACAIRQSOFTMASK(r13)
691 bl interrupt_exit_kernel_restart
692 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
693 b .Linterrupt_return_\srr\()_kernel_rst_start
694 1:
695
696 SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
697 RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
698 #endif
699
700 .endm
701
/* Instantiate the return path for SRR, and additionally HSRR on Book3S. */
702 interrupt_return_macro srr
703 #ifdef CONFIG_PPC_BOOK3S
704 interrupt_return_macro hsrr
705 
/* Marks the end of the soft-masked code region on Book3S. */
706 .globl __end_soft_masked
707 __end_soft_masked:
708 DEFINE_FIXED_SYMBOL(__end_soft_masked)
709 #endif /* CONFIG_PPC_BOOK3S */
710
711 #ifdef CONFIG_PPC_BOOK3S
/*
 * First return of a child forked from an scv syscall: finish the context
 * switch, then join the scv exit path with a 0 return value.
 */
712 _GLOBAL(ret_from_fork_scv)
713 bl schedule_tail
714 REST_NVGPRS(r1)
715 li r3,0 /* fork() return value */
716 b .Lsyscall_vectored_common_exit
717 #endif
718
/* As above, but for a child forked from an 'sc' syscall. */
719 _GLOBAL(ret_from_fork)
720 bl schedule_tail
721 REST_NVGPRS(r1)
722 li r3,0 /* fork() return value */
723 b .Lsyscall_exit
724
/*
 * First schedule of a kernel thread: call the thread function (r14) with
 * its argument (r15), then exit through the syscall return path.
 */
725 _GLOBAL(ret_from_kernel_thread)
726 bl schedule_tail
727 REST_NVGPRS(r1)
728 mtctr r14
729 mr r3,r15
730 #ifdef PPC64_ELF_ABI_v2
/* ELFv2 indirect calls expect the target entry address in r12. */
731 mr r12,r14
732 #endif
733 bctrl
734 li r3,0
735 b .Lsyscall_exit