1 #include <asm/asm-offsets.h>
3 #ifdef CONFIG_PPC_BOOK3S
4 #include <asm/exception-64s.h>
6 #include <asm/exception-64e.h>
8 #include <asm/feature-fixups.h>
9 #include <asm/head-64.h>
10 #include <asm/hw_irq.h>
13 #include <asm/ppc_asm.h>
14 #include <asm/ptrace.h>
/*
 * TOC entries for the 64-bit and compat syscall dispatch tables, so the
 * handlers below can reach them with r2 (TOC)-relative loads.
 * NOTE(review): this chunk is elided (embedded line-number prefixes jump);
 * the label matching the first .tc entry is not visible here -- confirm
 * against the full file.
 */
18 .tc sys_call_table[TC],sys_call_table
21 COMPAT_SYS_CALL_TABLE:
22 .tc compat_sys_call_table[TC],compat_sys_call_table
/*
 * DEBUG_SRR_VALID srr: debug-only sanity checks that the saved SRR (or
 * HSRR) register contents are still valid before an RFI-class return.
 * Compiles to nothing unless CONFIG_PPC_RFI_SRR_DEBUG is set.
 * Each EMIT_WARN_ENTRY emits a WARN-once bug-table record anchored at a
 * local "100:" label.
 * NOTE(review): the comparison instructions, the 100: labels, the #endif
 * and the closing .endm are all elided from this view -- only the four
 * warn-entry emissions are visible. Confirm against the full file.
 */
28 .macro DEBUG_SRR_VALID srr
29 #ifdef CONFIG_PPC_RFI_SRR_DEBUG
35 EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
39 EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
45 EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
49 EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
/*
 * system_call_vectored name trapnr: entry/exit path for the Book3S "scv"
 * (system call vectored) instruction. Instantiated below for the common
 * and sigill variants. Saves user state on the stack, calls the C
 * dispatcher system_call_exception(), then returns via rfscv or a full
 * register-restore path, with restart handling for interrupts that
 * become pending during the soft-masked exit window.
 * NOTE(review): many interior lines (register save/restore sequences,
 * cr tests, the .endm) are elided from this view -- comments below cover
 * only the visible instructions.
 */
54 #ifdef CONFIG_PPC_BOOK3S
55 .macro system_call_vectored name trapnr
56 .globl system_call_vectored_\name
57 system_call_vectored_\name:
58 _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
59 SCV_INTERRUPT_TO_KERNEL
71 /* Can we avoid saving r3-r8 in common case? */
78 /* Zero r9-r12, this should only be required when restoring all GPRs */
/* Write the "regshere" exception marker just below the pt_regs frame. */
92 addi r10,r1,STACK_FRAME_OVERHEAD
93 ld r11,exception_marker@toc(r2)
94 std r11,-16(r10) /* "regshere" marker */
98 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
101 * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
102 * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
103 * and interrupts may be masked and pending already.
104 * system_call_exception() will call trace_hardirqs_off() which means
105 * interrupts could already have been blocked before trace_hardirqs_off,
106 * but this is the best we can do.
109 /* Calling convention has r9 = orig r0, r10 = regs */
111 bl system_call_exception
/* Syscall exit: let C code (syscall_exit_prepare) do signal/resched work. */
113 .Lsyscall_vectored_\name\()_exit:
114 addi r4,r1,STACK_FRAME_OVERHEAD
116 bl syscall_exit_prepare
117 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
/*
 * Restartable section: if any interrupt other than the hard-disable
 * marker became pending (PACAIRQHAPPENED bits besides HARD_DIS),
 * branch to the restart stub below to replay it.
 */
118 .Lsyscall_vectored_\name\()_rst_start:
119 lbz r11,PACAIRQHAPPENED(r13)
120 andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
121 bne- syscall_vectored_\name\()_restart
/*
 * Drop soft-mask state for the return to userspace.
 * NOTE(review): the instruction loading the value stored here into r11
 * is elided from this view -- confirm against the full file.
 */
123 stb r11,PACAIRQSOFTMASK(r13)
125 stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
132 stdcx. r0,0,r1 /* to clear the reservation */
133 END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
137 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/* Full-GPR restore needed (cr set by elided code above)? */
140 bne .Lsyscall_vectored_\name\()_restore_regs
142 /* rfscv returns with LR->NIA and CTR->MSR */
146 /* Could zero these as per ABI, but we may consider a stricter ABI
147 * which preserves these if libc implementations can benefit, so
148 * restore them for now until further measurement is done. */
155 /* Zero volatile regs that may contain sensitive kernel data */
163 * We don't need to restore AMR on the way back to userspace for KUAP.
164 * The value of AMR only matters while we're in the kernel.
172 b . /* prevent speculative execution */
/* Slow path: restore all GPRs before returning (body elided here). */
174 .Lsyscall_vectored_\name\()_restore_regs:
192 .Lsyscall_vectored_\name\()_rst_end:
/*
 * Restart stub: reload the saved kernel r1, re-raise the soft mask,
 * replay pending interrupts via syscall_exit_restart(), then retry the
 * restartable exit sequence from rst_start.
 */
194 syscall_vectored_\name\()_restart:
195 _ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
197 ld r1,PACA_EXIT_SAVE_R1(r13)
200 addi r4,r1,STACK_FRAME_OVERHEAD
201 li r11,IRQS_ALL_DISABLED
202 stb r11,PACAIRQSOFTMASK(r13)
203 bl syscall_exit_restart
204 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
205 b .Lsyscall_vectored_\name\()_rst_start
/* Register the restartable range with the soft-mask/restart tables. */
208 SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
209 RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)
/* Normal scv entry: trap number 0x3000. */
213 system_call_vectored common 0x3000
216 * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
217 * which is tested by system_call_exception when r0 is -1 (as set by vector
/* SIGILL variant: same code, distinguishable by its trap number. */
220 system_call_vectored sigill 0x7ff0
224 * Entered via kernel return set up by kernel/sstep.c, must match entry regs
/*
 * scv emulation entry (used when single-stepping emulates an scv): mark
 * irqs soft-disabled as the real scv vector would, then join the common
 * scv path. NOTE(review): the trap-number setup between the comment above
 * and this label is elided from this view.
 */
226 .globl system_call_vectored_emulate
227 system_call_vectored_emulate:
228 _ASM_NOKPROBE_SYMBOL(system_call_vectored_emulate)
229 li r10,IRQS_ALL_DISABLED
230 stb r10,PACAIRQSOFTMASK(r13)
231 b system_call_vectored_common
232 #endif /* CONFIG_PPC_BOOK3S */
/*
 * Real-mode sc entry: fetch the kernel MSR so the (elided) code that
 * follows can switch to virtual mode before falling into
 * system_call_common.
 */
234 .balign IFETCH_ALIGN_BYTES
235 .globl system_call_common_real
236 system_call_common_real:
237 _ASM_NOKPROBE_SYMBOL(system_call_common_real)
238 ld r10,PACAKMSR(r13) /* get MSR value for kernel */
/*
 * system_call_common: entry/exit path for the classic "sc" instruction
 * (all platforms; Book3S- and BookE-specific pieces are #ifdef'd).
 * Mirrors the scv path above: save state, call system_call_exception(),
 * run syscall_exit_prepare(), then return with restart handling.
 * NOTE(review): many interior lines (register saves, MSR handling, the
 * return instruction itself) are elided from this view -- comments cover
 * only the visible instructions.
 */
241 .balign IFETCH_ALIGN_BYTES
242 .globl system_call_common
244 _ASM_NOKPROBE_SYMBOL(system_call_common)
/* Freescale BookE: flush the branch target buffer on kernel entry. */
253 #ifdef CONFIG_PPC_FSL_BOOK3E
254 START_BTB_FLUSH_SECTION
256 END_BTB_FLUSH_SECTION
261 /* Can we avoid saving r3-r8 in common case? */
268 /* Zero r9-r12, this should only be required when restoring all GPRs */
280 * This clears CR0.SO (bit 28), which is the error indication on
281 * return from this system call.
283 rldimi r12,r11,28,(63-28)
/* Write the "regshere" exception marker just below the pt_regs frame. */
288 addi r10,r1,STACK_FRAME_OVERHEAD
289 ld r11,exception_marker@toc(r2)
290 std r11,-16(r10) /* "regshere" marker */
/* Record SRR validity state (value loaded into r11 is elided here). */
292 #ifdef CONFIG_PPC_BOOK3S
294 stb r11,PACASRR_VALID(r13)
298 * We always enter kernel from userspace with irq soft-mask enabled and
299 * nothing pending. system_call_exception() will call
300 * trace_hardirqs_off().
302 li r11,IRQS_ALL_DISABLED
303 stb r11,PACAIRQSOFTMASK(r13)
304 #ifdef CONFIG_PPC_BOOK3S
305 li r12,-1 /* Set MSR_EE and MSR_RI */
311 /* Calling convention has r9 = orig r0, r10 = regs */
313 bl system_call_exception
/* Syscall exit work in C, then enter the restartable exit sequence. */
316 addi r4,r1,STACK_FRAME_OVERHEAD
318 bl syscall_exit_prepare
319 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
/* Restart if interrupts besides the HARD_DIS marker became pending. */
320 #ifdef CONFIG_PPC_BOOK3S
322 lbz r11,PACAIRQHAPPENED(r13)
323 andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
327 stb r11,PACAIRQSOFTMASK(r13)
329 stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
/* Consume/refresh the SRR-valid flag before the rfi-class return. */
335 #ifdef CONFIG_PPC_BOOK3S
336 lbz r4,PACASRR_VALID(r13)
340 stb r4,PACASRR_VALID(r13)
350 stdcx. r0,0,r1 /* to clear the reservation */
351 END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
/* Full-GPR restore needed (cr set by elided code above)? */
354 bne .Lsyscall_restore_regs
355 /* Zero volatile regs that may contain sensitive kernel data */
368 .Lsyscall_restore_regs_cont:
372 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
375 * We don't need to restore AMR on the way back to userspace for KUAP.
376 * The value of AMR only matters while we're in the kernel.
384 b . /* prevent speculative execution */
/* Slow path: restore all GPRs, then rejoin the common return. */
386 .Lsyscall_restore_regs:
395 b .Lsyscall_restore_regs_cont
/*
 * Restart stub: reload saved r1, re-raise the soft mask, replay pending
 * interrupts via syscall_exit_restart(), retry from .Lsyscall_rst_start.
 * NOTE(review): the syscall_restart label line itself is elided here.
 */
398 #ifdef CONFIG_PPC_BOOK3S
400 _ASM_NOKPROBE_SYMBOL(syscall_restart)
402 ld r1,PACA_EXIT_SAVE_R1(r13)
405 addi r4,r1,STACK_FRAME_OVERHEAD
406 li r11,IRQS_ALL_DISABLED
407 stb r11,PACAIRQSOFTMASK(r13)
408 bl syscall_exit_restart
409 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
410 b .Lsyscall_rst_start
/* Register the restartable range with the soft-mask/restart tables. */
413 SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
414 RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
418 * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
419 * touched, no exit work created, then this can be used.
/*
 * fast_interrupt_return_srr: abbreviated interrupt return for the case
 * documented above -- skips the full C exit-prepare path. Dispatches to
 * the user or kernel fast-return paths of interrupt_return_srr.
 * NOTE(review): the user/kernel test feeding the branches below is
 * elided from this view.
 */
421 .balign IFETCH_ALIGN_BYTES
422 .globl fast_interrupt_return_srr
423 fast_interrupt_return_srr:
424 _ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
425 kuap_check_amr r3, r4
428 #ifdef CONFIG_PPC_BOOK3S
/* Returning to user: restore user AMR/KUAP state, take the user path. */
430 kuap_user_restore r3, r4
431 b .Lfast_user_interrupt_return_srr
432 1: kuap_kernel_restore r3, r4
434 li r3,0 /* 0 return value, no EMULATE_STACK_STORE */
/* cr test (elided) decides recoverability; fall through means fatal. */
435 bne+ .Lfast_kernel_interrupt_return_srr
436 addi r3,r1,STACK_FRAME_OVERHEAD
437 bl unrecoverable_exception
438 b . /* should not get here */
/* Non-Book3S variant of the user/kernel dispatch. */
440 bne .Lfast_user_interrupt_return_srr
441 b .Lfast_kernel_interrupt_return_srr
/*
 * interrupt_return_macro srr: generates interrupt_return_srr or
 * interrupt_return_hsrr -- the common interrupt-exit path, split into a
 * user-return and a kernel-return half, each with its own restartable
 * exit sequence and restart stub (same pattern as the syscall paths
 * above). \srr selects SRR- vs HSRR-based state and return instruction.
 * NOTE(review): many interior lines (register restores, the mtspr/rfid
 * sequences, cr tests) are elided from this view.
 */
444 .macro interrupt_return_macro srr
445 .balign IFETCH_ALIGN_BYTES
446 .globl interrupt_return_\srr
447 interrupt_return_\srr\():
448 _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
451 beq interrupt_return_\srr\()_kernel
/* ---- Return-to-user half ---- */
452 interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
453 _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
454 addi r3,r1,STACK_FRAME_OVERHEAD
455 bl interrupt_exit_user_prepare
/* Non-zero return from prepare => non-volatile GPRs must be restored. */
457 bne- .Lrestore_nvgprs_\srr
458 .Lrestore_nvgprs_\srr\()_cont:
459 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
/* Restartable exit: replay if interrupts (other than HARD_DIS) pend. */
460 #ifdef CONFIG_PPC_BOOK3S
461 .Linterrupt_return_\srr\()_user_rst_start:
462 lbz r11,PACAIRQHAPPENED(r13)
463 andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
464 bne- interrupt_return_\srr\()_user_restart
467 stb r11,PACAIRQSOFTMASK(r13)
469 stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
/* Fast user return: consume SRR/HSRR-valid state per \srr variant. */
471 .Lfast_user_interrupt_return_\srr\():
472 #ifdef CONFIG_PPC_BOOK3S
474 lbz r4,PACASRR_VALID(r13)
476 lbz r4,PACAHSRR_VALID(r13)
488 #ifdef CONFIG_PPC_BOOK3S
489 stb r4,PACASRR_VALID(r13)
495 #ifdef CONFIG_PPC_BOOK3S
496 stb r4,PACAHSRR_VALID(r13)
/* Debug check: returning to user must have irqs soft-enabled. */
501 #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
502 lbz r4,PACAIRQSOFTMASK(r13)
503 tdnei r4,IRQS_ENABLED
509 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
512 stdcx. r0,0,r1 /* to clear the reservation */
515 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
541 b . /* prevent speculative execution */
542 .Linterrupt_return_\srr\()_user_rst_end:
/* NVGPR restore detour (body elided), then rejoin the main user path. */
544 .Lrestore_nvgprs_\srr\():
546 b .Lrestore_nvgprs_\srr\()_cont
/* User-return restart stub: replay pending interrupts, retry exit. */
548 #ifdef CONFIG_PPC_BOOK3S
549 interrupt_return_\srr\()_user_restart:
550 _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
552 ld r1,PACA_EXIT_SAVE_R1(r13)
554 addi r3,r1,STACK_FRAME_OVERHEAD
555 li r11,IRQS_ALL_DISABLED
556 stb r11,PACAIRQSOFTMASK(r13)
557 bl interrupt_exit_user_restart
558 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
559 b .Linterrupt_return_\srr\()_user_rst_start
562 SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
563 RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
/* ---- Return-to-kernel half ---- */
566 .balign IFETCH_ALIGN_BYTES
567 interrupt_return_\srr\()_kernel:
568 _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
569 addi r3,r1,STACK_FRAME_OVERHEAD
570 bl interrupt_exit_kernel_prepare
572 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
/* Restore the interrupted context's soft-mask state (r11 load elided). */
573 .Linterrupt_return_\srr\()_kernel_rst_start:
575 cmpwi r11,IRQS_ENABLED
576 stb r11,PACAIRQSOFTMASK(r13)
578 #ifdef CONFIG_PPC_BOOK3S
579 lbz r11,PACAIRQHAPPENED(r13)
580 andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
581 bne- interrupt_return_\srr\()_kernel_restart
584 stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
/* Fast kernel return: consume SRR/HSRR-valid state per \srr variant. */
587 .Lfast_kernel_interrupt_return_\srr\():
589 #ifdef CONFIG_PPC_BOOK3S
591 lbz r4,PACASRR_VALID(r13)
593 lbz r4,PACAHSRR_VALID(r13)
605 #ifdef CONFIG_PPC_BOOK3S
606 stb r4,PACASRR_VALID(r13)
612 #ifdef CONFIG_PPC_BOOK3S
613 stb r4,PACAHSRR_VALID(r13)
619 stdcx. r0,0,r1 /* to clear the reservation */
622 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
638 * Leaving a stale exception_marker on the stack can confuse
639 * the reliable stack unwinder later on. Clear it.
641 std r0,STACK_FRAME_OVERHEAD-16(r1)
645 bne- cr1,1f /* emulate stack store */
655 b . /* prevent speculative execution */
658 * Emulate stack store with update. New r1 value was already calculated
659 * and updated in our interrupt regs by emulate_loadstore, but we can't
660 * store the previous value of r1 to the stack before re-loading our
661 * registers from it, otherwise they could be clobbered. Use
662 * PACA_EXGEN as temporary storage to hold the store data, as
663 * interrupts are disabled here so it won't be clobbered.
666 std r9,PACA_EXGEN+0(r13)
667 addi r9,r1,INT_FRAME_SIZE /* get original r1 */
671 std r9,0(r1) /* perform store component of stdu */
672 ld r9,PACA_EXGEN+0(r13)
679 b . /* prevent speculative execution */
680 .Linterrupt_return_\srr\()_kernel_rst_end:
/* Kernel-return restart stub: replay pending interrupts, retry exit. */
682 #ifdef CONFIG_PPC_BOOK3S
683 interrupt_return_\srr\()_kernel_restart:
684 _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
686 ld r1,PACA_EXIT_SAVE_R1(r13)
688 addi r3,r1,STACK_FRAME_OVERHEAD
689 li r11,IRQS_ALL_DISABLED
690 stb r11,PACAIRQSOFTMASK(r13)
691 bl interrupt_exit_kernel_restart
692 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
693 b .Linterrupt_return_\srr\()_kernel_rst_start
696 SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
697 RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
/* Instantiate the SRR flavour everywhere; HSRR only exists on Book3S. */
702 interrupt_return_macro srr
703 #ifdef CONFIG_PPC_BOOK3S
704 interrupt_return_macro hsrr
/* End marker for the soft-masked text region checked by irq-replay code. */
706 .globl __end_soft_masked
708 DEFINE_FIXED_SYMBOL(__end_soft_masked)
709 #endif /* CONFIG_PPC_BOOK3S */
/*
 * First-schedule return points for new tasks created during a syscall:
 * set the child's fork() return value to 0 and join the appropriate
 * syscall-exit path. NOTE(review): the bl schedule_tail calls, the
 * #endif for this ifdef, and ret_from_fork's exit branch are elided
 * from this view.
 */
711 #ifdef CONFIG_PPC_BOOK3S
712 _GLOBAL(ret_from_fork_scv)
715 li r3,0 /* fork() return value */
716 b .Lsyscall_vectored_common_exit
719 _GLOBAL(ret_from_fork)
722 li r3,0 /* fork() return value */
725 _GLOBAL(ret_from_kernel_thread)
730 #ifdef PPC64_ELF_ABI_v2