1 /* SPDX-License-Identifier: GPL-2.0 */
5 #include <asm/ptrace.h> /* for STACK_FRAME_REGS_MARKER */
/*
 * Exception entry code. This code runs with address translation
 * turned off, i.e. using physical addresses.
 * We assume sprg3 has the physical address of the current
 * task's thread_struct.
 */
/*
 * EXCEPTION_PROLOG -- top-level exception entry macro.
 * Visible fragments show a handle_dar_dsisr flag (default 0) being
 * forwarded to EXCEPTION_PROLOG_0 and EXCEPTION_PROLOG_2.
 *
 * NOTE(review): this region is extraction-garbled: statements are split
 * across lines and each statement carries a stray leading source-line
 * number.  The embedded numbering jumps 14 -> 16, so at least one line
 * is missing between the two calls, and the closing .endm is absent --
 * restore from the upstream file before assembling.
 */
13 .macro EXCEPTION_PROLOG handle_dar_dsisr
=0
14 EXCEPTION_PROLOG_0 handle_dar_dsisr
=\handle_dar_dsisr
16 EXCEPTION_PROLOG_2 handle_dar_dsisr
=\handle_dar_dsisr
/*
 * EXCEPTION_PROLOG_0 -- first stage of exception entry.
 * Visible fragments: stash r10/r11 in SPRN_SPRG_SCRATCH0/1, read
 * SPRN_SPRG_THREAD (VMAP_STACK path) and SRR1, then test MSR_PR in the
 * saved SRR1 to distinguish a user-mode from a kernel-mode interrupt.
 *
 * NOTE(review): extraction-garbled -- split statements, stray leading
 * line numbers, gaps in the numbering (23 -> 33, 34 -> 38), and no
 * #endif/.endm visible.  Restore from the upstream file before use.
 */
19 .macro EXCEPTION_PROLOG_0 handle_dar_dsisr
=0
/* free up r10 and r11 by parking them in the scratch SPRGs */
20 mtspr SPRN_SPRG_SCRATCH0
,r10
21 mtspr SPRN_SPRG_SCRATCH1
,r11
22 #ifdef CONFIG_VMAP_STACK
23 mfspr r10
, SPRN_SPRG_THREAD
/* r11 = SRR1 = MSR value at the time of the exception */
33 mfspr r11
, SPRN_SRR1
/* check whether user or kernel */
34 #ifdef CONFIG_VMAP_STACK
/* cr0.eq set iff MSR_PR clear, i.e. exception came from kernel mode */
38 andi
. r11
, r11
, MSR_PR
/*
 * EXCEPTION_PROLOG_1 -- locate and carve the exception stack frame.
 * Visible fragments: from kernel mode, reuse the current stack pointer
 * minus INT_FRAME_SIZE; from user mode, fetch the task's kernel stack via
 * SPRN_SPRG_THREAD and TASK_STACK-THREAD(...) and point at its top
 * (THREAD_SIZE - INT_FRAME_SIZE).  The VMAP_STACK variant builds the
 * frame pointer directly in r1 and adds a stack-overflow bit-test branch;
 * the classic variant builds it in r11 and converts it with
 * tophys_novmstack (translation is off at this point).
 *
 * NOTE(review): extraction-garbled (split statements, stray leading line
 * numbers, numbering gaps, missing #else/#endif/.endm) -- restore from
 * the upstream file before assembling.
 */
41 .macro EXCEPTION_PROLOG_1 for_rtas
=0
42 #ifdef CONFIG_VMAP_STACK
/* VMAP_STACK path: frame built directly in r1 */
44 subi r1
, r1
, INT_FRAME_SIZE
/* use r1 if kernel */
/* user path: r1 = task kernel stack top - INT_FRAME_SIZE */
46 mfspr r1
,SPRN_SPRG_THREAD
47 lwz r1
,TASK_STACK
-THREAD(r1
)
48 addi r1
, r1
, THREAD_SIZE
- INT_FRAME_SIZE
/* classic (non-VMAP) path: same computation, but into r11 */
50 subi r11
, r1
, INT_FRAME_SIZE
/* use r1 if kernel */
52 mfspr r11
,SPRN_SPRG_THREAD
53 lwz r11
,TASK_STACK
-THREAD(r11
)
54 addi r11
, r11
, THREAD_SIZE
- INT_FRAME_SIZE
/* address translation is off here, so use the physical frame address */
57 tophys_novmstack r11
, r11
58 #ifdef CONFIG_VMAP_STACK
/* branch out if the frame fell off the THREAD_ALIGN-aligned stack */
60 bt
32 - THREAD_ALIGN_SHIFT
, stack_overflow
/*
 * EXCEPTION_PROLOG_2 -- save state into the frame and transition the MSR.
 * Visible fragments: under VMAP_STACK, build an MSR value with instruction
 * translation (and RI) off so a DTLB miss can be taken safely; save CR into
 * the frame; recover the registers parked in the scratch SPRGs; set the new
 * kernel sp; clear MSR_WE in the saved MSR; switch the MSR with mtmsr; and
 * load STACK_FRAME_REGS_MARKER (the exception frame marker) into r10.
 *
 * NOTE(review): extraction-garbled -- split statements, stray leading line
 * numbers, large numbering gaps (74 -> 81, 86 -> 90, 95 -> 104, ...) and
 * missing #else/#endif/.endm lines.  Restore from upstream before use.
 */
64 .macro EXCEPTION_PROLOG_2 handle_dar_dsisr
=0
65 #ifdef CONFIG_VMAP_STACK
67 li r10
, MSR_KERNEL
& ~(MSR_IR
| MSR_RI
) /* can take DTLB miss */
71 stw r10
,_CCR(r11
) /* save registers */
/* recover the r10 parked in EXCEPTION_PROLOG_0 */
73 mfspr r10
, SPRN_SPRG_SCRATCH0
74 #ifdef CONFIG_VMAP_STACK
81 tovirt(r1
, r11
) /* set new kernel sp */
86 #ifdef CONFIG_VMAP_STACK
/* recover the r11 parked in EXCEPTION_PROLOG_0 (into r12) */
90 mfspr r12
,SPRN_SPRG_SCRATCH1
94 #ifdef CONFIG_VMAP_STACK
95 mfspr r12
, SPRN_SPRG_THREAD
/* r9 holds the saved SRR1 here; test MSR_PR (user vs kernel) */
104 andi
. r10
, r9
, MSR_PR
111 rlwinm r9
,r9
,0,14,12 /* clear MSR_WE (necessary?) */
113 #ifdef CONFIG_VMAP_STACK
114 li r10
, MSR_KERNEL
& ~MSR_IR
/* can take exceptions */
116 li r10
,MSR_KERNEL
& ~(MSR_IR
|MSR_DR
) /* can take exceptions */
118 mtmsr r10
/* (except for mach check in rtas) */
121 lis r10
,STACK_FRAME_REGS_MARKER@ha
/* exception frame marker */
122 addi r10
,r10
,STACK_FRAME_REGS_MARKER@l
/*
 * SYSCALL_ENTRY trapno -- system call entry path.
 * Visible fragments: locate the task's kernel stack via SPRN_SPRG_THREAD
 * and TASK_STACK-THREAD(r12) and carve an INT_FRAME_SIZE frame; clear the
 * SO bit in the saved CR; clear MSR_WE; switch the MSR via mtmsr /
 * LOAD_REG_IMMEDIATE; load the STACK_FRAME_REGS_MARKER; handle 40x debug
 * state (THREAD_DBCR0 / global_dbcr0); do CPU-time accounting; load r11
 * with the address of transfer_to_syscall and eventually RFI to the
 * handler.  Label 99: branches to ret_from_kernel_syscall.
 *
 * NOTE(review): extraction-garbled -- split statements, stray leading line
 * numbers, many numbering gaps and missing #else/#endif/.endm lines,
 * including the comment delimiters of the TRACE_IRQFLAGS note (re-added
 * below).  Restore from the upstream file before assembling.
 */
128 .macro SYSCALL_ENTRY trapno
129 mfspr r12
,SPRN_SPRG_THREAD
131 #ifdef CONFIG_VMAP_STACK
/* cr0 reflects MSR_PR from r9 (saved SRR1): user vs kernel */
135 andi
. r11
, r9
, MSR_PR
136 lwz r11
,TASK_STACK
-THREAD(r12
)
138 addi r11
, r11
, THREAD_SIZE
- INT_FRAME_SIZE
139 #ifdef CONFIG_VMAP_STACK
140 li r10
, MSR_KERNEL
& ~(MSR_IR
| MSR_RI
) /* can take DTLB miss */
144 tovirt_vmstack r12
, r12
145 tophys_novmstack r11
, r11
148 #ifdef CONFIG_VMAP_STACK
155 tovirt_novmstack r1
, r11
/* set new kernel sp */
158 rlwinm r10
,r10
,0,4,2 /* Clear SO bit in CR */
159 stw r10
,_CCR(r11
) /* save registers */
161 rlwinm r9
,r9
,0,14,12 /* clear MSR_WE (necessary?) */
163 #ifdef CONFIG_VMAP_STACK
164 LOAD_REG_IMMEDIATE(r10
, MSR_KERNEL
& ~MSR_IR
) /* can take exceptions */
166 LOAD_REG_IMMEDIATE(r10
, MSR_KERNEL
& ~(MSR_IR
|MSR_DR
)) /* can take exceptions */
168 mtmsr r10
/* (except for mach check in rtas) */
170 lis r10
,STACK_FRAME_REGS_MARKER@ha
/* exception frame marker */
172 addi r10
,r10
,STACK_FRAME_REGS_MARKER@l
180 addi r11
,r1
,STACK_FRAME_OVERHEAD
183 #if defined(CONFIG_40x)
184 /* Check to see if the dbcr0 register is set up to debug. Use the
185 internal debug mode bit to do this. */
186 lwz r12
,THREAD_DBCR0(r12
)
187 andis
. r12
,r12
,DBCR0_IDM@h
189 ACCOUNT_CPU_USER_ENTRY(r2
, r11
, r12
)
190 #if defined(CONFIG_40x)
192 /* From user and task is ptraced - load up global dbcr0 */
193 li r12
,-1 /* clear all pending debug events */
195 lis r11
,global_dbcr0@ha
197 addi r11
,r11
,global_dbcr0@l
206 tovirt_novmstack r2
, r2
/* set r2 to current */
207 lis r11
, transfer_to_syscall@h
208 ori r11
, r11
, transfer_to_syscall@l
209 #ifdef CONFIG_TRACE_IRQFLAGS
/*
211 * If MSR is changing we need to keep interrupts disabled at this point
212 * otherwise we might risk taking an interrupt before we tell lockdep
*/
215 LOAD_REG_IMMEDIATE(r10
, MSR_KERNEL
)
216 rlwimi r10
, r9
, 0, MSR_EE
218 LOAD_REG_IMMEDIATE(r10
, MSR_KERNEL
| MSR_EE
)
220 #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
225 RFI
/* jump to handler, enable MMU */
226 99: b ret_from_kernel_syscall
/*
 * save_dar_dsisr_on_stack reg1, reg2, sp -- read SPRN_DAR/SPRN_DSISR into
 * \reg1/\reg2 and store them into the exception frame at \sp; guarded by
 * #ifndef CONFIG_VMAP_STACK (presumably handled elsewhere when VMAP_STACK
 * is on -- confirm against upstream).
 *
 * NOTE(review): extraction-garbled -- parameter lists and register
 * references are split across lines (e.g. "\reg" + "1"), the numbering
 * gap 232 -> 234 means at least one line (likely the _DAR store) is
 * missing, and the #endif/.endm are absent.  Restore from upstream.
 */
229 .macro save_dar_dsisr_on_stack reg1
, reg2
, sp
230 #ifndef CONFIG_VMAP_STACK
231 mfspr
\reg
1, SPRN_DAR
232 mfspr
\reg
2, SPRN_DSISR
234 stw
\reg
2, _DSISR(\sp
)
/*
 * get_and_save_dar_dsisr_on_stack reg1, reg2, sp -- obtain DAR/DSISR into
 * \reg1/\reg2.  Under VMAP_STACK they are re-loaded from the frame (only
 * the _DSISR load is visible here); otherwise the macro defers to
 * save_dar_dsisr_on_stack.
 *
 * NOTE(review): extraction-garbled -- numbering gaps and missing
 * #else/#endif/.endm lines.  Restore from upstream before use.
 */
238 .macro get_and_save_dar_dsisr_on_stack reg1
, reg2
, sp
239 #ifdef CONFIG_VMAP_STACK
241 lwz
\reg
2, _DSISR(\sp
)
243 save_dar_dsisr_on_stack
\reg
1, \reg
2, \sp
/*
 * tovirt_vmstack dst, src -- address conversion applied only when
 * CONFIG_VMAP_STACK is enabled.  NOTE(review): the macro body and .endm
 * are missing from this extraction; only the guard line survives.
 */
247 .macro tovirt_vmstack dst
, src
248 #ifdef CONFIG_VMAP_STACK
/*
 * tovirt_novmstack dst, src -- address conversion for the classic
 * (non-VMAP_STACK) configuration.  NOTE(review): the macro body and .endm
 * are missing from this extraction; only the guard line survives.
 */
257 .macro tovirt_novmstack dst
, src
258 #ifndef CONFIG_VMAP_STACK
/*
 * tophys_novmstack dst, src -- address conversion for the classic
 * (non-VMAP_STACK) configuration; used by EXCEPTION_PROLOG_1 and
 * SYSCALL_ENTRY above.  NOTE(review): the macro body and .endm are
 * missing from this extraction; only the guard line survives.
 */
267 .macro tophys_novmstack dst
, src
268 #ifndef CONFIG_VMAP_STACK
/*
 * Note: code which follows this uses cr0.eq (set if from kernel),
 * r11, r12 (SRR0), and r9 (SRR1).
 *
 * Note2: once we have set r1 we are in a position to take exceptions
 * again, and we could thus set MSR:RI at that point.
 */
/*
 * Exception-vector helper macros: START_EXCEPTION (BOOK3S and non-BOOK3S
 * variants), EXCEPTION, EXC_XFER_TEMPLATE, and the EXC_XFER_STD /
 * EXC_XFER_LITE wrappers that route an exception through
 * transfer_to_handler(_full) to its handler.
 *
 * NOTE(review): extraction-garbled -- most macro bodies and the #else /
 * #endif lines are missing, and several definitions end in a dangling
 * backslash that would splice the following line into them.  The comment
 * lines inserted below absorb those splices so later definitions are not
 * swallowed; restore the full macros from upstream before use.
 */
288 #ifdef CONFIG_PPC_BOOK3S
289 #define START_EXCEPTION(n, label) \
/* NOTE(review): body of the BOOK3S START_EXCEPTION is missing here */
295 #define START_EXCEPTION(n, label) \
/* NOTE(review): body of the non-BOOK3S START_EXCEPTION is missing here */
301 #define EXCEPTION(n, label, hdlr, xfer) \
302 START_EXCEPTION(n, label) \
304 addi r3,r1,STACK_FRAME_OVERHEAD; \
/* NOTE(review): remainder of EXCEPTION() is missing here */
307 #define EXC_XFER_TEMPLATE(hdlr, trap, msr, tfer, ret) \
309 stw r10,_TRAP(r11); \
310 LOAD_REG_IMMEDIATE(r10, msr); \
/* NOTE(review): remainder of EXC_XFER_TEMPLATE() is missing here */
315 #define EXC_XFER_STD(n, hdlr) \
316 EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, transfer_to_handler_full, \
317 ret_from_except_full)
319 #define EXC_XFER_LITE(n, hdlr) \
320 EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \
/* NOTE(review): the closing argument line of EXC_XFER_LITE is missing */
/*
 * vmap_stack_overflow_exception -- entry path taken when the stack check
 * in EXCEPTION_PROLOG_1 branches to stack_overflow (VMAP_STACK only).
 * Visible fragments: switch r1 to an emergency stack -- emergency_ctx
 * (one path indexes it by TASK_CPU, another loads it directly) or
 * init_thread_union -- point r1 at its top (THREAD_SIZE - INT_FRAME_SIZE),
 * set r3 = r1 + STACK_FRAME_OVERHEAD as the handler argument, and hand
 * off with EXC_XFER_STD(0, stack_overflow_exception).
 *
 * NOTE(review): extraction-garbled -- split statements, stray leading
 * line numbers, numbering gaps and missing #else/#endif/.endm lines.
 * Restore from the upstream file before assembling.
 */
323 .macro vmap_stack_overflow_exception
324 #ifdef CONFIG_VMAP_STACK
/* index emergency_ctx by this task's cpu (per-cpu emergency stack) */
326 mfspr r1
, SPRN_SPRG_THREAD
327 lwz r1
, TASK_CPU
- THREAD(r1
)
329 addis r1
, r1
, emergency_ctx@ha
/* alternate path: emergency_ctx without cpu indexing */
331 lis r1
, emergency_ctx@ha
333 lwz r1
, emergency_ctx@
l(r1
)
/* alternate path: init_thread_union (NOTE(review): presumably an
   early-boot fallback -- confirm against upstream) */
336 lis r1
, init_thread_union@ha
337 addi r1
, r1
, init_thread_union@l
338 1: addi r1
, r1
, THREAD_SIZE
- INT_FRAME_SIZE
341 addi r3
, r1
, STACK_FRAME_OVERHEAD
342 EXC_XFER_STD(0, stack_overflow_exception
)
346 #endif /* __HEAD_32_H__ */