/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __HEAD_32_H__
#define __HEAD_32_H__

#include <asm/ptrace.h>	/* for STACK_FRAME_REGS_MARKER */

/*
 * Exception entry code.  This code runs with address translation
 * turned off, i.e. using physical addresses.
 * We assume sprg3 has the physical address of the current
 * task's thread_struct.
 */
.macro EXCEPTION_PROLOG handle_dar_dsisr=0
	EXCEPTION_PROLOG_0	handle_dar_dsisr=\handle_dar_dsisr
	EXCEPTION_PROLOG_1
	EXCEPTION_PROLOG_2	handle_dar_dsisr=\handle_dar_dsisr
.endm

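/*
 * EXCEPTION_PROLOG_0: stash r10/r11 in the SPRG scratch registers, save
 * CR in r10 and read SRR1 so that cr0.eq tells the following code whether
 * the exception came from kernel (eq) or user (ne).  With CONFIG_VMAP_STACK,
 * SRR0/SRR1 (and optionally DAR/DSISR) are also copied into the
 * thread_struct early, since accessing a vmap'ed stack may itself fault
 * and clobber those SPRs.
 */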
.macro EXCEPTION_PROLOG_0 handle_dar_dsisr=0
	mtspr	SPRN_SPRG_SCRATCH0,r10
	mtspr	SPRN_SPRG_SCRATCH1,r11
#ifdef CONFIG_VMAP_STACK
	mfspr	r10, SPRN_SPRG_THREAD
	.if	\handle_dar_dsisr
	mfspr	r11, SPRN_DAR
	stw	r11, DAR(r10)
	mfspr	r11, SPRN_DSISR
	stw	r11, DSISR(r10)
	.endif
	mfspr	r11, SPRN_SRR0
	stw	r11, SRR0(r10)
#endif
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
#ifdef CONFIG_VMAP_STACK
	stw	r11, SRR1(r10)
#endif
	mfcr	r10
	andi.	r11, r11, MSR_PR
.endm

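/*
 * EXCEPTION_PROLOG_1: select the kernel stack.  If the exception came
 * from the kernel (cr0.eq set by EXCEPTION_PROLOG_0) the current r1 is
 * reused, otherwise the task's kernel stack is fetched from
 * thread_struct, and room for an exception frame is carved out.
 * With CONFIG_VMAP_STACK, r1 is switched directly (its old value is kept
 * in SPRG_SCRATCH2) and a branch to stack_overflow is taken when the new
 * r1 falls outside the valid stack area.  Without it, the physical
 * address of the frame is computed in r11 instead.
 */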
.macro EXCEPTION_PROLOG_1 for_rtas=0
#ifdef CONFIG_VMAP_STACK
	mtspr	SPRN_SPRG_SCRATCH2,r1
	subi	r1, r1, INT_FRAME_SIZE		/* use r1 if kernel */
	beq	1f
	mfspr	r1,SPRN_SPRG_THREAD
	lwz	r1,TASK_STACK-THREAD(r1)
	addi	r1, r1, THREAD_SIZE - INT_FRAME_SIZE
1:
	mtcrf	0x7f, r1
	bt	32 - THREAD_ALIGN_SHIFT, stack_overflow
#else
	subi	r11, r1, INT_FRAME_SIZE		/* use r1 if kernel */
	beq	1f
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,TASK_STACK-THREAD(r11)
	addi	r11, r11, THREAD_SIZE - INT_FRAME_SIZE
1:	tophys(r11, r11)
#endif
.endm

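/*
 * EXCEPTION_PROLOG_2: build the exception frame.  With CONFIG_VMAP_STACK,
 * data translation is re-enabled first (with MSR_RI still clear) so the
 * vmap'ed stack can be accessed; the volatile registers, CR, LR and
 * SRR0/SRR1 (read back from the thread_struct in that case) are then
 * saved into the pt_regs frame pointed to by r11, leaving SRR0 in r12
 * and SRR1 in r9 for the code that follows.
 */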
.macro EXCEPTION_PROLOG_2 handle_dar_dsisr=0
#ifdef CONFIG_VMAP_STACK
	li	r11, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
	mtmsr	r11
	isync
	mfspr	r11, SPRN_SPRG_SCRATCH2
	stw	r11,GPR1(r1)
	stw	r11,0(r1)
	mr	r11, r1
#else
	stw	r1,GPR1(r11)
	stw	r1,0(r11)
	tovirt(r1, r11)		/* set new kernel sp */
#endif
	stw	r10,_CCR(r11)		/* save registers */
	stw	r12,GPR12(r11)
	stw	r9,GPR9(r11)
	mfspr	r10,SPRN_SPRG_SCRATCH0
	mfspr	r12,SPRN_SPRG_SCRATCH1
	stw	r10,GPR10(r11)
	stw	r12,GPR11(r11)
	mflr	r10
	stw	r10,_LINK(r11)
#ifdef CONFIG_VMAP_STACK
	mfspr	r12, SPRN_SPRG_THREAD
	tovirt(r12, r12)
	.if	\handle_dar_dsisr
	lwz	r10, DAR(r12)
	stw	r10, _DAR(r11)
	lwz	r10, DSISR(r12)
	stw	r10, _DSISR(r11)
	.endif
	lwz	r9, SRR1(r12)
	lwz	r12, SRR0(r12)
#else
	mfspr	r12,SPRN_SRR0
	mfspr	r9,SPRN_SRR1
#endif
#ifdef CONFIG_40x
	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
#else
#ifdef CONFIG_VMAP_STACK
	li	r10, MSR_KERNEL & ~MSR_IR /* can take exceptions */
#else
	li	r10,MSR_KERNEL & ~(MSR_IR|MSR_DR) /* can take exceptions */
#endif
	mtmsr	r10			/* (except for mach check in rtas) */
#endif
	stw	r0,GPR0(r11)
	lis	r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
	addi	r10,r10,STACK_FRAME_REGS_MARKER@l
	stw	r10,8(r11)
	SAVE_4GPRS(3, r11)
	SAVE_2GPRS(7, r11)
.endm

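/*
 * SYSCALL_ENTRY: system call entry from user mode.  Switches to the
 * task's kernel stack, saves the volatile registers and the syscall
 * trap number, handles 40x debug state and CPU time accounting, then
 * rfi's to transfer_to_syscall with the MMU enabled.  Syscalls issued
 * from kernel mode are routed to ret_from_kernel_syscall instead.
 */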
.macro SYSCALL_ENTRY trapno
	mfspr	r12,SPRN_SPRG_THREAD
	mfspr	r9, SPRN_SRR1
#ifdef CONFIG_VMAP_STACK
	mfspr	r11, SPRN_SRR0
	mtctr	r11
#endif
	andi.	r11, r9, MSR_PR
	lwz	r11,TASK_STACK-THREAD(r12)
	beq-	99f
	addi	r11, r11, THREAD_SIZE - INT_FRAME_SIZE
#ifdef CONFIG_VMAP_STACK
	li	r10, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
	mtmsr	r10
	isync
#endif
	tovirt_vmstack r12, r12
	tophys_novmstack r11, r11
	mflr	r10
	stw	r10, _LINK(r11)
#ifdef CONFIG_VMAP_STACK
	mfctr	r10
#else
	mfspr	r10,SPRN_SRR0
#endif
	stw	r1,GPR1(r11)
	stw	r1,0(r11)
	tovirt_novmstack r1, r11	/* set new kernel sp */
	stw	r10,_NIP(r11)
	mfcr	r10
	rlwinm	r10,r10,0,4,2	/* Clear SO bit in CR */
	stw	r10,_CCR(r11)		/* save registers */
#ifdef CONFIG_40x
	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
#else
#ifdef CONFIG_VMAP_STACK
	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL & ~MSR_IR) /* can take exceptions */
#else
	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL & ~(MSR_IR|MSR_DR)) /* can take exceptions */
#endif
	mtmsr	r10			/* (except for mach check in rtas) */
#endif
	lis	r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
	stw	r2,GPR2(r11)
	addi	r10,r10,STACK_FRAME_REGS_MARKER@l
	stw	r9,_MSR(r11)
	li	r2, \trapno + 1
	stw	r10,8(r11)
	stw	r2,_TRAP(r11)
	SAVE_GPR(0, r11)
	SAVE_4GPRS(3, r11)
	SAVE_2GPRS(7, r11)
	addi	r11,r1,STACK_FRAME_OVERHEAD
	addi	r2,r12,-THREAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
#endif
	ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
#if defined(CONFIG_40x)
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif

3:
	tovirt_novmstack r2, r2		/* set r2 to current */
	lis	r11, transfer_to_syscall@h
	ori	r11, r11, transfer_to_syscall@l
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * If MSR is changing we need to keep interrupts disabled at this point
	 * otherwise we might risk taking an interrupt before we tell lockdep
	 * they are enabled.
	 */
	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL)
	rlwimi	r10, r9, 0, MSR_EE
#else
	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
#endif
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR1,r10
	mtspr	SPRN_SRR0,r11
	rfi			/* jump to handler, enable MMU */
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif
99:	b	ret_from_kernel_syscall
.endm

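/*
 * Save DAR/DSISR into the exception frame.  This is a no-op with
 * CONFIG_VMAP_STACK, where EXCEPTION_PROLOG handle_dar_dsisr=1 has
 * already placed them there.
 */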
.macro save_dar_dsisr_on_stack reg1, reg2, sp
#ifndef CONFIG_VMAP_STACK
	mfspr	\reg1, SPRN_DAR
	mfspr	\reg2, SPRN_DSISR
	stw	\reg1, _DAR(\sp)
	stw	\reg2, _DSISR(\sp)
#endif
.endm

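/*
 * Get DAR/DSISR into \reg1/\reg2 and make sure they are saved in the
 * exception frame: read back from the stack with CONFIG_VMAP_STACK,
 * read from the SPRs and stored there otherwise.
 */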
.macro get_and_save_dar_dsisr_on_stack reg1, reg2, sp
#ifdef CONFIG_VMAP_STACK
	lwz	\reg1, _DAR(\sp)
	lwz	\reg2, _DSISR(\sp)
#else
	save_dar_dsisr_on_stack \reg1, \reg2, \sp
#endif
.endm

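/*
 * Address conversion helpers that depend on whether the stack is
 * vmap'ed: tovirt_vmstack converts to a virtual address only when
 * CONFIG_VMAP_STACK is set, while tovirt_novmstack/tophys_novmstack
 * convert only when it is not.  In the other configuration they
 * degenerate to a plain move, or to nothing when dst and src are the
 * same register.
 */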
.macro tovirt_vmstack dst, src
#ifdef CONFIG_VMAP_STACK
	tovirt(\dst, \src)
#else
	.ifnc	\dst, \src
	mr	\dst, \src
	.endif
#endif
.endm

.macro tovirt_novmstack dst, src
#ifndef CONFIG_VMAP_STACK
	tovirt(\dst, \src)
#else
	.ifnc	\dst, \src
	mr	\dst, \src
	.endif
#endif
.endm

.macro tophys_novmstack dst, src
#ifndef CONFIG_VMAP_STACK
	tophys(\dst, \src)
#else
	.ifnc	\dst, \src
	mr	\dst, \src
	.endif
#endif
.endm

/*
 * Note: code which follows this uses cr0.eq (set if from kernel),
 * r11, r12 (SRR0), and r9 (SRR1).
 *
 * Note2: once we have set r1 we are in a position to take exceptions
 * again, and we could thus set MSR:RI at that point.
 */

/*
 * Exception vectors.
 */
#ifdef CONFIG_PPC_BOOK3S
#define	START_EXCEPTION(n, label)	\
	. = n;				\
	DO_KVM n;			\
label:

#else
#define	START_EXCEPTION(n, label)	\
	. = n;				\
label:

#endif

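/*
 * EXCEPTION(): lay out a complete exception vector at address n: minimal
 * prolog, r3 pointing at the pt_regs, then transfer to hdlr through the
 * given EXC_XFER_* macro.
 */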
#define EXCEPTION(n, label, hdlr, xfer)		\
	START_EXCEPTION(n, label)		\
	EXCEPTION_PROLOG;			\
	addi	r3,r1,STACK_FRAME_OVERHEAD;	\
	xfer(n, hdlr)

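/*
 * EXC_XFER_TEMPLATE: record the trap number in the frame, load the MSR
 * value the handler should run with, and branch to the transfer
 * function.  The handler and return addresses follow the bl as inline
 * .long words, so the transfer code can retrieve them through LR.
 */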
#define EXC_XFER_TEMPLATE(hdlr, trap, msr, tfer, ret)	\
	li	r10,trap;				\
	stw	r10,_TRAP(r11);				\
	LOAD_REG_IMMEDIATE(r10, msr);			\
	bl	tfer;					\
	.long	hdlr;					\
	.long	ret

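/*
 * EXC_XFER_STD goes through transfer_to_handler_full, which also saves
 * the non-volatile registers; EXC_XFER_LITE saves only the volatiles and
 * marks this by using trap number n+1.
 */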
#define EXC_XFER_STD(n, hdlr)		\
	EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, transfer_to_handler_full,	\
			  ret_from_except_full)

#define EXC_XFER_LITE(n, hdlr)		\
	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \
			  ret_from_except)

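/*
 * Stack overflow handling for CONFIG_VMAP_STACK: switch r1 to this CPU's
 * emergency_ctx stack (or to init_thread_union early in boot, while
 * emergency_ctx is still NULL), build an exception frame and call
 * stack_overflow_exception.  Expands to nothing without CONFIG_VMAP_STACK.
 */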
.macro vmap_stack_overflow_exception
#ifdef CONFIG_VMAP_STACK
#ifdef CONFIG_SMP
	mfspr	r1, SPRN_SPRG_THREAD
	lwz	r1, TASK_CPU - THREAD(r1)
	slwi	r1, r1, 3
	addis	r1, r1, emergency_ctx@ha
#else
	lis	r1, emergency_ctx@ha
#endif
	lwz	r1, emergency_ctx@l(r1)
	cmpwi	cr1, r1, 0
	bne	cr1, 1f
	lis	r1, init_thread_union@ha
	addi	r1, r1, init_thread_union@l
1:	addi	r1, r1, THREAD_SIZE - INT_FRAME_SIZE
	EXCEPTION_PROLOG_2
	SAVE_NVGPRS(r11)
	addi	r3, r1, STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0, stack_overflow_exception)
#endif
.endm

#endif /* __HEAD_32_H__ */