/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/asm-compat.h>

/*
 * Load state from memory into VMX registers including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 */
_GLOBAL(load_vr_state)
	li	r4,VRSTATE_VSCR
	lvx	v0,r4,r3
	mtvscr	v0
	REST_32VRS(0,r4,r3)
	blr
EXPORT_SYMBOL(load_vr_state)
_ASM_NOKPROBE_SYMBOL(load_vr_state); /* used by restore_math */

/*
 * Store VMX state into memory, including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 */
_GLOBAL(store_vr_state)
	SAVE_32VRS(0, r4, r3)
	mfvscr	v0
	li	r4, VRSTATE_VSCR
	stvx	v0, r4, r3
	blr
EXPORT_SYMBOL(store_vr_state)
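
/*
 * Illustrative only (not built): a C-level sketch of the two routines
 * above, assuming the conventional thread_vr_state layout where the 32
 * vector registers are followed by the VSCR image and VRSTATE_VSCR is
 * the byte offset of that image. mtvscr()/mfvscr() and the copy helpers
 * are hypothetical stand-ins for the instructions and the
 * REST_32VRS/SAVE_32VRS macros.
 *
 *	struct thread_vr_state {
 *		vector128 vr[32];	// v0..v31
 *		vector128 vscr;		// VSCR kept in a full vector slot
 *	};
 *
 *	void load_vr_state(struct thread_vr_state *v)
 *	{
 *		mtvscr(v->vscr);	// VSCR first ...
 *		copy_to_vrs(v->vr);	// ... then v0..v31
 *	}
 *
 *	void store_vr_state(struct thread_vr_state *v)
 *	{
 *		copy_from_vrs(v->vr);	// v0..v31 first ...
 *		v->vscr = mfvscr();	// ... then VSCR
 *	}
 */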

/*
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 *
 * Note that on 32-bit this can only use registers that will be
 * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
 */
_GLOBAL(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
	oris	r5,r5,MSR_VEC@h
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync

	/*
	 * While userspace in general ignores VRSAVE, glibc uses it as a boolean
	 * to optimise userspace context save/restore. Whenever we take an
	 * altivec unavailable exception we must set VRSAVE to something non
	 * zero. Set it to all 1s. See also the programming note in the ISA.
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpwi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return */
#ifdef CONFIG_PPC32
	mfspr	r5,SPRN_SPRG_THREAD	/* current task's THREAD (phys) */
	oris	r9,r9,MSR_VEC@h
#ifdef CONFIG_VMAP_STACK
	tovirt(r5, r5)
#endif
#else
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
#endif
	li	r4,1
	stb	r4,THREAD_LOAD_VEC(r5)
	addi	r6,r5,THREAD_VRSTATE
	li	r4,1
	li	r10,VRSTATE_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	v0,r10,r6
	mtvscr	v0
	REST_32VRS(0,r4,r6)
	/* restore registers and return */
	blr
_ASM_NOKPROBE_SYMBOL(load_up_altivec)
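
/*
 * Illustrative only (not built): the core of load_up_altivec in C-like
 * form. msr_image is the saved MSR the interrupted context resumes
 * with (r9 on 32-bit, r12 on 64-bit above); mfspr()/mtspr() stand in
 * for the SPR access instructions.
 *
 *	if (mfspr(SPRN_VRSAVE) == 0)
 *		mtspr(SPRN_VRSAVE, -1);	// non-zero: glibc will save VMX
 *	msr_image |= MSR_VEC;		// VMX stays enabled after return
 *	thread->load_vec = 1;
 *	thread->used_vr = 1;
 *	load_vr_state(&thread->vr_state);
 */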

/*
 * save_altivec(tsk)
 * Save the vector registers to the given task's thread_struct.
 */
_GLOBAL(save_altivec)
	addi	r3,r3,THREAD		/* want THREAD of task */
	PPC_LL	r7,THREAD_VRSAVEAREA(r3)
	PPC_LL	r5,PT_REGS(r3)
	PPC_LCMPI	0,r7,0
	bne	2f
	addi	r7,r3,THREAD_VRSTATE
2:	SAVE_32VRS(0,r4,r7)
	mfvscr	v0
	li	r4,VRSTATE_VSCR
	stvx	v0,r4,r7
	blr
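
/*
 * Illustrative only (not built): the save-area choice above in C.
 * vr_save_area and vr_state mirror the THREAD_VRSAVEAREA and
 * THREAD_VRSTATE offsets; the asm inlines the store rather than
 * calling store_vr_state.
 *
 *	void save_altivec(struct task_struct *tsk)
 *	{
 *		struct thread_vr_state *dst = tsk->thread.vr_save_area;
 *
 *		if (!dst)
 *			dst = &tsk->thread.vr_state;
 *		store_vr_state(dst);	// v0..v31, then VSCR
 *	}
 */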

#ifdef CONFIG_VSX

#ifdef CONFIG_PPC32
#error This asm code isn't ready for 32-bit kernels
#endif

/*
 * load_up_vsx(unused, unused, tsk)
 * Disable VSX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Reuse the fp and vsx saves, but first check to see if they have
 * been saved already.
 */
_GLOBAL(load_up_vsx)
	/* Load FP and VSX registers if they haven't been done yet */
	andi.	r5,r12,MSR_FP
	beql+	load_up_fpu		/* skip if already loaded */
	andis.	r5,r12,MSR_VEC@h
	beql+	load_up_altivec		/* skip if already loaded */

	ld	r4,PACACURRENT(r13)
	addi	r4,r4,THREAD		/* Get THREAD */
	li	r6,1
	stw	r6,THREAD_USED_VSR(r4)	/* ... also set thread used vsr */
	/* enable use of VSX after return */
	oris	r12,r12,MSR_VSX@h
	std	r12,_MSR(r1)
	b	fast_interrupt_return
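
/*
 * Illustrative only (not built): the dispatch above in C-like form.
 * msr_image is the interrupted context's MSR (r12 above).
 *
 *	if (!(msr_image & MSR_FP))
 *		load_up_fpu();		// FP state not loaded yet
 *	if (!(msr_image & MSR_VEC))
 *		load_up_altivec();	// VMX state not loaded yet
 *	current->thread.used_vsr = 1;
 *	msr_image |= MSR_VSX;		// task may use VSX on return
 */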

#endif /* CONFIG_VSX */

/*
 * The routines below are in assembler so we can closely control the
 * usage of floating-point registers. These routines must be called
 * with preempt disabled.
 */
#ifdef CONFIG_PPC32
	.data
fpzero:
	.long	0
fpone:
	.long	0x3f800000	/* 1.0 in single-precision FP */
fphalf:
	.long	0x3f000000	/* 0.5 in single-precision FP */

#define LDCONST(fr, name)	\
	lis	r11,name@ha;	\
	lfs	fr,name@l(r11)
#else

	.section ".toc","aw"
fpzero:
	.tc	FD_0_0[TC],0
fpone:
	.tc	FD_3ff00000_0[TC],0x3ff0000000000000	/* 1.0 */
fphalf:
	.tc	FD_3fe00000_0[TC],0x3fe0000000000000	/* 0.5 */

#define LDCONST(fr, name)	\
	lfd	fr,name@toc(r2)
#endif
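
/*
 * Sanity note (illustrative, not built): the constants above are plain
 * IEEE-754 bit patterns, e.g. in C:
 *
 *	float one = 1.0f;
 *	unsigned int bits;
 *
 *	memcpy(&bits, &one, sizeof(bits));	// bits == 0x3f800000
 *
 * and in double precision 1.0 is 0x3ff0000000000000,
 * 0.5 is 0x3fe0000000000000.
 */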

	.text
/*
 * Internal routine to enable floating point and set FPSCR to 0.
 * Don't call it from C; it doesn't use the normal calling convention.
 */
fpenable:
#ifdef CONFIG_PPC32
	stwu	r1,-64(r1)
#else
	stdu	r1,-64(r1)
#endif
	mfmsr	r10
	ori	r11,r10,MSR_FP
	mtmsr	r11
	isync
	stfd	fr0,24(r1)
	stfd	fr1,16(r1)
	stfd	fr31,8(r1)
	LDCONST(fr1, fpzero)
	mffs	fr31
	MTFSF_L(fr1)
	blr

fpdisable:
	mtlr	r12
	MTFSF_L(fr31)
	lfd	fr31,8(r1)
	lfd	fr1,16(r1)
	lfd	fr0,24(r1)
	mtmsr	r10
	isync
	addi	r1,r1,64
	blr
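
/*
 * Contract shared by fpenable/fpdisable (from the code above): r12
 * holds the caller's LR, r10 the pre-fpenable MSR, fr31 the old FPSCR
 * image, and the 64-byte frame is laid out as
 *
 *	 8(r1)	saved fr31
 *	16(r1)	saved fr1
 *	24(r1)	saved fr0
 *	32..56(r1)	scratch for callers that also save fr2-fr5
 */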

/*
 * Vector add, floating point.
 */
_GLOBAL(vaddfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fadds	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable
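
/*
 * Illustrative C equivalent (not built); vsubfp below has the same
 * shape with a subtract:
 *
 *	void vaddfp(float *dst, const float *a, const float *b)
 *	{
 *		int i;
 *
 *		for (i = 0; i < 4; i++)	// 4 floats = one 128-bit vector
 *			dst[i] = a[i] + b[i];
 *	}
 */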

/*
 * Vector subtract, floating point.
 */
_GLOBAL(vsubfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fsubs	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable
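
/* Illustrative (not built): as vaddfp above, but dst[i] = a[i] - b[i]. */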

/*
 * Vector multiply and add, floating point.
 */
_GLOBAL(vmaddfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fmadds	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable
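
/*
 * Illustrative C equivalent (not built). Note the operand order:
 * fmadds fr0,fr0,fr2,fr1 computes a*c + b, so
 *
 *	void vmaddfp(float *dst, const float *a, const float *b,
 *		     const float *c)
 *	{
 *		int i;
 *
 *		for (i = 0; i < 4; i++)
 *			dst[i] = a[i] * c[i] + b[i];
 *	}
 */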

/*
 * Vector negative multiply and subtract, floating point.
 */
_GLOBAL(vnmsubfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fnmsubs	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable
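
/*
 * Illustrative (not built): as vmaddfp above but with fnmsubs, i.e.
 * dst[i] = -(a[i] * c[i] - b[i]).
 */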

/*
 * Vector reciprocal estimate. We just compute 1.0/x.
 * r3 -> destination, r4 -> source.
 */
_GLOBAL(vrefp)
	mflr	r12
	bl	fpenable
	li	r0,4
	LDCONST(fr1, fpone)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	fdivs	fr0,fr1,fr0
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable
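
/*
 * Illustrative (not built): dst[i] = 1.0f / src[i] for i = 0..3; the
 * full divide trivially meets the accuracy required of the estimate
 * instruction being emulated.
 */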

/*
 * Vector reciprocal square-root estimate, floating point.
 * We use the frsqrte instruction for the initial estimate followed
 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
 * r3 -> destination, r4 -> source.
 */
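/*
 * Newton-Raphson refresher for the loop below: to solve
 * f(r) = 1/r^2 - s = 0 (so r = 1/sqrt(s)), each step computes
 *
 *	r' = r - f(r)/f'(r) = r + 0.5 * r * (1 - s * r * r)
 *
 * which is exactly the fmuls/fnmsubs/fmadds sequence, applied twice.
 */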
_GLOBAL(vrsqrtefp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	stfd	fr3,40(r1)
	stfd	fr4,48(r1)
	stfd	fr5,56(r1)
	li	r0,4
	LDCONST(fr4, fpone)
	LDCONST(fr5, fphalf)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	frsqrte	fr1,fr0		/* r = frsqrte(s) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	stfsx	fr1,r3,r6
	addi	r6,r6,4
	bdnz	1b
	lfd	fr5,56(r1)
	lfd	fr4,48(r1)
	lfd	fr3,40(r1)
	lfd	fr2,32(r1)
	b	fpdisable