/*
 * FPU support code, moved here from head.S so that it can be used
 * by chips which use other head-whatever.S files.
 *
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Copyright (C) 1996 Paul Mackerras.
 * Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>

#ifdef CONFIG_VSX
#define __REST_32FPVSRS(n,c,base)				\
BEGIN_FTR_SECTION						\
	b	2f;						\
END_FTR_SECTION_IFSET(CPU_FTR_VSX);				\
	REST_32FPRS(n,base);					\
	b	3f;						\
2:	REST_32VSRS(n,c,base);					\
3:

#define __REST_32FPVSRS_TRANSACT(n,c,base)			\
BEGIN_FTR_SECTION						\
	b	2f;						\
END_FTR_SECTION_IFSET(CPU_FTR_VSX);				\
	REST_32FPRS_TRANSACT(n,base);				\
	b	3f;						\
2:	REST_32VSRS_TRANSACT(n,c,base);				\
3:

#define __SAVE_32FPVSRS(n,c,base)				\
BEGIN_FTR_SECTION						\
	b	2f;						\
END_FTR_SECTION_IFSET(CPU_FTR_VSX);				\
	SAVE_32FPRS(n,base);					\
	b	3f;						\
2:	SAVE_32VSRS(n,c,base);					\
3:
#else
#define __REST_32FPVSRS(n,b,base)		REST_32FPRS(n, base)
#define __REST_32FPVSRS_TRANSACT(n,b,base)	REST_32FPRS(n, base)
#define __SAVE_32FPVSRS(n,b,base)		SAVE_32FPRS(n, base)
#endif
#define REST_32FPVSRS(n,c,base)   __REST_32FPVSRS(n,__REG_##c,__REG_##base)
#define REST_32FPVSRS_TRANSACT(n,c,base)			\
	__REST_32FPVSRS_TRANSACT(n,__REG_##c,__REG_##base)
#define SAVE_32FPVSRS(n,c,base)   __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)

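/*
 * Expansion sketch (illustrative, for a CONFIG_VSX build): a use such as
 *
 *	REST_32FPVSRS(0, R4, R5)
 *
 * token-pastes the register names into __REG_R4/__REG_R5 and emits a
 * feature section that restores the 32 FPRs on CPUs without VSX, or
 * branches to label 2: and restores the 32 VSRs when CPU_FTR_VSX is set.
 */
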
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Wrapper to call load_up_fpu from C.
 * void do_load_up_fpu(struct pt_regs *regs);
 */
_GLOBAL(do_load_up_fpu)
	mflr	r0
	std	r0, 16(r1)
	stdu	r1, -112(r1)

	subi	r6, r3, STACK_FRAME_OVERHEAD
	/* load_up_fpu expects r12=MSR, r13=PACA, and returns
	 * with r12 = new MSR.
	 */
	ld	r12,_MSR(r6)
	GET_PACA(r13)

	bl	load_up_fpu
	std	r12,_MSR(r6)

	ld	r0, 112+16(r1)
	addi	r1, r1, 112
	mtlr	r0
	blr
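
/*
 * Illustrative C call site (hypothetical handler name): the wrapper
 * exists so C code can reach load_up_fpu, which otherwise expects the
 * exception-entry convention (r12 = MSR, r13 = PACA):
 *
 *	void fp_unavailable_from_c(struct pt_regs *regs)
 *	{
 *		do_load_up_fpu(regs);	// reloads FP state, updates regs->msr
 *	}
 */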

/* void do_load_up_transact_fpu(struct thread_struct *thread)
 *
 * This is similar to load_up_fpu but for the transactional version of the FP
 * register set.  It doesn't mess with the task MSR or valid flags.
 * Furthermore, we don't do lazy FP with TM currently.
 */
_GLOBAL(do_load_up_transact_fpu)
	mfmsr	r6
	ori	r5,r6,MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r5,r5,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	SYNC
	MTMSRD(r5)

	lfd	fr0,THREAD_TRANSACT_FPSCR(r3)
	MTFSF_L(fr0)
	REST_32FPVSRS_TRANSACT(0, R4, R3)

	/* FP/VSX off again */
	MTMSRD(r6)
	SYNC

	blr
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
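
/*
 * Caller sketch (illustrative, not a definitive call site): the TM
 * recheckpoint path would pass the thread_struct whose transactional
 * FP/VSX state should be loaded, e.g.
 *
 *	do_load_up_transact_fpu(&current->thread);
 *
 * Note the MSR is only toggled around the register loads themselves;
 * no per-task MSR bookkeeping is done here.
 */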

/*
 * This task wants to use the FPU now.
 * On UP, disable FP for the task which had the FPU previously,
 * and save its floating-point registers in its thread_struct.
 * Load up this task's FP registers from its thread_struct,
 * enable the FPU for the current task and return to the task.
 */
_GLOBAL(load_up_fpu)
	mfmsr	r5
	ori	r5,r5,MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r5,r5,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	SYNC
	MTMSRD(r5)			/* enable use of fpu now */
	isync
/*
 * For SMP, we don't do lazy FPU switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_fpu in switch_to.
 */
#ifndef CONFIG_SMP
	LOAD_REG_ADDRBASE(r3, last_task_used_math)
	toreal(r3)
	PPC_LL	r4,ADDROFF(last_task_used_math)(r3)
	PPC_LCMPI	0,r4,0
	beq	1f
	toreal(r4)
	addi	r4,r4,THREAD		/* want last_task_used_math->thread */
	SAVE_32FPVSRS(0, R5, R4)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR(r4)
	PPC_LL	r5,PT_REGS(r4)
	toreal(r5)
	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r10,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r10		/* disable FP for previous task */
	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of FP after return */
#ifdef CONFIG_PPC32
	mfspr	r5,SPRN_SPRG_THREAD	/* current task's THREAD (phys) */
	lwz	r4,THREAD_FPEXC_MODE(r5)
	ori	r9,r9,MSR_FP		/* enable FP for current */
	or	r9,r9,r4
#else
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	lwz	r4,THREAD_FPEXC_MODE(r5)
	ori	r12,r12,MSR_FP
	or	r12,r12,r4
	std	r12,_MSR(r1)
#endif
	lfd	fr0,THREAD_FPSCR(r5)
	MTFSF_L(fr0)
	REST_32FPVSRS(0, R4, R5)
#ifndef CONFIG_SMP
	subi	r4,r5,THREAD
	fromreal(r4)
	PPC_STL	r4,ADDROFF(last_task_used_math)(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	/* we haven't used ctr or xer or lr */
	blr
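
/*
 * The UP lazy-switch bookkeeping above, restated as a C sketch
 * (illustrative only; save_fp_state/load_fp_state are hypothetical
 * helper names standing in for the SAVE/REST macro sequences):
 *
 *	if (last_task_used_math) {
 *		save_fp_state(&last_task_used_math->thread);
 *		last_task_used_math->thread.regs->msr &=
 *			~(MSR_FP | MSR_FE0 | MSR_FE1);
 *	}
 *	load_fp_state(&current->thread);
 *	last_task_used_math = current;
 */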

/*
 * giveup_fpu(tsk)
 * Disable FP for the task given as the argument,
 * and save the floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 */
_GLOBAL(giveup_fpu)
	mfmsr	r5
	ori	r5,r5,MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r5,r5,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	SYNC_601
	ISYNC_601
	MTMSRD(r5)			/* enable use of fpu now */
	SYNC_601
	isync
	PPC_LCMPI	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	PPC_LL	r5,PT_REGS(r3)
	PPC_LCMPI	0,r5,0
	SAVE_32FPVSRS(0, R4, R3)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR(r3)
	beq	1f
	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r3,MSR_FP|MSR_FE0|MSR_FE1
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r3,r3,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	andc	r4,r4,r3		/* disable FP for previous task */
	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	LOAD_REG_ADDRBASE(r4,last_task_used_math)
	PPC_STL	r5,ADDROFF(last_task_used_math)(r4)
#endif /* CONFIG_SMP */
	blr
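
/*
 * Typical use from C (sketch; the guard shown is illustrative, see the
 * real callers such as switch_to/enable_kernel_fp for the exact test):
 *
 *	if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP))
 *		giveup_fpu(tsk);
 */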

/*
 * These are used in the alignment trap handler when emulating
 * single-precision loads and stores.
 */

_GLOBAL(cvt_fd)
	lfs	0,0(r3)
	stfd	0,0(r4)
	blr

_GLOBAL(cvt_df)
	lfd	0,0(r3)
	stfs	0,0(r4)
	blr
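
/*
 * Usage sketch (hypothetical values; prototypes roughly
 * void cvt_fd(float *, double *) and the converse):
 *
 *	float  s = 1.5f;
 *	double d;
 *	cvt_fd(&s, &d);		// widen single to double, d == 1.5
 *	cvt_df(&d, &s);		// round double back to single
 *
 * Both routines go through fr0, so the FPU must be usable when called.
 */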