]> git.proxmox.com Git - mirror_edk2.git/blame - MdePkg/Library/BaseLib/Arm/Math64.S
MdePkg: fix ARM version of InternalMathSwapBytes64 ()
[mirror_edk2.git] / MdePkg / Library / BaseLib / Arm / Math64.S
CommitLineData
8db92ab5
HT
1#------------------------------------------------------------------------------ \r
2#\r
3# Replacement for Math64.c that is coded to use older GCC intrinsics. \r
4# Doing this reduces the number of intrinsics that are required when\r
5# you port to a new version of gcc. \r
6#\r
7# Need to split this into multiple files to size optimize the image.\r
8#\r
9# Copyright (c) 2009 - 2010, Apple Inc. All rights reserved.<BR>\r
10# This program and the accompanying materials\r
11# are licensed and made available under the terms and conditions of the BSD License\r
12# which accompanies this distribution. The full text of the license may be found at\r
13# http://opensource.org/licenses/bsd-license.php.\r
14#\r
15# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
16# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
17#\r
18#------------------------------------------------------------------------------\r
19\r
20 .text\r
21 .align 2\r
22 GCC_ASM_EXPORT(InternalMathLShiftU64)\r
23\r
#------------------------------------------------------------------------------
# UINT64 InternalMathLShiftU64 (UINT64 Operand, UINTN Count)
# 64-bit logical left shift: returns Operand << Count.
# In (AAPCS): r0 = low 32 bits of Operand, r1 = high 32 bits, r2 = Count.
# Out:        r0:r1 = shifted value (low:high).
# Relies on ARM register-specified shift semantics: LSL/LSR by 32 or more
# yields 0, so the Count < 32 terms vanish automatically when Count >= 32
# and the subs/movpl pair substitutes the Count >= 32 result.
# NOTE(review): r5 is saved and written (mov r5, r0) but never read --
# looks like an artifact of the compiler output this file was captured from.
#------------------------------------------------------------------------------
24ASM_PFX(InternalMathLShiftU64):\r
25 stmfd sp!, {r4, r5, r6}\r
26 mov r6, r1\r
# r4 = high word for Count < 32: (high << Count) | (low >> (32 - Count))
27 rsb ip, r2, #32\r
28 mov r4, r6, asl r2\r
29 subs r1, r2, #32\r
30 orr r4, r4, r0, lsr ip\r
31 mov r3, r0, asl r2\r
# Count >= 32 (subs non-negative): high word = low << (Count - 32)
32 movpl r4, r0, asl r1\r
33 mov r5, r0\r
34 mov r0, r3\r
35 mov r1, r4\r
36 ldmfd sp!, {r4, r5, r6}\r
37 bx lr\r
38\r
39 .align 2\r
40 GCC_ASM_EXPORT(InternalMathRShiftU64)\r
41\r
#------------------------------------------------------------------------------
# UINT64 InternalMathRShiftU64 (UINT64 Operand, UINTN Count)
# 64-bit logical (zero-fill) right shift: returns Operand >> Count.
# In (AAPCS): r0 = low 32 bits of Operand, r1 = high 32 bits, r2 = Count.
# Out:        r0:r1 = shifted value (low:high).
# Mirror image of InternalMathLShiftU64: LSR by a register amount of 32 or
# more yields 0, and movpl supplies low = high >> (Count - 32) when
# Count >= 32.
# NOTE(review): r6 is saved and written (mov r6, r1) but never read --
# dead code from the captured compiler output.
#------------------------------------------------------------------------------
42ASM_PFX(InternalMathRShiftU64):\r
43 stmfd sp!, {r4, r5, r6}\r
44 mov r5, r0\r
45 rsb ip, r2, #32\r
# r3 = low word for Count < 32: (low >> Count) | (high << (32 - Count))
46 mov r3, r5, lsr r2\r
47 subs r0, r2, #32\r
48 orr r3, r3, r1, asl ip\r
49 mov r4, r1, lsr r2\r
# Count >= 32: low word = high >> (Count - 32); r4 (new high) is already 0
50 movpl r3, r1, lsr r0\r
51 mov r6, r1\r
52 mov r0, r3\r
53 mov r1, r4\r
54 ldmfd sp!, {r4, r5, r6}\r
55 bx lr\r
56\r
57 .align 2\r
58 GCC_ASM_EXPORT(InternalMathARShiftU64)\r
59\r
#------------------------------------------------------------------------------
# UINT64 InternalMathARShiftU64 (UINT64 Operand, UINTN Count)
# 64-bit arithmetic (sign-fill) right shift of Operand by Count.
# In (AAPCS): r0 = low 32 bits of Operand, r1 = high 32 bits, r2 = Count.
# Out:        r0:r1 = shifted value (low:high).
# Same structure as InternalMathRShiftU64 but with ASR on the high word:
# an ASR by a register amount of 32 or more fills with the sign bit, so r4
# becomes the correct sign extension when Count >= 32.
# NOTE(review): r6 is saved and written (mov r6, r1) but never read --
# dead code from the captured compiler output.
#------------------------------------------------------------------------------
60ASM_PFX(InternalMathARShiftU64):\r
61 stmfd sp!, {r4, r5, r6}\r
62 mov r5, r0\r
63 rsb ip, r2, #32\r
# r3 = low word for Count < 32: (low >> Count) | (high << (32 - Count))
64 mov r3, r5, lsr r2\r
65 subs r0, r2, #32\r
66 orr r3, r3, r1, asl ip\r
# r4 = new high = high asr Count (all sign bits once Count >= 32)
67 mov r4, r1, asr r2\r
# Count >= 32: low word = high asr (Count - 32)
68 movpl r3, r1, asr r0\r
69 mov r6, r1\r
70 mov r0, r3\r
71 mov r1, r4\r
72 ldmfd sp!, {r4, r5, r6}\r
73 bx lr\r
74\r
75 .align 2\r
76 GCC_ASM_EXPORT(InternalMathLRotU64)\r
77\r
#------------------------------------------------------------------------------
# UINT64 InternalMathLRotU64 (UINT64 Operand, UINTN Count)
# 64-bit rotate left: returns (Operand << Count) | (Operand >> (64 - Count)).
# In (AAPCS): r0 = low 32 bits of Operand, r1 = high 32 bits, r2 = Count.
# Out:        r0:r1 = rotated value (low:high).
# First half builds Operand << Count in r3:r4 (same scheme as
# InternalMathLShiftU64); second half builds Operand >> (64 - Count) in
# r0:r1 and ORs the two. Correctness for all Count values again depends on
# ARM register-shift semantics (amounts >= 32 produce 0 for LSL/LSR).
# NOTE(review): r5 is saved and written (mov r5, r0) but never read --
# dead code from the captured compiler output.
#------------------------------------------------------------------------------
78ASM_PFX(InternalMathLRotU64):\r
79 stmfd sp!, {r4, r5, r6, r7, lr}\r
# r7 = APCS frame pointer (points at saved regs); lr is free as scratch below
80 add r7, sp, #12\r
81 mov r6, r1\r
82 rsb ip, r2, #32\r
# r3:r4 = Operand << Count
83 mov r4, r6, asl r2\r
84 rsb lr, r2, #64\r
85 subs r1, r2, #32\r
86 orr r4, r4, r0, lsr ip\r
87 mov r3, r0, asl r2\r
88 movpl r4, r0, asl r1\r
89 sub ip, r2, #32\r
90 mov r5, r0\r
# r0:r1 = Operand >> (64 - Count)  (lr = 64 - Count, ip = Count - 32)
91 mov r0, r0, lsr lr\r
92 rsbs r2, r2, #32\r
93 orr r0, r0, r6, asl ip\r
94 mov r1, r6, lsr lr\r
# Count <= 32: low of the right-shifted term is high >> (32 - Count)
95 movpl r0, r6, lsr r2\r
# combine the two halves of the rotate
96 orr r1, r1, r4\r
97 orr r0, r0, r3\r
98 ldmfd sp!, {r4, r5, r6, r7, pc}\r
99\r
100\r
101 .align 2\r
102 GCC_ASM_EXPORT(InternalMathRRotU64)\r
103\r
#------------------------------------------------------------------------------
# UINT64 InternalMathRRotU64 (UINT64 Operand, UINTN Count)
# 64-bit rotate right: returns (Operand >> Count) | (Operand << (64 - Count)).
# In (AAPCS): r0 = low 32 bits of Operand, r1 = high 32 bits, r2 = Count.
# Out:        r0:r1 = rotated value (low:high).
# Mirror image of InternalMathLRotU64: first half builds Operand >> Count
# in r3:r4, second half builds Operand << (64 - Count) in r0:r1, then the
# two are ORed together.
# NOTE(review): r6 is saved and written (mov r6, r1) but never read --
# dead code from the captured compiler output.
#------------------------------------------------------------------------------
104ASM_PFX(InternalMathRRotU64):\r
105 stmfd sp!, {r4, r5, r6, r7, lr}\r
# r7 = APCS frame pointer; lr is used as scratch (64 - Count) below
106 add r7, sp, #12\r
107 mov r5, r0\r
108 rsb ip, r2, #32\r
# r3:r4 = Operand >> Count
109 mov r3, r5, lsr r2\r
110 rsb lr, r2, #64\r
111 subs r0, r2, #32\r
112 orr r3, r3, r1, asl ip\r
113 mov r4, r1, lsr r2\r
114 movpl r3, r1, lsr r0\r
115 sub ip, r2, #32\r
116 mov r6, r1\r
# r0:r1 = Operand << (64 - Count)  (lr = 64 - Count, ip = Count - 32)
117 mov r1, r1, asl lr\r
118 rsbs r2, r2, #32\r
119 orr r1, r1, r5, lsr ip\r
120 mov r0, r5, asl lr\r
# Count <= 32: high of the left-shifted term is low << (32 - Count)
121 movpl r1, r5, asl r2\r
# combine the two halves of the rotate
122 orr r0, r0, r3\r
123 orr r1, r1, r4\r
124 ldmfd sp!, {r4, r5, r6, r7, pc}\r
125\r
.align 2
GCC_ASM_EXPORT(InternalMathMultU64x32)

#------------------------------------------------------------------------------
# UINT64 InternalMathMultU64x32 (UINT64 Multiplicand, UINT32 Multiplier)
# Returns the low 64 bits of Multiplicand * Multiplier.
# In (AAPCS): r0 = Multiplicand low, r1 = Multiplicand high, r2 = Multiplier.
# Out:        r0:r1 = product (low:high).
#
# result = A.lo * B  (full 64 bits via umull)
#        + ((A.hi * B) << 32)  (folded into the high word via mla)
#
# Fix: removed the original trailing "mov r3, #0" / "mla r1, ip, r3, r1"
# pair -- a multiply-accumulate by constant zero, i.e. a no-op left over
# from the unoptimized compiler output this file was captured from.
#------------------------------------------------------------------------------
ASM_PFX(InternalMathMultU64x32):
 stmfd sp!, {r7, lr}
 add r7, sp, #0              @ APCS frame pointer, matching the other routines
 mov ip, r0                  @ ip = A.lo (frees r0 as umull destination)
 mov lr, r1                  @ lr = A.hi (survives the umull)
 umull r0, r1, ip, r2        @ r0:r1 = A.lo * B
 mla r1, lr, r2, r1          @ r1 += A.hi * B
 ldmfd sp!, {r7, pc}
139\r
140 .align 2\r
141 GCC_ASM_EXPORT(InternalMathMultU64x64)\r
142\r
#------------------------------------------------------------------------------
# UINT64 InternalMathMultU64x64 (UINT64 Multiplicand, UINT64 Multiplier)
# Returns the low 64 bits of Multiplicand * Multiplier.
# In (AAPCS): r0:r1 = Multiplicand (low:high), r2:r3 = Multiplier (low:high).
# Out:        r0:r1 = product (low:high).
# result.lo64 = A.lo*B.lo + ((A.hi*B.lo + A.lo*B.hi) << 32); the
# A.hi*B.hi term only affects bits above 64 and is rightly omitted.
#------------------------------------------------------------------------------
143ASM_PFX(InternalMathMultU64x64):\r
144 stmfd sp!, {r7, lr}\r
145 add r7, sp, #0\r
# stash A.lo/A.hi so umull can overwrite r0:r1
146 mov ip, r0\r
147 mov lr, r1\r
148 umull r0, r1, ip, r2\r
# fold the two cross terms into the high word
149 mla r1, lr, r2, r1\r
150 mla r1, ip, r3, r1\r
151 ldmfd sp!, {r7, pc}\r
152\r
153 .align 2\r
154 GCC_ASM_EXPORT(InternalMathDivU64x32)\r
155\r
#------------------------------------------------------------------------------
# UINT64 InternalMathDivU64x32 (UINT64 Dividend, UINT32 Divisor)
# Returns the quotient Dividend / Divisor (unsigned).
# In (AAPCS): r0:r1 = Dividend (low:high), r2 = Divisor.
# Out:        r0:r1 = quotient (low:high).
# Zeroing r3 widens the 32-bit divisor to the 64-bit r2:r3 pair expected by
# libgcc's __udivdi3, then tail-delegates the actual division to it.
#------------------------------------------------------------------------------
156ASM_PFX(InternalMathDivU64x32):\r
157 stmfd sp!, {r7, lr}\r
158 add r7, sp, #0\r
159 mov r3, #0\r
160 bl ASM_PFX(__udivdi3)\r
161 ldmfd sp!, {r7, pc}\r
162 \r
163 \r
164 .align 2\r
165 GCC_ASM_EXPORT(InternalMathModU64x32)\r
166\r
#------------------------------------------------------------------------------
# InternalMathModU64x32 (UINT64 Dividend, UINT32 Divisor)
# Returns the remainder Dividend % Divisor (unsigned).
# In (AAPCS): r0:r1 = Dividend (low:high), r2 = Divisor.
# Out:        r0 = remainder (fits in 32 bits since Divisor is 32-bit;
#             __umoddi3 returns the full 64-bit value in r0:r1).
# Zeroing r3 widens the divisor to 64 bits for libgcc's __umoddi3.
#------------------------------------------------------------------------------
167ASM_PFX(InternalMathModU64x32):\r
168 stmfd sp!, {r7, lr}\r
169 add r7, sp, #0\r
170 mov r3, #0\r
171 bl ASM_PFX(__umoddi3)\r
172 ldmfd sp!, {r7, pc}\r
173 \r
174 \r
175 .align 2\r
176 GCC_ASM_EXPORT(InternalMathDivRemU64x32)\r
177\r
#------------------------------------------------------------------------------
# UINT64 InternalMathDivRemU64x32 (UINT64 Dividend, UINT32 Divisor,
#                                  UINT32 *Remainder OPTIONAL)
# Unsigned 64/32 division that also returns the remainder by reference.
# In (AAPCS): r0:r1 = Dividend, r2 = Divisor, r3 = Remainder pointer or NULL.
# Out:        r0:r1 = quotient; *Remainder = Dividend % Divisor when the
#             pointer is non-NULL.
# r10:r11 preserve the dividend and r4:r5 the zero-extended divisor across
# the __umoddi3 call; both libgcc helpers take a 64-bit divisor in r2:r3.
#------------------------------------------------------------------------------
178ASM_PFX(InternalMathDivRemU64x32):\r
179 stmfd sp!, {r4, r5, r6, r7, lr}\r
180 add r7, sp, #12\r
181 stmfd sp!, {r10, r11}\r
# r6 = Remainder pointer; flags record whether it is NULL
182 subs r6, r3, #0\r
183 mov r10, r0\r
184 mov r11, r1\r
# NULL pointer: just set up the widened divisor and skip the modulo
185 moveq r4, r2\r
186 moveq r5, #0\r
187 beq L22\r
188 mov r4, r2\r
189 mov r5, #0\r
190 mov r3, #0\r
191 bl ASM_PFX(__umoddi3)\r
# remainder of a /32-bit divide fits in 32 bits; store the low word
192 str r0, [r6, #0]\r
193L22:\r
# reload dividend and widened divisor, then compute the quotient
194 mov r0, r10\r
195 mov r1, r11\r
196 mov r2, r4\r
197 mov r3, r5\r
198 bl ASM_PFX(__udivdi3)\r
199 ldmfd sp!, {r10, r11}\r
200 ldmfd sp!, {r4, r5, r6, r7, pc}\r
201 \r
202 \r
203 .align 2\r
204 GCC_ASM_EXPORT(InternalMathDivRemU64x64)\r
205\r
#------------------------------------------------------------------------------
# UINT64 InternalMathDivRemU64x64 (UINT64 Dividend, UINT64 Divisor,
#                                  UINT64 *Remainder OPTIONAL)
# Unsigned 64/64 division that also returns the remainder by reference.
# In (AAPCS): r0:r1 = Dividend, r2:r3 = Divisor; the 5th argument
#             (Remainder pointer or NULL) arrives on the caller's stack --
#             [sp, #28] after the 28 bytes of saved registers below.
# Out:        r0:r1 = quotient; *Remainder (64-bit) stored when non-NULL.
# r4:r5 preserve the dividend and r10:r11 the divisor across __umoddi3.
#------------------------------------------------------------------------------
206ASM_PFX(InternalMathDivRemU64x64):\r
207 stmfd sp!, {r4, r5, r6, r7, lr}\r
208 add r7, sp, #12\r
209 stmfd sp!, {r10, r11}\r
# r6 = Remainder pointer (5th arg, past the 7 regs pushed above)
210 ldr r6, [sp, #28]\r
211 mov r4, r0\r
212 cmp r6, #0\r
213 mov r5, r1\r
214 mov r10, r2\r
215 mov r11, r3\r
216 beq L26\r
217 bl ASM_PFX(__umoddi3)\r
# store the full 64-bit remainder at *Remainder
218 stmia r6, {r0-r1}\r
219L26:\r
# reload dividend/divisor and compute the quotient
220 mov r0, r4\r
221 mov r1, r5\r
222 mov r2, r10\r
223 mov r3, r11\r
224 bl ASM_PFX(__udivdi3)\r
225 ldmfd sp!, {r10, r11}\r
226 ldmfd sp!, {r4, r5, r6, r7, pc}\r
227 \r
228 \r
229 .align 2\r
230 GCC_ASM_EXPORT(InternalMathDivRemS64x64)\r
231\r
#------------------------------------------------------------------------------
# INT64 InternalMathDivRemS64x64 (INT64 Dividend, INT64 Divisor,
#                                 INT64 *Remainder OPTIONAL)
# Signed 64/64 division that also returns the remainder by reference.
# Identical layout to InternalMathDivRemU64x64 but delegates to the signed
# libgcc helpers __moddi3 / __divdi3.
# In (AAPCS): r0:r1 = Dividend, r2:r3 = Divisor; Remainder pointer (or
#             NULL) is the 5th arg at [sp, #28] after the pushes below.
# Out:        r0:r1 = quotient; *Remainder (64-bit) stored when non-NULL.
#------------------------------------------------------------------------------
232ASM_PFX(InternalMathDivRemS64x64):\r
233 stmfd sp!, {r4, r5, r6, r7, lr}\r
234 add r7, sp, #12\r
235 stmfd sp!, {r10, r11}\r
# r6 = Remainder pointer (5th arg)
236 ldr r6, [sp, #28]\r
237 mov r4, r0\r
238 cmp r6, #0\r
239 mov r5, r1\r
240 mov r10, r2\r
241 mov r11, r3\r
242 beq L30\r
243 bl ASM_PFX(__moddi3)\r
# store the full 64-bit signed remainder at *Remainder
244 stmia r6, {r0-r1}\r
245L30:\r
# reload dividend/divisor and compute the signed quotient
246 mov r0, r4\r
247 mov r1, r5\r
248 mov r2, r10\r
249 mov r3, r11\r
250 bl ASM_PFX(__divdi3)\r
251 ldmfd sp!, {r10, r11}\r
252 ldmfd sp!, {r4, r5, r6, r7, pc}\r
253 \r
254 \r
255 .align 2\r
256 GCC_ASM_EXPORT(InternalMathSwapBytes64)\r
257\r
#------------------------------------------------------------------------------
# UINT64 InternalMathSwapBytes64 (UINT64 Operand)
# Byte-reverses a 64-bit value: swaps the two 32-bit halves and byte-
# reverses each with SwapBytes32 (UINT32 in r0, result in r0):
#   result.lo = SwapBytes32 (Operand.hi)
#   result.hi = SwapBytes32 (Operand.lo)
# r4/r5 are callee-saved, so they survive the two calls.
# NOTE(review): the stray "55f5af18" / "8db92ab5" / "HT" / "AB" tokens in
# this region are git-blame annotations from the web capture of this file,
# not assembly source.
#------------------------------------------------------------------------------
258ASM_PFX(InternalMathSwapBytes64):\r
55f5af18 259 stmfd sp!, {r4, r5, r7, lr}\r
8db92ab5
HT
# r5 = Operand.hi, preserved across the first SwapBytes32 call
260 mov r5, r1\r
261 bl ASM_PFX(SwapBytes32)\r
# r4 = SwapBytes32 (Operand.lo) -- becomes the high half of the result
55f5af18 262 mov r4, r0\r
8db92ab5
HT
263 mov r0, r5\r
264 bl ASM_PFX(SwapBytes32)\r
55f5af18
AB
# r0 already holds SwapBytes32 (Operand.hi) = result.lo; set result.hi
265 mov r1, r4\r
266 ldmfd sp!, {r4, r5, r7, pc}\r
8db92ab5
HT
267\r
268\r
703f1d09 269ASM_FUNCTION_REMOVE_IF_UNREFERENCED