#------------------------------------------------------------------------------
#
# Replacement for Math64.c that is coded to use older GCC intrinsics.
# Doing this reduces the number of intrinsics that are required when
# you port to a new version of gcc.
#
# Need to split this into multiple files to size-optimize the image.
#
# Portions copyright (c) 2009-2010 Apple Inc. All rights reserved.<BR>
# All rights reserved. This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
#------------------------------------------------------------------------------

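# For context, a plain C implementation such as the one in Math64.c
# forces 32-bit ARM GCC to emit calls to compiler helper intrinsics.
# A minimal sketch (the exact helper name, e.g. __ashldi3 or
# __aeabi_llsl, depends on the GCC version and ABI):
#
#   UINT64
#   EFIAPI
#   InternalMathLShiftU64 (
#     IN UINT64  Operand,
#     IN UINTN   Count
#     )
#   {
#     return Operand << Count;  // GCC lowers this to an intrinsic call
#   }
#
# Hand-written assembly avoids that dependency, so the firmware build
# does not have to supply an intrinsic library.
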
    .text
    .align 2
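# UINT64
# EFIAPI
# InternalMathLShiftU64 (IN UINT64 Operand, IN UINTN Count);
#
# Per the AAPCS, Operand arrives in {r0 = low word, r1 = high word} and
# Count in r2; the result Operand << Count is returned in {r0, r1}.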
    .globl ASM_PFX(InternalMathLShiftU64)
ASM_PFX(InternalMathLShiftU64):
    stmfd   sp!, {r4, r5, r6}
    mov     r6, r1                  @ r6 = high word of Operand
    rsb     ip, r2, #32             @ ip = 32 - Count
    mov     r4, r6, asl r2          @ r4 = high << Count
    subs    r1, r2, #32             @ r1 = Count - 32, sets flags
    orr     r4, r4, r0, lsr ip      @ r4 |= low >> (32 - Count)
    mov     r3, r0, asl r2          @ r3 = low << Count
    movpl   r4, r0, asl r1          @ if Count >= 32: high result = low << (Count - 32)
    mov     r5, r0
    mov     r0, r3                  @ return low word
    mov     r1, r4                  @ return high word
    ldmfd   sp!, {r4, r5, r6}
    bx      lr

    .align 2
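# UINT64
# EFIAPI
# InternalMathRShiftU64 (IN UINT64 Operand, IN UINTN Count);
#
# Logical (zero-filling) right shift. Operand in {r0, r1}, Count in r2;
# result in {r0, r1}.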
    .globl ASM_PFX(InternalMathRShiftU64)
ASM_PFX(InternalMathRShiftU64):
    stmfd   sp!, {r4, r5, r6}
    mov     r5, r0                  @ r5 = low word of Operand
    rsb     ip, r2, #32             @ ip = 32 - Count
    mov     r3, r5, lsr r2          @ r3 = low >> Count
    subs    r0, r2, #32             @ r0 = Count - 32, sets flags
    orr     r3, r3, r1, asl ip      @ r3 |= high << (32 - Count)
    mov     r4, r1, lsr r2          @ r4 = high >> Count
    movpl   r3, r1, lsr r0          @ if Count >= 32: low result = high >> (Count - 32)
    mov     r6, r1
    mov     r0, r3                  @ return low word
    mov     r1, r4                  @ return high word
    ldmfd   sp!, {r4, r5, r6}
    bx      lr

    .align 2
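# UINT64
# EFIAPI
# InternalMathARShiftU64 (IN UINT64 Operand, IN UINTN Count);
#
# Arithmetic (sign-filling) right shift. Operand in {r0, r1}, Count in
# r2; result in {r0, r1}.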
    .globl ASM_PFX(InternalMathARShiftU64)
ASM_PFX(InternalMathARShiftU64):
    stmfd   sp!, {r4, r5, r6}
    mov     r5, r0                  @ r5 = low word of Operand
    rsb     ip, r2, #32             @ ip = 32 - Count
    mov     r3, r5, lsr r2          @ r3 = low >> Count
    subs    r0, r2, #32             @ r0 = Count - 32, sets flags
    orr     r3, r3, r1, asl ip      @ r3 |= high << (32 - Count)
    mov     r4, r1, asr r2          @ r4 = high >> Count (sign-filling)
    movpl   r3, r1, asr r0          @ if Count >= 32: low result = high >> (Count - 32)
    mov     r6, r1
    mov     r0, r3                  @ return low word
    mov     r1, r4                  @ return high word
    ldmfd   sp!, {r4, r5, r6}
    bx      lr

    .align 2
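# UINT64
# EFIAPI
# InternalMathLRotU64 (IN UINT64 Operand, IN UINTN Count);
#
# Rotate left: computes (Operand << Count) | (Operand >> (64 - Count)).
# Operand in {r0, r1}, Count in r2; result in {r0, r1}.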
    .globl ASM_PFX(InternalMathLRotU64)
ASM_PFX(InternalMathLRotU64):
    stmfd   sp!, {r4, r5, r6, r7, lr}
    add     r7, sp, #12
    mov     r6, r1                  @ r6 = high word of Operand
    rsb     ip, r2, #32             @ ip = 32 - Count
    mov     r4, r6, asl r2
    rsb     lr, r2, #64             @ lr = 64 - Count
    subs    r1, r2, #32
    orr     r4, r4, r0, lsr ip
    mov     r3, r0, asl r2
    movpl   r4, r0, asl r1          @ {r3, r4} = Operand << Count
    sub     ip, r2, #32
    mov     r5, r0
    mov     r0, r0, lsr lr
    rsbs    r2, r2, #32
    orr     r0, r0, r6, asl ip
    mov     r1, r6, lsr lr
    movpl   r0, r6, lsr r2          @ {r0, r1} = Operand >> (64 - Count)
    orr     r1, r1, r4              @ combine the two halves
    orr     r0, r0, r3
    ldmfd   sp!, {r4, r5, r6, r7, pc}

    .align 2
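# UINT64
# EFIAPI
# InternalMathRRotU64 (IN UINT64 Operand, IN UINTN Count);
#
# Rotate right: computes (Operand >> Count) | (Operand << (64 - Count)).
# Operand in {r0, r1}, Count in r2; result in {r0, r1}.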
    .globl ASM_PFX(InternalMathRRotU64)
ASM_PFX(InternalMathRRotU64):
    stmfd   sp!, {r4, r5, r6, r7, lr}
    add     r7, sp, #12
    mov     r5, r0                  @ r5 = low word of Operand
    rsb     ip, r2, #32             @ ip = 32 - Count
    mov     r3, r5, lsr r2
    rsb     lr, r2, #64             @ lr = 64 - Count
    subs    r0, r2, #32
    orr     r3, r3, r1, asl ip
    mov     r4, r1, lsr r2
    movpl   r3, r1, lsr r0          @ {r3, r4} = Operand >> Count
    sub     ip, r2, #32
    mov     r6, r1
    mov     r1, r1, asl lr
    rsbs    r2, r2, #32
    orr     r1, r1, r5, lsr ip
    mov     r0, r5, asl lr
    movpl   r1, r5, asl r2          @ {r0, r1} = Operand << (64 - Count)
    orr     r0, r0, r3              @ combine the two halves
    orr     r1, r1, r4
    ldmfd   sp!, {r4, r5, r6, r7, pc}

    .align 2
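# UINT64
# EFIAPI
# InternalMathMultU64x32 (IN UINT64 Multiplicand, IN UINT32 Multiplier);
#
# Multiplicand in {r0, r1}, Multiplier in r2; the low 64 bits of the
# product are returned in {r0, r1}.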
    .globl ASM_PFX(InternalMathMultU64x32)
ASM_PFX(InternalMathMultU64x32):
    stmfd   sp!, {r7, lr}
    add     r7, sp, #0
    mov     r3, #0                  @ zero-extend Multiplier to 64 bits
    mov     ip, r0
    mov     lr, r1
    umull   r0, r1, ip, r2          @ {r0, r1} = low(Multiplicand) * Multiplier
    mla     r1, lr, r2, r1          @ r1 += high(Multiplicand) * Multiplier
    mla     r1, ip, r3, r1          @ r1 += low(Multiplicand) * r3 (no-op, r3 = 0)
    ldmfd   sp!, {r7, pc}

    .align 2
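# UINT64
# EFIAPI
# InternalMathMultU64x64 (IN UINT64 Multiplicand, IN UINT64 Multiplier);
#
# Multiplicand in {r0, r1}, Multiplier in {r2, r3}; the low 64 bits of
# the product are returned in {r0, r1}.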
    .globl ASM_PFX(InternalMathMultU64x64)
ASM_PFX(InternalMathMultU64x64):
    stmfd   sp!, {r7, lr}
    add     r7, sp, #0
    mov     ip, r0
    mov     lr, r1
    umull   r0, r1, ip, r2          @ {r0, r1} = low(Multiplicand) * low(Multiplier)
    mla     r1, lr, r2, r1          @ r1 += high(Multiplicand) * low(Multiplier)
    mla     r1, ip, r3, r1          @ r1 += low(Multiplicand) * high(Multiplier)
    ldmfd   sp!, {r7, pc}

    .align 2
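# UINT64
# EFIAPI
# InternalMathDivU64x32 (IN UINT64 Dividend, IN UINT32 Divisor);
#
# Dividend in {r0, r1}, Divisor in r2; the quotient is returned in
# {r0, r1}. The division itself is delegated to the GCC helper
# __udivdi3, which takes two 64-bit operands.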
    .globl ASM_PFX(InternalMathDivU64x32)
ASM_PFX(InternalMathDivU64x32):
    stmfd   sp!, {r7, lr}
    add     r7, sp, #0
    mov     r3, #0                  @ zero-extend Divisor to 64 bits
    bl      ASM_PFX(__udivdi3)
    ldmfd   sp!, {r7, pc}

    .align 2
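# UINT32
# EFIAPI
# InternalMathModU64x32 (IN UINT64 Dividend, IN UINT32 Divisor);
#
# Dividend in {r0, r1}, Divisor in r2; the remainder is returned in r0
# via the GCC helper __umoddi3.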
    .globl ASM_PFX(InternalMathModU64x32)
ASM_PFX(InternalMathModU64x32):
    stmfd   sp!, {r7, lr}
    add     r7, sp, #0
    mov     r3, #0                  @ zero-extend Divisor to 64 bits
    bl      ASM_PFX(__umoddi3)
    ldmfd   sp!, {r7, pc}

    .align 2
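# UINT64
# EFIAPI
# InternalMathDivRemU64x32 (IN UINT64 Dividend, IN UINT32 Divisor,
#                           OUT UINT32 *Remainder OPTIONAL);
#
# Dividend in {r0, r1}, Divisor in r2, optional Remainder pointer in
# r3; the quotient is returned in {r0, r1}, and the remainder is stored
# through the pointer when it is non-NULL.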
    .globl ASM_PFX(InternalMathDivRemU64x32)
ASM_PFX(InternalMathDivRemU64x32):
    stmfd   sp!, {r4, r5, r6, r7, lr}
    add     r7, sp, #12
    stmfd   sp!, {r10, r11}
    subs    r6, r3, #0              @ r6 = Remainder, test for NULL
    mov     r10, r0                 @ save Dividend across the calls
    mov     r11, r1
    moveq   r4, r2                  @ {r4, r5} = Divisor zero-extended
    moveq   r5, #0
    beq     L22                     @ skip the remainder if the pointer is NULL
    mov     r4, r2
    mov     r5, #0
    mov     r3, #0
    bl      ASM_PFX(__umoddi3)      @ r0 = Dividend % Divisor
    str     r0, [r6, #0]            @ *Remainder = r0
L22:
    mov     r0, r10                 @ reload Dividend
    mov     r1, r11
    mov     r2, r4                  @ reload Divisor
    mov     r3, r5
    bl      ASM_PFX(__udivdi3)      @ {r0, r1} = Dividend / Divisor
    ldmfd   sp!, {r10, r11}
    ldmfd   sp!, {r4, r5, r6, r7, pc}

    .align 2
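# UINT64
# EFIAPI
# InternalMathDivRemU64x64 (IN UINT64 Dividend, IN UINT64 Divisor,
#                           OUT UINT64 *Remainder OPTIONAL);
#
# Dividend in {r0, r1}, Divisor in {r2, r3}, optional Remainder pointer
# on the stack; the quotient is returned in {r0, r1}.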
    .globl ASM_PFX(InternalMathDivRemU64x64)
ASM_PFX(InternalMathDivRemU64x64):
    stmfd   sp!, {r4, r5, r6, r7, lr}
    add     r7, sp, #12
    stmfd   sp!, {r10, r11}
    ldr     r6, [sp, #28]           @ r6 = Remainder (fifth argument, on the stack)
    mov     r4, r0                  @ save Dividend and Divisor across the calls
    cmp     r6, #0
    mov     r5, r1
    mov     r10, r2
    mov     r11, r3
    beq     L26                     @ skip the remainder if the pointer is NULL
    bl      ASM_PFX(__umoddi3)      @ {r0, r1} = Dividend % Divisor
    stmia   r6, {r0-r1}             @ *Remainder = {r0, r1}
L26:
    mov     r0, r4
    mov     r1, r5
    mov     r2, r10
    mov     r3, r11
    bl      ASM_PFX(__udivdi3)      @ {r0, r1} = Dividend / Divisor
    ldmfd   sp!, {r10, r11}
    ldmfd   sp!, {r4, r5, r6, r7, pc}

    .align 2
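# INT64
# EFIAPI
# InternalMathDivRemS64x64 (IN INT64 Dividend, IN INT64 Divisor,
#                           OUT INT64 *Remainder OPTIONAL);
#
# Signed variant of the routine above, delegating to the GCC helpers
# __moddi3 and __divdi3.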
    .globl ASM_PFX(InternalMathDivRemS64x64)
ASM_PFX(InternalMathDivRemS64x64):
    stmfd   sp!, {r4, r5, r6, r7, lr}
    add     r7, sp, #12
    stmfd   sp!, {r10, r11}
    ldr     r6, [sp, #28]           @ r6 = Remainder (fifth argument, on the stack)
    mov     r4, r0                  @ save Dividend and Divisor across the calls
    cmp     r6, #0
    mov     r5, r1
    mov     r10, r2
    mov     r11, r3
    beq     L30                     @ skip the remainder if the pointer is NULL
    bl      ASM_PFX(__moddi3)       @ {r0, r1} = Dividend % Divisor (signed)
    stmia   r6, {r0-r1}             @ *Remainder = {r0, r1}
L30:
    mov     r0, r4
    mov     r1, r5
    mov     r2, r10
    mov     r3, r11
    bl      ASM_PFX(__divdi3)       @ {r0, r1} = Dividend / Divisor (signed)
    ldmfd   sp!, {r10, r11}
    ldmfd   sp!, {r4, r5, r6, r7, pc}

    .align 2
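# UINT64
# EFIAPI
# InternalMathSwapBytes64 (IN UINT64 Operand);
#
# Byte-swaps a 64-bit value: the result is SwapBytes32 (low word) in
# the high word and SwapBytes32 (high word) in the low word.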
    .globl ASM_PFX(InternalMathSwapBytes64)
ASM_PFX(InternalMathSwapBytes64):
    stmfd   sp!, {r4, r5, r6, r7, lr}
    add     r7, sp, #12
    mov     r5, r1                  @ r5 = high word of Operand
    bl      ASM_PFX(SwapBytes32)    @ r0 = SwapBytes32 (low)
    mov     r6, r0                  @ r6 = high word of the result
    mov     r0, r5
    bl      ASM_PFX(SwapBytes32)    @ r0 = SwapBytes32 (high) = low word of the result
    mov     r1, r6                  @ return high word; the result is a UINT32
                                    @ pair, so no sign extension may leak in
    ldmfd   sp!, {r4, r5, r6, r7, pc}

ASM_FUNCTION_REMOVE_IF_UNREFERENCED