1 #------------------------------------------------------------------------------
3 # Replacement for Math64.c that is coded to use older GCC intrinsics.
4 # Doing this reduces the number of intrinsics that are required when
5 # you port to a new version of gcc.
7 # Need to split this into multiple files to size optimize the image.
9 # Portions copyright (c) 2009-2010 Apple Inc. All rights reserved.<BR>
10 # All rights reserved. This program and the accompanying materials
11 # are licensed and made available under the terms and conditions of the BSD License
12 # which accompanies this distribution. The full text of the license may be found at
13 # http://opensource.org/licenses/bsd-license.php
15 # THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
16 # WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
18 #------------------------------------------------------------------------------
@ UINT64 InternalMathLShiftU64 (UINT64 Operand, UINTN Count)
@ 64-bit logical left shift. Per AAPCS, presumably Operand is in r0 (low)/
@ r1 (high) and Count in r2, result returned in r0/r1 — TODO confirm.
@ NOTE(review): interior instructions are missing from this view (embedded
@ source line numbers skip 25-28 and 30-34); the shift setup, the main
@ shift of each word, and the return sequence are not visible here.
22 .globl ASM_PFX(InternalMathLShiftU64)
23 ASM_PFX(InternalMathLShiftU64):
24 stmfd sp!, {r4, r5, r6}   @ save callee-saved scratch registers (AAPCS)
29 orr r4, r4, r0, lsr ip    @ fold the bits shifted out of the low word into the high result (ip presumably holds 32 - Count — confirm)
35 ldmfd sp!, {r4, r5, r6}   @ restore callee-saved registers (return not visible here)
@ UINT64 InternalMathRShiftU64 (UINT64 Operand, UINTN Count)
@ 64-bit logical (zero-extending) right shift. Presumably Operand in
@ r0 (low)/r1 (high), Count in r2 — TODO confirm against callers.
@ NOTE(review): interior instructions are missing from this view
@ (embedded source line numbers skip); only the cross-word fold and the
@ register save/restore are visible.
39 .globl ASM_PFX(InternalMathRShiftU64)
40 ASM_PFX(InternalMathRShiftU64):
41 stmfd sp!, {r4, r5, r6}   @ save callee-saved scratch registers
46 orr r3, r3, r1, asl ip    @ fold the bits shifted out of the high word into the low result (ip presumably 32 - Count — confirm)
52 ldmfd sp!, {r4, r5, r6}   @ restore callee-saved registers (return not visible here)
@ UINT64 InternalMathARShiftU64 (UINT64 Operand, UINTN Count)
@ 64-bit arithmetic (sign-extending) right shift. The visible skeleton is
@ identical to InternalMathRShiftU64; the sign-extension of the high word
@ must happen in the instructions not visible in this view.
@ NOTE(review): interior instructions are missing (embedded source line
@ numbers skip 59-62 and 64-68) — verify against the full source.
56 .globl ASM_PFX(InternalMathARShiftU64)
57 ASM_PFX(InternalMathARShiftU64):
58 stmfd sp!, {r4, r5, r6}   @ save callee-saved scratch registers
63 orr r3, r3, r1, asl ip    @ fold high-word bits shifted across the 32-bit boundary into the low result
69 ldmfd sp!, {r4, r5, r6}   @ restore callee-saved registers (return not visible here)
@ UINT64 InternalMathLRotU64 (UINT64 Operand, UINTN Count)
@ 64-bit rotate left. A 64-bit rotate needs two cross-word folds (bits
@ leaving the top re-enter at the bottom), which matches the two orr
@ instructions visible below.
@ NOTE(review): most interior instructions are missing from this view
@ (embedded source line numbers skip) — the shift amounts loaded into ip
@ before each fold cannot be confirmed here.
73 .globl ASM_PFX(InternalMathLRotU64)
74 ASM_PFX(InternalMathLRotU64):
75 stmfd sp!, {r4, r5, r6, r7, lr}   @ save callee-saved regs + lr (popped into pc below to return)
82 orr r4, r4, r0, lsr ip            @ fold low-word bits crossing into the high half (ip presumably 32 - shift — confirm)
89 orr r0, r0, r6, asl ip            @ fold rotated-out bits back into the low half
94 ldmfd sp!, {r4, r5, r6, r7, pc}   @ restore and return (lr -> pc)
@ UINT64 InternalMathRRotU64 (UINT64 Operand, UINTN Count)
@ 64-bit rotate right — mirror image of InternalMathLRotU64 above, with
@ the fold directions reversed (asl then lsr).
@ NOTE(review): interior instructions are missing from this view
@ (embedded source line numbers skip) — shift-amount setup for ip is not
@ visible and cannot be confirmed here.
98 .globl ASM_PFX(InternalMathRRotU64)
99 ASM_PFX(InternalMathRRotU64):
100 stmfd sp!, {r4, r5, r6, r7, lr}   @ save callee-saved regs + lr (popped into pc below to return)
107 orr r3, r3, r1, asl ip            @ fold high-word bits crossing into the low half
114 orr r1, r1, r5, lsr ip            @ fold rotated-out bits back into the high half
119 ldmfd sp!, {r4, r5, r6, r7, pc}   @ restore and return (lr -> pc)
@ UINT64 InternalMathMultU64x32 (UINT64 Multiplicand, UINT32 Multiplier)
@ NOTE(review): the entire body is missing from this view (embedded source
@ line numbers jump from 123 to 135) — only the exported label is visible.
@ Presumably implemented with umull/mla on r0:r1 by r2 — TODO confirm
@ against the full source before relying on this description.
122 .globl ASM_PFX(InternalMathMultU64x32)
123 ASM_PFX(InternalMathMultU64x32):
@ UINT64 InternalMathMultU64x64 (UINT64 Multiplicand, UINT64 Multiplier)
@ NOTE(review): the entire body is missing from this view (embedded source
@ line numbers jump from 136 to 147) — only the exported label is visible.
@ Result semantics (low 64 bits of the 128-bit product) cannot be
@ confirmed here; verify against the full source.
135 .globl ASM_PFX(InternalMathMultU64x64)
136 ASM_PFX(InternalMathMultU64x64):
@ UINT64 InternalMathDivU64x32 (UINT64 Dividend, UINT32 Divisor)
@ Delegates to libgcc's 64/64 unsigned division helper __udivdi3
@ (dividend in r0:r1, divisor in r2:r3, quotient returned in r0:r1).
@ NOTE(review): the instructions between the label and the bl are missing
@ from this view — presumably they zero-extend the 32-bit Divisor into
@ r2:r3 and save lr; confirm against the full source.
147 .globl ASM_PFX(InternalMathDivU64x32)
148 ASM_PFX(InternalMathDivU64x32):
152 bl ASM_PFX(__udivdi3)   @ r0:r1 = Dividend / Divisor (unsigned)
@ UINT32 InternalMathModU64x32 (UINT64 Dividend, UINT32 Divisor)
@ Delegates to libgcc's 64/64 unsigned modulo helper __umoddi3
@ (dividend in r0:r1, divisor in r2:r3, remainder returned in r0:r1).
@ NOTE(review): the instructions between the label and the bl are missing
@ from this view — presumably they zero-extend Divisor and save lr;
@ confirm against the full source.
157 .globl ASM_PFX(InternalMathModU64x32)
158 ASM_PFX(InternalMathModU64x32):
162 bl ASM_PFX(__umoddi3)   @ r0:r1 = Dividend % Divisor (unsigned)
@ UINT64 InternalMathDivRemU64x32 (UINT64 Dividend, UINT32 Divisor,
@                                  UINT32 *Remainder OPTIONAL)
@ Computes quotient and (optionally) remainder in one call by invoking
@ both libgcc helpers: __umoddi3 first (remainder, presumably stored via
@ the Remainder pointer), then __udivdi3 (quotient returned in r0:r1).
@ NOTE(review): interior instructions are missing from this view
@ (embedded source line numbers skip), including the argument shuffling
@ between the two calls and the optional-pointer NULL check.
@ NOTE(review): 7 registers (28 bytes) of pushes are visible before the
@ bl sites; if SP was 8-byte aligned on entry this would leave it
@ misaligned at the calls per AAPCS — the missing instructions may
@ adjust SP; verify against the full source.
167 .globl ASM_PFX(InternalMathDivRemU64x32)
168 ASM_PFX(InternalMathDivRemU64x32):
169 stmfd sp!, {r4, r5, r6, r7, lr}   @ save callee-saved regs + lr (survives the two bl calls)
171 stmfd sp!, {r10, r11}             @ extra callee-saved scratch for holding args across calls
181 bl ASM_PFX(__umoddi3)             @ remainder = Dividend % Divisor
188 bl ASM_PFX(__udivdi3)             @ r0:r1 = Dividend / Divisor
189 ldmfd sp!, {r10, r11}             @ restore extra scratch
190 ldmfd sp!, {r4, r5, r6, r7, pc}   @ restore and return (lr -> pc)
@ UINT64 InternalMathDivRemU64x64 (UINT64 Dividend, UINT64 Divisor,
@                                  UINT64 *Remainder OPTIONAL)
@ 64-by-64 variant of InternalMathDivRemU64x32: __umoddi3 for the
@ remainder (presumably stored via the Remainder pointer), then
@ __udivdi3 for the quotient (returned in r0:r1).
@ NOTE(review): interior instructions are missing from this view
@ (embedded source line numbers skip) — argument marshaling between the
@ calls and the optional-pointer handling are not visible.
@ NOTE(review): 28 bytes of pushes are visible before the bl sites; see
@ the AAPCS SP-alignment caveat on InternalMathDivRemU64x32 — verify
@ against the full source.
194 .globl ASM_PFX(InternalMathDivRemU64x64)
195 ASM_PFX(InternalMathDivRemU64x64):
196 stmfd sp!, {r4, r5, r6, r7, lr}   @ save callee-saved regs + lr (survives the two bl calls)
198 stmfd sp!, {r10, r11}             @ extra callee-saved scratch for holding args across calls
206 bl ASM_PFX(__umoddi3)             @ remainder = Dividend % Divisor
213 bl ASM_PFX(__udivdi3)             @ r0:r1 = Dividend / Divisor
214 ldmfd sp!, {r10, r11}             @ restore extra scratch
215 ldmfd sp!, {r4, r5, r6, r7, pc}   @ restore and return (lr -> pc)
@ INT64 InternalMathDivRemS64x64 (INT64 Dividend, INT64 Divisor,
@                                 INT64 *Remainder OPTIONAL)
@ Signed 64-by-64 divide with optional remainder.
@ NOTE(review): the entire computation is missing from this view
@ (embedded source line numbers jump from 223 to 239) — presumably it
@ calls the signed libgcc helpers (__moddi3 / __divdi3), but no call is
@ visible here; confirm against the full source.
219 .globl ASM_PFX(InternalMathDivRemS64x64)
220 ASM_PFX(InternalMathDivRemS64x64):
221 stmfd sp!, {r4, r5, r6, r7, lr}   @ save callee-saved regs + lr
223 stmfd sp!, {r10, r11}             @ extra callee-saved scratch for holding args across calls
239 ldmfd sp!, {r10, r11}             @ restore extra scratch
240 ldmfd sp!, {r4, r5, r6, r7, pc}   @ restore and return (lr -> pc)
@ UINT64 InternalMathSwapBytes64 (UINT64 Operand)
@ Byte-reverses a 64-bit value by byte-swapping each 32-bit half via
@ SwapBytes32 and (presumably, in the instructions not visible here)
@ exchanging the two halves in the r0:r1 result.
@ NOTE(review): interior instructions are missing from this view
@ (embedded source line numbers skip) — the half-exchange and argument
@ moves around the two calls cannot be confirmed here.
244 .globl ASM_PFX(InternalMathSwapBytes64)
245 ASM_PFX(InternalMathSwapBytes64):
246 @ args = 0, pretend = 0, frame = 0
247 @ frame_needed = 1, uses_anonymous_args = 0
248 stmfd sp!, {r4, r5, r6, r7, lr}   @ save callee-saved regs + lr (survives the two bl calls)
251 bl ASM_PFX(SwapBytes32)           @ byte-swap one 32-bit half
254 bl ASM_PFX(SwapBytes32)           @ byte-swap the other 32-bit half
261 ldmfd sp!, {r4, r5, r6, r7, pc}   @ restore and return (lr -> pc)
@ EDK2 macro marking this file's functions for linker dead-code removal
@ when unreferenced (expands per-toolchain from the build's include files).
264 ASM_FUNCTION_REMOVE_IF_UNREFERENCED