-#------------------------------------------------------------------------------ \r
+#------------------------------------------------------------------------------\r
#\r
-# Replacement for Math64.c that is coded to use older GCC intrinsics. \r
+# Replacement for Math64.c that is coded to use older GCC intrinsics.\r
# Doing this reduces the number of intrinsics that are required when\r
-# you port to a new version of gcc. \r
+# you port to a new version of gcc.\r
#\r
# Need to split this into multple files to size optimize the image.\r
#\r
#\r
#------------------------------------------------------------------------------\r
\r
- .text\r
- .align 2\r
- GCC_ASM_EXPORT(InternalMathLShiftU64)\r
+ .text\r
+ .align 2\r
+ GCC_ASM_EXPORT(InternalMathLShiftU64)\r
\r
+# UINT64 InternalMathLShiftU64 (UINT64 Operand, UINTN Count)\r
+# In:  r0:r1 = Operand (low:high word), r2 = Count\r
+# Out: r0:r1 = Operand << Count\r
+# The subs/movpl pair handles Count >= 32: high word = low << (Count - 32).\r
+# NOTE(review): r4-r6 are saved scratch registers from compiler output.\r
ASM_PFX(InternalMathLShiftU64):\r
- stmfd sp!, {r4, r5, r6}\r
- mov r6, r1\r
- rsb ip, r2, #32\r
- mov r4, r6, asl r2\r
- subs r1, r2, #32\r
- orr r4, r4, r0, lsr ip\r
- mov r3, r0, asl r2\r
- movpl r4, r0, asl r1\r
- mov r5, r0\r
- mov r0, r3\r
- mov r1, r4\r
- ldmfd sp!, {r4, r5, r6}\r
- bx lr\r
-\r
- .align 2\r
- GCC_ASM_EXPORT(InternalMathRShiftU64)\r
+ stmfd sp!, {r4, r5, r6}\r
+ mov r6, r1\r
+ rsb ip, r2, #32\r
+ mov r4, r6, asl r2\r
+ subs r1, r2, #32\r
+ orr r4, r4, r0, lsr ip\r
+ mov r3, r0, asl r2\r
+ movpl r4, r0, asl r1\r
+ mov r5, r0\r
+ mov r0, r3\r
+ mov r1, r4\r
+ ldmfd sp!, {r4, r5, r6}\r
+ bx lr\r
+\r
+ .align 2\r
+ GCC_ASM_EXPORT(InternalMathRShiftU64)\r
\r
+# UINT64 InternalMathRShiftU64 (UINT64 Operand, UINTN Count)\r
+# In:  r0:r1 = Operand (low:high word), r2 = Count\r
+# Out: r0:r1 = Operand >> Count, zero-filled from the top (lsr)\r
+# The subs/movpl pair handles Count >= 32: low word = high >> (Count - 32).\r
ASM_PFX(InternalMathRShiftU64):\r
- stmfd sp!, {r4, r5, r6}\r
- mov r5, r0\r
- rsb ip, r2, #32\r
- mov r3, r5, lsr r2\r
- subs r0, r2, #32\r
- orr r3, r3, r1, asl ip\r
- mov r4, r1, lsr r2\r
- movpl r3, r1, lsr r0\r
- mov r6, r1\r
- mov r0, r3\r
- mov r1, r4\r
- ldmfd sp!, {r4, r5, r6}\r
- bx lr\r
-\r
- .align 2\r
- GCC_ASM_EXPORT(InternalMathARShiftU64)\r
+ stmfd sp!, {r4, r5, r6}\r
+ mov r5, r0\r
+ rsb ip, r2, #32\r
+ mov r3, r5, lsr r2\r
+ subs r0, r2, #32\r
+ orr r3, r3, r1, asl ip\r
+ mov r4, r1, lsr r2\r
+ movpl r3, r1, lsr r0\r
+ mov r6, r1\r
+ mov r0, r3\r
+ mov r1, r4\r
+ ldmfd sp!, {r4, r5, r6}\r
+ bx lr\r
+\r
+ .align 2\r
+ GCC_ASM_EXPORT(InternalMathARShiftU64)\r
\r
+# UINT64 InternalMathARShiftU64 (UINT64 Operand, UINTN Count)\r
+# In:  r0:r1 = Operand (low:high word), r2 = Count\r
+# Out: r0:r1 = Operand >> Count, arithmetic: sign-filled from bit 63 (asr)\r
+# Identical shape to the logical shift above, but the high word uses asr.\r
ASM_PFX(InternalMathARShiftU64):\r
- stmfd sp!, {r4, r5, r6}\r
- mov r5, r0\r
- rsb ip, r2, #32\r
- mov r3, r5, lsr r2\r
- subs r0, r2, #32\r
- orr r3, r3, r1, asl ip\r
- mov r4, r1, asr r2\r
- movpl r3, r1, asr r0\r
- mov r6, r1\r
- mov r0, r3\r
- mov r1, r4\r
- ldmfd sp!, {r4, r5, r6}\r
- bx lr\r
-\r
- .align 2\r
- GCC_ASM_EXPORT(InternalMathLRotU64)\r
+ stmfd sp!, {r4, r5, r6}\r
+ mov r5, r0\r
+ rsb ip, r2, #32\r
+ mov r3, r5, lsr r2\r
+ subs r0, r2, #32\r
+ orr r3, r3, r1, asl ip\r
+ mov r4, r1, asr r2\r
+ movpl r3, r1, asr r0\r
+ mov r6, r1\r
+ mov r0, r3\r
+ mov r1, r4\r
+ ldmfd sp!, {r4, r5, r6}\r
+ bx lr\r
+\r
+ .align 2\r
+ GCC_ASM_EXPORT(InternalMathLRotU64)\r
\r
+# UINT64 InternalMathLRotU64 (UINT64 Operand, UINTN Count)\r
+# In:  r0:r1 = Operand (low:high word), r2 = Count\r
+# Out: r0:r1 = (Operand << Count) | (Operand >> (64 - Count))\r
+# Computes the left-shift part (as in LShiftU64) and the complementary\r
+# right shift by (64 - Count) held in lr, then ORs the halves together.\r
ASM_PFX(InternalMathLRotU64):\r
- stmfd sp!, {r4, r5, r6, r7, lr}\r
- add r7, sp, #12\r
- mov r6, r1\r
- rsb ip, r2, #32\r
- mov r4, r6, asl r2\r
- rsb lr, r2, #64\r
- subs r1, r2, #32\r
- orr r4, r4, r0, lsr ip\r
- mov r3, r0, asl r2\r
- movpl r4, r0, asl r1\r
- sub ip, r2, #32\r
- mov r5, r0\r
- mov r0, r0, lsr lr\r
- rsbs r2, r2, #32\r
- orr r0, r0, r6, asl ip\r
- mov r1, r6, lsr lr\r
- movpl r0, r6, lsr r2\r
- orr r1, r1, r4\r
- orr r0, r0, r3\r
- ldmfd sp!, {r4, r5, r6, r7, pc}\r
-\r
-\r
- .align 2\r
- GCC_ASM_EXPORT(InternalMathRRotU64)\r
+ stmfd sp!, {r4, r5, r6, r7, lr}\r
+ add r7, sp, #12\r
+ mov r6, r1\r
+ rsb ip, r2, #32\r
+ mov r4, r6, asl r2\r
+ rsb lr, r2, #64\r
+ subs r1, r2, #32\r
+ orr r4, r4, r0, lsr ip\r
+ mov r3, r0, asl r2\r
+ movpl r4, r0, asl r1\r
+ sub ip, r2, #32\r
+ mov r5, r0\r
+ mov r0, r0, lsr lr\r
+ rsbs r2, r2, #32\r
+ orr r0, r0, r6, asl ip\r
+ mov r1, r6, lsr lr\r
+ movpl r0, r6, lsr r2\r
+ orr r1, r1, r4\r
+ orr r0, r0, r3\r
+ ldmfd sp!, {r4, r5, r6, r7, pc}\r
+\r
+\r
+ .align 2\r
+ GCC_ASM_EXPORT(InternalMathRRotU64)\r
\r
+# UINT64 InternalMathRRotU64 (UINT64 Operand, UINTN Count)\r
+# In:  r0:r1 = Operand (low:high word), r2 = Count\r
+# Out: r0:r1 = (Operand >> Count) | (Operand << (64 - Count))\r
+# Mirror of LRotU64: right-shift part plus a left shift by (64 - Count)\r
+# held in lr, with the halves ORed together at the end.\r
ASM_PFX(InternalMathRRotU64):\r
- stmfd sp!, {r4, r5, r6, r7, lr}\r
- add r7, sp, #12\r
- mov r5, r0\r
- rsb ip, r2, #32\r
- mov r3, r5, lsr r2\r
- rsb lr, r2, #64\r
- subs r0, r2, #32\r
- orr r3, r3, r1, asl ip\r
- mov r4, r1, lsr r2\r
- movpl r3, r1, lsr r0\r
- sub ip, r2, #32\r
- mov r6, r1\r
- mov r1, r1, asl lr\r
- rsbs r2, r2, #32\r
- orr r1, r1, r5, lsr ip\r
- mov r0, r5, asl lr\r
- movpl r1, r5, asl r2\r
- orr r0, r0, r3\r
- orr r1, r1, r4\r
- ldmfd sp!, {r4, r5, r6, r7, pc}\r
-\r
- .align 2\r
- GCC_ASM_EXPORT(InternalMathMultU64x32)\r
+ stmfd sp!, {r4, r5, r6, r7, lr}\r
+ add r7, sp, #12\r
+ mov r5, r0\r
+ rsb ip, r2, #32\r
+ mov r3, r5, lsr r2\r
+ rsb lr, r2, #64\r
+ subs r0, r2, #32\r
+ orr r3, r3, r1, asl ip\r
+ mov r4, r1, lsr r2\r
+ movpl r3, r1, lsr r0\r
+ sub ip, r2, #32\r
+ mov r6, r1\r
+ mov r1, r1, asl lr\r
+ rsbs r2, r2, #32\r
+ orr r1, r1, r5, lsr ip\r
+ mov r0, r5, asl lr\r
+ movpl r1, r5, asl r2\r
+ orr r0, r0, r3\r
+ orr r1, r1, r4\r
+ ldmfd sp!, {r4, r5, r6, r7, pc}\r
+\r
+ .align 2\r
+ GCC_ASM_EXPORT(InternalMathMultU64x32)\r
\r
+# UINT64 InternalMathMultU64x32 (UINT64 Multiplicand, UINT32 Multiplier)\r
+# In:  r0:r1 = Multiplicand, r2 = Multiplier\r
+# Out: r0:r1 = low 64 bits of the product\r
+# umull forms lo*Multiplier; the first mla folds hi*Multiplier into the high\r
+# word; the second mla multiplies by r3 = 0 and is a no-op compiler artifact.\r
ASM_PFX(InternalMathMultU64x32):\r
- stmfd sp!, {r7, lr}\r
- add r7, sp, #0\r
- mov r3, #0\r
- mov ip, r0\r
- mov lr, r1\r
- umull r0, r1, ip, r2\r
- mla r1, lr, r2, r1\r
- mla r1, ip, r3, r1\r
- ldmfd sp!, {r7, pc}\r
-\r
- .align 2\r
- GCC_ASM_EXPORT(InternalMathMultU64x64)\r
+ stmfd sp!, {r7, lr}\r
+ add r7, sp, #0\r
+ mov r3, #0\r
+ mov ip, r0\r
+ mov lr, r1\r
+ umull r0, r1, ip, r2\r
+ mla r1, lr, r2, r1\r
+ mla r1, ip, r3, r1\r
+ ldmfd sp!, {r7, pc}\r
+\r
+ .align 2\r
+ GCC_ASM_EXPORT(InternalMathMultU64x64)\r
\r
+# UINT64 InternalMathMultU64x64 (UINT64 Multiplicand, UINT64 Multiplier)\r
+# In:  r0:r1 = Multiplicand, r2:r3 = Multiplier\r
+# Out: r0:r1 = low 64 bits of the product:\r
+#      lo*lo (umull) + hi*lo (mla) + lo*hi (mla) folded into the high word.\r
ASM_PFX(InternalMathMultU64x64):\r
- stmfd sp!, {r7, lr}\r
- add r7, sp, #0\r
- mov ip, r0\r
- mov lr, r1\r
- umull r0, r1, ip, r2\r
- mla r1, lr, r2, r1\r
- mla r1, ip, r3, r1\r
- ldmfd sp!, {r7, pc}\r
-\r
- .align 2\r
- GCC_ASM_EXPORT(InternalMathDivU64x32)\r
+ stmfd sp!, {r7, lr}\r
+ add r7, sp, #0\r
+ mov ip, r0\r
+ mov lr, r1\r
+ umull r0, r1, ip, r2\r
+ mla r1, lr, r2, r1\r
+ mla r1, ip, r3, r1\r
+ ldmfd sp!, {r7, pc}\r
+\r
+ .align 2\r
+ GCC_ASM_EXPORT(InternalMathDivU64x32)\r
\r
+# UINT64 InternalMathDivU64x32 (UINT64 Dividend, UINT32 Divisor)\r
+# In:  r0:r1 = Dividend, r2 = Divisor\r
+# Out: r0:r1 = quotient\r
+# Zero-extends the 32-bit divisor (r3 = 0) and delegates to libgcc __udivdi3.\r
ASM_PFX(InternalMathDivU64x32):\r
- stmfd sp!, {r7, lr}\r
- add r7, sp, #0\r
- mov r3, #0\r
- bl ASM_PFX(__udivdi3)\r
- ldmfd sp!, {r7, pc}\r
- \r
- \r
- .align 2\r
- GCC_ASM_EXPORT(InternalMathModU64x32)\r
+ stmfd sp!, {r7, lr}\r
+ add r7, sp, #0\r
+ mov r3, #0\r
+ bl ASM_PFX(__udivdi3)\r
+ ldmfd sp!, {r7, pc}\r
+\r
+\r
+ .align 2\r
+ GCC_ASM_EXPORT(InternalMathModU64x32)\r
\r
+# UINT64 InternalMathModU64x32 (UINT64 Dividend, UINT32 Divisor)\r
+# In:  r0:r1 = Dividend, r2 = Divisor\r
+# Out: r0:r1 = remainder\r
+# Zero-extends the 32-bit divisor (r3 = 0) and delegates to libgcc __umoddi3.\r
ASM_PFX(InternalMathModU64x32):\r
- stmfd sp!, {r7, lr}\r
- add r7, sp, #0\r
- mov r3, #0\r
- bl ASM_PFX(__umoddi3)\r
- ldmfd sp!, {r7, pc}\r
- \r
- \r
- .align 2\r
- GCC_ASM_EXPORT(InternalMathDivRemU64x32)\r
+ stmfd sp!, {r7, lr}\r
+ add r7, sp, #0\r
+ mov r3, #0\r
+ bl ASM_PFX(__umoddi3)\r
+ ldmfd sp!, {r7, pc}\r
+\r
+\r
+ .align 2\r
+ GCC_ASM_EXPORT(InternalMathDivRemU64x32)\r
\r
+# UINT64 InternalMathDivRemU64x32 (UINT64 Dividend, UINT32 Divisor,\r
+#                                  UINT32 *Remainder OPTIONAL)\r
+# In:  r0:r1 = Dividend (saved in r10:r11), r2 = Divisor, r3 = Remainder ptr\r
+# Out: r0:r1 = quotient; *Remainder stored (32-bit) when the pointer != NULL\r
+# When r3 is NULL the __umoddi3 call is skipped (beq L22) and only the\r
+# quotient is computed via __udivdi3 with the divisor zero-extended in r4:r5.\r
ASM_PFX(InternalMathDivRemU64x32):\r
- stmfd sp!, {r4, r5, r6, r7, lr}\r
- add r7, sp, #12\r
- stmfd sp!, {r10, r11}\r
- subs r6, r3, #0\r
- mov r10, r0\r
- mov r11, r1\r
- moveq r4, r2\r
- moveq r5, #0\r
- beq L22\r
- mov r4, r2\r
- mov r5, #0\r
- mov r3, #0\r
- bl ASM_PFX(__umoddi3)\r
- str r0, [r6, #0]\r
+ stmfd sp!, {r4, r5, r6, r7, lr}\r
+ add r7, sp, #12\r
+ stmfd sp!, {r10, r11}\r
+ subs r6, r3, #0\r
+ mov r10, r0\r
+ mov r11, r1\r
+ moveq r4, r2\r
+ moveq r5, #0\r
+ beq L22\r
+ mov r4, r2\r
+ mov r5, #0\r
+ mov r3, #0\r
+ bl ASM_PFX(__umoddi3)\r
+ str r0, [r6, #0]\r
L22:\r
- mov r0, r10\r
- mov r1, r11\r
- mov r2, r4\r
- mov r3, r5\r
- bl ASM_PFX(__udivdi3)\r
- ldmfd sp!, {r10, r11}\r
- ldmfd sp!, {r4, r5, r6, r7, pc}\r
- \r
- \r
- .align 2\r
- GCC_ASM_EXPORT(InternalMathDivRemU64x64)\r
+ mov r0, r10\r
+ mov r1, r11\r
+ mov r2, r4\r
+ mov r3, r5\r
+ bl ASM_PFX(__udivdi3)\r
+ ldmfd sp!, {r10, r11}\r
+ ldmfd sp!, {r4, r5, r6, r7, pc}\r
+\r
+\r
+ .align 2\r
+ GCC_ASM_EXPORT(InternalMathDivRemU64x64)\r
\r
+# UINT64 InternalMathDivRemU64x64 (UINT64 Dividend, UINT64 Divisor,\r
+#                                  UINT64 *Remainder OPTIONAL)\r
+# In:  r0:r1 = Dividend (saved in r4:r5), r2:r3 = Divisor (saved in r10:r11);\r
+#      Remainder pointer is the first stack argument, loaded from [sp, #28]\r
+#      (28 = 7 registers pushed by the two stmfd instructions above it).\r
+# Out: r0:r1 = quotient from __udivdi3; when the pointer != NULL the 64-bit\r
+#      remainder from __umoddi3 is stored first via stmia.\r
ASM_PFX(InternalMathDivRemU64x64):\r
- stmfd sp!, {r4, r5, r6, r7, lr}\r
- add r7, sp, #12\r
- stmfd sp!, {r10, r11}\r
- ldr r6, [sp, #28]\r
- mov r4, r0\r
- cmp r6, #0\r
- mov r5, r1\r
- mov r10, r2\r
- mov r11, r3\r
- beq L26\r
- bl ASM_PFX(__umoddi3)\r
- stmia r6, {r0-r1}\r
+ stmfd sp!, {r4, r5, r6, r7, lr}\r
+ add r7, sp, #12\r
+ stmfd sp!, {r10, r11}\r
+ ldr r6, [sp, #28]\r
+ mov r4, r0\r
+ cmp r6, #0\r
+ mov r5, r1\r
+ mov r10, r2\r
+ mov r11, r3\r
+ beq L26\r
+ bl ASM_PFX(__umoddi3)\r
+ stmia r6, {r0-r1}\r
L26:\r
- mov r0, r4\r
- mov r1, r5\r
- mov r2, r10\r
- mov r3, r11\r
- bl ASM_PFX(__udivdi3)\r
- ldmfd sp!, {r10, r11}\r
- ldmfd sp!, {r4, r5, r6, r7, pc}\r
- \r
- \r
- .align 2\r
- GCC_ASM_EXPORT(InternalMathDivRemS64x64)\r
+ mov r0, r4\r
+ mov r1, r5\r
+ mov r2, r10\r
+ mov r3, r11\r
+ bl ASM_PFX(__udivdi3)\r
+ ldmfd sp!, {r10, r11}\r
+ ldmfd sp!, {r4, r5, r6, r7, pc}\r
+\r
+\r
+ .align 2\r
+ GCC_ASM_EXPORT(InternalMathDivRemS64x64)\r
\r
+# INT64 InternalMathDivRemS64x64 (INT64 Dividend, INT64 Divisor,\r
+#                                 INT64 *Remainder OPTIONAL)\r
+# Signed variant of DivRemU64x64: identical register/stack layout, but\r
+# delegates to the signed libgcc routines __moddi3 and __divdi3.\r
ASM_PFX(InternalMathDivRemS64x64):\r
- stmfd sp!, {r4, r5, r6, r7, lr}\r
- add r7, sp, #12\r
- stmfd sp!, {r10, r11}\r
- ldr r6, [sp, #28]\r
- mov r4, r0\r
- cmp r6, #0\r
- mov r5, r1\r
- mov r10, r2\r
- mov r11, r3\r
- beq L30\r
- bl ASM_PFX(__moddi3)\r
- stmia r6, {r0-r1}\r
+ stmfd sp!, {r4, r5, r6, r7, lr}\r
+ add r7, sp, #12\r
+ stmfd sp!, {r10, r11}\r
+ ldr r6, [sp, #28]\r
+ mov r4, r0\r
+ cmp r6, #0\r
+ mov r5, r1\r
+ mov r10, r2\r
+ mov r11, r3\r
+ beq L30\r
+ bl ASM_PFX(__moddi3)\r
+ stmia r6, {r0-r1}\r
L30:\r
- mov r0, r4\r
- mov r1, r5\r
- mov r2, r10\r
- mov r3, r11\r
- bl ASM_PFX(__divdi3)\r
- ldmfd sp!, {r10, r11}\r
- ldmfd sp!, {r4, r5, r6, r7, pc}\r
- \r
- \r
- .align 2\r
- GCC_ASM_EXPORT(InternalMathSwapBytes64)\r
+ mov r0, r4\r
+ mov r1, r5\r
+ mov r2, r10\r
+ mov r3, r11\r
+ bl ASM_PFX(__divdi3)\r
+ ldmfd sp!, {r10, r11}\r
+ ldmfd sp!, {r4, r5, r6, r7, pc}\r
+\r
+\r
+ .align 2\r
+ GCC_ASM_EXPORT(InternalMathSwapBytes64)\r
\r
+# UINT64 InternalMathSwapBytes64 (UINT64 Operand)\r
+# In:  r0:r1 = Operand (low:high word)\r
+# Out: r0:r1 = byte-reversed Operand: each 32-bit half is byte-swapped via\r
+#      SwapBytes32 and the two halves are exchanged (lo' = swap(hi), etc.).\r
ASM_PFX(InternalMathSwapBytes64):\r
- stmfd sp!, {r4, r5, r7, lr}\r
- mov r5, r1\r
- bl ASM_PFX(SwapBytes32)\r
- mov r4, r0\r
- mov r0, r5\r
- bl ASM_PFX(SwapBytes32)\r
- mov r1, r4\r
- ldmfd sp!, {r4, r5, r7, pc}\r
+ stmfd sp!, {r4, r5, r7, lr}\r
+ mov r5, r1\r
+ bl ASM_PFX(SwapBytes32)\r
+ mov r4, r0\r
+ mov r0, r5\r
+ bl ASM_PFX(SwapBytes32)\r
+ mov r1, r4\r
+ ldmfd sp!, {r4, r5, r7, pc}\r
\r
\r
-ASM_FUNCTION_REMOVE_IF_UNREFERENCED
\ No newline at end of file
+ASM_FUNCTION_REMOVE_IF_UNREFERENCED\r