};
} udwords;
+// __aeabi_ return values
+//
+// Result structures for the EABI divide-with-remainder helpers: the
+// quotient is returned in the low register(s), the remainder in the
+// high register(s), matching the RTABI __aeabi_*divmod contract.
+//
+// NOTE(review): "Quotent" is a typo for "Quotient"; kept as-is because
+// other code presumably references the field by this spelling — confirm
+// before renaming.
+
+// 64-bit unsigned divide/modulo result (used by __aeabi_uldivmod).
+typedef struct {
+ UINT64 Quotent;
+ UINT64 Remainder;
+} ulldiv_t;
+
+// 64-bit signed divide/modulo result (used by __aeabi_ldivmod).
+typedef struct {
+ INT64 Quotent;
+ INT64 Remainder;
+} lldiv_t;
+
+// 32-bit unsigned divide/modulo result.
+typedef struct {
+ UINT32 Quotent;
+ UINT32 Remainder;
+} uidiv_return;
+
#if __GNUC__
#define COUNT_LEADING_ZEROS(_a) __builtin_clz((_a))
#define COUNT_TRAILING_ZEROS(_a) __builtin_ctz((_a))
.text
.align 2
- .globl ___ashldi3
-___ashldi3:
- @ args = 0, pretend = 0, frame = 0
- @ frame_needed = 0, uses_anonymous_args = 0
- @ link register save eliminated.
+ .globl ASM_PFX(__ashldi3)
+
+ASM_PFX(__ashldi3):
cmp r2, #31
- @ lr needed for prologue
bls L2
cmp r2, #63
subls r2, r2, #32
.text
.align 2
- .globl ___ashrdi3
-___ashrdi3:
- @ args = 0, pretend = 0, frame = 0
- @ frame_needed = 0, uses_anonymous_args = 0
- @ link register save eliminated.
+ .globl ASM_PFX(__ashrdi3)
+
+ASM_PFX(__ashrdi3):
cmp r2, #31
- @ lr needed for prologue
bls L2
cmp r2, #63
subls r2, r2, #32
.text
.align 2
- .globl ___clzsi2
-___clzsi2:
- @ args = 0, pretend = 0, frame = 0
+ .globl ASM_PFX(__clzsi2)
+
+ASM_PFX(__clzsi2):
@ frame_needed = 1, uses_anonymous_args = 0
stmfd sp!, {r7, lr}
add r7, sp, #0
.text
.align 2
- .globl ___ctzsi2
-___ctzsi2:
- @ args = 0, pretend = 0, frame = 0
- @ frame_needed = 0, uses_anonymous_args = 0
- @ link register save eliminated.
+ .globl ASM_PFX(__ctzsi2)
+
+ASM_PFX(__ctzsi2):
uxth r3, r0
cmp r3, #0
moveq ip, #16
.text
.align 2
- .globl ___divdi3
-___divdi3:
+ .globl ASM_PFX(__divdi3)
+
+ASM_PFX(__divdi3):
@ args = 0, pretend = 0, frame = 0
@ frame_needed = 1, uses_anonymous_args = 0
stmfd sp!, {r4, r5, r7, lr}
subs r0, r0, r10
sbc r1, r1, r11
str ip, [sp, #0]
- bl ___udivmoddi4
+ bl ASM_PFX(__udivmoddi4)
eor r2, r10, r4
eor r3, r10, r4
eor r0, r0, r2
.text
.align 2
- .globl ___divsi3
-___divsi3:
- @ args = 0, pretend = 0, frame = 0
- @ frame_needed = 1, uses_anonymous_args = 0
+ .globl ASM_PFX(__divsi3)
+
+ASM_PFX(__divsi3):
eor r3, r0, r0, asr #31
eor r2, r1, r1, asr #31
stmfd sp!, {r4, r5, r7, lr}
mov r4, r1, asr #31
sub r0, r3, r0, asr #31
sub r1, r2, r1, asr #31
- bl ___udivsi3
+ bl ASM_PFX(__udivsi3)
eor r1, r5, r4
eor r0, r0, r1
rsb r0, r1, r0
.text
.align 2
- .globl ___lshrdi3
-___lshrdi3:
- @ args = 0, pretend = 0, frame = 0
- @ frame_needed = 0, uses_anonymous_args = 0
- @ link register save eliminated.
+ .globl ASM_PFX(__lshrdi3)
+
+ASM_PFX(__lshrdi3):
cmp r2, #31
- @ lr needed for prologue
bls L2
cmp r2, #63
subls r2, r2, #32
.text
.align 2
- .globl _memcpy
-_memcpy:
- @ args = 0, pretend = 0, frame = 0
- @ frame_needed = 1, uses_anonymous_args = 0
+ .globl ASM_PFX(memcpy)
+
+ASM_PFX(memcpy):
stmfd sp!, {r7, lr}
mov ip, #0
add r7, sp, #0
.text
.align 2
- .globl ___moddi3
-___moddi3:
- @ args = 0, pretend = 0, frame = 8
- @ frame_needed = 1, uses_anonymous_args = 0
+ .globl ASM_PFX(__moddi3)
+
+ASM_PFX(__moddi3):
stmfd sp!, {r4, r5, r7, lr}
mov r4, r1, asr #31
add r7, sp, #8
subs r2, r2, r10
sbc r3, r3, r11
str ip, [sp, #0]
- bl ___udivmoddi4
+ bl ASM_PFX(__udivmoddi4)
ldrd r0, [sp, #8]
eor r0, r0, r4
eor r1, r1, r4
.text
.align 2
- .globl ___modsi3
-___modsi3:
- @ args = 0, pretend = 0, frame = 0
- @ frame_needed = 1, uses_anonymous_args = 0
+ .globl ASM_PFX(__modsi3)
+
+ASM_PFX(__modsi3):
stmfd sp!, {r4, r5, r7, lr}
add r7, sp, #8
mov r5, r0
.text
.align 2
- .globl ___muldi3
-___muldi3:
- @ args = 0, pretend = 0, frame = 8
- @ frame_needed = 1, uses_anonymous_args = 0
+ .globl ASM_PFX(__muldi3)
+
+ASM_PFX(__muldi3):
stmfd sp!, {r4, r5, r6, r7, lr}
add r7, sp, #12
stmfd sp!, {r8, r10, r11}
--- /dev/null
+#------------------------------------------------------------------------------
+#
+# Copyright (c) 2008-2010 Apple Inc. All rights reserved.
+#
+# All rights reserved. This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+#------------------------------------------------------------------------------
+
+
+#\r
+# int __aeabi_ulcmp (unsigned long long a, unsigned long long b)\r
+#\r
+# Three-way unsigned 64-bit compare for the ARM EABI:\r
+#   returns -1 if a < b, 0 if a == b, +1 if a > b.\r
+#\r
+# In:  r1:r0 = a (hi:lo), r3:r2 = b (hi:lo)\r
+# Out: r0    = -1 / 0 / +1\r
+#\r
+	.text\r
+	.align 2\r
+	.globl ASM_PFX(__aeabi_ulcmp)\r
+ASM_PFX(__aeabi_ulcmp):\r
+	@ r9 is callee-saved under the AAPCS (platform register) and is\r
+	@ written below, so it must be preserved; saving four registers\r
+	@ also keeps sp 8-byte aligned as the AAPCS requires.\r
+	stmfd	sp!, {r4, r5, r8, r9}\r
+	cmp	r3, r1\r
+	mov	r8, r0\r
+	mov	r9, r1\r
+	mov	r4, r2\r
+	mov	r5, r3\r
+	bls	L16\r
+L2:\r
+	mvn	r0, #0\r
+L1:\r
+	ldmfd	sp!, {r4, r5, r8, r9}\r
+	bx	lr\r
+L16:\r
+	beq	L17\r
+L4:\r
+	cmp	r9, r5\r
+	bhi	L7\r
+	beq	L18\r
+	cmp	r8, r4\r
+L14:\r
+	cmpeq	r9, r5\r
+	moveq	r0, #0\r
+	beq	L1\r
+	b	L1\r
+L18:\r
+	cmp	r8, r4\r
+	bls	L14\r
+L7:\r
+	mov	r0, #1\r
+	b	L1\r
+L17:\r
+	cmp	r2, r0\r
+	bhi	L2\r
+	b	L4\r
+	\r
.text
.align 2
- .globl ___ucmpdi2
-___ucmpdi2:
- @ args = 0, pretend = 0, frame = 0
- @ frame_needed = 0, uses_anonymous_args = 0
+ .globl ASM_PFX(__ucmpdi2)
+
+ASM_PFX(__ucmpdi2):
stmfd sp!, {r4, r5, r8, lr}
cmp r1, r3
mov r8, r0
.text
.align 2
- .globl ___udivdi3
-___udivdi3:
- @ args = 0, pretend = 0, frame = 0
- @ frame_needed = 1, uses_anonymous_args = 0
+ .globl ASM_PFX(__udivdi3)
+
+ASM_PFX(__udivdi3):
stmfd sp!, {r7, lr}
add r7, sp, #0
sub sp, sp, #8
mov ip, #0
str ip, [sp, #0]
- bl ___udivmoddi4
+ bl ASM_PFX(__udivmoddi4)
sub sp, r7, #0
ldmfd sp!, {r7, pc}
.text
.align 2
- .globl ___udivmoddi4
-___udivmoddi4:
- @ args = 8, pretend = 0, frame = 16
- @ frame_needed = 1, uses_anonymous_args = 0
+ .globl ASM_PFX(__udivmoddi4)
+
+ASM_PFX(__udivmoddi4):
stmfd sp!, {r4, r5, r6, r7, lr}
add r7, sp, #12
stmfd sp!, {r10, r11}
cmp r6, #0
beq L8
mov r1, r2
- bl ___umodsi3
+ bl ASM_PFX(__umodsi3)
mov r1, #0
stmia r6, {r0-r1}
L8:
beq L16
mov r1, r2
mov r0, r11
- bl ___umodsi3
+ bl ASM_PFX(__umodsi3)
mov r1, #0
stmia r6, {r0-r1}
L16:
ldr r1, [sp, #4]
mov r0, r11
L45:
- bl ___udivsi3
+ bl ASM_PFX(__udivsi3)
L46:
mov r10, r0
mov r11, #0
beq L16
ldr r1, [sp, #4]
mov r0, r11
- bl ___umodsi3
+ bl ASM_PFX(__umodsi3)
mov r4, r10
mov r5, r0
stmia r6, {r4-r5}
.text
.align 2
- .globl ___udivsi3
-___udivsi3:
- @ args = 0, pretend = 0, frame = 0
- @ frame_needed = 1, uses_anonymous_args = 0
+ .globl ASM_PFX(__udivsi3)
+
+ASM_PFX(__udivsi3):
cmp r1, #0
cmpne r0, #0
stmfd sp!, {r4, r5, r7, lr}
L2:
mov r0, #0
ldmfd sp!, {r4, r5, r7, pc}
+
--- /dev/null
+/** @file
+
+ Copyright (c) 2008-2010, Apple Inc. All rights reserved.
+
+ All rights reserved. This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "Llvm_int_lib.h"
+#include <Library/BaseLib.h>
+
+
+// Prototypes for the compiler intrinsics implemented elsewhere in this library.
+UINT32 __udivsi3(UINT32 n, UINT32 d);
+UINT32 __umodsi3(UINT32 a, UINT32 b);
+
+
+// EABI 32-bit unsigned divide-with-remainder helper (__aeabi_uidivmod).
+// Packs the quotient into bits [31:0] and the remainder into bits [63:32]
+// of the UINT64 return value; on little-endian ARM the AAPCS places those
+// in r0 and r1 respectively, which is the register layout the RTABI
+// requires for this helper.
+// NOTE(review): performs the division twice (__udivsi3 then __umodsi3) —
+// functionally correct, just not the fastest possible implementation.
+UINT64
+__aeabi_uidivmod(unsigned numerator, unsigned denominator)
+{
+  UINT64 Return;
+
+  // Quotient in the low 32 bits.
+  Return = __udivsi3 (numerator, denominator);
+  // Remainder in the high 32 bits.
+  Return |= LShiftU64 (__umodsi3 (numerator, denominator), 32);
+
+  return Return;
+}
+
+// EABI 32-bit unsigned division (__aeabi_uidiv): thin wrapper over __udivsi3.
+unsigned
+__aeabi_uidiv (unsigned n, unsigned d)
+{
+  return __udivsi3 (n, d);
+}
+
+
+
+
+
.text
.align 2
- .globl ___umoddi3
-___umoddi3:
- @ args = 0, pretend = 0, frame = 8
- @ frame_needed = 1, uses_anonymous_args = 0
+ .globl ASM_PFX(__umoddi3)
+
+ASM_PFX(__umoddi3):
stmfd sp!, {r7, lr}
add r7, sp, #0
sub sp, sp, #16
add ip, sp, #8
str ip, [sp, #0]
- bl ___udivmoddi4
+ bl ASM_PFX(__udivmoddi4)
ldrd r0, [sp, #8]
sub sp, r7, #0
ldmfd sp!, {r7, pc}
.text
.align 2
- .globl ___umodsi3
-___umodsi3:
- @ args = 0, pretend = 0, frame = 0
- @ frame_needed = 1, uses_anonymous_args = 0
+ .globl ASM_PFX(__umodsi3)
+
+ASM_PFX(__umodsi3):
stmfd sp!, {r4, r5, r7, lr}
add r7, sp, #8
mov r5, r0
mov r4, r1
- bl L___udivsi3$stub
+ bl ASM_PFX(__udivsi3)
mul r0, r4, r0
rsb r0, r0, r5
ldmfd sp!, {r4, r5, r7, pc}
- .section __TEXT,__symbol_stub4,symbol_stubs,none,12
- .align 2
-L___udivsi3$stub:
- .indirect_symbol ___udivsi3
- ldr ip, L___udivsi3$slp
- ldr pc, [ip, #0]
-L___udivsi3$slp:
- .long L___udivsi3$lazy_ptr
- .lazy_symbol_pointer
-L___udivsi3$lazy_ptr:
- .indirect_symbol ___udivsi3
- .long dyld_stub_binding_helper
+
Arm/switch16.S | GCC
Arm/switch32.S | GCC
+ Arm/sourcery.S | GCC
+ Arm/uldivmod.c | GCC
[Packages]
MdePkg/MdePkg.dec