-#------------------------------------------------------------------------------ \r
+#------------------------------------------------------------------------------\r
#\r
# SemMem() worker for ARM\r
#\r
# This file started out as C code that did 64 bit moves if the buffer was\r
# 32-bit aligned, else it does a byte copy. It also does a byte copy for\r
-# any trailing bytes. It was updated to do 32-byte at a time. \r
+# any trailing bytes. It was later updated to set 32 bytes at a time.\r
#\r
# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>\r
# This program and the accompanying materials\r
IN UINT8 Value\r
)\r
**/\r
- \r
+\r
.text\r
+.syntax unified\r
.align 2\r
-.globl ASM_PFX(InternalMemSetMem)\r
+GCC_ASM_EXPORT(InternalMemSetMem)\r
\r
ASM_PFX(InternalMemSetMem):\r
- stmfd sp!, {r4-r11, lr}\r
- tst r0, #3\r
- movne r3, #0\r
- moveq r3, #1\r
- cmp r1, #31\r
- movls lr, #0\r
- andhi lr, r3, #1\r
- cmp lr, #0\r
- mov r12, r0\r
- bne L31\r
+ stmfd sp!, {r4-r11, lr}\r
+ tst r0, #3\r
+ movne r3, #0\r
+ moveq r3, #1\r
+ cmp r1, #31\r
+ movls lr, #0\r
+ andhi lr, r3, #1\r
+ cmp lr, #0\r
+ mov r12, r0\r
+ bne L31\r
L32:\r
- mov r3, #0\r
- b L43\r
+ mov r3, #0\r
+ b L43\r
L31:\r
and r4, r2, #0xff\r
orr r4, r4, r4, LSL #8\r
- orr r4, r4, r4, LSL #16 \r
- mov r5, r4\r
- mov r5, r4\r
- mov r6, r4\r
- mov r7, r4 \r
- mov r8, r4 \r
- mov r9, r4 \r
- mov r10, r4 \r
- mov r11, r4 \r
- b L32\r
+ orr r4, r4, r4, LSL #16\r
+ mov r5, r4\r
+ mov r6, r4\r
+ mov r7, r4\r
+ mov r8, r4\r
+ mov r9, r4\r
+ mov r10, r4\r
+ mov r11, r4\r
+ b L32\r
L34:\r
- cmp lr, #0\r
- streqb r2, [r12], #1\r
- subeq r1, r1, #1\r
- beq L43\r
- sub r1, r1, #32\r
- cmp r1, #31\r
- movls lr, r3\r
- stmia r12!, {r4-r11}\r
+ cmp lr, #0\r
+ strbeq r2, [r12], #1\r
+ subeq r1, r1, #1\r
+ beq L43\r
+ sub r1, r1, #32\r
+ cmp r1, #31\r
+ movls lr, r3\r
+ stmia r12!, {r4-r11}\r
L43:\r
- cmp r1, #0\r
- bne L34\r
- ldmfd sp!, {r4-r11, pc}\r
-
\ No newline at end of file
+ cmp r1, #0\r
+ bne L34\r
+ ldmfd sp!, {r4-r11, pc}\r
+\r