#\r
#------------------------------------------------------------------------------\r
#\r
-# Copyright (c) 2006, Intel Corporation\r
-# All rights reserved. This program and the accompanying materials\r
+# Copyright (c) 2006 - 2009, Intel Corporation. All rights reserved.<BR>\r
+# This program and the accompanying materials\r
# are licensed and made available under the terms and conditions of the BSD License\r
# which accompanies this distribution. The full text of the license may be found at\r
# http://opensource.org/licenses/bsd-license.php\r
#  VOID *
#  EFIAPI
#  InternalMemSetMem (
#    IN VOID   *Buffer,
#    IN UINTN  Count,
#    IN UINT8  Value
#    )
#------------------------------------------------------------------------------\r
-.intel_syntax noprefix\r
-.globl ASM_PFX(InternalMemSetMem)\r
+ASM_GLOBAL ASM_PFX(InternalMemSetMem)\r
ASM_PFX(InternalMemSetMem):\r
- push rdi\r
- mov rdi, rcx # rdi <- Buffer\r
- mov al, r8b # al <- Value\r
- mov r9, rdi # r9 <- Buffer as return value\r
- xor rcx, rcx\r
- sub rcx, rdi\r
- and rcx, 15 # rcx + rdi aligns on 16-byte boundary\r
+ pushq %rdi\r
+ movq %rcx, %rdi # rdi <- Buffer\r
+ movb %r8b, %al # al <- Value\r
+ movq %rdi, %r9 # r9 <- Buffer as return value\r
+ xorq %rcx, %rcx\r
+ subq %rdi, %rcx\r
+ andq $15, %rcx # rcx + rdi aligns on 16-byte boundary\r
jz L0\r
- cmp rcx, rdx\r
- cmova rcx, rdx\r
- sub rdx, rcx\r
+ cmpq %rdx, %rcx\r
+ cmova %rdx, %rcx\r
+ subq %rcx, %rdx\r
rep stosb\r
L0:\r
- mov rcx, rdx\r
- and rdx, 15\r
- shr rcx, 4\r
+ movq %rdx, %rcx\r
+ andq $15, %rdx\r
+ shrq $4, %rcx\r
jz L_SetBytes\r
- mov ah, al # ax <- Value repeats twice\r
- movdqa [rsp + 0x10], xmm0 # save xmm0\r
- movd xmm0, eax # xmm0[0..16] <- Value repeats twice\r
- pshuflw xmm0, xmm0, 0 # xmm0[0..63] <- Value repeats 8 times\r
- movlhps xmm0, xmm0 # xmm0 <- Value repeats 16 times\r
+ movb %al, %ah # ax <- Value repeats twice\r
+ movdqa %xmm0, 0x10(%rsp) # save xmm0\r
+ movd %eax, %xmm0 # xmm0[0..16] <- Value repeats twice\r
+ pshuflw $0, %xmm0, %xmm0 # xmm0[0..63] <- Value repeats 8 times\r
+ movlhps %xmm0, %xmm0 # xmm0 <- Value repeats 16 times\r
L1:\r
- movntdq [rdi], xmm0 # rdi should be 16-byte aligned\r
- add rdi, 16\r
+ movntdq %xmm0, (%rdi) # rdi should be 16-byte aligned\r
+ add $16, %rdi\r
loop L1\r
mfence\r
- movdqa xmm0, [rsp + 0x10] # restore xmm0\r
+ movdqa 0x10(%rsp), %xmm0 # restore xmm0\r
L_SetBytes:\r
- mov ecx, edx # high 32 bits of rcx are always zero\r
+ movl %edx, %ecx # high 32 bits of rcx are always zero\r
rep stosb\r
- mov rax, r9 # rax <- Return value\r
- pop rdi\r
+ movq %r9, %rax # rax <- Return value\r
+ popq %rdi\r
ret\r