#
# ConvertAsm.py: Automatically generated from CopyMem.asm
#
#------------------------------------------------------------------------------
#
# Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution.  The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php.
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
# Module Name:
#
#   CopyMem.S
#
# Abstract:
#
#   CopyMem function
#
# Notes:
#
#------------------------------------------------------------------------------

#------------------------------------------------------------------------------
#  VOID *
#  EFIAPI
#  InternalMemCopyMem (
#    IN VOID   *Destination,
#    IN VOID   *Source,
#    IN UINTN  Count
#    )
#------------------------------------------------------------------------------
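#
# EFIAPI on X64 follows the Microsoft x64 calling convention, so the arguments
# arrive as rcx = Destination, rdx = Source, r8 = Count, and the Destination
# pointer is returned in rax.  Non-overlapping (or backward-overlapping)
# buffers are copied forward: a short "rep movsb" first aligns rdi to a
# 16-byte boundary, whole DQwords are then streamed with non-temporal movntdq
# stores, and any remaining bytes are copied with "rep movsb".  When Source
# lies below an overlapping Destination, the copy is done backward, byte by
# byte, with the direction flag set.
#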
ASM_GLOBAL ASM_PFX(InternalMemCopyMem)
ASM_PFX(InternalMemCopyMem):
    pushq   %rsi
    pushq   %rdi
    movq    %rdx, %rsi                  # rsi <- Source
    movq    %rcx, %rdi                  # rdi <- Destination
    leaq    -1(%rsi, %r8,), %r9         # r9 <- Last byte of Source
    cmpq    %rdi, %rsi
    movq    %rdi, %rax                  # rax <- Destination as return value
    jae     L0                          # Copy forward if Source >= Destination
    cmpq    %rdi, %r9                   # Overlapped?
    jae     L_CopyBackward              # Copy backward if overlapped
L0:
    xorq    %rcx, %rcx
    subq    %rdi, %rcx                  # rcx <- -rdi
    andq    $15, %rcx                   # rcx + rdi should be 16 bytes aligned
    jz      L1                          # skip if rcx == 0
    cmpq    %r8, %rcx
    cmova   %r8, %rcx
    subq    %rcx, %r8
    rep     movsb
L1:
    movq    %r8, %rcx
    andq    $15, %r8
    shrq    $4, %rcx                    # rcx <- # of DQwords to copy
    jz      L_CopyBytes
    movdqu  %xmm0, 0x18(%rsp)           # save xmm0 on stack
L2:
    movdqu  (%rsi), %xmm0               # rsi may not be 16-byte aligned
    movntdq %xmm0, (%rdi)               # rdi should be 16-byte aligned
    addq    $16, %rsi
    addq    $16, %rdi
    loop    L2
    mfence
    movdqa  0x18(%rsp), %xmm0           # restore xmm0
    jmp     L_CopyBytes                 # copy remaining bytes
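#
# Overlapping copy with Source below Destination: point rsi/rdi at the last
# byte of each buffer, set the direction flag so "rep movsb" walks downward,
# and copy byte by byte; the flag is cleared again (cld) before returning.
#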
L_CopyBackward:
    movq    %r9, %rsi                   # rsi <- Last byte of Source
    leaq    -1(%rdi, %r8,), %rdi        # rdi <- Last byte of Destination
    std
L_CopyBytes:
    movq    %r8, %rcx
    rep     movsb
    cld
    popq    %rdi
    popq    %rsi
    ret