--- /dev/null
+//\r
+// Copyright (c) 2013 - 2016, Linaro Limited\r
+// All rights reserved.\r
+//\r
+// Redistribution and use in source and binary forms, with or without\r
+// modification, are permitted provided that the following conditions are met:\r
+// * Redistributions of source code must retain the above copyright\r
+// notice, this list of conditions and the following disclaimer.\r
+// * Redistributions in binary form must reproduce the above copyright\r
+// notice, this list of conditions and the following disclaimer in the\r
+// documentation and/or other materials provided with the distribution.\r
+// * Neither the name of the Linaro nor the\r
+// names of its contributors may be used to endorse or promote products\r
+// derived from this software without specific prior written permission.\r
+//\r
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\r
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\r
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\r
+// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\r
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\r
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\r
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
+//\r
+\r
+// Parameters and result.\r
+#define src1 r0\r
+#define src2 r1\r
+#define limit r2\r
+#define result r0\r
+\r
+// Internal variables.\r
+#define data1 r3\r
+#define data2 r4\r
+#define limit_wd r5\r
+#define diff r6\r
+#define tmp1 r7\r
+#define tmp2 r12\r
+#define pos r8\r
+#define mask r14\r
+\r
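+// InternalMemCompareMem: compare limit (r2) bytes at src1 (r0) and src2 (r1).\r
+// When both pointers share the same word alignment, the comparison is done a\r
+// word at a time, otherwise a byte at a time. The result is 0 if all bytes\r
+// match, and otherwise a value whose sign reflects the ordering of the first\r
+// pair of mismatching bytes (treated as unsigned chars).\r
+\r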
+ .text\r
+ .thumb\r
+ .syntax unified\r
+ .align 5\r
+ASM_GLOBAL ASM_PFX(InternalMemCompareMem)\r
+ASM_PFX(InternalMemCompareMem):\r
+ push {r4-r8, lr}\r
+ eor tmp1, src1, src2\r
+ tst tmp1, #3\r
+ bne .Lmisaligned4\r
+ ands tmp1, src1, #3\r
+ bne .Lmutual_align\r
+ add limit_wd, limit, #3\r
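+  // The 32-bit nop below is padding: together with the Thumb-2 instructions\r
+  // above it keeps .Lloop_aligned on a 32-byte boundary, so the hot loop\r
+  // stays within a single cache line.\r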
+ nop.w\r
+ lsr limit_wd, limit_wd, #2\r
+\r
+ // Start of performance-critical section -- one 32B cache line.\r
+.Lloop_aligned:\r
+ ldr data1, [src1], #4\r
+ ldr data2, [src2], #4\r
+.Lstart_realigned:\r
+ subs limit_wd, limit_wd, #1\r
+ eor diff, data1, data2 // Non-zero if differences found.\r
+ cbnz diff, 0f\r
+ bne .Lloop_aligned\r
+ // End of performance-critical section -- one 32B cache line.\r
+\r
+ // Not reached the limit, must have found a diff.\r
+0: cbnz limit_wd, .Lnot_limit\r
+\r
+ // Limit % 4 == 0 => all bytes significant.\r
+ ands limit, limit, #3\r
+ beq .Lnot_limit\r
+\r
+  lsl limit, limit, #3 // Bytes -> bits.\r
+ mov mask, #~0\r
+ lsl mask, mask, limit\r
+ bic data1, data1, mask\r
+ bic data2, data2, mask\r
+\r
+ orr diff, diff, mask\r
+\r
+.Lnot_limit:\r
+ rev diff, diff\r
+ rev data1, data1\r
+ rev data2, data2\r
+\r
+ // The MS-non-zero bit of DIFF marks either the first bit\r
+ // that is different, or the end of the significant data.\r
+ // Shifting left now will bring the critical information into the\r
+ // top bits.\r
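+  // For example, if data1 = 0x40302010 and data2 = 0x40FF2010 (first mismatch\r
+  // in byte 2), diff = 0x00CF0000; after the REVs, the CLZ/LSL pair moves 0x30\r
+  // and 0xFF into the top byte of data1 and data2, so the subtraction below\r
+  // produces a negative result, as a byte-wise compare would.\r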
+ clz pos, diff\r
+ lsl data1, data1, pos\r
+ lsl data2, data2, pos\r
+\r
+ // But we need to zero-extend (char is unsigned) the value and then\r
+ // perform a signed 32-bit subtraction.\r
+ lsr data1, data1, #28\r
+ sub result, data1, data2, lsr #28\r
+ pop {r4-r8, pc}\r
+\r
+.Lmutual_align:\r
+ // Sources are mutually aligned, but are not currently at an\r
+ // alignment boundary. Round down the addresses and then mask off\r
+ // the bytes that precede the start point.\r
+ bic src1, src1, #3\r
+ bic src2, src2, #3\r
+ add limit, limit, tmp1 // Adjust the limit for the extra.\r
+  lsl tmp1, tmp1, #3 // Bytes beyond alignment -> bits.\r
+ ldr data1, [src1], #4\r
+ neg tmp1, tmp1 // Bits to alignment -32.\r
+ ldr data2, [src2], #4\r
+ mov tmp2, #~0\r
+\r
+ // Little-endian. Early bytes are at LSB.\r
+ lsr tmp2, tmp2, tmp1 // Shift (tmp1 & 31).\r
+ add limit_wd, limit, #3\r
+ orr data1, data1, tmp2\r
+ orr data2, data2, tmp2\r
+ lsr limit_wd, limit_wd, #2\r
+ b .Lstart_realigned\r
+\r
+.Lmisaligned4:\r
+ sub limit, limit, #1\r
+1:\r
+ // Perhaps we can do better than this.\r
+ ldrb data1, [src1], #1\r
+ ldrb data2, [src2], #1\r
+ subs limit, limit, #1\r
+ it cs\r
+ cmpcs data1, data2\r
+ beq 1b\r
+ sub result, data1, data2\r
+ pop {r4-r8, pc}\r
--- /dev/null
+;\r
+; Copyright (c) 2013 - 2016, Linaro Limited\r
+; All rights reserved.\r
+;\r
+; Redistribution and use in source and binary forms, with or without\r
+; modification, are permitted provided that the following conditions are met:\r
+; * Redistributions of source code must retain the above copyright\r
+; notice, this list of conditions and the following disclaimer.\r
+; * Redistributions in binary form must reproduce the above copyright\r
+; notice, this list of conditions and the following disclaimer in the\r
+; documentation and/or other materials provided with the distribution.\r
+; * Neither the name of the Linaro nor the\r
+; names of its contributors may be used to endorse or promote products\r
+; derived from this software without specific prior written permission.\r
+;\r
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\r
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\r
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\r
+; HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\r
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\r
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\r
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
+;\r
+\r
+; Parameters and result.\r
+#define src1 r0\r
+#define src2 r1\r
+#define limit r2\r
+#define result r0\r
+\r
+; Internal variables.\r
+#define data1 r3\r
+#define data2 r4\r
+#define limit_wd r5\r
+#define diff r6\r
+#define tmp1 r7\r
+#define tmp2 r12\r
+#define pos r8\r
+#define mask r14\r
+\r
+ EXPORT InternalMemCompareMem\r
+ THUMB\r
+ AREA CompareMem, CODE, READONLY\r
+\r
+InternalMemCompareMem\r
+ push {r4-r8, lr}\r
+ eor tmp1, src1, src2\r
+ tst tmp1, #3\r
+ bne Lmisaligned4\r
+ ands tmp1, src1, #3\r
+ bne Lmutual_align\r
+ add limit_wd, limit, #3\r
+ nop.w\r
+ lsr limit_wd, limit_wd, #2\r
+\r
+ ; Start of performance-critical section -- one 32B cache line.\r
+Lloop_aligned\r
+ ldr data1, [src1], #4\r
+ ldr data2, [src2], #4\r
+Lstart_realigned\r
+ subs limit_wd, limit_wd, #1\r
+ eor diff, data1, data2 ; Non-zero if differences found.\r
+ cbnz diff, L0\r
+ bne Lloop_aligned\r
+ ; End of performance-critical section -- one 32B cache line.\r
+\r
+ ; Not reached the limit, must have found a diff.\r
+L0\r
+ cbnz limit_wd, Lnot_limit\r
+\r
+  ; Limit % 4 == 0 => all bytes significant.\r
+ ands limit, limit, #3\r
+ beq Lnot_limit\r
+\r
+  lsl limit, limit, #3 ; Bytes -> bits.\r
+ mov mask, #~0\r
+ lsl mask, mask, limit\r
+ bic data1, data1, mask\r
+ bic data2, data2, mask\r
+\r
+ orr diff, diff, mask\r
+\r
+Lnot_limit\r
+ rev diff, diff\r
+ rev data1, data1\r
+ rev data2, data2\r
+\r
+ ; The MS-non-zero bit of DIFF marks either the first bit\r
+ ; that is different, or the end of the significant data.\r
+ ; Shifting left now will bring the critical information into the\r
+ ; top bits.\r
+ clz pos, diff\r
+ lsl data1, data1, pos\r
+ lsl data2, data2, pos\r
+\r
+ ; But we need to zero-extend (char is unsigned) the value and then\r
+ ; perform a signed 32-bit subtraction.\r
+ lsr data1, data1, #28\r
+ sub result, data1, data2, lsr #28\r
+ pop {r4-r8, pc}\r
+\r
+Lmutual_align\r
+ ; Sources are mutually aligned, but are not currently at an\r
+ ; alignment boundary. Round down the addresses and then mask off\r
+ ; the bytes that precede the start point.\r
+ bic src1, src1, #3\r
+ bic src2, src2, #3\r
+ add limit, limit, tmp1 ; Adjust the limit for the extra.\r
+  lsl tmp1, tmp1, #3 ; Bytes beyond alignment -> bits.\r
+ ldr data1, [src1], #4\r
+ neg tmp1, tmp1 ; Bits to alignment -32.\r
+ ldr data2, [src2], #4\r
+ mov tmp2, #~0\r
+\r
+ ; Little-endian. Early bytes are at LSB.\r
+ lsr tmp2, tmp2, tmp1 ; Shift (tmp1 & 31).\r
+ add limit_wd, limit, #3\r
+ orr data1, data1, tmp2\r
+ orr data2, data2, tmp2\r
+ lsr limit_wd, limit_wd, #2\r
+ b Lstart_realigned\r
+\r
+Lmisaligned4\r
+ sub limit, limit, #1\r
+L1\r
+  ; Perhaps we can do better than this.\r
+ ldrb data1, [src1], #1\r
+ ldrb data2, [src2], #1\r
+ subs limit, limit, #1\r
+ it cs\r
+ cmpcs data1, data2\r
+ beq L1\r
+ sub result, data1, data2\r
+ pop {r4-r8, pc}\r
+\r
+ END\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# CopyMem() worker for ARM\r
+#\r
+# This file started out as C code that did 64 bit moves if the buffer was\r
+# 32-bit aligned, else it does a byte copy. It also does a byte copy for\r
+# any trailing bytes. It was updated to do 32-byte copies using stm/ldm.\r
+#\r
+# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>\r
+# Copyright (c) 2016, Linaro Ltd. All rights reserved.<BR>\r
+# This program and the accompanying materials\r
+# are licensed and made available under the terms and conditions of the BSD License\r
+# which accompanies this distribution. The full text of the license may be found at\r
+# http://opensource.org/licenses/bsd-license.php\r
+#\r
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+ .text\r
+ .thumb\r
+ .syntax unified\r
+\r
+/**\r
+ Copy Length bytes from Source to Destination. Overlap is OK.\r
+\r
+  This implementation copies 32 bytes at a time using ldm/stm when at least\r
+  32 bytes remain and the buffers are suitably aligned, and falls back to a\r
+  byte-by-byte copy otherwise. When the destination overlaps the source, the\r
+  copy is performed backwards, starting from the end of the buffers.\r
+\r
+ @param Destination Target of copy\r
+ @param Source Place to copy from\r
+ @param Length Number of bytes to copy\r
+\r
+ @return Destination\r
+\r
+\r
+VOID *\r
+EFIAPI\r
+InternalMemCopyMem (\r
+ OUT VOID *DestinationBuffer,\r
+ IN CONST VOID *SourceBuffer,\r
+ IN UINTN Length\r
+ )\r
+**/\r
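+// Rough C-level outline of the code below (illustrative only; the alignment\r
+// checks use a 0xF mask, i.e. 16-byte alignment):\r
+//\r
+//   if (Destination >= Source && (Destination - Source) <= Length) {\r
+//     // Overlap: copy backwards from the end -- 32 bytes per ldm/stm\r
+//     // when both end addresses are 16-byte aligned and Length >= 32,\r
+//     // falling back to a byte-at-a-time copy for the remainder.\r
+//   } else {\r
+//     // Copy forwards -- 32 bytes per ldm/stm when both addresses are\r
+//     // 16-byte aligned and Length >= 32, falling back to a\r
+//     // byte-at-a-time copy for the remainder.\r
+//   }\r
+//   return Destination;\r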
+ASM_GLOBAL ASM_PFX(InternalMemCopyMem)\r
+ASM_PFX(InternalMemCopyMem):\r
+ push {r4-r11, lr}\r
+  // Save the input parameters in extra registers (r11 = destination for the return value, r10 = destination, r14 = source, r12 = length)\r
+ mov r11, r0\r
+ mov r10, r0\r
+ mov r12, r2\r
+ mov r14, r1\r
+\r
+ cmp r11, r1\r
+ // If (dest < source)\r
+ bcc memcopy_check_optim_default\r
+\r
+ // If (source + length < dest)\r
+ rsb r3, r1, r11\r
+ cmp r12, r3\r
+ bcc memcopy_check_optim_default\r
+ b memcopy_check_optim_overlap\r
+\r
+memcopy_check_optim_default:\r
+  // Check if we can use an optimized path ((length >= 32) && destination 16-byte aligned && source 16-byte aligned) for the memcopy (optimized path if r0 == 1)\r
+ tst r0, #0xF\r
+ it ne\r
+ movne r0, #0\r
+ bne memcopy_default\r
+ tst r1, #0xF\r
+ ite ne\r
+ movne r3, #0\r
+ moveq r3, #1\r
+ cmp r2, #31\r
+ ite ls\r
+ movls r0, #0\r
+ andhi r0, r3, #1\r
+ b memcopy_default\r
+\r
+memcopy_check_optim_overlap:\r
+ // r10 = dest_end, r14 = source_end\r
+ add r10, r11, r12\r
+ add r14, r12, r1\r
+\r
+  // Are we in the optimized case ((length >= 32) && dest_end 16-byte aligned && source_end 16-byte aligned)\r
+ cmp r2, #31\r
+ ite ls\r
+ movls r0, #0\r
+ movhi r0, #1\r
+ tst r10, #0xF\r
+ it ne\r
+ movne r0, #0\r
+ tst r14, #0xF\r
+ it ne\r
+ movne r0, #0\r
+ b memcopy_overlapped\r
+\r
+memcopy_overlapped_non_optim:\r
+ // We read 1 byte from the end of the source buffer\r
+ sub r3, r14, #1\r
+ sub r12, r12, #1\r
+ ldrb r3, [r3, #0]\r
+ sub r2, r10, #1\r
+ cmp r12, #0\r
+ // We write 1 byte at the end of the dest buffer\r
+ sub r10, r10, #1\r
+ sub r14, r14, #1\r
+ strb r3, [r2, #0]\r
+ bne memcopy_overlapped_non_optim\r
+ b memcopy_end\r
+\r
+// r10 = dest_end, r14 = source_end\r
+memcopy_overlapped:\r
+ // Are we in the optimized case ?\r
+ cmp r0, #0\r
+ beq memcopy_overlapped_non_optim\r
+\r
+ // Optimized Overlapped - Read 32 bytes\r
+ sub r14, r14, #32\r
+ sub r12, r12, #32\r
+ cmp r12, #31\r
+ ldmia r14, {r2-r9}\r
+\r
+ // If length is less than 32 then disable optim\r
+ it ls\r
+ movls r0, #0\r
+\r
+ cmp r12, #0\r
+\r
+ // Optimized Overlapped - Write 32 bytes\r
+ sub r10, r10, #32\r
+ stmia r10, {r2-r9}\r
+\r
+ // while (length != 0)\r
+ bne memcopy_overlapped\r
+ b memcopy_end\r
+\r
+memcopy_default_non_optim:\r
+ // Byte copy\r
+ ldrb r3, [r14], #1\r
+ sub r12, r12, #1\r
+ strb r3, [r10], #1\r
+\r
+memcopy_default:\r
+ cmp r12, #0\r
+ beq memcopy_end\r
+\r
+// r10 = dest, r14 = source\r
+memcopy_default_loop:\r
+ cmp r0, #0\r
+ beq memcopy_default_non_optim\r
+\r
+ // Optimized memcopy - Read 32 Bytes\r
+ sub r12, r12, #32\r
+ cmp r12, #31\r
+ ldmia r14!, {r2-r9}\r
+\r
+ // If length is less than 32 then disable optim\r
+ it ls\r
+ movls r0, #0\r
+\r
+ cmp r12, #0\r
+\r
+ // Optimized memcopy - Write 32 Bytes\r
+ stmia r10!, {r2-r9}\r
+\r
+ // while (length != 0)\r
+ bne memcopy_default_loop\r
+\r
+memcopy_end:\r
+ mov r0, r11\r
+ pop {r4-r11, pc}\r
--- /dev/null
+;------------------------------------------------------------------------------\r
+;\r
+; CopyMem() worker for ARM\r
+;\r
+; This file started out as C code that did 64 bit moves if the buffer was\r
+; 32-bit aligned, else it does a byte copy. It also does a byte copy for\r
+; any trailing bytes. It was updated to do 32-byte copies using stm/ldm.\r
+;\r
+; Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>\r
+; Copyright (c) 2016, Linaro Ltd. All rights reserved.<BR>\r
+; This program and the accompanying materials\r
+; are licensed and made available under the terms and conditions of the BSD License\r
+; which accompanies this distribution. The full text of the license may be found at\r
+; http://opensource.org/licenses/bsd-license.php\r
+;\r
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+;\r
+;------------------------------------------------------------------------------\r
+\r
+ EXPORT InternalMemCopyMem\r
+  AREA CopyMem, CODE, READONLY\r
+ THUMB\r
+\r
+InternalMemCopyMem\r
+ stmfd sp!, {r4-r11, lr}\r
+  ; Save the input parameters in extra registers (r11 = destination for the return value, r10 = destination, r14 = source, r12 = length)\r
+ mov r11, r0\r
+ mov r10, r0\r
+ mov r12, r2\r
+ mov r14, r1\r
+\r
+memcopy_check_overlapped\r
+ cmp r11, r1\r
+  ; If (dest < source)\r
+ bcc memcopy_check_optim_default\r
+\r
+  ; If (source + length < dest)\r
+ rsb r3, r1, r11\r
+ cmp r12, r3\r
+ bcc memcopy_check_optim_default\r
+ b memcopy_check_optim_overlap\r
+\r
+memcopy_check_optim_default\r
+  ; Check if we can use an optimized path ((length >= 32) && destination 16-byte aligned && source 16-byte aligned) for the memcopy (optimized path if r0 == 1)\r
+ tst r0, #0xF\r
+ movne r0, #0\r
+ bne memcopy_default\r
+ tst r1, #0xF\r
+ movne r3, #0\r
+ moveq r3, #1\r
+ cmp r2, #31\r
+ movls r0, #0\r
+ andhi r0, r3, #1\r
+ b memcopy_default\r
+\r
+memcopy_check_optim_overlap\r
+  ; r10 = dest_end, r14 = source_end\r
+ add r10, r11, r12\r
+ add r14, r12, r1\r
+\r
+  ; Are we in the optimized case ((length >= 32) && dest_end 16-byte aligned && source_end 16-byte aligned)\r
+ cmp r2, #31\r
+ movls r0, #0\r
+ movhi r0, #1\r
+ tst r10, #0xF\r
+ movne r0, #0\r
+ tst r14, #0xF\r
+ movne r0, #0\r
+ b memcopy_overlapped\r
+\r
+memcopy_overlapped_non_optim\r
+  ; We read 1 byte from the end of the source buffer\r
+ sub r3, r14, #1\r
+ sub r12, r12, #1\r
+ ldrb r3, [r3, #0]\r
+ sub r2, r10, #1\r
+ cmp r12, #0\r
+  ; We write 1 byte at the end of the dest buffer\r
+ sub r10, r10, #1\r
+ sub r14, r14, #1\r
+ strb r3, [r2, #0]\r
+ bne memcopy_overlapped_non_optim\r
+ b memcopy_end\r
+\r
+; r10 = dest_end, r14 = source_end\r
+memcopy_overlapped\r
+  ; Are we in the optimized case ?\r
+ cmp r0, #0\r
+ beq memcopy_overlapped_non_optim\r
+\r
+  ; Optimized Overlapped - Read 32 bytes\r
+ sub r14, r14, #32\r
+ sub r12, r12, #32\r
+ cmp r12, #31\r
+ ldmia r14, {r2-r9}\r
+\r
+  ; If length is less than 32 then disable optim\r
+ movls r0, #0\r
+\r
+ cmp r12, #0\r
+\r
+  ; Optimized Overlapped - Write 32 bytes\r
+ sub r10, r10, #32\r
+ stmia r10, {r2-r9}\r
+\r
+  ; while (length != 0)\r
+ bne memcopy_overlapped\r
+ b memcopy_end\r
+\r
+memcopy_default_non_optim\r
+  ; Byte copy\r
+ ldrb r3, [r14], #1\r
+ sub r12, r12, #1\r
+ strb r3, [r10], #1\r
+\r
+memcopy_default\r
+ cmp r12, #0\r
+ beq memcopy_end\r
+\r
+; r10 = dest, r14 = source\r
+memcopy_default_loop\r
+ cmp r0, #0\r
+ beq memcopy_default_non_optim\r
+\r
+  ; Optimized memcopy - Read 32 Bytes\r
+ sub r12, r12, #32\r
+ cmp r12, #31\r
+ ldmia r14!, {r2-r9}\r
+\r
+  ; If length is less than 32 then disable optim\r
+ movls r0, #0\r
+\r
+ cmp r12, #0\r
+\r
+  ; Optimized memcopy - Write 32 Bytes\r
+ stmia r10!, {r2-r9}\r
+\r
+  ; while (length != 0)\r
+ bne memcopy_default_loop\r
+\r
+memcopy_end\r
+ mov r0, r11\r
+ ldmfd sp!, {r4-r11, pc}\r
+\r
+ END\r
+\r
--- /dev/null
+// Copyright (c) 2010-2011, Linaro Limited\r
+// All rights reserved.\r
+//\r
+// Redistribution and use in source and binary forms, with or without\r
+// modification, are permitted provided that the following conditions\r
+// are met:\r
+//\r
+// * Redistributions of source code must retain the above copyright\r
+// notice, this list of conditions and the following disclaimer.\r
+//\r
+// * Redistributions in binary form must reproduce the above copyright\r
+// notice, this list of conditions and the following disclaimer in the\r
+// documentation and/or other materials provided with the distribution.\r
+//\r
+// * Neither the name of Linaro Limited nor the names of its\r
+// contributors may be used to endorse or promote products derived\r
+// from this software without specific prior written permission.\r
+//\r
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\r
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\r
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\r
+// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\r
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\r
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\r
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
+//\r
+\r
+//\r
+// Written by Dave Gilbert <david.gilbert@linaro.org>\r
+//\r
+// This memchr routine is optimised on a Cortex-A9 and should work on\r
+// all ARMv7 processors. It has a fast path for short sizes, and has\r
+// an optimised path for large data sets; the worst case is finding the\r
+// match early in a large data set.\r
+//\r
+\r
+\r
+// 2011-02-07 david.gilbert@linaro.org\r
+// Extracted from local git a5b438d861\r
+// 2011-07-14 david.gilbert@linaro.org\r
+// Import endianness fix from local git ea786f1b\r
+// 2011-12-07 david.gilbert@linaro.org\r
+// Removed unneeded cbz from align loop\r
+\r
+// this lets us check a flag in a 00/ff byte easily in either endianness\r
+#define CHARTSTMASK(c) 1<<(c*8)\r
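+// e.g. CHARTSTMASK(1) == 0x100: the low bit of the 00/ff flag byte for the\r
+// byte at index 1 of a loaded word.\r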
+\r
+ .text\r
+ .thumb\r
+ .syntax unified\r
+\r
+ .type ASM_PFX(InternalMemScanMem8), %function\r
+ASM_GLOBAL ASM_PFX(InternalMemScanMem8)\r
+ASM_PFX(InternalMemScanMem8):\r
+ // r0 = start of memory to scan\r
+ // r1 = length\r
+ // r2 = character to look for\r
+ // returns r0 = pointer to character or NULL if not found\r
+ uxtb r2, r2 // Don't think we can trust the caller to actually pass a char\r
+\r
+ cmp r1, #16 // If it's short don't bother with anything clever\r
+ blt 20f\r
+\r
+ tst r0, #7 // If it's already aligned skip the next bit\r
+ beq 10f\r
+\r
+ // Work up to an aligned point\r
+5:\r
+ ldrb r3, [r0],#1\r
+ subs r1, r1, #1\r
+ cmp r3, r2\r
+ beq 50f // If it matches exit found\r
+ tst r0, #7\r
+ bne 5b // If not aligned yet then do next byte\r
+\r
+10:\r
+ // At this point, we are aligned, we know we have at least 8 bytes to work with\r
+ push {r4-r7}\r
+ orr r2, r2, r2, lsl #8 // expand the match word across to all bytes\r
+ orr r2, r2, r2, lsl #16\r
+ bic r4, r1, #7 // Number of double words to work with\r
+ mvns r7, #0 // all F's\r
+ movs r3, #0\r
+\r
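+  // Each iteration below checks 8 bytes: after the XOR, bytes equal to the\r
+  // (replicated) target become 0x00; uadd8 with 0xffffffff sets a per-byte GE\r
+  // flag only for bytes that are still non-zero, and sel then builds a word\r
+  // that is 0xff in every matching byte position and 0x00 elsewhere.\r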
+15:\r
+ ldmia r0!, {r5,r6}\r
+ subs r4, r4, #8\r
+ eor r5, r5, r2 // Get it so that r5,r6 have 00's where the bytes match the target\r
+ eor r6, r6, r2\r
+ uadd8 r5, r5, r7 // Parallel add 0xff - sets the GE bits for anything that wasn't 0\r
+  sel r5, r3, r7 // bytes are 00 for non-00 bytes, or ff for 00 bytes - NOTE INVERSION\r
+ uadd8 r6, r6, r7 // Parallel add 0xff - sets the GE bits for anything that wasn't 0\r
+  sel r6, r5, r7 // chained: bytes are 00 for non-00 bytes, or ff for 00 bytes - NOTE INVERSION\r
+ cbnz r6, 60f\r
+ bne 15b // (Flags from the subs above) If not run out of bytes then go around again\r
+\r
+ pop {r4-r7}\r
+ and r2, r2, #0xff // Get r2 back to a single character from the expansion above\r
+ and r1, r1, #7 // Leave the count remaining as the number after the double words have been done\r
+\r
+20:\r
+ cbz r1, 40f // 0 length or hit the end already then not found\r
+\r
+21: // Post aligned section, or just a short call\r
+ ldrb r3, [r0], #1\r
+ subs r1, r1, #1\r
+ eor r3, r3, r2 // r3 = 0 if match - doesn't break flags from sub\r
+ cbz r3, 50f\r
+ bne 21b // on r1 flags\r
+\r
+40:\r
+ movs r0, #0 // not found\r
+ bx lr\r
+\r
+50:\r
+ subs r0, r0, #1 // found\r
+ bx lr\r
+\r
+60: // We're here because the fast path found a hit - now we have to track down exactly which word it was\r
+ // r0 points to the start of the double word after the one that was tested\r
+ // r5 has the 00/ff pattern for the first word, r6 has the chained value\r
+ cmp r5, #0\r
+ itte eq\r
+ moveq r5, r6 // the end is in the 2nd word\r
+ subeq r0, r0, #3 // Points to 2nd byte of 2nd word\r
+ subne r0, r0, #7 // or 2nd byte of 1st word\r
+\r
+ // r0 currently points to the 3rd byte of the word containing the hit\r
+ tst r5, #CHARTSTMASK(0) // 1st character\r
+ bne 61f\r
+ adds r0, r0, #1\r
+ tst r5, #CHARTSTMASK(1) // 2nd character\r
+ ittt eq\r
+ addeq r0, r0 ,#1\r
+ tsteq r5, #(3 << 15) // 2nd & 3rd character\r
+ // If not the 3rd must be the last one\r
+ addeq r0, r0, #1\r
+\r
+61:\r
+ pop {r4-r7}\r
+ subs r0, r0, #1\r
+ bx lr\r
--- /dev/null
+; Copyright (c) 2010-2011, Linaro Limited\r
+; All rights reserved.\r
+;\r
+; Redistribution and use in source and binary forms, with or without\r
+; modification, are permitted provided that the following conditions\r
+; are met:\r
+;\r
+; * Redistributions of source code must retain the above copyright\r
+; notice, this list of conditions and the following disclaimer.\r
+;\r
+; * Redistributions in binary form must reproduce the above copyright\r
+; notice, this list of conditions and the following disclaimer in the\r
+; documentation and/or other materials provided with the distribution.\r
+;\r
+; * Neither the name of Linaro Limited nor the names of its\r
+; contributors may be used to endorse or promote products derived\r
+; from this software without specific prior written permission.\r
+;\r
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\r
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\r
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\r
+; HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\r
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\r
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\r
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
+;\r
+\r
+;\r
+; Written by Dave Gilbert <david.gilbert@linaro.org>\r
+;\r
+; This memchr routine is optimised on a Cortex-A9 and should work on\r
+; all ARMv7 processors. It has a fast path for short sizes, and has\r
+; an optimised path for large data sets; the worst case is finding the\r
+; match early in a large data set.\r
+;\r
+\r
+\r
+; 2011-02-07 david.gilbert@linaro.org\r
+; Extracted from local git a5b438d861\r
+; 2011-07-14 david.gilbert@linaro.org\r
+; Import endianness fix from local git ea786f1b\r
+; 2011-12-07 david.gilbert@linaro.org\r
+; Removed unneeded cbz from align loop\r
+\r
+; this lets us check a flag in a 00/ff byte easily in either endianness\r
+#define CHARTSTMASK(c) 1<<(c*8)\r
+\r
+ EXPORT InternalMemScanMem8\r
+ AREA ScanMem, CODE, READONLY\r
+ THUMB\r
+\r
+InternalMemScanMem8\r
+ ; r0 = start of memory to scan\r
+ ; r1 = length\r
+ ; r2 = character to look for\r
+ ; returns r0 = pointer to character or NULL if not found\r
+ uxtb r2, r2 ; Don't think we can trust the caller to actually pass a char\r
+\r
+ cmp r1, #16 ; If it's short don't bother with anything clever\r
+ blt L20\r
+\r
+ tst r0, #7 ; If it's already aligned skip the next bit\r
+ beq L10\r
+\r
+ ; Work up to an aligned point\r
+L5\r
+ ldrb r3, [r0],#1\r
+ subs r1, r1, #1\r
+ cmp r3, r2\r
+ beq L50 ; If it matches exit found\r
+ tst r0, #7\r
+ bne L5 ; If not aligned yet then do next byte\r
+\r
+L10\r
+ ; At this point, we are aligned, we know we have at least 8 bytes to work with\r
+ push {r4-r7}\r
+ orr r2, r2, r2, lsl #8 ; expand the match word across to all bytes\r
+ orr r2, r2, r2, lsl #16\r
+ bic r4, r1, #7 ; Number of double words to work with\r
+ mvns r7, #0 ; all F's\r
+ movs r3, #0\r
+\r
+L15\r
+ ldmia r0!, {r5,r6}\r
+ subs r4, r4, #8\r
+ eor r5, r5, r2 ; Get it so that r5,r6 have 00's where the bytes match the target\r
+ eor r6, r6, r2\r
+ uadd8 r5, r5, r7 ; Parallel add 0xff - sets the GE bits for anything that wasn't 0\r
+  sel r5, r3, r7 ; bytes are 00 for non-00 bytes, or ff for 00 bytes - NOTE INVERSION\r
+ uadd8 r6, r6, r7 ; Parallel add 0xff - sets the GE bits for anything that wasn't 0\r
+  sel r6, r5, r7 ; chained: bytes are 00 for non-00 bytes, or ff for 00 bytes - NOTE INVERSION\r
+ cbnz r6, L60\r
+ bne L15 ; (Flags from the subs above) If not run out of bytes then go around again\r
+\r
+ pop {r4-r7}\r
+ and r2, r2, #0xff ; Get r2 back to a single character from the expansion above\r
+ and r1, r1, #7 ; Leave the count remaining as the number after the double words have been done\r
+\r
+L20\r
+ cbz r1, L40 ; 0 length or hit the end already then not found\r
+\r
+L21 ; Post aligned section, or just a short call\r
+ ldrb r3, [r0], #1\r
+ subs r1, r1, #1\r
+ eor r3, r3, r2 ; r3 = 0 if match - doesn't break flags from sub\r
+ cbz r3, L50\r
+ bne L21 ; on r1 flags\r
+\r
+L40\r
+ movs r0, #0 ; not found\r
+ bx lr\r
+\r
+L50\r
+ subs r0, r0, #1 ; found\r
+ bx lr\r
+\r
+L60 ; We're here because the fast path found a hit - now we have to track down exactly which word it was\r
+ ; r0 points to the start of the double word after the one that was tested\r
+ ; r5 has the 00/ff pattern for the first word, r6 has the chained value\r
+ cmp r5, #0\r
+ itte eq\r
+ moveq r5, r6 ; the end is in the 2nd word\r
+ subeq r0, r0, #3 ; Points to 2nd byte of 2nd word\r
+ subne r0, r0, #7 ; or 2nd byte of 1st word\r
+\r
+ ; r0 currently points to the 3rd byte of the word containing the hit\r
+ tst r5, #CHARTSTMASK(0) ; 1st character\r
+ bne L61\r
+ adds r0, r0, #1\r
+ tst r5, #CHARTSTMASK(1) ; 2nd character\r
+ ittt eq\r
+ addeq r0, r0 ,#1\r
+ tsteq r5, #(3 << 15) ; 2nd & 3rd character\r
+ ; If not the 3rd must be the last one\r
+ addeq r0, r0, #1\r
+\r
+L61\r
+ pop {r4-r7}\r
+ subs r0, r0, #1\r
+ bx lr\r
+\r
+ END\r
+\r
--- /dev/null
+/** @file\r
+ Architecture Independent Base Memory Library Implementation.\r
+\r
+ The following BaseMemoryLib instances contain the same copy of this file:\r
+ BaseMemoryLib\r
+ PeiMemoryLib\r
+ UefiMemoryLib\r
+\r
+ Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>\r
+ This program and the accompanying materials\r
+ are licensed and made available under the terms and conditions of the BSD License\r
+ which accompanies this distribution. The full text of the license may be found at\r
+ http://opensource.org/licenses/bsd-license.php.\r
+\r
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include "../MemLibInternals.h"\r
+\r
+/**\r
+ Scans a target buffer for a 16-bit value, and returns a pointer to the\r
+ matching 16-bit value in the target buffer.\r
+\r
+ @param Buffer The pointer to the target buffer to scan.\r
+  @param Length The count of 16-bit values to scan. Must be non-zero.\r
+ @param Value The value to search for in the target buffer.\r
+\r
+ @return The pointer to the first occurrence, or NULL if not found.\r
+\r
+**/\r
+CONST VOID *\r
+EFIAPI\r
+InternalMemScanMem16 (\r
+ IN CONST VOID *Buffer,\r
+ IN UINTN Length,\r
+ IN UINT16 Value\r
+ )\r
+{\r
+ CONST UINT16 *Pointer;\r
+\r
+ Pointer = (CONST UINT16*)Buffer;\r
+ do {\r
+ if (*Pointer == Value) {\r
+ return Pointer;\r
+ }\r
+ ++Pointer;\r
+ } while (--Length != 0);\r
+ return NULL;\r
+}\r
+\r
+/**\r
+ Scans a target buffer for a 32-bit value, and returns a pointer to the\r
+ matching 32-bit value in the target buffer.\r
+\r
+ @param Buffer The pointer to the target buffer to scan.\r
+  @param Length The count of 32-bit values to scan. Must be non-zero.\r
+ @param Value The value to search for in the target buffer.\r
+\r
+ @return The pointer to the first occurrence, or NULL if not found.\r
+\r
+**/\r
+CONST VOID *\r
+EFIAPI\r
+InternalMemScanMem32 (\r
+ IN CONST VOID *Buffer,\r
+ IN UINTN Length,\r
+ IN UINT32 Value\r
+ )\r
+{\r
+ CONST UINT32 *Pointer;\r
+\r
+ Pointer = (CONST UINT32*)Buffer;\r
+ do {\r
+ if (*Pointer == Value) {\r
+ return Pointer;\r
+ }\r
+ ++Pointer;\r
+ } while (--Length != 0);\r
+ return NULL;\r
+}\r
+\r
+/**\r
+ Scans a target buffer for a 64-bit value, and returns a pointer to the\r
+ matching 64-bit value in the target buffer.\r
+\r
+ @param Buffer The pointer to the target buffer to scan.\r
+  @param Length The count of 64-bit values to scan. Must be non-zero.\r
+ @param Value The value to search for in the target buffer.\r
+\r
+ @return The pointer to the first occurrence, or NULL if not found.\r
+\r
+**/\r
+CONST VOID *\r
+EFIAPI\r
+InternalMemScanMem64 (\r
+ IN CONST VOID *Buffer,\r
+ IN UINTN Length,\r
+ IN UINT64 Value\r
+ )\r
+{\r
+ CONST UINT64 *Pointer;\r
+\r
+ Pointer = (CONST UINT64*)Buffer;\r
+ do {\r
+ if (*Pointer == Value) {\r
+ return Pointer;\r
+ }\r
+ ++Pointer;\r
+ } while (--Length != 0);\r
+ return NULL;\r
+}\r
+\r
+/**\r
+ Checks whether the contents of a buffer are all zeros.\r
+\r
+ @param Buffer The pointer to the buffer to be checked.\r
+ @param Length The size of the buffer (in bytes) to be checked.\r
+\r
+ @retval TRUE Contents of the buffer are all zeros.\r
+ @retval FALSE Contents of the buffer are not all zeros.\r
+\r
+**/\r
+BOOLEAN\r
+EFIAPI\r
+InternalMemIsZeroBuffer (\r
+ IN CONST VOID *Buffer,\r
+ IN UINTN Length\r
+ )\r
+{\r
+ CONST UINT8 *BufferData;\r
+ UINTN Index;\r
+\r
+ BufferData = Buffer;\r
+ for (Index = 0; Index < Length; Index++) {\r
+ if (BufferData[Index] != 0) {\r
+ return FALSE;\r
+ }\r
+ }\r
+ return TRUE;\r
+}\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2016, Linaro Ltd. All rights reserved.<BR>\r
+#\r
+# This program and the accompanying materials are licensed and made available\r
+# under the terms and conditions of the BSD License which accompanies this\r
+# distribution. The full text of the license may be found at\r
+# http://opensource.org/licenses/bsd-license.php\r
+#\r
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+ .text\r
+ .thumb\r
+ .syntax unified\r
+ .align 5\r
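+// The five entry points below fall through into one another: ZeroMem sets the\r
+// fill value to 0, SetMem replicates the byte into a halfword, SetMem16\r
+// replicates the halfword into a word, SetMem32 duplicates the word into r3,\r
+// and SetMem64 (64-bit pattern in r2:r3) performs the actual stores.\r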
+ASM_GLOBAL ASM_PFX(InternalMemZeroMem)\r
+ASM_PFX(InternalMemZeroMem):\r
+ movs r2, #0\r
+\r
+ASM_GLOBAL ASM_PFX(InternalMemSetMem)\r
+ASM_PFX(InternalMemSetMem):\r
+ uxtb r2, r2\r
+ orr r2, r2, r2, lsl #8\r
+\r
+ASM_GLOBAL ASM_PFX(InternalMemSetMem16)\r
+ASM_PFX(InternalMemSetMem16):\r
+ uxth r2, r2\r
+ orr r2, r2, r2, lsl #16\r
+\r
+ASM_GLOBAL ASM_PFX(InternalMemSetMem32)\r
+ASM_PFX(InternalMemSetMem32):\r
+ mov r3, r2\r
+\r
+ASM_GLOBAL ASM_PFX(InternalMemSetMem64)\r
+ASM_PFX(InternalMemSetMem64):\r
+ push {r4, lr}\r
+ cmp r1, #16 // fewer than 16 bytes of input?\r
+ add r1, r1, r0 // r1 := dst + length\r
+ add lr, r0, #16\r
+ blt 2f\r
+ bic lr, lr, #15 // align output pointer\r
+\r
+ str r2, [r0] // potentially unaligned store of 4 bytes\r
+ str r3, [r0, #4] // potentially unaligned store of 4 bytes\r
+ str r2, [r0, #8] // potentially unaligned store of 4 bytes\r
+ str r3, [r0, #12] // potentially unaligned store of 4 bytes\r
+ beq 1f\r
+\r
+0: add lr, lr, #16 // advance the output pointer by 16 bytes\r
+ subs r4, r1, lr // past the output?\r
+ blt 3f // break out of the loop\r
+ strd r2, r3, [lr, #-16] // aligned store of 16 bytes\r
+ strd r2, r3, [lr, #-8]\r
+ bne 0b // goto beginning of loop\r
+1: pop {r4, pc}\r
+\r
+2: subs r4, r1, lr\r
+3: adds r4, r4, #16\r
+ subs r1, r1, #8\r
+ cmp r4, #4 // between 4 and 15 bytes?\r
+ blt 4f\r
+ cmp r4, #8 // between 8 and 15 bytes?\r
+ str r2, [lr, #-16] // overlapping store of 4 + (4 + 4) + 4 bytes\r
+ itt gt\r
+ strgt r3, [lr, #-12]\r
+ strgt r2, [r1]\r
+ str r3, [r1, #4]\r
+ pop {r4, pc}\r
+\r
+4: cmp r4, #2 // 2 or 3 bytes?\r
+ strb r2, [lr, #-16] // store 1 byte\r
+ it ge\r
+ strhge r2, [r1, #6] // store 2 bytes\r
+ pop {r4, pc}\r
--- /dev/null
+;------------------------------------------------------------------------------\r
+;\r
+; Copyright (c) 2016, Linaro Ltd. All rights reserved.<BR>\r
+;\r
+; This program and the accompanying materials are licensed and made available\r
+; under the terms and conditions of the BSD License which accompanies this\r
+; distribution. The full text of the license may be found at\r
+; http://opensource.org/licenses/bsd-license.php\r
+;\r
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+;\r
+;------------------------------------------------------------------------------\r
+\r
+ EXPORT InternalMemZeroMem\r
+ EXPORT InternalMemSetMem\r
+ EXPORT InternalMemSetMem16\r
+ EXPORT InternalMemSetMem32\r
+ EXPORT InternalMemSetMem64\r
+\r
+ AREA SetMem, CODE, READONLY, CODEALIGN, ALIGN=5\r
+ THUMB\r
+\r
+InternalMemZeroMem\r
+ movs r2, #0\r
+\r
+InternalMemSetMem\r
+ uxtb r2, r2\r
+ orr r2, r2, r2, lsl #8\r
+\r
+InternalMemSetMem16\r
+ uxth r2, r2\r
+  orr r2, r2, r2, lsl #16\r
+\r
+InternalMemSetMem32\r
+ mov r3, r2\r
+\r
+InternalMemSetMem64\r
+ push {r4, lr}\r
+ cmp r1, #16 ; fewer than 16 bytes of input?\r
+ add r1, r1, r0 ; r1 := dst + length\r
+ add lr, r0, #16\r
+ blt L2\r
+ bic lr, lr, #15 ; align output pointer\r
+\r
+ str r2, [r0] ; potentially unaligned store of 4 bytes\r
+ str r3, [r0, #4] ; potentially unaligned store of 4 bytes\r
+ str r2, [r0, #8] ; potentially unaligned store of 4 bytes\r
+ str r3, [r0, #12] ; potentially unaligned store of 4 bytes\r
+ beq L1\r
+\r
+L0\r
+ add lr, lr, #16 ; advance the output pointer by 16 bytes\r
+ subs r4, r1, lr ; past the output?\r
+ blt L3 ; break out of the loop\r
+ strd r2, r3, [lr, #-16] ; aligned store of 16 bytes\r
+ strd r2, r3, [lr, #-8]\r
+ bne L0 ; goto beginning of loop\r
+L1\r
+ pop {r4, pc}\r
+\r
+L2\r
+ subs r4, r1, lr\r
+L3\r
+ adds r4, r4, #16\r
+ subs r1, r1, #8\r
+ cmp r4, #4 ; between 4 and 15 bytes?\r
+ blt L4\r
+ cmp r4, #8 ; between 8 and 15 bytes?\r
+ str r2, [lr, #-16] ; overlapping store of 4 + (4 + 4) + 4 bytes\r
+ itt gt\r
+ strgt r3, [lr, #-12]\r
+ strgt r2, [r1]\r
+ str r3, [r1, #4]\r
+ pop {r4, pc}\r
+\r
+L4\r
+ cmp r4, #2 ; 2 or 3 bytes?\r
+ strb r2, [lr, #-16] ; store 1 byte\r
+ it ge\r
+ strhge r2, [r1, #6] ; store 2 bytes\r
+ pop {r4, pc}\r
+\r
+ END\r
\r
\r
#\r
-# VALID_ARCHITECTURES = IA32 X64\r
+# VALID_ARCHITECTURES = IA32 X64 ARM\r
#\r
\r
[Sources]\r
Ia32/CopyMem.nasm\r
Ia32/CopyMem.asm\r
Ia32/IsZeroBuffer.nasm\r
- ScanMem64Wrapper.c\r
- ScanMem32Wrapper.c\r
- ScanMem16Wrapper.c\r
- ScanMem8Wrapper.c\r
- ZeroMemWrapper.c\r
- CompareMemWrapper.c\r
- SetMem64Wrapper.c\r
- SetMem32Wrapper.c\r
- SetMem16Wrapper.c\r
- SetMemWrapper.c\r
- CopyMemWrapper.c\r
- IsZeroBufferWrapper.c\r
- MemLibGuid.c\r
\r
[Sources.X64]\r
X64/ScanMem64.nasm\r
X64/CopyMem.asm\r
X64/CopyMem.S\r
X64/IsZeroBuffer.nasm\r
+\r
+[Sources.ARM]\r
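+  # The |GCC and |RVCT family tags restrict each file to builds that use the\r
+  # matching toolchain family; ScanMemGeneric.c is built for both.\r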
+ Arm/ScanMem.S |GCC\r
+ Arm/SetMem.S |GCC\r
+ Arm/CopyMem.S |GCC\r
+ Arm/CompareMem.S |GCC\r
+\r
+ Arm/ScanMem.asm |RVCT\r
+ Arm/SetMem.asm |RVCT\r
+ Arm/CopyMem.asm |RVCT\r
+ Arm/CompareMem.asm |RVCT\r
+\r
+ Arm/ScanMemGeneric.c\r
+\r
+[Sources]\r
ScanMem64Wrapper.c\r
ScanMem32Wrapper.c\r
ScanMem16Wrapper.c\r