From 1a49e2aa3c2b80f7e988c6cf5d7ae65e700d1d19 Mon Sep 17 00:00:00 2001
From: yi1 li
Date: Wed, 13 Apr 2022 15:02:00 +0800
Subject: [PATCH] CryptoPkg: Add intrinsics to support building ECC on IA32 Windows

REF:https://bugzilla.tianocore.org/show_bug.cgi?id=3679

These intrinsics are needed to build the OpenSSL library with ECC
ciphers under IA32 Windows. This patch adds implementations of the
_allmul and _allshr intrinsics, taken from Project Mu:
microsoft/mu_basecore@b55b341

Cc: Jiewen Yao
Cc: Jian J Wang
Signed-off-by: yi1 li
Reviewed-by: Jiewen Yao
---
 .../Library/IntrinsicLib/Ia32/MathLlmul.asm | 98 +++++++++++++++++++
 .../Library/IntrinsicLib/Ia32/MathLlshr.asm | 78 +++++++++++++++
 .../Library/IntrinsicLib/IntrinsicLib.inf   |  2 +
 3 files changed, 178 insertions(+)
 create mode 100644 CryptoPkg/Library/IntrinsicLib/Ia32/MathLlmul.asm
 create mode 100644 CryptoPkg/Library/IntrinsicLib/Ia32/MathLlshr.asm

diff --git a/CryptoPkg/Library/IntrinsicLib/Ia32/MathLlmul.asm b/CryptoPkg/Library/IntrinsicLib/Ia32/MathLlmul.asm
new file mode 100644
index 0000000000..341ea8a7bc
--- /dev/null
+++ b/CryptoPkg/Library/IntrinsicLib/Ia32/MathLlmul.asm
@@ -0,0 +1,98 @@
+;***
+;llmul.asm - long multiply routine
+;
+; Copyright (c) Microsoft Corporation. All rights reserved.
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+;Purpose:
+;   Defines long multiply routine
+;   Both signed and unsigned routines are the same, since multiplies
+;   work out the same in 2's complement
+;   creates the following routine:
+;       __allmul
+;
+;Original Implementation: MSVC 14.12.25827
+;
+;*******************************************************************************
+    .686
+    .model  flat,C
+    .code
+
+
+;***
+;llmul - long multiply routine
+;
+;Purpose:
+;   Does a long multiply (same for signed/unsigned)
+;   Parameters are not changed.
+;
+;Entry:
+;   Parameters are passed on the stack:
+;       1st pushed: multiplier (QWORD)
+;       2nd pushed: multiplicand (QWORD)
+;
+;Exit:
+;   EDX:EAX - product of multiplier and multiplicand
+;   NOTE: parameters are removed from the stack
+;
+;Uses:
+;   ECX
+;
+;Exceptions:
+;
+;*******************************************************************************
+_allmul PROC NEAR
+
+A       EQU     [esp + 4]       ; stack address of a
+B       EQU     [esp + 12]      ; stack address of b
+
+HIGH_PART  EQU  [4]             ;
+LOW_PART   EQU  [0]
+
+;
+;       AHI, BHI : upper 32 bits of A and B
+;       ALO, BLO : lower 32 bits of A and B
+;
+;             ALO * BLO
+;       ALO * BHI
+; +     BLO * AHI
+; ---------------------
+;
+
+        mov     eax,HIGH_PART(A)
+        mov     ecx,HIGH_PART(B)
+        or      ecx,eax         ;test for both high dwords zero.
+        mov     ecx,LOW_PART(B)
+        jnz     short hard      ;if either is nonzero, take the long path
+
+        mov     eax,LOW_PART(A)
+        mul     ecx             ;both are zero, just mult ALO and BLO
+
+        ret     16              ; callee restores the stack
+
+hard:
+        push    ebx
+
+; must redefine A and B since esp has been altered
+
+A2      EQU     [esp + 8]       ; stack address of a
+B2      EQU     [esp + 16]      ; stack address of b
+
+        mul     ecx             ;eax has AHI, ecx has BLO, so AHI * BLO
+        mov     ebx,eax         ;save result
+
+        mov     eax,LOW_PART(A2)
+        mul     dword ptr HIGH_PART(B2) ;ALO * BHI
+        add     ebx,eax         ;ebx = ((ALO * BHI) + (AHI * BLO))
+
+        mov     eax,LOW_PART(A2);ecx = BLO
+        mul     ecx             ;so edx:eax = ALO*BLO
+        add     edx,ebx         ;now edx has all the LO*HI stuff
+
+        pop     ebx
+
+        ret     16              ; callee restores the stack
+
+_allmul ENDP
+
+        end
diff --git a/CryptoPkg/Library/IntrinsicLib/Ia32/MathLlshr.asm b/CryptoPkg/Library/IntrinsicLib/Ia32/MathLlshr.asm
new file mode 100644
index 0000000000..ab8294580f
--- /dev/null
+++ b/CryptoPkg/Library/IntrinsicLib/Ia32/MathLlshr.asm
@@ -0,0 +1,78 @@
+;***
+;llshr.asm - long shift right
+;
+; Copyright (c) Microsoft Corporation. All rights reserved.
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+;Purpose:
+;   define signed long shift right routine
+;       __allshr
+;
+;Original Implementation: MSVC 14.12.25827
+;
+;*******************************************************************************
+    .686
+    .model  flat,C
+    .code
+
+
+
+;***
+;llshr - long shift right
+;
+;Purpose:
+;   Does a signed Long Shift Right
+;   Shifts a long right any number of bits.
+;
+;Entry:
+;   EDX:EAX - long value to be shifted
+;   CL      - number of bits to shift by
+;
+;Exit:
+;   EDX:EAX - shifted value
+;
+;Uses:
+;   CL is destroyed.
+;
+;Exceptions:
+;
+;*******************************************************************************
+_allshr PROC NEAR
+
+;
+; Handle shifts of 64 bits or more (if shifting 64 bits or more, the result
+; depends only on the high order bit of edx).
+;
+        cmp     cl,64
+        jae     short RETSIGN
+
+;
+; Handle shifts of between 0 and 31 bits
+;
+        cmp     cl, 32
+        jae     short MORE32
+        shrd    eax,edx,cl
+        sar     edx,cl
+        ret
+
+;
+; Handle shifts of between 32 and 63 bits
+;
+MORE32:
+        mov     eax,edx
+        sar     edx,31
+        and     cl,31
+        sar     eax,cl
+        ret
+
+;
+; Return double precision 0 or -1, depending on the sign of edx
+;
+RETSIGN:
+        sar     edx,31
+        mov     eax,edx
+        ret
+
+_allshr ENDP
+
+        end
diff --git a/CryptoPkg/Library/IntrinsicLib/IntrinsicLib.inf b/CryptoPkg/Library/IntrinsicLib/IntrinsicLib.inf
index fcbb93316c..86e74b57b1 100644
--- a/CryptoPkg/Library/IntrinsicLib/IntrinsicLib.inf
+++ b/CryptoPkg/Library/IntrinsicLib/IntrinsicLib.inf
@@ -30,6 +30,8 @@
   Ia32/MathLShiftS64.c      | MSFT
   Ia32/MathRShiftU64.c      | MSFT
   Ia32/MathFtol.c           | MSFT
+  Ia32/MathLlmul.asm        | MSFT
+  Ia32/MathLlshr.asm        | MSFT
 
   Ia32/MathLShiftS64.c      | INTEL
   Ia32/MathRShiftU64.c      | INTEL
-- 
2.39.2
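
Note (illustrative, not part of the patch): when targeting IA32, the MSVC
code generator typically does not inline 64-bit multiplies or variable-count
signed 64-bit right shifts; it emits calls to compiler helper routines named
__allmul and __allshr. Firmware is not linked against the MSVC CRT that
normally provides them, so OpenSSL's 64-bit ECC arithmetic fails to link
unless IntrinsicLib supplies the symbols. A minimal C sketch of the kind of
code that triggers the calls (the function names here are made up for the
example):

  #include <stdint.h>

  uint64_t
  MulU64 (
    uint64_t  A,
    uint64_t  B
    )
  {
    return A * B;           // IA32 MSVC build emits "call __allmul"
  }

  int64_t
  ShiftRightS64 (
    int64_t  Value,
    uint8_t  Count
    )
  {
    return Value >> Count;  // variable signed shift emits "call __allshr"
  }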
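
For the multiply itself, the routine builds a 64x64 -> 64-bit product out of
32x32 -> 64-bit MUL instructions. The AHI*BHI term would land entirely above
bit 63 and is dropped, and only the low 32 bits of the two cross terms can
reach the result, which is why the assembly adds them with plain 32-bit
arithmetic. A C sketch of the same decomposition (an illustration of the
algorithm, not code from the patch):

  #include <stdint.h>

  uint64_t
  AllMulSketch (
    uint64_t  A,
    uint64_t  B
    )
  {
    uint32_t  ALo = (uint32_t)A;
    uint32_t  AHi = (uint32_t)(A >> 32);
    uint32_t  BLo = (uint32_t)B;
    uint32_t  BHi = (uint32_t)(B >> 32);

    //
    // The cross terms only contribute to the upper 32 bits, so letting the
    // 32-bit sum wrap is exactly the modulo-2^64 behavior we want.
    //
    uint32_t  Cross = ALo * BHi + AHi * BLo;

    return (uint64_t)ALo * BLo + ((uint64_t)Cross << 32);
  }

This also explains the fast path in the assembly: when both high dwords are
zero the cross terms vanish, and a single MUL of ALO and BLO produces the
whole product.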
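
The shift routine splits a signed 64-bit right shift into three cases on the
count in CL: 0-31 (shift across the dword boundary with SHRD and SAR), 32-63
(the high dword moves into the low dword and the high dword becomes pure sign
fill), and 64 or more (the whole result is sign fill). A C sketch of the same
case split (again only an illustration; it uses 64-bit shifts itself, so it
could not replace the assembly on this toolchain):

  #include <stdint.h>

  int64_t
  AllShrSketch (
    int64_t  Value,
    uint8_t  Count
    )
  {
    int32_t   Hi = (int32_t)(Value >> 32);
    uint32_t  Lo = (uint32_t)Value;
    uint32_t  NewHi, NewLo;

    if (Count >= 64) {
      NewHi = NewLo = (uint32_t)(Hi >> 31);      // all sign bits: 0 or -1
    } else if (Count >= 32) {
      NewHi = (uint32_t)(Hi >> 31);              // sign fill, like SAR EDX,31
      NewLo = (uint32_t)(Hi >> (Count & 31));    // arithmetic shift of the high dword
    } else if (Count == 0) {
      NewHi = (uint32_t)Hi;                      // avoid an undefined 32-bit shift below
      NewLo = Lo;
    } else {
      NewHi = (uint32_t)(Hi >> Count);
      NewLo = (Lo >> Count) | ((uint32_t)Hi << (32 - Count));  // what SHRD does
    }

    return (int64_t)(((uint64_t)NewHi << 32) | NewLo);
  }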