/** @file\r
IA32, X64 and IPF Specific relocation fixups\r
\r
-Copyright (c) 2004 - 2014, Intel Corporation. All rights reserved.<BR>\r
+Copyright (c) 2004 - 2018, Intel Corporation. All rights reserved.<BR>\r
Portions Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>\r
-This program and the accompanying materials \r
-are licensed and made available under the terms and conditions of the BSD License \r
-which accompanies this distribution. The full text of the license may be found at \r
-http://opensource.org/licenses/bsd-license.php \r
- \r
-THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, \r
-WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. \r
+This program and the accompanying materials\r
+are licensed and made available under the terms and conditions of the BSD License\r
+which accompanies this distribution. The full text of the license may be found at\r
+http://opensource.org/licenses/bsd-license.php\r
+\r
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
\r
--*/\r
\r
*(UINT32*)Address = (*(UINT32*)Address & ~(((1 << Size) - 1) << InstPos)) | \\r
((UINT32)((((UINT64)Value >> ValPos) & (((UINT64)1 << Size) - 1))) << InstPos)\r
\r
-#define IMM64_IMM7B_INST_WORD_X 3 \r
-#define IMM64_IMM7B_SIZE_X 7 \r
-#define IMM64_IMM7B_INST_WORD_POS_X 4 \r
-#define IMM64_IMM7B_VAL_POS_X 0 \r
-\r
-#define IMM64_IMM9D_INST_WORD_X 3 \r
-#define IMM64_IMM9D_SIZE_X 9 \r
-#define IMM64_IMM9D_INST_WORD_POS_X 18 \r
-#define IMM64_IMM9D_VAL_POS_X 7 \r
-\r
-#define IMM64_IMM5C_INST_WORD_X 3 \r
-#define IMM64_IMM5C_SIZE_X 5 \r
-#define IMM64_IMM5C_INST_WORD_POS_X 13 \r
-#define IMM64_IMM5C_VAL_POS_X 16 \r
-\r
-#define IMM64_IC_INST_WORD_X 3 \r
-#define IMM64_IC_SIZE_X 1 \r
-#define IMM64_IC_INST_WORD_POS_X 12 \r
-#define IMM64_IC_VAL_POS_X 21 \r
-\r
-#define IMM64_IMM41a_INST_WORD_X 1 \r
-#define IMM64_IMM41a_SIZE_X 10 \r
-#define IMM64_IMM41a_INST_WORD_POS_X 14 \r
-#define IMM64_IMM41a_VAL_POS_X 22 \r
-\r
-#define IMM64_IMM41b_INST_WORD_X 1 \r
-#define IMM64_IMM41b_SIZE_X 8 \r
-#define IMM64_IMM41b_INST_WORD_POS_X 24 \r
-#define IMM64_IMM41b_VAL_POS_X 32 \r
-\r
-#define IMM64_IMM41c_INST_WORD_X 2 \r
-#define IMM64_IMM41c_SIZE_X 23 \r
-#define IMM64_IMM41c_INST_WORD_POS_X 0 \r
-#define IMM64_IMM41c_VAL_POS_X 40 \r
-\r
-#define IMM64_SIGN_INST_WORD_X 3 \r
-#define IMM64_SIGN_SIZE_X 1 \r
-#define IMM64_SIGN_INST_WORD_POS_X 27 \r
-#define IMM64_SIGN_VAL_POS_X 63 \r
+#define IMM64_IMM7B_INST_WORD_X 3\r
+#define IMM64_IMM7B_SIZE_X 7\r
+#define IMM64_IMM7B_INST_WORD_POS_X 4\r
+#define IMM64_IMM7B_VAL_POS_X 0\r
+\r
+#define IMM64_IMM9D_INST_WORD_X 3\r
+#define IMM64_IMM9D_SIZE_X 9\r
+#define IMM64_IMM9D_INST_WORD_POS_X 18\r
+#define IMM64_IMM9D_VAL_POS_X 7\r
+\r
+#define IMM64_IMM5C_INST_WORD_X 3\r
+#define IMM64_IMM5C_SIZE_X 5\r
+#define IMM64_IMM5C_INST_WORD_POS_X 13\r
+#define IMM64_IMM5C_VAL_POS_X 16\r
+\r
+#define IMM64_IC_INST_WORD_X 3\r
+#define IMM64_IC_SIZE_X 1\r
+#define IMM64_IC_INST_WORD_POS_X 12\r
+#define IMM64_IC_VAL_POS_X 21\r
+\r
+#define IMM64_IMM41a_INST_WORD_X 1\r
+#define IMM64_IMM41a_SIZE_X 10\r
+#define IMM64_IMM41a_INST_WORD_POS_X 14\r
+#define IMM64_IMM41a_VAL_POS_X 22\r
+\r
+#define IMM64_IMM41b_INST_WORD_X 1\r
+#define IMM64_IMM41b_SIZE_X 8\r
+#define IMM64_IMM41b_INST_WORD_POS_X 24\r
+#define IMM64_IMM41b_VAL_POS_X 32\r
+\r
+#define IMM64_IMM41c_INST_WORD_X 2\r
+#define IMM64_IMM41c_SIZE_X 23\r
+#define IMM64_IMM41c_INST_WORD_POS_X 0\r
+#define IMM64_IMM41c_VAL_POS_X 40\r
+\r
+#define IMM64_SIGN_INST_WORD_X 3\r
+#define IMM64_SIGN_SIZE_X 1\r
+#define IMM64_SIGN_INST_WORD_POS_X 27\r
+#define IMM64_SIGN_VAL_POS_X 63\r
\r
RETURN_STATUS\r
PeCoffLoaderRelocateIa32Image (\r
RETURN_STATUS\r
PeCoffLoaderRelocateIpfImage (\r
IN UINT16 *Reloc,\r
- IN OUT CHAR8 *Fixup, \r
+ IN OUT CHAR8 *Fixup,\r
IN OUT CHAR8 **FixupData,\r
IN UINT64 Adjust\r
)\r
\r
Fixup = (CHAR8 *)((UINTN) Fixup & (UINTN) ~(15));\r
FixupVal = (UINT64)0;\r
- \r
- // \r
+\r
+ //\r
// Extract the lower 32 bits of IMM64 from bundle\r
//\r
EXT_IMM64(FixupVal,\r
IMM64_IMM41a_INST_WORD_POS_X,\r
IMM64_IMM41a_VAL_POS_X\r
);\r
- \r
- // \r
+\r
+ //\r
// Update 64-bit address\r
//\r
FixupVal += Adjust;\r
\r
- // \r
+ //\r
// Insert IMM64 into bundle\r
//\r
INS_IMM64(FixupVal,\r
}\r
\r
/**\r
- Pass in a pointer to an ARM MOVT or MOVW immediate instruciton and \r
+  Pass in a pointer to an ARM MOVT or MOVW immediate instruction and
return the immediate data encoded in the instruction\r
\r
@param Instruction Pointer to ARM MOVT or MOVW immediate instruction\r
\r
// Thumb2 is two 16-bit instructions working together. Not a single 32-bit instruction\r
// Example MOVT R0, #0 is 0x0000f2c0 or 0xf2c0 0x0000\r
- Movt = (*Instruction << 16) | (*(Instruction + 1)); \r
+ Movt = (*Instruction << 16) | (*(Instruction + 1));\r
\r
// imm16 = imm4:i:imm3:imm8\r
// imm4 -> Bit19:Bit16\r
UINT16 Patch;\r
\r
// First 16-bit chunk of instruciton\r
- Patch = ((Address >> 12) & 0x000f); // imm4 \r
+ Patch = ((Address >> 12) & 0x000f); // imm4\r
Patch |= (((Address & BIT11) != 0) ? BIT10 : 0); // i\r
*Instruction = (*Instruction & ~0x040f) | Patch;\r
\r
}\r
\r
/**\r
- Pass in a pointer to an ARM MOVW/MOVT instruciton pair and \r
+  Pass in a pointer to an ARM MOVW/MOVT instruction pair and
return the immediate data encoded in the two` instruction\r
\r
@param Instructions Pointer to ARM MOVW/MOVT insturction pair\r
{\r
UINT16 *Word;\r
UINT16 *Top;\r
- \r
+\r
Word = Instructions; // MOVW\r
Top = Word + 2; // MOVT\r
- \r
+\r
return (ThumbMovtImmediateAddress (Top) << 16) + ThumbMovtImmediateAddress (Word);\r
}\r
\r
{\r
UINT16 *Word;\r
UINT16 *Top;\r
- \r
+\r
Word = (UINT16 *)Instructions; // MOVW\r
Top = Word + 2; // MOVT\r
\r
Fixup16 = (UINT16 *) Fixup;\r
\r
switch ((**Reloc) >> 12) {\r
- \r
+\r
case EFI_IMAGE_REL_BASED_ARM_MOV32T:\r
FixupVal = ThumbMovwMovtImmediateAddress (Fixup16) + (UINT32)Adjust;\r
ThumbMovwMovtImmediatePatch (Fixup16, FixupVal);\r
- \r
- \r
+\r
+\r
if (*FixupData != NULL) {\r
*FixupData = ALIGN_POINTER(*FixupData, sizeof(UINT64));\r
CopyMem (*FixupData, Fixup16, sizeof (UINT64));\r
*FixupData = *FixupData + sizeof(UINT64);\r
}\r
break;\r
- \r
+\r
case EFI_IMAGE_REL_BASED_ARM_MOV32A:\r
// break omitted - ARM instruction encoding not implemented\r
default:\r