/** @file\r
Elf64 convert solution\r
\r
-Copyright (c) 2010 - 2017, Intel Corporation. All rights reserved.<BR>\r
+Copyright (c) 2010 - 2018, Intel Corporation. All rights reserved.<BR>\r
Portions copyright (c) 2013-2014, ARM Ltd. All rights reserved.<BR>\r
\r
This program and the accompanying materials are licensed and made available\r
STATIC Elf_Shdr *mShdrBase;\r
STATIC Elf_Phdr *mPhdrBase;\r
\r
+//\r
+// GOT information\r
+//\r
+STATIC Elf_Shdr *mGOTShdr = NULL;\r
+STATIC UINT32 mGOTShindex = 0;\r
+STATIC UINT32 *mGOTCoffEntries = NULL;\r
+STATIC UINT32 mGOTMaxCoffEntries = 0;\r
+STATIC UINT32 mGOTNumCoffEntries = 0;\r
+\r
//\r
// Coff information\r
//\r
return StrtabContents + Sym->st_name;\r
}\r
\r
+//\r
+// Find the ELF section that hosts the GOT, given the ELF Rva of a\r
+// single GOT entry. Normally the GOT is placed in the ELF .text\r
+// section, so assume that once the hosting section is found, all\r
+// GOT entries live in it, and merely verify that assumption on\r
+// subsequent calls. Caches the result in mGOTShdr/mGOTShindex;\r
+// exits the program if entries span multiple sections or the Rva\r
+// is not covered by any section.\r
+//\r
+STATIC\r
+VOID\r
+FindElfGOTSectionFromGOTEntryElfRva (\r
+  Elf64_Addr GOTEntryElfRva\r
+  )\r
+{\r
+  UINT32 i;\r
+  if (mGOTShdr != NULL) {\r
+    //\r
+    // Hosting section already cached: only verify this entry\r
+    // falls inside it.\r
+    //\r
+    if (GOTEntryElfRva >= mGOTShdr->sh_addr &&\r
+        GOTEntryElfRva <  mGOTShdr->sh_addr + mGOTShdr->sh_size) {\r
+      return;\r
+    }\r
+    Error (NULL, 0, 3000, "Unsupported", "FindElfGOTSectionFromGOTEntryElfRva: GOT entries found in multiple sections.");\r
+    exit(EXIT_FAILURE);\r
+  }\r
+  //\r
+  // First call: scan every section header for the one whose\r
+  // address range covers the entry's Rva.\r
+  //\r
+  for (i = 0; i < mEhdr->e_shnum; i++) {\r
+    Elf_Shdr *shdr = GetShdrByIndex(i);\r
+    if (GOTEntryElfRva >= shdr->sh_addr &&\r
+        GOTEntryElfRva <  shdr->sh_addr + shdr->sh_size) {\r
+      mGOTShdr = shdr;\r
+      mGOTShindex = i;\r
+      return;\r
+    }\r
+  }\r
+  Error (NULL, 0, 3000, "Invalid", "FindElfGOTSectionFromGOTEntryElfRva: ElfRva 0x%016LX for GOT entry not found in any section.", GOTEntryElfRva);\r
+  exit(EXIT_FAILURE);\r
+}\r
+\r
+//\r
+// Stores locations of GOT entries in COFF image.\r
+// Returns TRUE if GOT entry is new, FALSE if it was already\r
+// recorded. Simple linear-scan implementation as the number\r
+// of GOT entries is expected to be low.\r
+// Exits the program on allocation failure.\r
+//\r
+\r
+STATIC\r
+BOOLEAN\r
+AccumulateCoffGOTEntries (\r
+  UINT32 GOTCoffEntry\r
+  )\r
+{\r
+  UINT32 i;\r
+  UINT32 *NewEntries;\r
+  //\r
+  // Reject duplicates.\r
+  //\r
+  if (mGOTCoffEntries != NULL) {\r
+    for (i = 0; i < mGOTNumCoffEntries; i++) {\r
+      if (mGOTCoffEntries[i] == GOTCoffEntry) {\r
+        return FALSE;\r
+      }\r
+    }\r
+  }\r
+  if (mGOTCoffEntries == NULL) {\r
+    mGOTCoffEntries = (UINT32*)malloc(5 * sizeof *mGOTCoffEntries);\r
+    if (mGOTCoffEntries == NULL) {\r
+      //\r
+      // Error() does not terminate, and assert() is compiled out\r
+      // under NDEBUG, so exit explicitly to avoid a NULL store below.\r
+      //\r
+      Error (NULL, 0, 4001, "Resource", "memory cannot be allocated!");\r
+      exit(EXIT_FAILURE);\r
+    }\r
+    mGOTMaxCoffEntries = 5;\r
+    mGOTNumCoffEntries = 0;\r
+  } else if (mGOTNumCoffEntries == mGOTMaxCoffEntries) {\r
+    //\r
+    // Grow via a temporary pointer so the original buffer is not\r
+    // leaked if realloc fails.\r
+    //\r
+    NewEntries = (UINT32*)realloc(mGOTCoffEntries, 2 * mGOTMaxCoffEntries * sizeof *mGOTCoffEntries);\r
+    if (NewEntries == NULL) {\r
+      Error (NULL, 0, 4001, "Resource", "memory cannot be allocated!");\r
+      free(mGOTCoffEntries);\r
+      mGOTCoffEntries = NULL;\r
+      exit(EXIT_FAILURE);\r
+    }\r
+    mGOTCoffEntries = NewEntries;\r
+    mGOTMaxCoffEntries += mGOTMaxCoffEntries;\r
+  }\r
+  mGOTCoffEntries[mGOTNumCoffEntries++] = GOTCoffEntry;\r
+  return TRUE;\r
+}\r
+\r
+//\r
+// qsort() comparator ordering 32-bit unsigned integers ascending:\r
+// returns -1, 0 or 1 as *lhs is less than, equal to, or greater\r
+// than *rhs.\r
+//\r
+STATIC\r
+int\r
+UINT32Comparator (\r
+  const void* lhs,\r
+  const void* rhs\r
+  )\r
+{\r
+  UINT32 LhsValue;\r
+  UINT32 RhsValue;\r
+\r
+  LhsValue = *(const UINT32*)lhs;\r
+  RhsValue = *(const UINT32*)rhs;\r
+  if (LhsValue < RhsValue) {\r
+    return -1;\r
+  }\r
+  if (LhsValue > RhsValue) {\r
+    return 1;\r
+  }\r
+  return 0;\r
+}\r
+\r
+//\r
+// Emit the accumulated Coff GOT entry relocations into the Coff\r
+// image, in ascending Rva order. This function performs its job\r
+// once and then releases and resets the entry list, so it can\r
+// safely be called multiple times; later calls are no-ops until\r
+// new entries are accumulated.\r
+//\r
+STATIC\r
+VOID\r
+EmitGOTRelocations (\r
+  VOID\r
+  )\r
+{\r
+  UINT32 i;\r
+  if (mGOTCoffEntries == NULL) {\r
+    // Nothing accumulated, or already emitted and released.\r
+    return;\r
+  }\r
+  //\r
+  // Emit Coff relocations with Rvas ordered.\r
+  //\r
+  qsort(\r
+    mGOTCoffEntries,\r
+    mGOTNumCoffEntries,\r
+    sizeof *mGOTCoffEntries,\r
+    UINT32Comparator);\r
+  for (i = 0; i < mGOTNumCoffEntries; i++) {\r
+    VerboseMsg ("EFI_IMAGE_REL_BASED_DIR64 Offset: 0x%08X", mGOTCoffEntries[i]);\r
+    CoffAddFixup(\r
+      mGOTCoffEntries[i],\r
+      EFI_IMAGE_REL_BASED_DIR64);\r
+  }\r
+  // Release and reset so a later call starts from a clean state.\r
+  free(mGOTCoffEntries);\r
+  mGOTCoffEntries = NULL;\r
+  mGOTMaxCoffEntries = 0;\r
+  mGOTNumCoffEntries = 0;\r
+}\r
+\r
//\r
// Elf functions interface implementation\r
//\r
}\r
}\r
\r
+ //\r
+ // Check if mCoffAlignment is larger than MAX_COFF_ALIGNMENT\r
+ //\r
+ if (mCoffAlignment > MAX_COFF_ALIGNMENT) {\r
+ Error (NULL, 0, 3000, "Invalid", "Section alignment is larger than MAX_COFF_ALIGNMENT.");\r
+ assert (FALSE);\r
+ }\r
+\r
+\r
//\r
// Move the PE/COFF header right before the first section. This will help us\r
// save space when converting to TE.\r
Elf_Shdr *SecShdr;\r
UINT32 SecOffset;\r
BOOLEAN (*Filter)(Elf_Shdr *);\r
+ Elf64_Addr GOTEntryRva;\r
\r
//\r
// Initialize filter pointer\r
switch (Shdr->sh_type) {\r
case SHT_PROGBITS:\r
/* Copy. */\r
+ if (Shdr->sh_offset + Shdr->sh_size > mFileBufferSize) {\r
+ return FALSE;\r
+ }\r
memcpy(mCoffFile + mCoffSectionsOffset[Idx],\r
(UINT8*)mEhdr + Shdr->sh_offset,\r
(size_t) Shdr->sh_size);\r
// section that applies to the entire binary, and which will have its section\r
// index set to #0 (which is a NULL section with the SHF_ALLOC bit cleared).\r
//\r
- // In the absence of GOT based relocations (which we currently don't support),\r
+ // In the absence of GOT based relocations,\r
// this RELA section will contain redundant R_xxx_RELATIVE relocations, one\r
// for every R_xxx_xx64 relocation appearing in the per-section RELA sections.\r
// (i.e., .rela.text and .rela.data)\r
// Absolute relocation.\r
//\r
VerboseMsg ("R_X86_64_64");\r
- VerboseMsg ("Offset: 0x%08X, Addend: 0x%016LX", \r
- (UINT32)(SecOffset + (Rel->r_offset - SecShdr->sh_addr)), \r
+ VerboseMsg ("Offset: 0x%08X, Addend: 0x%016LX",\r
+ (UINT32)(SecOffset + (Rel->r_offset - SecShdr->sh_addr)),\r
*(UINT64 *)Targ);\r
*(UINT64 *)Targ = *(UINT64 *)Targ - SymShdr->sh_addr + mCoffSectionsOffset[Sym->st_shndx];\r
VerboseMsg ("Relocation: 0x%016LX", *(UINT64*)Targ);\r
break;\r
case R_X86_64_32:\r
VerboseMsg ("R_X86_64_32");\r
- VerboseMsg ("Offset: 0x%08X, Addend: 0x%08X", \r
- (UINT32)(SecOffset + (Rel->r_offset - SecShdr->sh_addr)), \r
+ VerboseMsg ("Offset: 0x%08X, Addend: 0x%08X",\r
+ (UINT32)(SecOffset + (Rel->r_offset - SecShdr->sh_addr)),\r
*(UINT32 *)Targ);\r
*(UINT32 *)Targ = (UINT32)((UINT64)(*(UINT32 *)Targ) - SymShdr->sh_addr + mCoffSectionsOffset[Sym->st_shndx]);\r
VerboseMsg ("Relocation: 0x%08X", *(UINT32*)Targ);\r
break;\r
case R_X86_64_32S:\r
VerboseMsg ("R_X86_64_32S");\r
- VerboseMsg ("Offset: 0x%08X, Addend: 0x%08X", \r
- (UINT32)(SecOffset + (Rel->r_offset - SecShdr->sh_addr)), \r
+ VerboseMsg ("Offset: 0x%08X, Addend: 0x%08X",\r
+ (UINT32)(SecOffset + (Rel->r_offset - SecShdr->sh_addr)),\r
*(UINT32 *)Targ);\r
*(INT32 *)Targ = (INT32)((INT64)(*(INT32 *)Targ) - SymShdr->sh_addr + mCoffSectionsOffset[Sym->st_shndx]);\r
VerboseMsg ("Relocation: 0x%08X", *(UINT32*)Targ);\r
// Relative relocation: Symbol - Ip + Addend\r
//\r
VerboseMsg ("R_X86_64_PC32");\r
- VerboseMsg ("Offset: 0x%08X, Addend: 0x%08X", \r
- (UINT32)(SecOffset + (Rel->r_offset - SecShdr->sh_addr)), \r
+ VerboseMsg ("Offset: 0x%08X, Addend: 0x%08X",\r
+ (UINT32)(SecOffset + (Rel->r_offset - SecShdr->sh_addr)),\r
*(UINT32 *)Targ);\r
*(UINT32 *)Targ = (UINT32) (*(UINT32 *)Targ\r
+ (mCoffSectionsOffset[Sym->st_shndx] - SymShdr->sh_addr)\r
- (SecOffset - SecShdr->sh_addr));\r
VerboseMsg ("Relocation: 0x%08X", *(UINT32 *)Targ);\r
break;\r
+ case R_X86_64_GOTPCREL:\r
+ case R_X86_64_GOTPCRELX:\r
+ case R_X86_64_REX_GOTPCRELX:\r
+ VerboseMsg ("R_X86_64_GOTPCREL family");\r
+ VerboseMsg ("Offset: 0x%08X, Addend: 0x%08X",\r
+ (UINT32)(SecOffset + (Rel->r_offset - SecShdr->sh_addr)),\r
+ *(UINT32 *)Targ);\r
+ GOTEntryRva = Rel->r_offset - Rel->r_addend + *(INT32 *)Targ;\r
+ FindElfGOTSectionFromGOTEntryElfRva(GOTEntryRva);\r
+ *(UINT32 *)Targ = (UINT32) (*(UINT32 *)Targ\r
+ + (mCoffSectionsOffset[mGOTShindex] - mGOTShdr->sh_addr)\r
+ - (SecOffset - SecShdr->sh_addr));\r
+ VerboseMsg ("Relocation: 0x%08X", *(UINT32 *)Targ);\r
+ GOTEntryRva += (mCoffSectionsOffset[mGOTShindex] - mGOTShdr->sh_addr); // ELF Rva -> COFF Rva\r
+ if (AccumulateCoffGOTEntries((UINT32)GOTEntryRva)) {\r
+ //\r
+ // Relocate GOT entry if it's the first time we run into it\r
+ //\r
+ Targ = mCoffFile + GOTEntryRva;\r
+ //\r
+ // Limitation: The following three statements assume memory\r
+ // at *Targ is valid because the section containing the GOT\r
+ // has already been copied from the ELF image to the Coff image.\r
+ // This pre-condition presently holds because the GOT is placed\r
+ // in section .text, and the ELF text sections are all copied\r
+ // prior to reaching this point.\r
+ // If the pre-condition is violated in the future, this fixup\r
+ // either needs to be deferred after the GOT section is copied\r
+ // to the Coff image, or the fixup should be performed on the\r
+ // source Elf image instead of the destination Coff image.\r
+ //\r
+ VerboseMsg ("Offset: 0x%08X, Addend: 0x%016LX",\r
+ (UINT32)GOTEntryRva,\r
+ *(UINT64 *)Targ);\r
+ *(UINT64 *)Targ = *(UINT64 *)Targ - SymShdr->sh_addr + mCoffSectionsOffset[Sym->st_shndx];\r
+ VerboseMsg ("Relocation: 0x%016LX", *(UINT64*)Targ);\r
+ }\r
+ break;\r
default:\r
Error (NULL, 0, 3000, "Invalid", "%s unsupported ELF EM_X86_64 relocation 0x%x.", mInImageName, (unsigned) ELF_R_TYPE(Rel->r_info));\r
}\r
case R_X86_64_NONE:\r
case R_X86_64_PC32:\r
case R_X86_64_PLT32:\r
+ case R_X86_64_GOTPCREL:\r
+ case R_X86_64_GOTPCRELX:\r
+ case R_X86_64_REX_GOTPCRELX:\r
break;\r
case R_X86_64_64:\r
- VerboseMsg ("EFI_IMAGE_REL_BASED_DIR64 Offset: 0x%08X", \r
+ VerboseMsg ("EFI_IMAGE_REL_BASED_DIR64 Offset: 0x%08X",\r
mCoffSectionsOffset[RelShdr->sh_info] + (Rel->r_offset - SecShdr->sh_addr));\r
CoffAddFixup(\r
(UINT32) ((UINT64) mCoffSectionsOffset[RelShdr->sh_info]\r
break;\r
case R_X86_64_32S:\r
case R_X86_64_32:\r
- VerboseMsg ("EFI_IMAGE_REL_BASED_HIGHLOW Offset: 0x%08X", \r
+ VerboseMsg ("EFI_IMAGE_REL_BASED_HIGHLOW Offset: 0x%08X",\r
mCoffSectionsOffset[RelShdr->sh_info] + (Rel->r_offset - SecShdr->sh_addr));\r
CoffAddFixup(\r
(UINT32) ((UINT64) mCoffSectionsOffset[RelShdr->sh_info]\r
Error (NULL, 0, 3000, "Not Supported", "This tool does not support relocations for ELF with e_machine %u (processor type).", (unsigned) mEhdr->e_machine);\r
}\r
}\r
+ if (mEhdr->e_machine == EM_X86_64 && RelShdr->sh_info == mGOTShindex) {\r
+ //\r
+ // Tack relocations for GOT entries after other relocations for\r
+ // the section the GOT is in, as it's usually found at the end\r
+ // of the section. This is done in order to maintain Rva order\r
+ // of Coff relocations.\r
+ //\r
+ EmitGOTRelocations();\r
+ }\r
}\r
}\r
}\r
\r
+ if (mEhdr->e_machine == EM_X86_64) {\r
+ //\r
+ // This is a safety net just in case the GOT is in a section\r
+ // with no other relocations and the first invocation of\r
+ // EmitGOTRelocations() above was skipped. This invocation\r
+ // does not maintain Rva order of Coff relocations.\r
+ // At present, with a single text section, all references to\r
+ // the GOT and the GOT itself reside in section .text, so\r
+ // if there's a GOT at all, the first invocation above\r
+ // is executed.\r
+ //\r
+ EmitGOTRelocations();\r
+ }\r
//\r
// Pad by adding empty entries.\r
//\r