--- /dev/null
+/** @file\r
+* File managing the MMU for ARMv8 architecture\r
+*\r
+* Copyright (c) 2011-2014, ARM Limited. All rights reserved.\r
+* Copyright (c) 2016, Linaro Limited. All rights reserved.\r
+*\r
+* This program and the accompanying materials\r
+* are licensed and made available under the terms and conditions of the BSD License\r
+* which accompanies this distribution. The full text of the license may be found at\r
+* http://opensource.org/licenses/bsd-license.php\r
+*\r
+* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+*\r
+**/\r
+\r
+#include <Uefi.h>\r
+#include <Chipset/AArch64.h>\r
+#include <Library/BaseMemoryLib.h>\r
+#include <Library/CacheMaintenanceLib.h>\r
+#include <Library/MemoryAllocationLib.h>\r
+#include <Library/ArmLib.h>\r
+#include <Library/ArmMmuLib.h>\r
+#include <Library/BaseLib.h>\r
+#include <Library/DebugLib.h>\r
+\r
+// We use this index definition to define an invalid block entry\r
+#define TT_ATTR_INDX_INVALID ((UINT32)~0)\r
+\r
/**
  Convert an ARM memory region attribute into the matching AArch64
  translation table descriptor bits: MAIR attribute index, shareability,
  and execute-never for device mappings.

  @param[in]  Attributes  The ARM memory region attribute to convert.

  @return The translation table descriptor attribute bits.
**/
STATIC
UINT64
ArmMemoryAttributeToPageAttribute (
  IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
  )
{
  switch (Attributes) {
  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
    return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;

  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
    return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;

  // Uncached and device mappings are treated as outer shareable by default,
  case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
    return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;

  default:
    ASSERT(0);
    // Deliberate fall through: after asserting in DEBUG builds, treat an
    // unknown attribute as device memory, the safest mapping.
  case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
    // Device memory is always mapped execute-never. The EL2 translation
    // regime has a single XN bit; EL1&0 has separate UXN/PXN bits.
    if (ArmReadCurrentEL () == AARCH64_EL2)
      return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
    else
      return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
  }
}
+\r
+UINT64\r
+PageAttributeToGcdAttribute (\r
+ IN UINT64 PageAttributes\r
+ )\r
+{\r
+ UINT64 GcdAttributes;\r
+\r
+ switch (PageAttributes & TT_ATTR_INDX_MASK) {\r
+ case TT_ATTR_INDX_DEVICE_MEMORY:\r
+ GcdAttributes = EFI_MEMORY_UC;\r
+ break;\r
+ case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:\r
+ GcdAttributes = EFI_MEMORY_WC;\r
+ break;\r
+ case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:\r
+ GcdAttributes = EFI_MEMORY_WT;\r
+ break;\r
+ case TT_ATTR_INDX_MEMORY_WRITE_BACK:\r
+ GcdAttributes = EFI_MEMORY_WB;\r
+ break;\r
+ default:\r
+ DEBUG ((EFI_D_ERROR, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes));\r
+ ASSERT (0);\r
+ // The Global Coherency Domain (GCD) value is defined as a bit set.\r
+ // Returning 0 means no attribute has been set.\r
+ GcdAttributes = 0;\r
+ }\r
+\r
+ // Determine protection attributes\r
+ if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) || ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {\r
+ // Read only cases map to write-protect\r
+ GcdAttributes |= EFI_MEMORY_WP;\r
+ }\r
+\r
+ // Process eXecute Never attribute\r
+ if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0 ) {\r
+ GcdAttributes |= EFI_MEMORY_XP;\r
+ }\r
+\r
+ return GcdAttributes;\r
+}\r
+\r
+ARM_MEMORY_REGION_ATTRIBUTES\r
+GcdAttributeToArmAttribute (\r
+ IN UINT64 GcdAttributes\r
+ )\r
+{\r
+ switch (GcdAttributes & 0xFF) {\r
+ case EFI_MEMORY_UC:\r
+ return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;\r
+ case EFI_MEMORY_WC:\r
+ return ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED;\r
+ case EFI_MEMORY_WT:\r
+ return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH;\r
+ case EFI_MEMORY_WB:\r
+ return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK;\r
+ default:\r
+ DEBUG ((EFI_D_ERROR, "GcdAttributeToArmAttribute: 0x%lX attributes is not supported.\n", GcdAttributes));\r
+ ASSERT (0);\r
+ return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;\r
+ }\r
+}\r
+\r
// Describe the T0SZ values for each translation table level
typedef struct {
  UINTN MinT0SZ;      // Smallest T0SZ for which this level is the root level
  UINTN MaxT0SZ;      // Largest T0SZ for which this level is the root level
  UINTN LargestT0SZ;  // Generally (MaxT0SZ == LargestT0SZ) but at the Level3 Table
                      // the MaxT0SZ is not at the boundary of the table
} T0SZ_DESCRIPTION_PER_LEVEL;

// Map table for the corresponding Level of Table
// (4 KB granule: each additional level resolves 9 more address bits)
STATIC CONST T0SZ_DESCRIPTION_PER_LEVEL T0SZPerTableLevel[] = {
  { 16, 24, 24 }, // Table Level 0
  { 25, 33, 33 }, // Table Level 1
  { 34, 39, 42 }  // Table Level 2
};
+\r
/**
  Return the level and entry count of the root translation table implied by
  a given T0SZ value (4 KB granule).

  @param[in]  T0SZ             Size offset of the TTBR0 address space.
  @param[out] TableLevel       Level (0..2) of the root table; may be NULL.
  @param[out] TableEntryCount  Number of entries in the root table; may be NULL.
**/
VOID
GetRootTranslationTableInfo (
  IN UINTN T0SZ,
  OUT UINTN *TableLevel,
  OUT UINTN *TableEntryCount
  )
{
  UINTN Index;

  // Identify the level of the root table from the given T0SZ
  for (Index = 0; Index < sizeof (T0SZPerTableLevel) / sizeof (T0SZ_DESCRIPTION_PER_LEVEL); Index++) {
    if (T0SZ <= T0SZPerTableLevel[Index].MaxT0SZ) {
      break;
    }
  }

  // If we have not found the corresponding maximum T0SZ then we use the last one
  if (Index == sizeof (T0SZPerTableLevel) / sizeof (T0SZ_DESCRIPTION_PER_LEVEL)) {
    Index--;
  }

  // Get the level of the root table
  if (TableLevel) {
    *TableLevel = Index;
  }

  // The entry count is 2^(LargestT0SZ - T0SZ + 1): each unit by which T0SZ
  // falls below the level's largest value doubles the root table size.
  if (TableEntryCount) {
    *TableEntryCount = 1 << (T0SZPerTableLevel[Index].LargestT0SZ - T0SZ + 1);
  }
}
+\r
+STATIC\r
+VOID\r
+ReplaceLiveEntry (\r
+ IN UINT64 *Entry,\r
+ IN UINT64 Value\r
+ )\r
+{\r
+ if (!ArmMmuEnabled ()) {\r
+ *Entry = Value;\r
+ } else {\r
+ ArmReplaceLiveTranslationEntry (Entry, Value);\r
+ }\r
+}\r
+\r
/**
  Derive the T0SZ value and root table entry count needed to cover addresses
  up to MaxAddress.

  @param[in]  MaxAddress       Highest address that must be translatable.
  @param[out] T0SZ             Computed T0SZ value (must not be NULL).
  @param[out] TableEntryCount  Entry count of the matching root table
                               (must not be NULL).
**/
STATIC
VOID
LookupAddresstoRootTable (
  IN UINT64 MaxAddress,
  OUT UINTN *T0SZ,
  OUT UINTN *TableEntryCount
  )
{
  UINTN TopBit;

  // Check the parameters are not NULL
  ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));

  // Look for the highest bit set in MaxAddress
  for (TopBit = 63; TopBit != 0; TopBit--) {
    if ((1ULL << TopBit) & MaxAddress) {
      // MaxAddress top bit is found; TopBit becomes the number of
      // significant address bits.
      TopBit = TopBit + 1;
      break;
    }
  }
  // NOTE: fires if MaxAddress fits entirely in bit 0 (i.e. is 0 or 1),
  // since the loop never tests bit 0.
  ASSERT (TopBit != 0);

  // Calculate T0SZ from the top bit of the MaxAddress
  *T0SZ = 64 - TopBit;

  // Get the Table info from T0SZ
  GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);
}
+\r
/**
  Locate (creating intermediate tables as needed) the block entry that maps
  RegionStart at the deepest level compatible with the region's alignment
  and size.

  Walks the translation tables from the root, splitting existing block
  entries into lower-level tables and allocating tables for invalid entries
  along the way.

  @param[in]      RootTable       Root of the translation table hierarchy.
  @param[in]      RegionStart     Start address of the region (4 KB aligned).
  @param[out]     TableLevel      Level at which the returned entry lives.
  @param[in,out]  BlockEntrySize  In: region size. Out: size covered by one
                                  block entry at the returned level.
  @param[out]     LastBlockEntry  Last valid entry of the containing table.

  @return Pointer to the block entry, or NULL on invalid parameters or
          allocation failure.
**/
STATIC
UINT64*
GetBlockEntryListFromAddress (
  IN UINT64 *RootTable,
  IN UINT64 RegionStart,
  OUT UINTN *TableLevel,
  IN OUT UINT64 *BlockEntrySize,
  OUT UINT64 **LastBlockEntry
  )
{
  UINTN RootTableLevel;
  UINTN RootTableEntryCount;
  UINT64 *TranslationTable;
  UINT64 *BlockEntry;
  UINT64 *SubTableBlockEntry;
  UINT64 BlockEntryAddress;
  UINTN BaseAddressAlignment;
  UINTN PageLevel;
  UINTN Index;
  UINTN IndexLevel;
  UINTN T0SZ;
  UINT64 Attributes;
  UINT64 TableAttributes;

  // Initialize variable
  BlockEntry = NULL;

  // Ensure the parameters are valid
  if (!(TableLevel && BlockEntrySize && LastBlockEntry)) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // Ensure the Region is aligned on 4KB boundary
  if ((RegionStart & (SIZE_4KB - 1)) != 0) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // Ensure the required size is aligned on 4KB boundary and not 0
  if ((*BlockEntrySize & (SIZE_4KB - 1)) != 0 || *BlockEntrySize == 0) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // TCR was programmed by ArmConfigureMmu, so T0SZ describes the live tables
  T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
  // Get the Table info from T0SZ
  GetRootTranslationTableInfo (T0SZ, &RootTableLevel, &RootTableEntryCount);

  // If the start address is 0x0 then we use the size of the region to identify the alignment
  if (RegionStart == 0) {
    // Identify the highest possible alignment for the Region Size
    BaseAddressAlignment = LowBitSet64 (*BlockEntrySize);
  } else {
    // Identify the highest possible alignment for the Base Address
    BaseAddressAlignment = LowBitSet64 (RegionStart);
  }

  // Identify the Page Level the RegionStart must belong to. Note that PageLevel
  // should be at least 1 since block translations are not supported at level 0
  // (4 KB granule: level N covers 12 + 9*(3-N) address bits per entry).
  PageLevel = MAX (3 - ((BaseAddressAlignment - 12) / 9), 1);

  // If the required size is smaller than the current block size then we need to go to the page below.
  // The PageLevel was calculated on the Base Address alignment but did not take in account the alignment
  // of the allocation size
  while (*BlockEntrySize < TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel)) {
    // It does not fit so we need to go a page level above
    PageLevel++;
  }

  //
  // Get the Table Descriptor for the corresponding PageLevel. We need to decompose RegionStart to get appropriate entries
  //

  TranslationTable = RootTable;
  for (IndexLevel = RootTableLevel; IndexLevel <= PageLevel; IndexLevel++) {
    BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel, RegionStart);

    if ((IndexLevel != 3) && ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY)) {
      // Go to the next table
      TranslationTable = (UINT64*)(*BlockEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE);

      // If we are at the last level then update the last level to next level
      if (IndexLevel == PageLevel) {
        // Enter the next level
        PageLevel++;
      }
    } else if ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
      // If we are not at the last level then we need to split this BlockEntry
      if (IndexLevel != PageLevel) {
        // Retrieve the attributes from the block entry
        Attributes = *BlockEntry & TT_ATTRIBUTES_MASK;

        // Convert the block entry attributes into Table descriptor attributes
        TableAttributes = TT_TABLE_AP_NO_PERMISSION;
        if (Attributes & TT_NS) {
          TableAttributes = TT_TABLE_NS;
        }

        // Get the address corresponding at this entry
        BlockEntryAddress = RegionStart;
        BlockEntryAddress = BlockEntryAddress >> TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
        // Shift back to right to set zero before the effective address
        BlockEntryAddress = BlockEntryAddress << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);

        // Set the correct entry type for the next page level
        if ((IndexLevel + 1) == 3) {
          Attributes |= TT_TYPE_BLOCK_ENTRY_LEVEL3;
        } else {
          Attributes |= TT_TYPE_BLOCK_ENTRY;
        }

        // Create a new translation table
        TranslationTable = (UINT64*)AllocateAlignedPages (EFI_SIZE_TO_PAGES(TT_ENTRY_COUNT * sizeof(UINT64)), TT_ALIGNMENT_DESCRIPTION_TABLE);
        if (TranslationTable == NULL) {
          return NULL;
        }

        // Populate the newly created lower level table so every entry maps
        // the same range with the same attributes as the block it replaces
        SubTableBlockEntry = TranslationTable;
        for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
          *SubTableBlockEntry = Attributes | (BlockEntryAddress + (Index << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel + 1)));
          SubTableBlockEntry++;
        }

        // Fill the BlockEntry with the new TranslationTable
        // (uses break-before-make if the MMU is enabled)
        ReplaceLiveEntry (BlockEntry,
          ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TableAttributes | TT_TYPE_TABLE_ENTRY);
      }
    } else {
      if (IndexLevel != PageLevel) {
        //
        // Case when we have an Invalid Entry and we are at a page level above of the one targetted.
        //

        // Create a new translation table
        TranslationTable = (UINT64*)AllocateAlignedPages (EFI_SIZE_TO_PAGES(TT_ENTRY_COUNT * sizeof(UINT64)), TT_ALIGNMENT_DESCRIPTION_TABLE);
        if (TranslationTable == NULL) {
          return NULL;
        }

        ZeroMem (TranslationTable, TT_ENTRY_COUNT * sizeof(UINT64));

        // Fill the new BlockEntry with the TranslationTable
        *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TT_TYPE_TABLE_ENTRY;
      }
    }
  }

  // Expose the found PageLevel to the caller
  *TableLevel = PageLevel;

  // Now, we have the Table Level we can get the Block Size associated to this table
  *BlockEntrySize = TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel);

  // The last block of the root table depends on the number of entry in this table,
  // otherwise it is always the (TT_ENTRY_COUNT - 1)th entry in the table.
  *LastBlockEntry = TT_LAST_BLOCK_ADDRESS(TranslationTable,
    (PageLevel == RootTableLevel) ? RootTableEntryCount : TT_ENTRY_COUNT);

  return BlockEntry;
}
+\r
/**
  Map or update the attributes of an address region in the given translation
  tables.

  Iterates over the region, each pass obtaining the deepest suitable block
  entry list via GetBlockEntryListFromAddress and filling consecutive
  entries until the end of that table (or a table entry) is reached.

  @param[in]  RootTable       Root of the translation table hierarchy.
  @param[in]  RegionStart     Region start (1:1 mapped, 4 KB aligned).
  @param[in]  RegionLength    Region length in bytes (4 KB multiple, non-zero).
  @param[in]  Attributes      Descriptor attribute bits to set.
  @param[in]  BlockEntryMask  Mask of existing descriptor bits to preserve.

  @retval RETURN_SUCCESS            Region mapped/updated.
  @retval RETURN_INVALID_PARAMETER  Length is zero or not 4 KB aligned.
  @retval RETURN_OUT_OF_RESOURCES   Could not allocate a translation table.
**/
STATIC
RETURN_STATUS
UpdateRegionMapping (
  IN UINT64 *RootTable,
  IN UINT64 RegionStart,
  IN UINT64 RegionLength,
  IN UINT64 Attributes,
  IN UINT64 BlockEntryMask
  )
{
  UINT32 Type;
  UINT64 *BlockEntry;
  UINT64 *LastBlockEntry;
  UINT64 BlockEntrySize;
  UINTN TableLevel;

  // Ensure the Length is aligned on 4KB boundary
  if ((RegionLength == 0) || ((RegionLength & (SIZE_4KB - 1)) != 0)) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return RETURN_INVALID_PARAMETER;
  }

  do {
    // Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor
    // such as the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor
    BlockEntrySize = RegionLength;
    BlockEntry = GetBlockEntryListFromAddress (RootTable, RegionStart, &TableLevel, &BlockEntrySize, &LastBlockEntry);
    if (BlockEntry == NULL) {
      // GetBlockEntryListFromAddress() return NULL when it fails to allocate new pages from the Translation Tables
      return RETURN_OUT_OF_RESOURCES;
    }

    // Level 3 uses a distinct descriptor type encoding for pages
    if (TableLevel != 3) {
      Type = TT_TYPE_BLOCK_ENTRY;
    } else {
      Type = TT_TYPE_BLOCK_ENTRY_LEVEL3;
    }

    do {
      // Fill the Block Entry with attribute and output block address;
      // BlockEntryMask selects which existing bits survive the update
      *BlockEntry &= BlockEntryMask;
      *BlockEntry |= (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;

      // Go to the next BlockEntry
      RegionStart += BlockEntrySize;
      RegionLength -= BlockEntrySize;
      BlockEntry++;

      // Break the inner loop when next block is a table
      // Rerun GetBlockEntryListFromAddress to avoid page table memory leak
      if (TableLevel != 3 &&
          (*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
        break;
      }
    } while ((RegionLength >= BlockEntrySize) && (BlockEntry <= LastBlockEntry));
  } while (RegionLength != 0);

  return RETURN_SUCCESS;
}
+\r
+STATIC\r
+RETURN_STATUS\r
+FillTranslationTable (\r
+ IN UINT64 *RootTable,\r
+ IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion\r
+ )\r
+{\r
+ return UpdateRegionMapping (\r
+ RootTable,\r
+ MemoryRegion->VirtualBase,\r
+ MemoryRegion->Length,\r
+ ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,\r
+ 0\r
+ );\r
+}\r
+\r
+RETURN_STATUS\r
+SetMemoryAttributes (\r
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
+ IN UINT64 Length,\r
+ IN UINT64 Attributes,\r
+ IN EFI_PHYSICAL_ADDRESS VirtualMask\r
+ )\r
+{\r
+ RETURN_STATUS Status;\r
+ ARM_MEMORY_REGION_DESCRIPTOR MemoryRegion;\r
+ UINT64 *TranslationTable;\r
+\r
+ MemoryRegion.PhysicalBase = BaseAddress;\r
+ MemoryRegion.VirtualBase = BaseAddress;\r
+ MemoryRegion.Length = Length;\r
+ MemoryRegion.Attributes = GcdAttributeToArmAttribute (Attributes);\r
+\r
+ TranslationTable = ArmGetTTBR0BaseAddress ();\r
+\r
+ Status = FillTranslationTable (TranslationTable, &MemoryRegion);\r
+ if (RETURN_ERROR (Status)) {\r
+ return Status;\r
+ }\r
+\r
+ // Invalidate all TLB entries so changes are synced\r
+ ArmInvalidateTlb ();\r
+\r
+ return RETURN_SUCCESS;\r
+}\r
+\r
+STATIC\r
+RETURN_STATUS\r
+SetMemoryRegionAttribute (\r
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
+ IN UINT64 Length,\r
+ IN UINT64 Attributes,\r
+ IN UINT64 BlockEntryMask\r
+ )\r
+{\r
+ RETURN_STATUS Status;\r
+ UINT64 *RootTable;\r
+\r
+ RootTable = ArmGetTTBR0BaseAddress ();\r
+\r
+ Status = UpdateRegionMapping (RootTable, BaseAddress, Length, Attributes, BlockEntryMask);\r
+ if (RETURN_ERROR (Status)) {\r
+ return Status;\r
+ }\r
+\r
+ // Invalidate all TLB entries so changes are synced\r
+ ArmInvalidateTlb ();\r
+\r
+ return RETURN_SUCCESS;\r
+}\r
+\r
+RETURN_STATUS\r
+ArmSetMemoryRegionNoExec (\r
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
+ IN UINT64 Length\r
+ )\r
+{\r
+ UINT64 Val;\r
+\r
+ if (ArmReadCurrentEL () == AARCH64_EL1) {\r
+ Val = TT_PXN_MASK | TT_UXN_MASK;\r
+ } else {\r
+ Val = TT_XN_MASK;\r
+ }\r
+\r
+ return SetMemoryRegionAttribute (\r
+ BaseAddress,\r
+ Length,\r
+ Val,\r
+ ~TT_ADDRESS_MASK_BLOCK_ENTRY);\r
+}\r
+\r
+RETURN_STATUS\r
+ArmClearMemoryRegionNoExec (\r
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
+ IN UINT64 Length\r
+ )\r
+{\r
+ UINT64 Mask;\r
+\r
+ // XN maps to UXN in the EL1&0 translation regime\r
+ Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);\r
+\r
+ return SetMemoryRegionAttribute (\r
+ BaseAddress,\r
+ Length,\r
+ 0,\r
+ Mask);\r
+}\r
+\r
+RETURN_STATUS\r
+ArmSetMemoryRegionReadOnly (\r
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
+ IN UINT64 Length\r
+ )\r
+{\r
+ return SetMemoryRegionAttribute (\r
+ BaseAddress,\r
+ Length,\r
+ TT_AP_RO_RO,\r
+ ~TT_ADDRESS_MASK_BLOCK_ENTRY);\r
+}\r
+\r
+RETURN_STATUS\r
+ArmClearMemoryRegionReadOnly (\r
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
+ IN UINT64 Length\r
+ )\r
+{\r
+ return SetMemoryRegionAttribute (\r
+ BaseAddress,\r
+ Length,\r
+ TT_AP_RW_RW,\r
+ ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));\r
+}\r
+\r
/**
  Configure and enable the MMU for the current exception level (EL1 or EL2).

  Computes T0SZ/TCR from the highest address in MemoryTable, allocates and
  populates the translation tables, programs MAIR, and enables caches and
  the MMU. The sequence of register writes and cache operations below is
  order-sensitive.

  @param[in]  MemoryTable           Array of region descriptors, terminated
                                    by an entry with Length == 0.
  @param[out] TranslationTableBase  If not NULL, receives the root table
                                    address.
  @param[out] TranslationTableSize  If not NULL, receives the root table
                                    size in bytes.

  @retval RETURN_SUCCESS            MMU configured and enabled.
  @retval RETURN_INVALID_PARAMETER  MemoryTable is NULL.
  @retval RETURN_UNSUPPORTED        Address space too large, running at an
                                    unsupported EL, or the root table is not
                                    covered by MemoryTable.
  @retval RETURN_OUT_OF_RESOURCES   Table allocation failed.
**/
RETURN_STATUS
EFIAPI
ArmConfigureMmu (
  IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable,
  OUT VOID **TranslationTableBase OPTIONAL,
  OUT UINTN *TranslationTableSize OPTIONAL
  )
{
  VOID* TranslationTable;
  UINTN TranslationTablePageCount;
  UINT32 TranslationTableAttribute;
  ARM_MEMORY_REGION_DESCRIPTOR *MemoryTableEntry;
  UINT64 MaxAddress;
  UINT64 TopAddress;
  UINTN T0SZ;
  UINTN RootTableEntryCount;
  UINT64 TCR;
  RETURN_STATUS Status;

  if(MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return RETURN_INVALID_PARAMETER;
  }

  // Identify the highest address of the memory table
  MaxAddress = MemoryTable->PhysicalBase + MemoryTable->Length - 1;
  MemoryTableEntry = MemoryTable;
  while (MemoryTableEntry->Length != 0) {
    TopAddress = MemoryTableEntry->PhysicalBase + MemoryTableEntry->Length - 1;
    if (TopAddress > MaxAddress) {
      MaxAddress = TopAddress;
    }
    MemoryTableEntry++;
  }

  // Lookup the Table Level to get the information
  LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);

  //
  // Set TCR that allows us to retrieve T0SZ in the subsequent functions
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    //Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return RETURN_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return RETURN_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return RETURN_UNSUPPORTED;
  }

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for translation table
  TranslationTablePageCount = EFI_SIZE_TO_PAGES(RootTableEntryCount * sizeof(UINT64));
  TranslationTable = (UINT64*)AllocateAlignedPages (TranslationTablePageCount, TT_ALIGNMENT_DESCRIPTION_TABLE);
  if (TranslationTable == NULL) {
    return RETURN_OUT_OF_RESOURCES;
  }
  // We set TTBR0 just after allocating the table to retrieve its location from the subsequent
  // functions without needing to pass this value across the functions. The MMU is only enabled
  // after the translation tables are populated.
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof(UINT64);
  }

  ZeroMem (TranslationTable, RootTableEntryCount * sizeof(UINT64));

  // Disable MMU and caches. ArmDisableMmu() also invalidates the TLBs
  ArmDisableMmu ();
  ArmDisableDataCache ();
  ArmDisableInstructionCache ();

  // Make sure nothing sneaked into the cache
  ArmCleanInvalidateDataCache ();
  ArmInvalidateInstructionCache ();

  // Populate the tables, and in the same pass find the attribute of the
  // region containing the root table (needed below for TCR cacheability)
  TranslationTableAttribute = TT_ATTR_INDX_INVALID;
  while (MemoryTable->Length != 0) {
    // Find the memory attribute for the Translation Table
    if (((UINTN)TranslationTable >= MemoryTable->PhysicalBase) &&
        ((UINTN)TranslationTable <= MemoryTable->PhysicalBase - 1 + MemoryTable->Length)) {
      TranslationTableAttribute = MemoryTable->Attributes;
    }

    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (RETURN_ERROR (Status)) {
      goto FREE_TRANSLATION_TABLE;
    }
    MemoryTable++;
  }

  // Translate the Memory Attributes into Translation Table Register Attributes
  if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED) ||
      (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED)) {
    TCR |= TCR_SH_NON_SHAREABLE | TCR_RGN_OUTER_NON_CACHEABLE | TCR_RGN_INNER_NON_CACHEABLE;
  } else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK) ||
      (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK)) {
    TCR |= TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WRITE_BACK_ALLOC | TCR_RGN_INNER_WRITE_BACK_ALLOC;
  } else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH) ||
      (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH)) {
    TCR |= TCR_SH_NON_SHAREABLE | TCR_RGN_OUTER_WRITE_THROUGH | TCR_RGN_INNER_WRITE_THROUGH;
  } else {
    // If we failed to find a mapping that contains the root translation table then it probably means the translation table
    // is not mapped in the given memory map.
    ASSERT (0);
    Status = RETURN_UNSUPPORTED;
    goto FREE_TRANSLATION_TABLE;
  }

  // Set again TCR after getting the Translation Table attributes
  ArmSetTCR (TCR);

  ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |                      // mapped to EFI_MEMORY_UC
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) | // mapped to EFI_MEMORY_WC
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) | // mapped to EFI_MEMORY_WT
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK));       // mapped to EFI_MEMORY_WB

  ArmDisableAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return RETURN_SUCCESS;

FREE_TRANSLATION_TABLE:
  FreePages (TranslationTable, TranslationTablePageCount);
  return Status;
}
+\r
/**
  Library constructor.

  Cleans the assembly helper ArmReplaceLiveTranslationEntry () to the Point
  of Coherency so it remains fetchable when invoked with the MMU off.

  @retval RETURN_SUCCESS  Always.
**/
RETURN_STATUS
EFIAPI
ArmMmuBaseLibConstructor (
  VOID
  )
{
  // Byte size of the helper routine, exported by its assembly implementation
  extern UINT32 ArmReplaceLiveTranslationEntrySize;

  //
  // The ArmReplaceLiveTranslationEntry () helper function may be invoked
  // with the MMU off so we have to ensure that it gets cleaned to the PoC
  //
  WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry,
    ArmReplaceLiveTranslationEntrySize);

  return RETURN_SUCCESS;
}
--- /dev/null
+/** @file\r
+* File managing the MMU for ARMv7 architecture\r
+*\r
+* Copyright (c) 2011-2016, ARM Limited. All rights reserved.\r
+*\r
+* This program and the accompanying materials\r
+* are licensed and made available under the terms and conditions of the BSD License\r
+* which accompanies this distribution. The full text of the license may be found at\r
+* http://opensource.org/licenses/bsd-license.php\r
+*\r
+* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+*\r
+**/\r
+\r
+#include <Uefi.h>\r
+#include <Chipset/ArmV7.h>\r
+#include <Library/BaseMemoryLib.h>\r
+#include <Library/MemoryAllocationLib.h>\r
+#include <Library/ArmLib.h>\r
+#include <Library/BaseLib.h>\r
+#include <Library/DebugLib.h>\r
+#include <Library/PcdLib.h>\r
+\r
+#define ID_MMFR0_SHARELVL_SHIFT 12\r
+#define ID_MMFR0_SHARELVL_MASK 0xf\r
+#define ID_MMFR0_SHARELVL_ONE 0\r
+#define ID_MMFR0_SHARELVL_TWO 1\r
+\r
+#define ID_MMFR0_INNERSHR_SHIFT 28\r
+#define ID_MMFR0_INNERSHR_MASK 0xf\r
+#define ID_MMFR0_OUTERSHR_SHIFT 8\r
+#define ID_MMFR0_OUTERSHR_MASK 0xf\r
+\r
+#define ID_MMFR0_SHR_IMP_UNCACHED 0\r
+#define ID_MMFR0_SHR_IMP_HW_COHERENT 1\r
+#define ID_MMFR0_SHR_IGNORED 0xf\r
+\r
+UINTN\r
+EFIAPI\r
+ArmReadIdMmfr0 (\r
+ VOID\r
+ );\r
+\r
+BOOLEAN\r
+EFIAPI\r
+ArmHasMpExtensions (\r
+ VOID\r
+ );\r
+\r
+UINT32\r
+ConvertSectionAttributesToPageAttributes (\r
+ IN UINT32 SectionAttributes,\r
+ IN BOOLEAN IsLargePage\r
+ )\r
+{\r
+ UINT32 PageAttributes;\r
+\r
+ PageAttributes = 0;\r
+ PageAttributes |= TT_DESCRIPTOR_CONVERT_TO_PAGE_CACHE_POLICY (SectionAttributes, IsLargePage);\r
+ PageAttributes |= TT_DESCRIPTOR_CONVERT_TO_PAGE_AP (SectionAttributes);\r
+ PageAttributes |= TT_DESCRIPTOR_CONVERT_TO_PAGE_XN (SectionAttributes, IsLargePage);\r
+ PageAttributes |= TT_DESCRIPTOR_CONVERT_TO_PAGE_NG (SectionAttributes);\r
+ PageAttributes |= TT_DESCRIPTOR_CONVERT_TO_PAGE_S (SectionAttributes);\r
+\r
+ return PageAttributes;\r
+}\r
+\r
+STATIC\r
+BOOLEAN\r
+PreferNonshareableMemory (\r
+ VOID\r
+ )\r
+{\r
+ UINTN Mmfr;\r
+ UINTN Val;\r
+\r
+ if (FeaturePcdGet (PcdNormalMemoryNonshareableOverride)) {\r
+ return TRUE;\r
+ }\r
+\r
+ //\r
+ // Check whether the innermost level of shareability (the level we will use\r
+ // by default to map normal memory) is implemented with hardware coherency\r
+ // support. Otherwise, revert to mapping as non-shareable.\r
+ //\r
+ Mmfr = ArmReadIdMmfr0 ();\r
+ switch ((Mmfr >> ID_MMFR0_SHARELVL_SHIFT) & ID_MMFR0_SHARELVL_MASK) {\r
+ case ID_MMFR0_SHARELVL_ONE:\r
+ // one level of shareability\r
+ Val = (Mmfr >> ID_MMFR0_OUTERSHR_SHIFT) & ID_MMFR0_OUTERSHR_MASK;\r
+ break;\r
+ case ID_MMFR0_SHARELVL_TWO:\r
+ // two levels of shareability\r
+ Val = (Mmfr >> ID_MMFR0_INNERSHR_SHIFT) & ID_MMFR0_INNERSHR_MASK;\r
+ break;\r
+ default:\r
+ // unexpected value -> shareable is the safe option\r
+ ASSERT (FALSE);\r
+ return FALSE;\r
+ }\r
+ return Val != ID_MMFR0_SHR_IMP_HW_COHERENT;\r
+}\r
+\r
+STATIC\r
+VOID\r
+PopulateLevel2PageTable (\r
+ IN UINT32 *SectionEntry,\r
+ IN UINT32 PhysicalBase,\r
+ IN UINT32 RemainLength,\r
+ IN ARM_MEMORY_REGION_ATTRIBUTES Attributes\r
+ )\r
+{\r
+ UINT32* PageEntry;\r
+ UINT32 Pages;\r
+ UINT32 Index;\r
+ UINT32 PageAttributes;\r
+ UINT32 SectionDescriptor;\r
+ UINT32 TranslationTable;\r
+ UINT32 BaseSectionAddress;\r
+\r
+ switch (Attributes) {\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:\r
+ PageAttributes = TT_DESCRIPTOR_PAGE_WRITE_BACK;\r
+ break;\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:\r
+ PageAttributes = TT_DESCRIPTOR_PAGE_WRITE_THROUGH;\r
+ break;\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:\r
+ PageAttributes = TT_DESCRIPTOR_PAGE_DEVICE;\r
+ break;\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:\r
+ PageAttributes = TT_DESCRIPTOR_PAGE_UNCACHED;\r
+ break;\r
+ default:\r
+ PageAttributes = TT_DESCRIPTOR_PAGE_UNCACHED;\r
+ break;\r
+ }\r
+\r
+ if (PreferNonshareableMemory ()) {\r
+ PageAttributes &= ~TT_DESCRIPTOR_PAGE_S_SHARED;\r
+ }\r
+\r
+ // Check if the Section Entry has already been populated. Otherwise attach a\r
+ // Level 2 Translation Table to it\r
+ if (*SectionEntry != 0) {\r
+ // The entry must be a page table. Otherwise it exists an overlapping in the memory map\r
+ if (TT_DESCRIPTOR_SECTION_TYPE_IS_PAGE_TABLE(*SectionEntry)) {\r
+ TranslationTable = *SectionEntry & TT_DESCRIPTOR_SECTION_PAGETABLE_ADDRESS_MASK;\r
+ } else if ((*SectionEntry & TT_DESCRIPTOR_SECTION_TYPE_MASK) == TT_DESCRIPTOR_SECTION_TYPE_SECTION) {\r
+ // Case where a virtual memory map descriptor overlapped a section entry\r
+\r
+ // Allocate a Level2 Page Table for this Section\r
+ TranslationTable = (UINTN)AllocatePages(EFI_SIZE_TO_PAGES(TRANSLATION_TABLE_PAGE_SIZE + TRANSLATION_TABLE_PAGE_ALIGNMENT));\r
+ TranslationTable = ((UINTN)TranslationTable + TRANSLATION_TABLE_PAGE_ALIGNMENT_MASK) & ~TRANSLATION_TABLE_PAGE_ALIGNMENT_MASK;\r
+\r
+ // Translate the Section Descriptor into Page Descriptor\r
+ SectionDescriptor = TT_DESCRIPTOR_PAGE_TYPE_PAGE | ConvertSectionAttributesToPageAttributes (*SectionEntry, FALSE);\r
+\r
+ BaseSectionAddress = TT_DESCRIPTOR_SECTION_BASE_ADDRESS(*SectionEntry);\r
+\r
+ // Populate the new Level2 Page Table for the section\r
+ PageEntry = (UINT32*)TranslationTable;\r
+ for (Index = 0; Index < TRANSLATION_TABLE_PAGE_COUNT; Index++) {\r
+ PageEntry[Index] = TT_DESCRIPTOR_PAGE_BASE_ADDRESS(BaseSectionAddress + (Index << 12)) | SectionDescriptor;\r
+ }\r
+\r
+ // Overwrite the section entry to point to the new Level2 Translation Table\r
+ *SectionEntry = (TranslationTable & TT_DESCRIPTOR_SECTION_PAGETABLE_ADDRESS_MASK) |\r
+ (IS_ARM_MEMORY_REGION_ATTRIBUTES_SECURE(Attributes) ? (1 << 3) : 0) |\r
+ TT_DESCRIPTOR_SECTION_TYPE_PAGE_TABLE;\r
+ } else {\r
+ // We do not support the other section type (16MB Section)\r
+ ASSERT(0);\r
+ return;\r
+ }\r
+ } else {\r
+ TranslationTable = (UINTN)AllocatePages(EFI_SIZE_TO_PAGES(TRANSLATION_TABLE_PAGE_SIZE + TRANSLATION_TABLE_PAGE_ALIGNMENT));\r
+ TranslationTable = ((UINTN)TranslationTable + TRANSLATION_TABLE_PAGE_ALIGNMENT_MASK) & ~TRANSLATION_TABLE_PAGE_ALIGNMENT_MASK;\r
+\r
+ ZeroMem ((VOID *)TranslationTable, TRANSLATION_TABLE_PAGE_SIZE);\r
+\r
+ *SectionEntry = (TranslationTable & TT_DESCRIPTOR_SECTION_PAGETABLE_ADDRESS_MASK) |\r
+ (IS_ARM_MEMORY_REGION_ATTRIBUTES_SECURE(Attributes) ? (1 << 3) : 0) |\r
+ TT_DESCRIPTOR_SECTION_TYPE_PAGE_TABLE;\r
+ }\r
+\r
+ PageEntry = ((UINT32 *)(TranslationTable) + ((PhysicalBase & TT_DESCRIPTOR_PAGE_INDEX_MASK) >> TT_DESCRIPTOR_PAGE_BASE_SHIFT));\r
+ Pages = RemainLength / TT_DESCRIPTOR_PAGE_SIZE;\r
+\r
+ for (Index = 0; Index < Pages; Index++) {\r
+ *PageEntry++ = TT_DESCRIPTOR_PAGE_BASE_ADDRESS(PhysicalBase) | PageAttributes;\r
+ PhysicalBase += TT_DESCRIPTOR_PAGE_SIZE;\r
+ }\r
+\r
+}\r
+\r
+STATIC\r
+VOID\r
+FillTranslationTable (\r
+ IN UINT32 *TranslationTable,\r
+ IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion\r
+ )\r
+{\r
+ UINT32 *SectionEntry;\r
+ UINT32 Attributes;\r
+ UINT32 PhysicalBase;\r
+ UINT64 RemainLength;\r
+\r
+ ASSERT(MemoryRegion->Length > 0);\r
+\r
+ if (MemoryRegion->PhysicalBase >= SIZE_4GB) {\r
+ return;\r
+ }\r
+\r
+ PhysicalBase = MemoryRegion->PhysicalBase;\r
+ RemainLength = MIN(MemoryRegion->Length, SIZE_4GB - PhysicalBase);\r
+\r
+ switch (MemoryRegion->Attributes) {\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:\r
+ Attributes = TT_DESCRIPTOR_SECTION_WRITE_BACK(0);\r
+ break;\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:\r
+ Attributes = TT_DESCRIPTOR_SECTION_WRITE_THROUGH(0);\r
+ break;\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:\r
+ Attributes = TT_DESCRIPTOR_SECTION_DEVICE(0);\r
+ break;\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:\r
+ Attributes = TT_DESCRIPTOR_SECTION_UNCACHED(0);\r
+ break;\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:\r
+ Attributes = TT_DESCRIPTOR_SECTION_WRITE_BACK(1);\r
+ break;\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:\r
+ Attributes = TT_DESCRIPTOR_SECTION_WRITE_THROUGH(1);\r
+ break;\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:\r
+ Attributes = TT_DESCRIPTOR_SECTION_DEVICE(1);\r
+ break;\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:\r
+ Attributes = TT_DESCRIPTOR_SECTION_UNCACHED(1);\r
+ break;\r
+ default:\r
+ Attributes = TT_DESCRIPTOR_SECTION_UNCACHED(0);\r
+ break;\r
+ }\r
+\r
+ if (PreferNonshareableMemory ()) {\r
+ Attributes &= ~TT_DESCRIPTOR_SECTION_S_SHARED;\r
+ }\r
+\r
+ // Get the first section entry for this mapping\r
+ SectionEntry = TRANSLATION_TABLE_ENTRY_FOR_VIRTUAL_ADDRESS(TranslationTable, MemoryRegion->VirtualBase);\r
+\r
+ while (RemainLength != 0) {\r
+ if (PhysicalBase % TT_DESCRIPTOR_SECTION_SIZE == 0) {\r
+ if (RemainLength >= TT_DESCRIPTOR_SECTION_SIZE) {\r
+ // Case: Physical address aligned on the Section Size (1MB) && the length is greater than the Section Size\r
+ *SectionEntry++ = TT_DESCRIPTOR_SECTION_BASE_ADDRESS(PhysicalBase) | Attributes;\r
+ PhysicalBase += TT_DESCRIPTOR_SECTION_SIZE;\r
+ } else {\r
+ // Case: Physical address aligned on the Section Size (1MB) && the length does not fill a section\r
+ PopulateLevel2PageTable (SectionEntry++, PhysicalBase, RemainLength, MemoryRegion->Attributes);\r
+\r
+ // It must be the last entry\r
+ break;\r
+ }\r
+ } else {\r
+ // Case: Physical address NOT aligned on the Section Size (1MB)\r
+ PopulateLevel2PageTable (SectionEntry++, PhysicalBase, RemainLength, MemoryRegion->Attributes);\r
+ // Aligned the address\r
+ PhysicalBase = (PhysicalBase + TT_DESCRIPTOR_SECTION_SIZE) & ~(TT_DESCRIPTOR_SECTION_SIZE-1);\r
+\r
+ // If it is the last entry\r
+ if (RemainLength < TT_DESCRIPTOR_SECTION_SIZE) {\r
+ break;\r
+ }\r
+ }\r
+ RemainLength -= TT_DESCRIPTOR_SECTION_SIZE;\r
+ }\r
+}\r
+\r
+RETURN_STATUS\r
+EFIAPI\r
+ArmConfigureMmu (\r
+ IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable,\r
+ OUT VOID **TranslationTableBase OPTIONAL,\r
+ OUT UINTN *TranslationTableSize OPTIONAL\r
+ )\r
+{\r
+ VOID* TranslationTable;\r
+ ARM_MEMORY_REGION_ATTRIBUTES TranslationTableAttribute;\r
+ UINT32 TTBRAttributes;\r
+\r
+ // Allocate pages for translation table.\r
+ TranslationTable = AllocatePages (EFI_SIZE_TO_PAGES (TRANSLATION_TABLE_SECTION_SIZE + TRANSLATION_TABLE_SECTION_ALIGNMENT));\r
+ if (TranslationTable == NULL) {\r
+ return RETURN_OUT_OF_RESOURCES;\r
+ }\r
+ TranslationTable = (VOID*)(((UINTN)TranslationTable + TRANSLATION_TABLE_SECTION_ALIGNMENT_MASK) & ~TRANSLATION_TABLE_SECTION_ALIGNMENT_MASK);\r
+\r
+ if (TranslationTableBase != NULL) {\r
+ *TranslationTableBase = TranslationTable;\r
+ }\r
+\r
+ if (TranslationTableSize != NULL) {\r
+ *TranslationTableSize = TRANSLATION_TABLE_SECTION_SIZE;\r
+ }\r
+\r
+ ZeroMem (TranslationTable, TRANSLATION_TABLE_SECTION_SIZE);\r
+\r
+ // By default, mark the translation table as belonging to a uncached region\r
+ TranslationTableAttribute = ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED;\r
+ while (MemoryTable->Length != 0) {\r
+ // Find the memory attribute for the Translation Table\r
+ if (((UINTN)TranslationTable >= MemoryTable->PhysicalBase) && ((UINTN)TranslationTable <= MemoryTable->PhysicalBase - 1 + MemoryTable->Length)) {\r
+ TranslationTableAttribute = MemoryTable->Attributes;\r
+ }\r
+\r
+ FillTranslationTable (TranslationTable, MemoryTable);\r
+ MemoryTable++;\r
+ }\r
+\r
+ // Translate the Memory Attributes into Translation Table Register Attributes\r
+ if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED) ||\r
+ (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED)) {\r
+ TTBRAttributes = ArmHasMpExtensions () ? TTBR_MP_NON_CACHEABLE : TTBR_NON_CACHEABLE;\r
+ } else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK) ||\r
+ (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK)) {\r
+ TTBRAttributes = ArmHasMpExtensions () ? TTBR_MP_WRITE_BACK_ALLOC : TTBR_WRITE_BACK_ALLOC;\r
+ } else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH) ||\r
+ (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH)) {\r
+ TTBRAttributes = ArmHasMpExtensions () ? TTBR_MP_WRITE_THROUGH : TTBR_WRITE_THROUGH;\r
+ } else {\r
+ ASSERT (0); // No support has been found for the attributes of the memory region that the translation table belongs to.\r
+ return RETURN_UNSUPPORTED;\r
+ }\r
+\r
+ if (TTBRAttributes & TTBR_SHAREABLE) {\r
+ if (PreferNonshareableMemory ()) {\r
+ TTBRAttributes ^= TTBR_SHAREABLE;\r
+ } else {\r
+ //\r
+ // Unlike the S bit in the short descriptors, which implies inner shareable\r
+ // on an implementation that supports two levels, the meaning of the S bit\r
+ // in the TTBR depends on the NOS bit, which defaults to Outer Shareable.\r
+ // However, we should only set this bit after we have confirmed that the\r
+ // implementation supports multiple levels, or else the NOS bit is UNK/SBZP\r
+ //\r
+ if (((ArmReadIdMmfr0 () >> 12) & 0xf) != 0) {\r
+ TTBRAttributes |= TTBR_NOT_OUTER_SHAREABLE;\r
+ }\r
+ }\r
+ }\r
+\r
+ ArmCleanInvalidateDataCache ();\r
+ ArmInvalidateInstructionCache ();\r
+\r
+ ArmDisableDataCache ();\r
+ ArmDisableInstructionCache();\r
+ // TLBs are also invalidated when calling ArmDisableMmu()\r
+ ArmDisableMmu ();\r
+\r
+ // Make sure nothing sneaked into the cache\r
+ ArmCleanInvalidateDataCache ();\r
+ ArmInvalidateInstructionCache ();\r
+\r
+ ArmSetTTBR0 ((VOID *)(UINTN)(((UINTN)TranslationTable & ~TRANSLATION_TABLE_SECTION_ALIGNMENT_MASK) | (TTBRAttributes & 0x7F)));\r
+\r
+ //\r
+ // The TTBCR register value is undefined at reset in the Non-Secure world.\r
+ // Writing 0 has the effect of:\r
+ // Clearing EAE: Use short descriptors, as mandated by specification.\r
+ // Clearing PD0 and PD1: Translation Table Walk Disable is off.\r
+ // Clearing N: Perform all translation table walks through TTBR0.\r
+ // (0 is the default reset value in systems not implementing\r
+ // the Security Extensions.)\r
+ //\r
+ ArmSetTTBCR (0);\r
+\r
+ ArmSetDomainAccessControl (DOMAIN_ACCESS_CONTROL_NONE(15) |\r
+ DOMAIN_ACCESS_CONTROL_NONE(14) |\r
+ DOMAIN_ACCESS_CONTROL_NONE(13) |\r
+ DOMAIN_ACCESS_CONTROL_NONE(12) |\r
+ DOMAIN_ACCESS_CONTROL_NONE(11) |\r
+ DOMAIN_ACCESS_CONTROL_NONE(10) |\r
+ DOMAIN_ACCESS_CONTROL_NONE( 9) |\r
+ DOMAIN_ACCESS_CONTROL_NONE( 8) |\r
+ DOMAIN_ACCESS_CONTROL_NONE( 7) |\r
+ DOMAIN_ACCESS_CONTROL_NONE( 6) |\r
+ DOMAIN_ACCESS_CONTROL_NONE( 5) |\r
+ DOMAIN_ACCESS_CONTROL_NONE( 4) |\r
+ DOMAIN_ACCESS_CONTROL_NONE( 3) |\r
+ DOMAIN_ACCESS_CONTROL_NONE( 2) |\r
+ DOMAIN_ACCESS_CONTROL_NONE( 1) |\r
+ DOMAIN_ACCESS_CONTROL_CLIENT(0));\r
+\r
+ ArmEnableInstructionCache();\r
+ ArmEnableDataCache();\r
+ ArmEnableMmu();\r
+ return RETURN_SUCCESS;\r
+}\r
+\r
/**
  Mark a memory region as non-executable.

  Not implemented by this library instance.

  @param[in]  BaseAddress  Base address of the region.
  @param[in]  Length       Length of the region in bytes.

  @retval RETURN_UNSUPPORTED  Always; the operation is not supported.
**/
RETURN_STATUS
ArmSetMemoryRegionNoExec (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress,
  IN  UINT64                    Length
  )
{
  return RETURN_UNSUPPORTED;
}
+\r
/**
  Clear the non-executable attribute from a memory region.

  Not implemented by this library instance.

  @param[in]  BaseAddress  Base address of the region.
  @param[in]  Length       Length of the region in bytes.

  @retval RETURN_UNSUPPORTED  Always; the operation is not supported.
**/
RETURN_STATUS
ArmClearMemoryRegionNoExec (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress,
  IN  UINT64                    Length
  )
{
  return RETURN_UNSUPPORTED;
}
+\r
/**
  Mark a memory region as read-only.

  Not implemented by this library instance.

  @param[in]  BaseAddress  Base address of the region.
  @param[in]  Length       Length of the region in bytes.

  @retval RETURN_UNSUPPORTED  Always; the operation is not supported.
**/
RETURN_STATUS
ArmSetMemoryRegionReadOnly (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress,
  IN  UINT64                    Length
  )
{
  return RETURN_UNSUPPORTED;
}
+\r
/**
  Clear the read-only attribute from a memory region.

  Not implemented by this library instance.

  @param[in]  BaseAddress  Base address of the region.
  @param[in]  Length       Length of the region in bytes.

  @retval RETURN_UNSUPPORTED  Always; the operation is not supported.
**/
RETURN_STATUS
ArmClearMemoryRegionReadOnly (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress,
  IN  UINT64                    Length
  )
{
  return RETURN_UNSUPPORTED;
}
+\r
/**
  Library constructor for ArmMmuBaseLib.

  No initialization is needed by this instance; the constructor exists only
  to satisfy the library class contract.

  @retval RETURN_SUCCESS  Always.
**/
RETURN_STATUS
EFIAPI
ArmMmuBaseLibConstructor (
  VOID
  )
{
  return RETURN_SUCCESS;
}