#include <Library/BaseLib.h>\r
#include <Library/DebugLib.h>\r
\r
-// We use this index definition to define an invalid block entry\r
-#define TT_ATTR_INDX_INVALID ((UINT32)~0)\r
-\r
STATIC\r
UINT64\r
ArmMemoryAttributeToPageAttribute (\r
)\r
{\r
switch (Attributes) {\r
- case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:\r
- case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:\r
- return TT_ATTR_INDX_MEMORY_WRITE_BACK;\r
-\r
- case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:\r
- case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:\r
- return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;\r
-\r
- case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:\r
- case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:\r
- return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;\r
-\r
- // Uncached and device mappings are treated as outer shareable by default,\r
- case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:\r
- case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:\r
- return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;\r
-\r
- default:\r
- ASSERT(0);\r
- case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:\r
- case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:\r
- if (ArmReadCurrentEL () == AARCH64_EL2)\r
- return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;\r
- else\r
- return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;\r
- }\r
-}\r
-\r
-UINT64\r
-PageAttributeToGcdAttribute (\r
- IN UINT64 PageAttributes\r
- )\r
-{\r
- UINT64 GcdAttributes;\r
-\r
- switch (PageAttributes & TT_ATTR_INDX_MASK) {\r
- case TT_ATTR_INDX_DEVICE_MEMORY:\r
- GcdAttributes = EFI_MEMORY_UC;\r
- break;\r
- case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:\r
- GcdAttributes = EFI_MEMORY_WC;\r
- break;\r
- case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:\r
- GcdAttributes = EFI_MEMORY_WT;\r
- break;\r
- case TT_ATTR_INDX_MEMORY_WRITE_BACK:\r
- GcdAttributes = EFI_MEMORY_WB;\r
- break;\r
- default:\r
- DEBUG ((EFI_D_ERROR, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes));\r
- ASSERT (0);\r
- // The Global Coherency Domain (GCD) value is defined as a bit set.\r
- // Returning 0 means no attribute has been set.\r
- GcdAttributes = 0;\r
- }\r
-\r
- // Determine protection attributes\r
- if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) || ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {\r
- // Read only cases map to write-protect\r
- GcdAttributes |= EFI_MEMORY_RO;\r
- }\r
-\r
- // Process eXecute Never attribute\r
- if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0 ) {\r
- GcdAttributes |= EFI_MEMORY_XP;\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:\r
+ return TT_ATTR_INDX_MEMORY_WRITE_BACK;\r
+\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:\r
+ return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;\r
+\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:\r
+ return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;\r
+\r
+    // Uncached and device mappings are treated as outer shareable by default.
+ case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:\r
+ return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;\r
+\r
+ default:\r
+ ASSERT (0);\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:\r
+ if (ArmReadCurrentEL () == AARCH64_EL2) {\r
+ return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;\r
+ } else {\r
+ return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;\r
+ }\r
}\r
-\r
- return GcdAttributes;\r
}\r
\r
#define MIN_T0SZ 16\r
#define BITS_PER_LEVEL 9\r
+#define MAX_VA_BITS 48\r
\r
-VOID\r
-GetRootTranslationTableInfo (\r
- IN UINTN T0SZ,\r
- OUT UINTN *TableLevel,\r
- OUT UINTN *TableEntryCount\r
+STATIC\r
+UINTN\r
+GetRootTableEntryCount (\r
+ IN UINTN T0SZ\r
)\r
{\r
- // Get the level of the root table\r
- if (TableLevel) {\r
- *TableLevel = (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;\r
- }\r
+ return TT_ENTRY_COUNT >> (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL;\r
+}\r
\r
- if (TableEntryCount) {\r
- *TableEntryCount = 1UL << (BITS_PER_LEVEL - (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL);\r
- }\r
+STATIC\r
+UINTN\r
+GetRootTableLevel (\r
+ IN UINTN T0SZ\r
+ )\r
+{\r
+ return (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;\r
}\r
\r
STATIC\r
VOID\r
ReplaceTableEntry (\r
- IN UINT64 *Entry,\r
- IN UINT64 Value,\r
- IN UINT64 RegionStart,\r
- IN BOOLEAN IsLiveBlockMapping\r
+ IN UINT64 *Entry,\r
+ IN UINT64 Value,\r
+ IN UINT64 RegionStart,\r
+ IN BOOLEAN IsLiveBlockMapping\r
)\r
{\r
if (!ArmMmuEnabled () || !IsLiveBlockMapping) {\r
STATIC\r
VOID\r
FreePageTablesRecursive (\r
- IN UINT64 *TranslationTable\r
+ IN UINT64 *TranslationTable,\r
+ IN UINTN Level\r
)\r
{\r
- UINTN Index;\r
-\r
- for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {\r
- if ((TranslationTable[Index] & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {\r
- FreePageTablesRecursive ((VOID *)(UINTN)(TranslationTable[Index] &\r
- TT_ADDRESS_MASK_BLOCK_ENTRY));\r
+ UINTN Index;\r
+\r
+ ASSERT (Level <= 3);\r
+\r
+ if (Level < 3) {\r
+ for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {\r
+ if ((TranslationTable[Index] & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {\r
+ FreePageTablesRecursive (\r
+ (VOID *)(UINTN)(TranslationTable[Index] &\r
+ TT_ADDRESS_MASK_BLOCK_ENTRY),\r
+ Level + 1\r
+ );\r
+ }\r
}\r
}\r
+\r
FreePages (TranslationTable, 1);\r
}\r
\r
+STATIC\r
+BOOLEAN\r
+IsBlockEntry (\r
+ IN UINT64 Entry,\r
+ IN UINTN Level\r
+ )\r
+{\r
+ if (Level == 3) {\r
+ return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY_LEVEL3;\r
+ }\r
+\r
+ return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY;\r
+}\r
+\r
+STATIC\r
+BOOLEAN\r
+IsTableEntry (\r
+ IN UINT64 Entry,\r
+ IN UINTN Level\r
+ )\r
+{\r
+ if (Level == 3) {\r
+ //\r
+ // TT_TYPE_TABLE_ENTRY aliases TT_TYPE_BLOCK_ENTRY_LEVEL3\r
+ // so we need to take the level into account as well.\r
+ //\r
+ return FALSE;\r
+ }\r
+\r
+ return (Entry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY;\r
+}\r
+\r
STATIC\r
EFI_STATUS\r
UpdateRegionMappingRecursive (\r
- IN UINT64 RegionStart,\r
- IN UINT64 RegionEnd,\r
- IN UINT64 AttributeSetMask,\r
- IN UINT64 AttributeClearMask,\r
- IN UINT64 *PageTable,\r
- IN UINTN Level\r
+ IN UINT64 RegionStart,\r
+ IN UINT64 RegionEnd,\r
+ IN UINT64 AttributeSetMask,\r
+ IN UINT64 AttributeClearMask,\r
+ IN UINT64 *PageTable,\r
+ IN UINTN Level\r
)\r
{\r
- UINTN BlockShift;\r
- UINT64 BlockMask;\r
- UINT64 BlockEnd;\r
- UINT64 *Entry;\r
- UINT64 EntryValue;\r
- VOID *TranslationTable;\r
- EFI_STATUS Status;\r
+ UINTN BlockShift;\r
+ UINT64 BlockMask;\r
+ UINT64 BlockEnd;\r
+ UINT64 *Entry;\r
+ UINT64 EntryValue;\r
+ VOID *TranslationTable;\r
+ EFI_STATUS Status;\r
\r
ASSERT (((RegionStart | RegionEnd) & EFI_PAGE_MASK) == 0);\r
\r
BlockShift = (Level + 1) * BITS_PER_LEVEL + MIN_T0SZ;\r
- BlockMask = MAX_UINT64 >> BlockShift;\r
-\r
- DEBUG ((DEBUG_VERBOSE, "%a(%d): %llx - %llx set %lx clr %lx\n", __FUNCTION__,\r
- Level, RegionStart, RegionEnd, AttributeSetMask, AttributeClearMask));\r
-\r
- for (; RegionStart < RegionEnd; RegionStart = BlockEnd) {\r
+ BlockMask = MAX_UINT64 >> BlockShift;\r
+\r
+ DEBUG ((\r
+ DEBUG_VERBOSE,\r
+ "%a(%d): %llx - %llx set %lx clr %lx\n",\r
+ __FUNCTION__,\r
+ Level,\r
+ RegionStart,\r
+ RegionEnd,\r
+ AttributeSetMask,\r
+ AttributeClearMask\r
+ ));\r
+\r
+ for ( ; RegionStart < RegionEnd; RegionStart = BlockEnd) {\r
BlockEnd = MIN (RegionEnd, (RegionStart | BlockMask) + 1);\r
- Entry = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)];\r
+ Entry = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)];\r
\r
//\r
// If RegionStart or BlockEnd is not aligned to the block size at this\r
// than a block, and recurse to create the block or page entries at\r
// the next level. No block mappings are allowed at all at level 0,\r
// so in that case, we have to recurse unconditionally.\r
+ // If we are changing a table entry and the AttributeClearMask is non-zero,\r
+ // we cannot replace it with a block entry without potentially losing\r
+ // attribute information, so keep the table entry in that case.\r
//\r
- if (Level == 0 || ((RegionStart | BlockEnd) & BlockMask) != 0) {\r
+ if ((Level == 0) || (((RegionStart | BlockEnd) & BlockMask) != 0) ||\r
+ (IsTableEntry (*Entry, Level) && (AttributeClearMask != 0)))\r
+ {\r
ASSERT (Level < 3);\r
\r
- if ((*Entry & TT_TYPE_MASK) != TT_TYPE_TABLE_ENTRY) {\r
+ if (!IsTableEntry (*Entry, Level)) {\r
//\r
// No table entry exists yet, so we need to allocate a page table\r
// for the next level.\r
InvalidateDataCacheRange (TranslationTable, EFI_PAGE_SIZE);\r
}\r
\r
- if ((*Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {\r
+ ZeroMem (TranslationTable, EFI_PAGE_SIZE);\r
+\r
+ if (IsBlockEntry (*Entry, Level)) {\r
//\r
// We are splitting an existing block entry, so we have to populate\r
// the new table with the attributes of the block entry it replaces.\r
//\r
- Status = UpdateRegionMappingRecursive (RegionStart & ~BlockMask,\r
- (RegionStart | BlockMask) + 1, *Entry & TT_ATTRIBUTES_MASK,\r
- 0, TranslationTable, Level + 1);\r
+ Status = UpdateRegionMappingRecursive (\r
+ RegionStart & ~BlockMask,\r
+ (RegionStart | BlockMask) + 1,\r
+ *Entry & TT_ATTRIBUTES_MASK,\r
+ 0,\r
+ TranslationTable,\r
+ Level + 1\r
+ );\r
if (EFI_ERROR (Status)) {\r
//\r
// The range we passed to UpdateRegionMappingRecursive () is block\r
FreePages (TranslationTable, 1);\r
return Status;\r
}\r
- } else {\r
- ZeroMem (TranslationTable, EFI_PAGE_SIZE);\r
}\r
} else {\r
TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);\r
//\r
// Recurse to the next level\r
//\r
- Status = UpdateRegionMappingRecursive (RegionStart, BlockEnd,\r
- AttributeSetMask, AttributeClearMask, TranslationTable,\r
- Level + 1);\r
+ Status = UpdateRegionMappingRecursive (\r
+ RegionStart,\r
+ BlockEnd,\r
+ AttributeSetMask,\r
+ AttributeClearMask,\r
+ TranslationTable,\r
+ Level + 1\r
+ );\r
if (EFI_ERROR (Status)) {\r
- if ((*Entry & TT_TYPE_MASK) != TT_TYPE_TABLE_ENTRY) {\r
+ if (!IsTableEntry (*Entry, Level)) {\r
//\r
// We are creating a new table entry, so on failure, we can free all\r
// allocations we made recursively, given that the whole subhierarchy\r
// possible for existing table entries, since we cannot revert the\r
// modifications we made to the subhierarchy it represents.)\r
//\r
- FreePageTablesRecursive (TranslationTable);\r
+ FreePageTablesRecursive (TranslationTable, Level + 1);\r
}\r
+\r
return Status;\r
}\r
\r
- if ((*Entry & TT_TYPE_MASK) != TT_TYPE_TABLE_ENTRY) {\r
+ if (!IsTableEntry (*Entry, Level)) {\r
EntryValue = (UINTN)TranslationTable | TT_TYPE_TABLE_ENTRY;\r
- ReplaceTableEntry (Entry, EntryValue, RegionStart,\r
- (*Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY);\r
+ ReplaceTableEntry (\r
+ Entry,\r
+ EntryValue,\r
+ RegionStart,\r
+ IsBlockEntry (*Entry, Level)\r
+ );\r
}\r
} else {\r
- EntryValue = (*Entry & AttributeClearMask) | AttributeSetMask;\r
+ EntryValue = (*Entry & AttributeClearMask) | AttributeSetMask;\r
EntryValue |= RegionStart;\r
EntryValue |= (Level == 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3\r
: TT_TYPE_BLOCK_ENTRY;\r
\r
- ReplaceTableEntry (Entry, EntryValue, RegionStart, FALSE);\r
- }\r
- }\r
- return EFI_SUCCESS;\r
-}\r
-\r
-STATIC\r
-VOID\r
-LookupAddresstoRootTable (\r
- IN UINT64 MaxAddress,\r
- OUT UINTN *T0SZ,\r
- OUT UINTN *TableEntryCount\r
- )\r
-{\r
- UINTN TopBit;\r
-\r
- // Check the parameters are not NULL\r
- ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));\r
-\r
- // Look for the highest bit set in MaxAddress\r
- for (TopBit = 63; TopBit != 0; TopBit--) {\r
- if ((1ULL << TopBit) & MaxAddress) {\r
- // MaxAddress top bit is found\r
- TopBit = TopBit + 1;\r
- break;\r
+ if (IsTableEntry (*Entry, Level)) {\r
+ //\r
+ // We are replacing a table entry with a block entry. This is only\r
+ // possible if we are keeping none of the original attributes.\r
+ // We can free the table entry's page table, and all the ones below\r
+ // it, since we are dropping the only possible reference to it.\r
+ //\r
+ ASSERT (AttributeClearMask == 0);\r
+ TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);\r
+ ReplaceTableEntry (Entry, EntryValue, RegionStart, TRUE);\r
+ FreePageTablesRecursive (TranslationTable, Level + 1);\r
+ } else {\r
+ ReplaceTableEntry (Entry, EntryValue, RegionStart, FALSE);\r
+ }\r
}\r
}\r
- ASSERT (TopBit != 0);\r
-\r
- // Calculate T0SZ from the top bit of the MaxAddress\r
- *T0SZ = 64 - TopBit;\r
\r
- // Get the Table info from T0SZ\r
- GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);\r
+ return EFI_SUCCESS;\r
}\r
\r
STATIC\r
IN UINT64 AttributeClearMask\r
)\r
{\r
- UINTN RootTableLevel;\r
- UINTN T0SZ;\r
+ UINTN T0SZ;\r
\r
- if (((RegionStart | RegionLength) & EFI_PAGE_MASK)) {\r
+ if (((RegionStart | RegionLength) & EFI_PAGE_MASK) != 0) {\r
return EFI_INVALID_PARAMETER;\r
}\r
\r
T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;\r
- GetRootTranslationTableInfo (T0SZ, &RootTableLevel, NULL);\r
\r
- return UpdateRegionMappingRecursive (RegionStart, RegionStart + RegionLength,\r
- AttributeSetMask, AttributeClearMask, ArmGetTTBR0BaseAddress (),\r
- RootTableLevel);\r
+ return UpdateRegionMappingRecursive (\r
+ RegionStart,\r
+ RegionStart + RegionLength,\r
+ AttributeSetMask,\r
+ AttributeClearMask,\r
+ ArmGetTTBR0BaseAddress (),\r
+ GetRootTableLevel (T0SZ)\r
+ );\r
}\r
\r
STATIC\r
STATIC\r
UINT64\r
GcdAttributeToPageAttribute (\r
- IN UINT64 GcdAttributes\r
+ IN UINT64 GcdAttributes\r
)\r
{\r
- UINT64 PageAttributes;\r
+ UINT64 PageAttributes;\r
\r
switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) {\r
- case EFI_MEMORY_UC:\r
- PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;\r
- break;\r
- case EFI_MEMORY_WC:\r
- PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;\r
- break;\r
- case EFI_MEMORY_WT:\r
- PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;\r
- break;\r
- case EFI_MEMORY_WB:\r
- PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;\r
- break;\r
- default:\r
- PageAttributes = TT_ATTR_INDX_MASK;\r
- break;\r
+ case EFI_MEMORY_UC:\r
+ PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;\r
+ break;\r
+ case EFI_MEMORY_WC:\r
+ PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;\r
+ break;\r
+ case EFI_MEMORY_WT:\r
+ PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;\r
+ break;\r
+ case EFI_MEMORY_WB:\r
+ PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;\r
+ break;\r
+ default:\r
+ PageAttributes = TT_ATTR_INDX_MASK;\r
+ break;\r
}\r
\r
- if ((GcdAttributes & EFI_MEMORY_XP) != 0 ||\r
- (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC) {\r
+ if (((GcdAttributes & EFI_MEMORY_XP) != 0) ||\r
+ ((GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC))\r
+ {\r
if (ArmReadCurrentEL () == AARCH64_EL2) {\r
PageAttributes |= TT_XN_MASK;\r
} else {\r
\r
EFI_STATUS\r
ArmSetMemoryAttributes (\r
- IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
- IN UINT64 Length,\r
- IN UINT64 Attributes\r
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
+ IN UINT64 Length,\r
+ IN UINT64 Attributes\r
)\r
{\r
- UINT64 PageAttributes;\r
- UINT64 PageAttributeMask;\r
+ UINT64 PageAttributes;\r
+ UINT64 PageAttributeMask;\r
\r
- PageAttributes = GcdAttributeToPageAttribute (Attributes);\r
+ PageAttributes = GcdAttributeToPageAttribute (Attributes);\r
PageAttributeMask = 0;\r
\r
if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {\r
// No memory type was set in Attributes, so we are going to update the\r
// permissions only.\r
//\r
- PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;\r
+ PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;\r
PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |\r
TT_PXN_MASK | TT_XN_MASK);\r
}\r
\r
- return UpdateRegionMapping (BaseAddress, Length, PageAttributes,\r
- PageAttributeMask);\r
+ return UpdateRegionMapping (\r
+ BaseAddress,\r
+ Length,\r
+ PageAttributes,\r
+ PageAttributeMask\r
+ );\r
}\r
\r
STATIC\r
EFI_STATUS\r
SetMemoryRegionAttribute (\r
- IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
- IN UINT64 Length,\r
- IN UINT64 Attributes,\r
- IN UINT64 BlockEntryMask\r
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
+ IN UINT64 Length,\r
+ IN UINT64 Attributes,\r
+ IN UINT64 BlockEntryMask\r
)\r
{\r
return UpdateRegionMapping (BaseAddress, Length, Attributes, BlockEntryMask);\r
\r
EFI_STATUS\r
ArmSetMemoryRegionNoExec (\r
- IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
- IN UINT64 Length\r
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
+ IN UINT64 Length\r
)\r
{\r
- UINT64 Val;\r
+ UINT64 Val;\r
\r
if (ArmReadCurrentEL () == AARCH64_EL1) {\r
Val = TT_PXN_MASK | TT_UXN_MASK;\r
BaseAddress,\r
Length,\r
Val,\r
- ~TT_ADDRESS_MASK_BLOCK_ENTRY);\r
+ ~TT_ADDRESS_MASK_BLOCK_ENTRY\r
+ );\r
}\r
\r
EFI_STATUS\r
ArmClearMemoryRegionNoExec (\r
- IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
- IN UINT64 Length\r
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
+ IN UINT64 Length\r
)\r
{\r
- UINT64 Mask;\r
+ UINT64 Mask;\r
\r
// XN maps to UXN in the EL1&0 translation regime\r
Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);\r
BaseAddress,\r
Length,\r
0,\r
- Mask);\r
+ Mask\r
+ );\r
}\r
\r
EFI_STATUS\r
ArmSetMemoryRegionReadOnly (\r
- IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
- IN UINT64 Length\r
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
+ IN UINT64 Length\r
)\r
{\r
return SetMemoryRegionAttribute (\r
BaseAddress,\r
Length,\r
TT_AP_RO_RO,\r
- ~TT_ADDRESS_MASK_BLOCK_ENTRY);\r
+ ~TT_ADDRESS_MASK_BLOCK_ENTRY\r
+ );\r
}\r
\r
EFI_STATUS\r
ArmClearMemoryRegionReadOnly (\r
- IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
- IN UINT64 Length\r
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
+ IN UINT64 Length\r
)\r
{\r
return SetMemoryRegionAttribute (\r
BaseAddress,\r
Length,\r
TT_AP_RW_RW,\r
- ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));\r
+ ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK)\r
+ );\r
}\r
\r
EFI_STATUS\r
EFIAPI\r
ArmConfigureMmu (\r
IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable,\r
- OUT VOID **TranslationTableBase OPTIONAL,\r
+ OUT VOID **TranslationTableBase OPTIONAL,\r
OUT UINTN *TranslationTableSize OPTIONAL\r
)\r
{\r
- VOID* TranslationTable;\r
- UINT64 MaxAddress;\r
- UINTN T0SZ;\r
- UINTN RootTableEntryCount;\r
- UINT64 TCR;\r
- EFI_STATUS Status;\r
-\r
- if(MemoryTable == NULL) {\r
+ VOID *TranslationTable;\r
+ UINTN MaxAddressBits;\r
+ UINT64 MaxAddress;\r
+ UINTN T0SZ;\r
+ UINTN RootTableEntryCount;\r
+ UINT64 TCR;\r
+ EFI_STATUS Status;\r
+\r
+ if (MemoryTable == NULL) {\r
ASSERT (MemoryTable != NULL);\r
return EFI_INVALID_PARAMETER;\r
}\r
// into account the architectural limitations that result from UEFI's\r
// use of 4 KB pages.\r
//\r
- MaxAddress = MIN (LShiftU64 (1ULL, ArmGetPhysicalAddressBits ()) - 1,\r
- MAX_ALLOC_ADDRESS);\r
+ MaxAddressBits = MIN (ArmGetPhysicalAddressBits (), MAX_VA_BITS);\r
+ MaxAddress = LShiftU64 (1ULL, MaxAddressBits) - 1;\r
\r
- // Lookup the Table Level to get the information\r
- LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);\r
+ T0SZ = 64 - MaxAddressBits;\r
+ RootTableEntryCount = GetRootTableEntryCount (T0SZ);\r
\r
//\r
// Set TCR that allows us to retrieve T0SZ in the subsequent functions\r
// Ideally we will be running at EL2, but should support EL1 as well.\r
// UEFI should not run at EL3.\r
if (ArmReadCurrentEL () == AARCH64_EL2) {\r
- //Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2\r
+    // Note: Bits 23 and 31 are reserved (RES1) bits in TCR_EL2.
TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;\r
\r
// Set the Physical Address Size using MaxAddress\r
} else if (MaxAddress < SIZE_256TB) {\r
TCR |= TCR_PS_256TB;\r
} else {\r
- DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));\r
+ DEBUG ((\r
+ DEBUG_ERROR,\r
+ "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",\r
+ MaxAddress\r
+ ));\r
ASSERT (0); // Bigger than 48-bit memory space are not supported\r
return EFI_UNSUPPORTED;\r
}\r
} else if (MaxAddress < SIZE_256TB) {\r
TCR |= TCR_IPS_256TB;\r
} else {\r
- DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));\r
+ DEBUG ((\r
+ DEBUG_ERROR,\r
+ "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",\r
+ MaxAddress\r
+ ));\r
ASSERT (0); // Bigger than 48-bit memory space are not supported\r
return EFI_UNSUPPORTED;\r
}\r
if (TranslationTable == NULL) {\r
return EFI_OUT_OF_RESOURCES;\r
}\r
- // We set TTBR0 just after allocating the table to retrieve its location from the subsequent\r
- // functions without needing to pass this value across the functions. The MMU is only enabled\r
- // after the translation tables are populated.\r
+\r
+ //\r
+ // We set TTBR0 just after allocating the table to retrieve its location from\r
+ // the subsequent functions without needing to pass this value across the\r
+ // functions. The MMU is only enabled after the translation tables are\r
+ // populated.\r
+ //\r
ArmSetTTBR0 (TranslationTable);\r
\r
if (TranslationTableBase != NULL) {\r
}\r
\r
if (TranslationTableSize != NULL) {\r
- *TranslationTableSize = RootTableEntryCount * sizeof(UINT64);\r
+ *TranslationTableSize = RootTableEntryCount * sizeof (UINT64);\r
}\r
\r
//\r
// Make sure we are not inadvertently hitting in the caches\r
// when populating the page tables.\r
//\r
- InvalidateDataCacheRange (TranslationTable,\r
- RootTableEntryCount * sizeof(UINT64));\r
- ZeroMem (TranslationTable, RootTableEntryCount * sizeof(UINT64));\r
+ InvalidateDataCacheRange (\r
+ TranslationTable,\r
+ RootTableEntryCount * sizeof (UINT64)\r
+ );\r
+ ZeroMem (TranslationTable, RootTableEntryCount * sizeof (UINT64));\r
\r
while (MemoryTable->Length != 0) {\r
Status = FillTranslationTable (TranslationTable, MemoryTable);\r
if (EFI_ERROR (Status)) {\r
- goto FREE_TRANSLATION_TABLE;\r
+ goto FreeTranslationTable;\r
}\r
+\r
MemoryTable++;\r
}\r
\r
- ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) | // mapped to EFI_MEMORY_UC\r
- MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) | // mapped to EFI_MEMORY_WC\r
- MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) | // mapped to EFI_MEMORY_WT\r
- MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)); // mapped to EFI_MEMORY_WB\r
+ //\r
+ // EFI_MEMORY_UC ==> MAIR_ATTR_DEVICE_MEMORY\r
+ // EFI_MEMORY_WC ==> MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE\r
+ // EFI_MEMORY_WT ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH\r
+ // EFI_MEMORY_WB ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK\r
+ //\r
+ ArmSetMAIR (\r
+ MAIR_ATTR (TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |\r
+ MAIR_ATTR (TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) |\r
+ MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) |\r
+ MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)\r
+ );\r
\r
ArmDisableAlignmentCheck ();\r
ArmEnableStackAlignmentCheck ();\r
ArmEnableMmu ();\r
return EFI_SUCCESS;\r
\r
-FREE_TRANSLATION_TABLE:\r
+FreeTranslationTable:\r
FreePages (TranslationTable, 1);\r
return Status;\r
}\r
VOID\r
)\r
{\r
- extern UINT32 ArmReplaceLiveTranslationEntrySize;\r
+ extern UINT32 ArmReplaceLiveTranslationEntrySize;\r
\r
//\r
// The ArmReplaceLiveTranslationEntry () helper function may be invoked\r
// with the MMU off so we have to ensure that it gets cleaned to the PoC\r
//\r
- WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry,\r
- ArmReplaceLiveTranslationEntrySize);\r
+ WriteBackDataCacheRange (\r
+ (VOID *)(UINTN)ArmReplaceLiveTranslationEntry,\r
+ ArmReplaceLiveTranslationEntrySize\r
+ );\r
\r
return RETURN_SUCCESS;\r
}\r