X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=ArmPkg%2FLibrary%2FArmMmuLib%2FAArch64%2FArmMmuLibCore.c;h=89da40fd8eec630467ecc63b3f970a51e03832d5;hb=429309e0c6b74792d679681a8edd0d5ae0ff850c;hp=f2eec7191328cbbaa4d83476e5935db8889b4fe4;hpb=d93fe5b579091a6d505419cbfe58aa9d62c3b292;p=mirror_edk2.git diff --git a/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c b/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c index f2eec71913..89da40fd8e 100644 --- a/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c +++ b/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c @@ -19,9 +19,6 @@ #include #include -// We use this index definition to define an invalid block entry -#define TT_ATTR_INDX_INVALID ((UINT32)~0) - STATIC UINT64 ArmMemoryAttributeToPageAttribute ( @@ -29,103 +26,64 @@ ArmMemoryAttributeToPageAttribute ( ) { switch (Attributes) { - case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE: - case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE: - return TT_ATTR_INDX_MEMORY_WRITE_BACK; - - case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK: - case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK: - return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE; - - case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH: - case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH: - return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE; - - // Uncached and device mappings are treated as outer shareable by default, - case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED: - case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED: - return TT_ATTR_INDX_MEMORY_NON_CACHEABLE; - - default: - ASSERT(0); - case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE: - case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE: - if (ArmReadCurrentEL () == AARCH64_EL2) - return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK; - else - return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK; - } -} - -UINT64 -PageAttributeToGcdAttribute ( - IN UINT64 PageAttributes - ) -{ - UINT64 GcdAttributes; - - switch (PageAttributes & TT_ATTR_INDX_MASK) { - case TT_ATTR_INDX_DEVICE_MEMORY: - GcdAttributes = EFI_MEMORY_UC; - break; - case TT_ATTR_INDX_MEMORY_NON_CACHEABLE: - GcdAttributes = EFI_MEMORY_WC; - break; - case TT_ATTR_INDX_MEMORY_WRITE_THROUGH: - GcdAttributes = EFI_MEMORY_WT; - break; - case TT_ATTR_INDX_MEMORY_WRITE_BACK: - GcdAttributes = EFI_MEMORY_WB; - break; - default: - DEBUG ((EFI_D_ERROR, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes)); - ASSERT (0); - // The Global Coherency Domain (GCD) value is defined as a bit set. - // Returning 0 means no attribute has been set. 
- GcdAttributes = 0; - } - - // Determine protection attributes - if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) || ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) { - // Read only cases map to write-protect - GcdAttributes |= EFI_MEMORY_RO; - } - - // Process eXecute Never attribute - if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0 ) { - GcdAttributes |= EFI_MEMORY_XP; + case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE: + case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE: + return TT_ATTR_INDX_MEMORY_WRITE_BACK; + + case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK: + case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK: + return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE; + + case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH: + case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH: + return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE; + + // Uncached and device mappings are treated as outer shareable by default, + case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED: + case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED: + return TT_ATTR_INDX_MEMORY_NON_CACHEABLE; + + default: + ASSERT (0); + case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE: + case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE: + if (ArmReadCurrentEL () == AARCH64_EL2) { + return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK; + } else { + return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK; + } } - - return GcdAttributes; } #define MIN_T0SZ 16 #define BITS_PER_LEVEL 9 +#define MAX_VA_BITS 48 -VOID -GetRootTranslationTableInfo ( - IN UINTN T0SZ, - OUT UINTN *TableLevel, - OUT UINTN *TableEntryCount +STATIC +UINTN +GetRootTableEntryCount ( + IN UINTN T0SZ ) { - // Get the level of the root table - if (TableLevel) { - *TableLevel = (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL; - } + return TT_ENTRY_COUNT >> (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL; +} - if (TableEntryCount) { - *TableEntryCount = 1UL << (BITS_PER_LEVEL - (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL); - } +STATIC +UINTN +GetRootTableLevel ( + IN UINTN T0SZ + ) +{ + return (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL; } STATIC VOID ReplaceTableEntry ( - IN UINT64 *Entry, - IN UINT64 Value, - IN UINT64 RegionStart, - IN BOOLEAN IsLiveBlockMapping + IN UINT64 *Entry, + IN UINT64 Value, + IN UINT64 RegionStart, + IN BOOLEAN IsLiveBlockMapping ) { if (!ArmMmuEnabled () || !IsLiveBlockMapping) { @@ -139,50 +97,99 @@ ReplaceTableEntry ( STATIC VOID FreePageTablesRecursive ( - IN UINT64 *TranslationTable + IN UINT64 *TranslationTable, + IN UINTN Level ) { - UINTN Index; - - for (Index = 0; Index < TT_ENTRY_COUNT; Index++) { - if ((TranslationTable[Index] & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) { - FreePageTablesRecursive ((VOID *)(UINTN)(TranslationTable[Index] & - TT_ADDRESS_MASK_BLOCK_ENTRY)); + UINTN Index; + + ASSERT (Level <= 3); + + if (Level < 3) { + for (Index = 0; Index < TT_ENTRY_COUNT; Index++) { + if ((TranslationTable[Index] & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) { + FreePageTablesRecursive ( + (VOID *)(UINTN)(TranslationTable[Index] & + TT_ADDRESS_MASK_BLOCK_ENTRY), + Level + 1 + ); + } } } + FreePages (TranslationTable, 1); } +STATIC +BOOLEAN +IsBlockEntry ( + IN UINT64 Entry, + IN UINTN Level + ) +{ + if (Level == 3) { + return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY_LEVEL3; + } + + return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY; +} + +STATIC +BOOLEAN +IsTableEntry ( + IN UINT64 Entry, + IN UINTN Level + ) +{ + if (Level == 3) { + // + // TT_TYPE_TABLE_ENTRY aliases TT_TYPE_BLOCK_ENTRY_LEVEL3 + // so we need to 
take the level into account as well. + // + return FALSE; + } + + return (Entry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY; +} + STATIC EFI_STATUS UpdateRegionMappingRecursive ( - IN UINT64 RegionStart, - IN UINT64 RegionEnd, - IN UINT64 AttributeSetMask, - IN UINT64 AttributeClearMask, - IN UINT64 *PageTable, - IN UINTN Level + IN UINT64 RegionStart, + IN UINT64 RegionEnd, + IN UINT64 AttributeSetMask, + IN UINT64 AttributeClearMask, + IN UINT64 *PageTable, + IN UINTN Level ) { - UINTN BlockShift; - UINT64 BlockMask; - UINT64 BlockEnd; - UINT64 *Entry; - UINT64 EntryValue; - VOID *TranslationTable; - EFI_STATUS Status; + UINTN BlockShift; + UINT64 BlockMask; + UINT64 BlockEnd; + UINT64 *Entry; + UINT64 EntryValue; + VOID *TranslationTable; + EFI_STATUS Status; ASSERT (((RegionStart | RegionEnd) & EFI_PAGE_MASK) == 0); BlockShift = (Level + 1) * BITS_PER_LEVEL + MIN_T0SZ; - BlockMask = MAX_UINT64 >> BlockShift; - - DEBUG ((DEBUG_VERBOSE, "%a(%d): %llx - %llx set %lx clr %lx\n", __FUNCTION__, - Level, RegionStart, RegionEnd, AttributeSetMask, AttributeClearMask)); - - for (; RegionStart < RegionEnd; RegionStart = BlockEnd) { + BlockMask = MAX_UINT64 >> BlockShift; + + DEBUG (( + DEBUG_VERBOSE, + "%a(%d): %llx - %llx set %lx clr %lx\n", + __FUNCTION__, + Level, + RegionStart, + RegionEnd, + AttributeSetMask, + AttributeClearMask + )); + + for ( ; RegionStart < RegionEnd; RegionStart = BlockEnd) { BlockEnd = MIN (RegionEnd, (RegionStart | BlockMask) + 1); - Entry = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)]; + Entry = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)]; // // If RegionStart or BlockEnd is not aligned to the block size at this @@ -190,11 +197,16 @@ UpdateRegionMappingRecursive ( // than a block, and recurse to create the block or page entries at // the next level. No block mappings are allowed at all at level 0, // so in that case, we have to recurse unconditionally. + // If we are changing a table entry and the AttributeClearMask is non-zero, + // we cannot replace it with a block entry without potentially losing + // attribute information, so keep the table entry in that case. // - if (Level == 0 || ((RegionStart | BlockEnd) & BlockMask) != 0) { + if ((Level == 0) || (((RegionStart | BlockEnd) & BlockMask) != 0) || + (IsTableEntry (*Entry, Level) && (AttributeClearMask != 0))) + { ASSERT (Level < 3); - if ((*Entry & TT_TYPE_MASK) != TT_TYPE_TABLE_ENTRY) { + if (!IsTableEntry (*Entry, Level)) { // // No table entry exists yet, so we need to allocate a page table // for the next level. @@ -212,14 +224,21 @@ UpdateRegionMappingRecursive ( InvalidateDataCacheRange (TranslationTable, EFI_PAGE_SIZE); } - if ((*Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) { + ZeroMem (TranslationTable, EFI_PAGE_SIZE); + + if (IsBlockEntry (*Entry, Level)) { // // We are splitting an existing block entry, so we have to populate // the new table with the attributes of the block entry it replaces. 
// - Status = UpdateRegionMappingRecursive (RegionStart & ~BlockMask, - (RegionStart | BlockMask) + 1, *Entry & TT_ATTRIBUTES_MASK, - 0, TranslationTable, Level + 1); + Status = UpdateRegionMappingRecursive ( + RegionStart & ~BlockMask, + (RegionStart | BlockMask) + 1, + *Entry & TT_ATTRIBUTES_MASK, + 0, + TranslationTable, + Level + 1 + ); if (EFI_ERROR (Status)) { // // The range we passed to UpdateRegionMappingRecursive () is block @@ -229,8 +248,6 @@ UpdateRegionMappingRecursive ( FreePages (TranslationTable, 1); return Status; } - } else { - ZeroMem (TranslationTable, EFI_PAGE_SIZE); } } else { TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY); @@ -239,11 +256,16 @@ UpdateRegionMappingRecursive ( // // Recurse to the next level // - Status = UpdateRegionMappingRecursive (RegionStart, BlockEnd, - AttributeSetMask, AttributeClearMask, TranslationTable, - Level + 1); + Status = UpdateRegionMappingRecursive ( + RegionStart, + BlockEnd, + AttributeSetMask, + AttributeClearMask, + TranslationTable, + Level + 1 + ); if (EFI_ERROR (Status)) { - if ((*Entry & TT_TYPE_MASK) != TT_TYPE_TABLE_ENTRY) { + if (!IsTableEntry (*Entry, Level)) { // // We are creating a new table entry, so on failure, we can free all // allocations we made recursively, given that the whole subhierarchy @@ -251,56 +273,45 @@ UpdateRegionMappingRecursive ( // possible for existing table entries, since we cannot revert the // modifications we made to the subhierarchy it represents.) // - FreePageTablesRecursive (TranslationTable); + FreePageTablesRecursive (TranslationTable, Level + 1); } + return Status; } - if ((*Entry & TT_TYPE_MASK) != TT_TYPE_TABLE_ENTRY) { + if (!IsTableEntry (*Entry, Level)) { EntryValue = (UINTN)TranslationTable | TT_TYPE_TABLE_ENTRY; - ReplaceTableEntry (Entry, EntryValue, RegionStart, - (*Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY); + ReplaceTableEntry ( + Entry, + EntryValue, + RegionStart, + IsBlockEntry (*Entry, Level) + ); } } else { - EntryValue = (*Entry & AttributeClearMask) | AttributeSetMask; + EntryValue = (*Entry & AttributeClearMask) | AttributeSetMask; EntryValue |= RegionStart; EntryValue |= (Level == 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3 : TT_TYPE_BLOCK_ENTRY; - ReplaceTableEntry (Entry, EntryValue, RegionStart, FALSE); - } - } - return EFI_SUCCESS; -} - -STATIC -VOID -LookupAddresstoRootTable ( - IN UINT64 MaxAddress, - OUT UINTN *T0SZ, - OUT UINTN *TableEntryCount - ) -{ - UINTN TopBit; - - // Check the parameters are not NULL - ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL)); - - // Look for the highest bit set in MaxAddress - for (TopBit = 63; TopBit != 0; TopBit--) { - if ((1ULL << TopBit) & MaxAddress) { - // MaxAddress top bit is found - TopBit = TopBit + 1; - break; + if (IsTableEntry (*Entry, Level)) { + // + // We are replacing a table entry with a block entry. This is only + // possible if we are keeping none of the original attributes. + // We can free the table entry's page table, and all the ones below + // it, since we are dropping the only possible reference to it. 
+ // + ASSERT (AttributeClearMask == 0); + TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY); + ReplaceTableEntry (Entry, EntryValue, RegionStart, TRUE); + FreePageTablesRecursive (TranslationTable, Level + 1); + } else { + ReplaceTableEntry (Entry, EntryValue, RegionStart, FALSE); + } } } - ASSERT (TopBit != 0); - - // Calculate T0SZ from the top bit of the MaxAddress - *T0SZ = 64 - TopBit; - // Get the Table info from T0SZ - GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount); + return EFI_SUCCESS; } STATIC @@ -312,19 +323,22 @@ UpdateRegionMapping ( IN UINT64 AttributeClearMask ) { - UINTN RootTableLevel; - UINTN T0SZ; + UINTN T0SZ; - if (((RegionStart | RegionLength) & EFI_PAGE_MASK)) { + if (((RegionStart | RegionLength) & EFI_PAGE_MASK) != 0) { return EFI_INVALID_PARAMETER; } T0SZ = ArmGetTCR () & TCR_T0SZ_MASK; - GetRootTranslationTableInfo (T0SZ, &RootTableLevel, NULL); - return UpdateRegionMappingRecursive (RegionStart, RegionStart + RegionLength, - AttributeSetMask, AttributeClearMask, ArmGetTTBR0BaseAddress (), - RootTableLevel); + return UpdateRegionMappingRecursive ( + RegionStart, + RegionStart + RegionLength, + AttributeSetMask, + AttributeClearMask, + ArmGetTTBR0BaseAddress (), + GetRootTableLevel (T0SZ) + ); } STATIC @@ -345,31 +359,32 @@ FillTranslationTable ( STATIC UINT64 GcdAttributeToPageAttribute ( - IN UINT64 GcdAttributes + IN UINT64 GcdAttributes ) { - UINT64 PageAttributes; + UINT64 PageAttributes; switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) { - case EFI_MEMORY_UC: - PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY; - break; - case EFI_MEMORY_WC: - PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE; - break; - case EFI_MEMORY_WT: - PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE; - break; - case EFI_MEMORY_WB: - PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE; - break; - default: - PageAttributes = TT_ATTR_INDX_MASK; - break; + case EFI_MEMORY_UC: + PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY; + break; + case EFI_MEMORY_WC: + PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE; + break; + case EFI_MEMORY_WT: + PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE; + break; + case EFI_MEMORY_WB: + PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE; + break; + default: + PageAttributes = TT_ATTR_INDX_MASK; + break; } - if ((GcdAttributes & EFI_MEMORY_XP) != 0 || - (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC) { + if (((GcdAttributes & EFI_MEMORY_XP) != 0) || + ((GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC)) + { if (ArmReadCurrentEL () == AARCH64_EL2) { PageAttributes |= TT_XN_MASK; } else { @@ -386,15 +401,15 @@ GcdAttributeToPageAttribute ( EFI_STATUS ArmSetMemoryAttributes ( - IN EFI_PHYSICAL_ADDRESS BaseAddress, - IN UINT64 Length, - IN UINT64 Attributes + IN EFI_PHYSICAL_ADDRESS BaseAddress, + IN UINT64 Length, + IN UINT64 Attributes ) { - UINT64 PageAttributes; - UINT64 PageAttributeMask; + UINT64 PageAttributes; + UINT64 PageAttributeMask; - PageAttributes = GcdAttributeToPageAttribute (Attributes); + PageAttributes = GcdAttributeToPageAttribute (Attributes); PageAttributeMask = 0; if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) { @@ -402,22 +417,26 @@ ArmSetMemoryAttributes ( // No memory type was set in Attributes, so we are going to update the // permissions only. 
// - PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK; + PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK; PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK | TT_PXN_MASK | TT_XN_MASK); } - return UpdateRegionMapping (BaseAddress, Length, PageAttributes, - PageAttributeMask); + return UpdateRegionMapping ( + BaseAddress, + Length, + PageAttributes, + PageAttributeMask + ); } STATIC EFI_STATUS SetMemoryRegionAttribute ( - IN EFI_PHYSICAL_ADDRESS BaseAddress, - IN UINT64 Length, - IN UINT64 Attributes, - IN UINT64 BlockEntryMask + IN EFI_PHYSICAL_ADDRESS BaseAddress, + IN UINT64 Length, + IN UINT64 Attributes, + IN UINT64 BlockEntryMask ) { return UpdateRegionMapping (BaseAddress, Length, Attributes, BlockEntryMask); @@ -425,11 +444,11 @@ SetMemoryRegionAttribute ( EFI_STATUS ArmSetMemoryRegionNoExec ( - IN EFI_PHYSICAL_ADDRESS BaseAddress, - IN UINT64 Length + IN EFI_PHYSICAL_ADDRESS BaseAddress, + IN UINT64 Length ) { - UINT64 Val; + UINT64 Val; if (ArmReadCurrentEL () == AARCH64_EL1) { Val = TT_PXN_MASK | TT_UXN_MASK; @@ -441,16 +460,17 @@ ArmSetMemoryRegionNoExec ( BaseAddress, Length, Val, - ~TT_ADDRESS_MASK_BLOCK_ENTRY); + ~TT_ADDRESS_MASK_BLOCK_ENTRY + ); } EFI_STATUS ArmClearMemoryRegionNoExec ( - IN EFI_PHYSICAL_ADDRESS BaseAddress, - IN UINT64 Length + IN EFI_PHYSICAL_ADDRESS BaseAddress, + IN UINT64 Length ) { - UINT64 Mask; + UINT64 Mask; // XN maps to UXN in the EL1&0 translation regime Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK); @@ -459,51 +479,55 @@ ArmClearMemoryRegionNoExec ( BaseAddress, Length, 0, - Mask); + Mask + ); } EFI_STATUS ArmSetMemoryRegionReadOnly ( - IN EFI_PHYSICAL_ADDRESS BaseAddress, - IN UINT64 Length + IN EFI_PHYSICAL_ADDRESS BaseAddress, + IN UINT64 Length ) { return SetMemoryRegionAttribute ( BaseAddress, Length, TT_AP_RO_RO, - ~TT_ADDRESS_MASK_BLOCK_ENTRY); + ~TT_ADDRESS_MASK_BLOCK_ENTRY + ); } EFI_STATUS ArmClearMemoryRegionReadOnly ( - IN EFI_PHYSICAL_ADDRESS BaseAddress, - IN UINT64 Length + IN EFI_PHYSICAL_ADDRESS BaseAddress, + IN UINT64 Length ) { return SetMemoryRegionAttribute ( BaseAddress, Length, TT_AP_RW_RW, - ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK)); + ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK) + ); } EFI_STATUS EFIAPI ArmConfigureMmu ( IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable, - OUT VOID **TranslationTableBase OPTIONAL, + OUT VOID **TranslationTableBase OPTIONAL, OUT UINTN *TranslationTableSize OPTIONAL ) { - VOID* TranslationTable; - UINT64 MaxAddress; - UINTN T0SZ; - UINTN RootTableEntryCount; - UINT64 TCR; - EFI_STATUS Status; - - if(MemoryTable == NULL) { + VOID *TranslationTable; + UINTN MaxAddressBits; + UINT64 MaxAddress; + UINTN T0SZ; + UINTN RootTableEntryCount; + UINT64 TCR; + EFI_STATUS Status; + + if (MemoryTable == NULL) { ASSERT (MemoryTable != NULL); return EFI_INVALID_PARAMETER; } @@ -515,11 +539,11 @@ ArmConfigureMmu ( // into account the architectural limitations that result from UEFI's // use of 4 KB pages. 
// - MaxAddress = MIN (LShiftU64 (1ULL, ArmGetPhysicalAddressBits ()) - 1, - MAX_ALLOC_ADDRESS); + MaxAddressBits = MIN (ArmGetPhysicalAddressBits (), MAX_VA_BITS); + MaxAddress = LShiftU64 (1ULL, MaxAddressBits) - 1; - // Lookup the Table Level to get the information - LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount); + T0SZ = 64 - MaxAddressBits; + RootTableEntryCount = GetRootTableEntryCount (T0SZ); // // Set TCR that allows us to retrieve T0SZ in the subsequent functions @@ -527,7 +551,7 @@ ArmConfigureMmu ( // Ideally we will be running at EL2, but should support EL1 as well. // UEFI should not run at EL3. if (ArmReadCurrentEL () == AARCH64_EL2) { - //Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2 + // Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2 TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB; // Set the Physical Address Size using MaxAddress @@ -544,7 +568,11 @@ ArmConfigureMmu ( } else if (MaxAddress < SIZE_256TB) { TCR |= TCR_PS_256TB; } else { - DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress)); + DEBUG (( + DEBUG_ERROR, + "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", + MaxAddress + )); ASSERT (0); // Bigger than 48-bit memory space are not supported return EFI_UNSUPPORTED; } @@ -566,7 +594,11 @@ ArmConfigureMmu ( } else if (MaxAddress < SIZE_256TB) { TCR |= TCR_IPS_256TB; } else { - DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress)); + DEBUG (( + DEBUG_ERROR, + "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", + MaxAddress + )); ASSERT (0); // Bigger than 48-bit memory space are not supported return EFI_UNSUPPORTED; } @@ -596,9 +628,13 @@ ArmConfigureMmu ( if (TranslationTable == NULL) { return EFI_OUT_OF_RESOURCES; } - // We set TTBR0 just after allocating the table to retrieve its location from the subsequent - // functions without needing to pass this value across the functions. The MMU is only enabled - // after the translation tables are populated. + + // + // We set TTBR0 just after allocating the table to retrieve its location from + // the subsequent functions without needing to pass this value across the + // functions. The MMU is only enabled after the translation tables are + // populated. + // ArmSetTTBR0 (TranslationTable); if (TranslationTableBase != NULL) { @@ -606,29 +642,40 @@ ArmConfigureMmu ( } if (TranslationTableSize != NULL) { - *TranslationTableSize = RootTableEntryCount * sizeof(UINT64); + *TranslationTableSize = RootTableEntryCount * sizeof (UINT64); } // // Make sure we are not inadvertently hitting in the caches // when populating the page tables. 
// - InvalidateDataCacheRange (TranslationTable, - RootTableEntryCount * sizeof(UINT64)); - ZeroMem (TranslationTable, RootTableEntryCount * sizeof(UINT64)); + InvalidateDataCacheRange ( + TranslationTable, + RootTableEntryCount * sizeof (UINT64) + ); + ZeroMem (TranslationTable, RootTableEntryCount * sizeof (UINT64)); while (MemoryTable->Length != 0) { Status = FillTranslationTable (TranslationTable, MemoryTable); if (EFI_ERROR (Status)) { - goto FREE_TRANSLATION_TABLE; + goto FreeTranslationTable; } + MemoryTable++; } - ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) | // mapped to EFI_MEMORY_UC - MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) | // mapped to EFI_MEMORY_WC - MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) | // mapped to EFI_MEMORY_WT - MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)); // mapped to EFI_MEMORY_WB + // + // EFI_MEMORY_UC ==> MAIR_ATTR_DEVICE_MEMORY + // EFI_MEMORY_WC ==> MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE + // EFI_MEMORY_WT ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH + // EFI_MEMORY_WB ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK + // + ArmSetMAIR ( + MAIR_ATTR (TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) | + MAIR_ATTR (TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) | + MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) | + MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK) + ); ArmDisableAlignmentCheck (); ArmEnableStackAlignmentCheck (); @@ -638,7 +685,7 @@ ArmConfigureMmu ( ArmEnableMmu (); return EFI_SUCCESS; -FREE_TRANSLATION_TABLE: +FreeTranslationTable: FreePages (TranslationTable, 1); return Status; } @@ -649,14 +696,16 @@ ArmMmuBaseLibConstructor ( VOID ) { - extern UINT32 ArmReplaceLiveTranslationEntrySize; + extern UINT32 ArmReplaceLiveTranslationEntrySize; // // The ArmReplaceLiveTranslationEntry () helper function may be invoked // with the MMU off so we have to ensure that it gets cleaned to the PoC // - WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry, - ArmReplaceLiveTranslationEntrySize); + WriteBackDataCacheRange ( + (VOID *)(UINTN)ArmReplaceLiveTranslationEntry, + ArmReplaceLiveTranslationEntrySize + ); return RETURN_SUCCESS; }
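
For reference, the table-walk arithmetic introduced above (GetRootTableLevel (), GetRootTableEntryCount (), and the BlockShift/BlockMask computation in UpdateRegionMappingRecursive ()) can be checked in isolation. The following is a minimal standalone sketch, not part of the patch and not buildable in the EDK2 tree: it assumes a 4 KB translation granule, redefines MIN_T0SZ, BITS_PER_LEVEL and TT_ENTRY_COUNT locally, and the RootTableLevel ()/RootTableEntryCount () helper names, main () and the printf output are illustrative only.

//
// Standalone sketch: mirrors the 4 KB-granule translation table math used by
// GetRootTableLevel (), GetRootTableEntryCount () and
// UpdateRegionMappingRecursive () in ArmMmuLibCore.c. The constants below are
// local stand-ins for MIN_T0SZ, BITS_PER_LEVEL and TT_ENTRY_COUNT; main () and
// the printf output are illustration only and do not exist in the patch.
//
#include <stdint.h>
#include <stdio.h>

#define MIN_T0SZ        16
#define BITS_PER_LEVEL  9
#define TT_ENTRY_COUNT  512

// Starting lookup level for a given T0SZ: (T0SZ - 16) / 9
static unsigned
RootTableLevel (unsigned T0SZ)
{
  return (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;
}

// Number of entries in the (possibly truncated) root table:
// 512 >> ((T0SZ - 16) % 9)
static unsigned
RootTableEntryCount (unsigned T0SZ)
{
  return TT_ENTRY_COUNT >> ((T0SZ - MIN_T0SZ) % BITS_PER_LEVEL);
}

int
main (void)
{
  unsigned  T0SZ;
  unsigned  Level;

  // T0SZ = 64 - VA bits; e.g. 16 -> 48-bit VA, 25 -> 39-bit VA.
  for (T0SZ = 16; T0SZ <= 25; T0SZ += 3) {
    printf (
      "T0SZ %2u: root level %u, %3u root entries\n",
      T0SZ,
      RootTableLevel (T0SZ),
      RootTableEntryCount (T0SZ)
      );
  }

  // Per-level geometry as computed in UpdateRegionMappingRecursive ():
  // BlockShift = (Level + 1) * 9 + 16, BlockMask = MAX_UINT64 >> BlockShift,
  // so each entry at this level covers BlockMask + 1 bytes and is indexed
  // by VA bits [63 - BlockShift + 9 : 64 - BlockShift].
  for (Level = 0; Level <= 3; Level++) {
    unsigned  BlockShift = (Level + 1) * BITS_PER_LEVEL + MIN_T0SZ;
    uint64_t  BlockMask  = UINT64_MAX >> BlockShift;

    printf (
      "level %u: entry covers 0x%llx bytes, index = VA[%u:%u]\n",
      Level,
      (unsigned long long)(BlockMask + 1),
      63 - BlockShift + BITS_PER_LEVEL,
      64 - BlockShift
      );
  }

  return 0;
}

For example, T0SZ = 25 (a 39-bit VA space) starts the walk at level 1 with a full 512-entry root table, while T0SZ = 16 (48-bit VA, the smallest value ArmConfigureMmu () will program given the MAX_VA_BITS cap of 48) starts at level 0; at level 3 each entry maps a 4 KB page indexed by VA[20:12], matching the BlockMask used when splitting or replacing block entries above.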