/** @file
*  File managing the MMU for ARMv8 architecture
*
*  Copyright (c) 2011-2014, ARM Limited. All rights reserved.
*  Copyright (c) 2016, Linaro Limited. All rights reserved.
*  Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
*
*  SPDX-License-Identifier: BSD-2-Clause-Patent
*
**/
13 #include <Chipset/AArch64.h>
14 #include <Library/BaseMemoryLib.h>
15 #include <Library/CacheMaintenanceLib.h>
16 #include <Library/MemoryAllocationLib.h>
17 #include <Library/ArmLib.h>
18 #include <Library/ArmMmuLib.h>
19 #include <Library/BaseLib.h>
20 #include <Library/DebugLib.h>
22 // We use this index definition to define an invalid block entry
23 #define TT_ATTR_INDX_INVALID ((UINT32)~0)
27 ArmMemoryAttributeToPageAttribute (
28 IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
32 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE
:
33 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE
:
34 return TT_ATTR_INDX_MEMORY_WRITE_BACK
;
36 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK
:
37 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK
:
38 return TT_ATTR_INDX_MEMORY_WRITE_BACK
| TT_SH_INNER_SHAREABLE
;
40 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH
:
41 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH
:
42 return TT_ATTR_INDX_MEMORY_WRITE_THROUGH
| TT_SH_INNER_SHAREABLE
;
44 // Uncached and device mappings are treated as outer shareable by default,
45 case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED
:
46 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED
:
47 return TT_ATTR_INDX_MEMORY_NON_CACHEABLE
;
51 case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE
:
52 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE
:
53 if (ArmReadCurrentEL () == AARCH64_EL2
)
54 return TT_ATTR_INDX_DEVICE_MEMORY
| TT_XN_MASK
;
56 return TT_ATTR_INDX_DEVICE_MEMORY
| TT_UXN_MASK
| TT_PXN_MASK
;
61 PageAttributeToGcdAttribute (
62 IN UINT64 PageAttributes
67 switch (PageAttributes
& TT_ATTR_INDX_MASK
) {
68 case TT_ATTR_INDX_DEVICE_MEMORY
:
69 GcdAttributes
= EFI_MEMORY_UC
;
71 case TT_ATTR_INDX_MEMORY_NON_CACHEABLE
:
72 GcdAttributes
= EFI_MEMORY_WC
;
74 case TT_ATTR_INDX_MEMORY_WRITE_THROUGH
:
75 GcdAttributes
= EFI_MEMORY_WT
;
77 case TT_ATTR_INDX_MEMORY_WRITE_BACK
:
78 GcdAttributes
= EFI_MEMORY_WB
;
81 DEBUG ((EFI_D_ERROR
, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes
));
83 // The Global Coherency Domain (GCD) value is defined as a bit set.
84 // Returning 0 means no attribute has been set.
88 // Determine protection attributes
89 if (((PageAttributes
& TT_AP_MASK
) == TT_AP_NO_RO
) || ((PageAttributes
& TT_AP_MASK
) == TT_AP_RO_RO
)) {
90 // Read only cases map to write-protect
91 GcdAttributes
|= EFI_MEMORY_RO
;
94 // Process eXecute Never attribute
95 if ((PageAttributes
& (TT_PXN_MASK
| TT_UXN_MASK
)) != 0 ) {
96 GcdAttributes
|= EFI_MEMORY_XP
;
103 #define BITS_PER_LEVEL 9
106 GetRootTranslationTableInfo (
108 OUT UINTN
*TableLevel
,
109 OUT UINTN
*TableEntryCount
112 // Get the level of the root table
114 *TableLevel
= (T0SZ
- MIN_T0SZ
) / BITS_PER_LEVEL
;
117 if (TableEntryCount
) {
118 *TableEntryCount
= 1UL << (BITS_PER_LEVEL
- (T0SZ
- MIN_T0SZ
) % BITS_PER_LEVEL
);
127 IN UINT64 RegionStart
130 if (!ArmMmuEnabled ()) {
133 ArmReplaceLiveTranslationEntry (Entry
, Value
, RegionStart
);
139 LookupAddresstoRootTable (
140 IN UINT64 MaxAddress
,
142 OUT UINTN
*TableEntryCount
147 // Check the parameters are not NULL
148 ASSERT ((T0SZ
!= NULL
) && (TableEntryCount
!= NULL
));
150 // Look for the highest bit set in MaxAddress
151 for (TopBit
= 63; TopBit
!= 0; TopBit
--) {
152 if ((1ULL << TopBit
) & MaxAddress
) {
153 // MaxAddress top bit is found
158 ASSERT (TopBit
!= 0);
160 // Calculate T0SZ from the top bit of the MaxAddress
163 // Get the Table info from T0SZ
164 GetRootTranslationTableInfo (*T0SZ
, NULL
, TableEntryCount
);
169 GetBlockEntryListFromAddress (
170 IN UINT64
*RootTable
,
171 IN UINT64 RegionStart
,
172 OUT UINTN
*TableLevel
,
173 IN OUT UINT64
*BlockEntrySize
,
174 OUT UINT64
**LastBlockEntry
177 UINTN RootTableLevel
;
178 UINTN RootTableEntryCount
;
179 UINT64
*TranslationTable
;
181 UINT64
*SubTableBlockEntry
;
182 UINT64 BlockEntryAddress
;
183 UINTN BaseAddressAlignment
;
189 UINT64 TableAttributes
;
191 // Initialize variable
194 // Ensure the parameters are valid
195 if (!(TableLevel
&& BlockEntrySize
&& LastBlockEntry
)) {
196 ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER
);
200 // Ensure the Region is aligned on 4KB boundary
201 if ((RegionStart
& (SIZE_4KB
- 1)) != 0) {
202 ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER
);
206 // Ensure the required size is aligned on 4KB boundary and not 0
207 if ((*BlockEntrySize
& (SIZE_4KB
- 1)) != 0 || *BlockEntrySize
== 0) {
208 ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER
);
212 T0SZ
= ArmGetTCR () & TCR_T0SZ_MASK
;
213 // Get the Table info from T0SZ
214 GetRootTranslationTableInfo (T0SZ
, &RootTableLevel
, &RootTableEntryCount
);
216 // If the start address is 0x0 then we use the size of the region to identify the alignment
217 if (RegionStart
== 0) {
218 // Identify the highest possible alignment for the Region Size
219 BaseAddressAlignment
= LowBitSet64 (*BlockEntrySize
);
221 // Identify the highest possible alignment for the Base Address
222 BaseAddressAlignment
= LowBitSet64 (RegionStart
);
225 // Identify the Page Level the RegionStart must belong to. Note that PageLevel
226 // should be at least 1 since block translations are not supported at level 0
227 PageLevel
= MAX (3 - ((BaseAddressAlignment
- 12) / 9), 1);
229 // If the required size is smaller than the current block size then we need to go to the page below.
230 // The PageLevel was calculated on the Base Address alignment but did not take in account the alignment
231 // of the allocation size
232 while (*BlockEntrySize
< TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel
)) {
233 // It does not fit so we need to go a page level above
238 // Get the Table Descriptor for the corresponding PageLevel. We need to decompose RegionStart to get appropriate entries
241 TranslationTable
= RootTable
;
242 for (IndexLevel
= RootTableLevel
; IndexLevel
<= PageLevel
; IndexLevel
++) {
243 BlockEntry
= (UINT64
*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable
, IndexLevel
, RegionStart
);
245 if ((IndexLevel
!= 3) && ((*BlockEntry
& TT_TYPE_MASK
) == TT_TYPE_TABLE_ENTRY
)) {
246 // Go to the next table
247 TranslationTable
= (UINT64
*)(*BlockEntry
& TT_ADDRESS_MASK_DESCRIPTION_TABLE
);
249 // If we are at the last level then update the last level to next level
250 if (IndexLevel
== PageLevel
) {
251 // Enter the next level
254 } else if ((*BlockEntry
& TT_TYPE_MASK
) == TT_TYPE_BLOCK_ENTRY
) {
255 // If we are not at the last level then we need to split this BlockEntry
256 if (IndexLevel
!= PageLevel
) {
257 // Retrieve the attributes from the block entry
258 Attributes
= *BlockEntry
& TT_ATTRIBUTES_MASK
;
260 // Convert the block entry attributes into Table descriptor attributes
261 TableAttributes
= TT_TABLE_AP_NO_PERMISSION
;
262 if (Attributes
& TT_NS
) {
263 TableAttributes
= TT_TABLE_NS
;
266 // Get the address corresponding at this entry
267 BlockEntryAddress
= RegionStart
;
268 BlockEntryAddress
= BlockEntryAddress
>> TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel
);
269 // Shift back to right to set zero before the effective address
270 BlockEntryAddress
= BlockEntryAddress
<< TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel
);
272 // Set the correct entry type for the next page level
273 if ((IndexLevel
+ 1) == 3) {
274 Attributes
|= TT_TYPE_BLOCK_ENTRY_LEVEL3
;
276 Attributes
|= TT_TYPE_BLOCK_ENTRY
;
279 // Create a new translation table
280 TranslationTable
= AllocatePages (1);
281 if (TranslationTable
== NULL
) {
285 // Populate the newly created lower level table
286 SubTableBlockEntry
= TranslationTable
;
287 for (Index
= 0; Index
< TT_ENTRY_COUNT
; Index
++) {
288 *SubTableBlockEntry
= Attributes
| (BlockEntryAddress
+ (Index
<< TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel
+ 1)));
289 SubTableBlockEntry
++;
292 // Fill the BlockEntry with the new TranslationTable
293 ReplaceLiveEntry (BlockEntry
,
294 (UINTN
)TranslationTable
| TableAttributes
| TT_TYPE_TABLE_ENTRY
,
298 if (IndexLevel
!= PageLevel
) {
300 // Case when we have an Invalid Entry and we are at a page level above of the one targetted.
303 // Create a new translation table
304 TranslationTable
= AllocatePages (1);
305 if (TranslationTable
== NULL
) {
309 ZeroMem (TranslationTable
, TT_ENTRY_COUNT
* sizeof(UINT64
));
311 // Fill the new BlockEntry with the TranslationTable
312 *BlockEntry
= ((UINTN
)TranslationTable
& TT_ADDRESS_MASK_DESCRIPTION_TABLE
) | TT_TYPE_TABLE_ENTRY
;
317 // Expose the found PageLevel to the caller
318 *TableLevel
= PageLevel
;
320 // Now, we have the Table Level we can get the Block Size associated to this table
321 *BlockEntrySize
= TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel
);
323 // The last block of the root table depends on the number of entry in this table,
324 // otherwise it is always the (TT_ENTRY_COUNT - 1)th entry in the table.
325 *LastBlockEntry
= TT_LAST_BLOCK_ADDRESS(TranslationTable
,
326 (PageLevel
== RootTableLevel
) ? RootTableEntryCount
: TT_ENTRY_COUNT
);
333 UpdateRegionMapping (
334 IN UINT64
*RootTable
,
335 IN UINT64 RegionStart
,
336 IN UINT64 RegionLength
,
337 IN UINT64 Attributes
,
338 IN UINT64 BlockEntryMask
343 UINT64
*LastBlockEntry
;
344 UINT64 BlockEntrySize
;
347 // Ensure the Length is aligned on 4KB boundary
348 if ((RegionLength
== 0) || ((RegionLength
& (SIZE_4KB
- 1)) != 0)) {
349 ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER
);
350 return EFI_INVALID_PARAMETER
;
354 // Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor
355 // such as the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor
356 BlockEntrySize
= RegionLength
;
357 BlockEntry
= GetBlockEntryListFromAddress (RootTable
, RegionStart
, &TableLevel
, &BlockEntrySize
, &LastBlockEntry
);
358 if (BlockEntry
== NULL
) {
359 // GetBlockEntryListFromAddress() return NULL when it fails to allocate new pages from the Translation Tables
360 return EFI_OUT_OF_RESOURCES
;
363 if (TableLevel
!= 3) {
364 Type
= TT_TYPE_BLOCK_ENTRY
;
366 Type
= TT_TYPE_BLOCK_ENTRY_LEVEL3
;
370 // Fill the Block Entry with attribute and output block address
371 *BlockEntry
&= BlockEntryMask
;
372 *BlockEntry
|= (RegionStart
& TT_ADDRESS_MASK_BLOCK_ENTRY
) | Attributes
| Type
;
374 ArmUpdateTranslationTableEntry (BlockEntry
, (VOID
*)RegionStart
);
376 // Go to the next BlockEntry
377 RegionStart
+= BlockEntrySize
;
378 RegionLength
-= BlockEntrySize
;
381 // Break the inner loop when next block is a table
382 // Rerun GetBlockEntryListFromAddress to avoid page table memory leak
383 if (TableLevel
!= 3 && BlockEntry
<= LastBlockEntry
&&
384 (*BlockEntry
& TT_TYPE_MASK
) == TT_TYPE_TABLE_ENTRY
) {
387 } while ((RegionLength
>= BlockEntrySize
) && (BlockEntry
<= LastBlockEntry
));
388 } while (RegionLength
!= 0);
395 FillTranslationTable (
396 IN UINT64
*RootTable
,
397 IN ARM_MEMORY_REGION_DESCRIPTOR
*MemoryRegion
400 return UpdateRegionMapping (
402 MemoryRegion
->VirtualBase
,
403 MemoryRegion
->Length
,
404 ArmMemoryAttributeToPageAttribute (MemoryRegion
->Attributes
) | TT_AF
,
411 GcdAttributeToPageAttribute (
412 IN UINT64 GcdAttributes
415 UINT64 PageAttributes
;
417 switch (GcdAttributes
& EFI_MEMORY_CACHETYPE_MASK
) {
419 PageAttributes
= TT_ATTR_INDX_DEVICE_MEMORY
;
422 PageAttributes
= TT_ATTR_INDX_MEMORY_NON_CACHEABLE
;
425 PageAttributes
= TT_ATTR_INDX_MEMORY_WRITE_THROUGH
| TT_SH_INNER_SHAREABLE
;
428 PageAttributes
= TT_ATTR_INDX_MEMORY_WRITE_BACK
| TT_SH_INNER_SHAREABLE
;
431 PageAttributes
= TT_ATTR_INDX_MASK
;
435 if ((GcdAttributes
& EFI_MEMORY_XP
) != 0 ||
436 (GcdAttributes
& EFI_MEMORY_CACHETYPE_MASK
) == EFI_MEMORY_UC
) {
437 if (ArmReadCurrentEL () == AARCH64_EL2
) {
438 PageAttributes
|= TT_XN_MASK
;
440 PageAttributes
|= TT_UXN_MASK
| TT_PXN_MASK
;
444 if ((GcdAttributes
& EFI_MEMORY_RO
) != 0) {
445 PageAttributes
|= TT_AP_RO_RO
;
448 return PageAttributes
| TT_AF
;
452 ArmSetMemoryAttributes (
453 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
459 UINT64
*TranslationTable
;
460 UINT64 PageAttributes
;
461 UINT64 PageAttributeMask
;
463 PageAttributes
= GcdAttributeToPageAttribute (Attributes
);
464 PageAttributeMask
= 0;
466 if ((Attributes
& EFI_MEMORY_CACHETYPE_MASK
) == 0) {
468 // No memory type was set in Attributes, so we are going to update the
471 PageAttributes
&= TT_AP_MASK
| TT_UXN_MASK
| TT_PXN_MASK
;
472 PageAttributeMask
= ~(TT_ADDRESS_MASK_BLOCK_ENTRY
| TT_AP_MASK
|
473 TT_PXN_MASK
| TT_XN_MASK
);
476 TranslationTable
= ArmGetTTBR0BaseAddress ();
478 Status
= UpdateRegionMapping (
484 if (EFI_ERROR (Status
)) {
493 SetMemoryRegionAttribute (
494 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
496 IN UINT64 Attributes
,
497 IN UINT64 BlockEntryMask
503 RootTable
= ArmGetTTBR0BaseAddress ();
505 Status
= UpdateRegionMapping (RootTable
, BaseAddress
, Length
, Attributes
, BlockEntryMask
);
506 if (EFI_ERROR (Status
)) {
514 ArmSetMemoryRegionNoExec (
515 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
521 if (ArmReadCurrentEL () == AARCH64_EL1
) {
522 Val
= TT_PXN_MASK
| TT_UXN_MASK
;
527 return SetMemoryRegionAttribute (
531 ~TT_ADDRESS_MASK_BLOCK_ENTRY
);
535 ArmClearMemoryRegionNoExec (
536 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
542 // XN maps to UXN in the EL1&0 translation regime
543 Mask
= ~(TT_ADDRESS_MASK_BLOCK_ENTRY
| TT_PXN_MASK
| TT_XN_MASK
);
545 return SetMemoryRegionAttribute (
553 ArmSetMemoryRegionReadOnly (
554 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
558 return SetMemoryRegionAttribute (
562 ~TT_ADDRESS_MASK_BLOCK_ENTRY
);
566 ArmClearMemoryRegionReadOnly (
567 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
571 return SetMemoryRegionAttribute (
575 ~(TT_ADDRESS_MASK_BLOCK_ENTRY
| TT_AP_MASK
));
581 IN ARM_MEMORY_REGION_DESCRIPTOR
*MemoryTable
,
582 OUT VOID
**TranslationTableBase OPTIONAL
,
583 OUT UINTN
*TranslationTableSize OPTIONAL
586 VOID
* TranslationTable
;
587 UINT32 TranslationTableAttribute
;
590 UINTN RootTableEntryCount
;
594 if(MemoryTable
== NULL
) {
595 ASSERT (MemoryTable
!= NULL
);
596 return EFI_INVALID_PARAMETER
;
600 // Limit the virtual address space to what we can actually use: UEFI
601 // mandates a 1:1 mapping, so no point in making the virtual address
602 // space larger than the physical address space. We also have to take
603 // into account the architectural limitations that result from UEFI's
604 // use of 4 KB pages.
606 MaxAddress
= MIN (LShiftU64 (1ULL, ArmGetPhysicalAddressBits ()) - 1,
609 // Lookup the Table Level to get the information
610 LookupAddresstoRootTable (MaxAddress
, &T0SZ
, &RootTableEntryCount
);
613 // Set TCR that allows us to retrieve T0SZ in the subsequent functions
615 // Ideally we will be running at EL2, but should support EL1 as well.
616 // UEFI should not run at EL3.
617 if (ArmReadCurrentEL () == AARCH64_EL2
) {
618 //Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2
619 TCR
= T0SZ
| (1UL << 31) | (1UL << 23) | TCR_TG0_4KB
;
621 // Set the Physical Address Size using MaxAddress
622 if (MaxAddress
< SIZE_4GB
) {
624 } else if (MaxAddress
< SIZE_64GB
) {
626 } else if (MaxAddress
< SIZE_1TB
) {
628 } else if (MaxAddress
< SIZE_4TB
) {
630 } else if (MaxAddress
< SIZE_16TB
) {
632 } else if (MaxAddress
< SIZE_256TB
) {
635 DEBUG ((EFI_D_ERROR
, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress
));
636 ASSERT (0); // Bigger than 48-bit memory space are not supported
637 return EFI_UNSUPPORTED
;
639 } else if (ArmReadCurrentEL () == AARCH64_EL1
) {
640 // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
641 TCR
= T0SZ
| TCR_TG0_4KB
| TCR_TG1_4KB
| TCR_EPD1
;
643 // Set the Physical Address Size using MaxAddress
644 if (MaxAddress
< SIZE_4GB
) {
646 } else if (MaxAddress
< SIZE_64GB
) {
648 } else if (MaxAddress
< SIZE_1TB
) {
650 } else if (MaxAddress
< SIZE_4TB
) {
652 } else if (MaxAddress
< SIZE_16TB
) {
654 } else if (MaxAddress
< SIZE_256TB
) {
655 TCR
|= TCR_IPS_256TB
;
657 DEBUG ((EFI_D_ERROR
, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress
));
658 ASSERT (0); // Bigger than 48-bit memory space are not supported
659 return EFI_UNSUPPORTED
;
662 ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
663 return EFI_UNSUPPORTED
;
667 // Translation table walks are always cache coherent on ARMv8-A, so cache
668 // maintenance on page tables is never needed. Since there is a risk of
669 // loss of coherency when using mismatched attributes, and given that memory
670 // is mapped cacheable except for extraordinary cases (such as non-coherent
671 // DMA), have the page table walker perform cached accesses as well, and
672 // assert below that that matches the attributes we use for CPU accesses to
675 TCR
|= TCR_SH_INNER_SHAREABLE
|
676 TCR_RGN_OUTER_WRITE_BACK_ALLOC
|
677 TCR_RGN_INNER_WRITE_BACK_ALLOC
;
682 // Allocate pages for translation table
683 TranslationTable
= AllocatePages (1);
684 if (TranslationTable
== NULL
) {
685 return EFI_OUT_OF_RESOURCES
;
687 // We set TTBR0 just after allocating the table to retrieve its location from the subsequent
688 // functions without needing to pass this value across the functions. The MMU is only enabled
689 // after the translation tables are populated.
690 ArmSetTTBR0 (TranslationTable
);
692 if (TranslationTableBase
!= NULL
) {
693 *TranslationTableBase
= TranslationTable
;
696 if (TranslationTableSize
!= NULL
) {
697 *TranslationTableSize
= RootTableEntryCount
* sizeof(UINT64
);
700 ZeroMem (TranslationTable
, RootTableEntryCount
* sizeof(UINT64
));
702 // Disable MMU and caches. ArmDisableMmu() also invalidates the TLBs
704 ArmDisableDataCache ();
705 ArmDisableInstructionCache ();
707 // Make sure nothing sneaked into the cache
708 ArmCleanInvalidateDataCache ();
709 ArmInvalidateInstructionCache ();
711 TranslationTableAttribute
= TT_ATTR_INDX_INVALID
;
712 while (MemoryTable
->Length
!= 0) {
715 // Find the memory attribute for the Translation Table
716 if ((UINTN
)TranslationTable
>= MemoryTable
->PhysicalBase
&&
717 (UINTN
)TranslationTable
+ EFI_PAGE_SIZE
<= MemoryTable
->PhysicalBase
+
718 MemoryTable
->Length
) {
719 TranslationTableAttribute
= MemoryTable
->Attributes
;
723 Status
= FillTranslationTable (TranslationTable
, MemoryTable
);
724 if (EFI_ERROR (Status
)) {
725 goto FREE_TRANSLATION_TABLE
;
730 ASSERT (TranslationTableAttribute
== ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK
||
731 TranslationTableAttribute
== ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK
);
733 ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY
, MAIR_ATTR_DEVICE_MEMORY
) | // mapped to EFI_MEMORY_UC
734 MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE
, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE
) | // mapped to EFI_MEMORY_WC
735 MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH
, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH
) | // mapped to EFI_MEMORY_WT
736 MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK
, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
)); // mapped to EFI_MEMORY_WB
738 ArmDisableAlignmentCheck ();
739 ArmEnableStackAlignmentCheck ();
740 ArmEnableInstructionCache ();
741 ArmEnableDataCache ();
746 FREE_TRANSLATION_TABLE
:
747 FreePages (TranslationTable
, 1);
753 ArmMmuBaseLibConstructor (
757 extern UINT32 ArmReplaceLiveTranslationEntrySize
;
760 // The ArmReplaceLiveTranslationEntry () helper function may be invoked
761 // with the MMU off so we have to ensure that it gets cleaned to the PoC
763 WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry
,
764 ArmReplaceLiveTranslationEntrySize
);
766 return RETURN_SUCCESS
;