2 * File managing the MMU for ARMv8 architecture
4 * Copyright (c) 2011-2014, ARM Limited. All rights reserved.
5 * Copyright (c) 2016, Linaro Limited. All rights reserved.
6 * Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
8 * This program and the accompanying materials
9 * are licensed and made available under the terms and conditions of the BSD License
10 * which accompanies this distribution. The full text of the license may be found at
11 * http://opensource.org/licenses/bsd-license.php
13 * THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
19 #include <Chipset/AArch64.h>
20 #include <Library/BaseMemoryLib.h>
21 #include <Library/CacheMaintenanceLib.h>
22 #include <Library/MemoryAllocationLib.h>
23 #include <Library/ArmLib.h>
24 #include <Library/ArmMmuLib.h>
25 #include <Library/BaseLib.h>
26 #include <Library/DebugLib.h>
28 // We use this index definition to define an invalid block entry
// All bits set: no real MAIR attribute index (0..7) can ever equal this value,
// so it safely acts as a "no attribute found" sentinel (see ArmConfigureMmu).
29 #define TT_ATTR_INDX_INVALID ((UINT32)~0)
// Translate an ARM_MEMORY_REGION_ATTRIBUTES value into the VMSAv8-64
// translation-table attribute bits: a MAIR attribute index, shareability,
// and execute-never bits. Cacheable memory is mapped inner shareable;
// device memory is always marked execute-never.
// NOTE(review): the return type, opening brace and switch header are elided
// in this extraction — presumed UINT64/STATIC per the callers below.
33 ArmMemoryAttributeToPageAttribute (
34 IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
// Write-back cacheable (secure or non-secure view): inner shareable.
38 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK
:
39 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK
:
40 return TT_ATTR_INDX_MEMORY_WRITE_BACK
| TT_SH_INNER_SHAREABLE
;
// Write-through cacheable: also inner shareable.
42 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH
:
43 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH
:
44 return TT_ATTR_INDX_MEMORY_WRITE_THROUGH
| TT_SH_INNER_SHAREABLE
;
46 // Uncached and device mappings are treated as outer shareable by default,
47 case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED
:
48 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED
:
49 return TT_ATTR_INDX_MEMORY_NON_CACHEABLE
;
// Device memory is never executable. The EL2 translation regime has a
// single XN bit; the EL1&0 regime uses separate UXN/PXN bits.
53 case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE
:
54 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE
:
55 if (ArmReadCurrentEL () == AARCH64_EL2
)
56 return TT_ATTR_INDX_DEVICE_MEMORY
| TT_XN_MASK
;
58 return TT_ATTR_INDX_DEVICE_MEMORY
| TT_UXN_MASK
| TT_PXN_MASK
;
// Reverse mapping of the function above: decode a raw page-table descriptor's
// attribute bits back into UEFI GCD (EFI_MEMORY_*) attribute flags.
// Decodes the MAIR index into a cache type, then ORs in protection flags.
// NOTE(review): the `break;` after each case and the default label are elided
// in this extraction — verify against the full file before editing.
63 PageAttributeToGcdAttribute (
64 IN UINT64 PageAttributes
// Decode the memory type from the MAIR attribute index field.
69 switch (PageAttributes
& TT_ATTR_INDX_MASK
) {
70 case TT_ATTR_INDX_DEVICE_MEMORY
:
71 GcdAttributes
= EFI_MEMORY_UC
;
73 case TT_ATTR_INDX_MEMORY_NON_CACHEABLE
:
74 GcdAttributes
= EFI_MEMORY_WC
;
76 case TT_ATTR_INDX_MEMORY_WRITE_THROUGH
:
77 GcdAttributes
= EFI_MEMORY_WT
;
79 case TT_ATTR_INDX_MEMORY_WRITE_BACK
:
80 GcdAttributes
= EFI_MEMORY_WB
;
// Unknown attribute index: report and fall back to "no attributes".
83 DEBUG ((EFI_D_ERROR
, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes
));
85 // The Global Coherency Domain (GCD) value is defined as a bit set.
86 // Returning 0 means no attribute has been set.
90 // Determine protection attributes
91 if (((PageAttributes
& TT_AP_MASK
) == TT_AP_NO_RO
) || ((PageAttributes
& TT_AP_MASK
) == TT_AP_RO_RO
)) {
92 // Read only cases map to write-protect
93 GcdAttributes
|= EFI_MEMORY_RO
;
96 // Process eXecute Never attribute
// Either PXN or UXN set is reported as EFI_MEMORY_XP.
97 if ((PageAttributes
& (TT_PXN_MASK
| TT_UXN_MASK
)) != 0 ) {
98 GcdAttributes
|= EFI_MEMORY_XP
;
101 return GcdAttributes
;
// Each 4KB translation-table level resolves 9 bits of the virtual address.
105 #define BITS_PER_LEVEL 9
// Derive, from a T0SZ value, the starting (root) lookup level and the number
// of entries in the root table. With 4KB granules, each increase of T0SZ by
// 9 pushes the walk down one level; the remainder determines how many of the
// root table's 512 slots are actually used.
// NOTE(review): the T0SZ parameter declaration and the null-check on
// TableLevel are elided in this extraction.
108 GetRootTranslationTableInfo (
110 OUT UINTN
*TableLevel
,
111 OUT UINTN
*TableEntryCount
114 // Get the level of the root table
116 *TableLevel
= (T0SZ
- MIN_T0SZ
) / BITS_PER_LEVEL
;
// TableEntryCount is optional — callers may pass NULL when they only
// need the level (see GetBlockEntryListFromAddress).
119 if (TableEntryCount
) {
120 *TableEntryCount
= 1UL << (BITS_PER_LEVEL
- (T0SZ
- MIN_T0SZ
) % BITS_PER_LEVEL
);
// Body fragment of ReplaceLiveEntry(): update a translation-table entry that
// may currently be in use by the MMU.
// NOTE(review): the function signature, the direct-store branch taken when
// the MMU is disabled, and the else keyword are elided in this extraction.
// When the MMU is on, the write is delegated to the assembler helper, which
// presumably performs the required break-before-make sequence — the
// constructor below cleans that helper to the PoC so it can run MMU-off.
131 if (!ArmMmuEnabled ()) {
134 ArmReplaceLiveTranslationEntry (Entry
, Value
);
// Compute the T0SZ value and root-table entry count needed to cover
// addresses up to MaxAddress: find the highest set bit of MaxAddress,
// derive T0SZ from it, then delegate to GetRootTranslationTableInfo.
// NOTE(review): the OUT T0SZ parameter, the loop's break, and the
// `*T0SZ = 64 - TopBit` style computation are elided in this extraction.
140 LookupAddresstoRootTable (
141 IN UINT64 MaxAddress
,
143 OUT UINTN
*TableEntryCount
148 // Check the parameters are not NULL
149 ASSERT ((T0SZ
!= NULL
) && (TableEntryCount
!= NULL
));
151 // Look for the highest bit set in MaxAddress
152 for (TopBit
= 63; TopBit
!= 0; TopBit
--) {
153 if ((1ULL << TopBit
) & MaxAddress
) {
154 // MaxAddress top bit is found
// TopBit == 0 would mean MaxAddress had no usable bits — invalid input.
159 ASSERT (TopBit
!= 0);
161 // Calculate T0SZ from the top bit of the MaxAddress
164 // Get the Table info from T0SZ
// Root level itself is not needed here, hence the NULL middle argument.
165 GetRootTranslationTableInfo (*T0SZ
, NULL
, TableEntryCount
);
// Walk (and grow, on demand) the translation tables to find the entry list
// that maps RegionStart at the largest block size compatible with the
// region's alignment and size.
//
// RootTable        - root of the translation-table hierarchy (TTBR0 table).
// RegionStart      - 4KB-aligned start address of the region to map.
// TableLevel       - receives the lookup level of the returned entries.
// BlockEntrySize   - on input the region size; on output the block size
//                    covered by one entry at the chosen level.
// LastBlockEntry   - receives the address of the last usable entry in the
//                    table containing the returned entry.
//
// Side effects: may split an existing block entry into a lower-level table,
// or allocate and link a fresh table for an invalid entry (AllocatePages).
// NOTE(review): the return statements, several loop/branch closers, and the
// declarations of T0SZ/PageLevel/BlockEntry/IndexLevel/Attributes/Index are
// elided in this extraction — the function presumably returns BlockEntry
// (NULL on allocation failure), per the caller in UpdateRegionMapping.
170 GetBlockEntryListFromAddress (
171 IN UINT64
*RootTable
,
172 IN UINT64 RegionStart
,
173 OUT UINTN
*TableLevel
,
174 IN OUT UINT64
*BlockEntrySize
,
175 OUT UINT64
**LastBlockEntry
178 UINTN RootTableLevel
;
179 UINTN RootTableEntryCount
;
180 UINT64
*TranslationTable
;
182 UINT64
*SubTableBlockEntry
;
183 UINT64 BlockEntryAddress
;
184 UINTN BaseAddressAlignment
;
190 UINT64 TableAttributes
;
192 // Initialize variable
195 // Ensure the parameters are valid
196 if (!(TableLevel
&& BlockEntrySize
&& LastBlockEntry
)) {
197 ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER
);
201 // Ensure the Region is aligned on 4KB boundary
202 if ((RegionStart
& (SIZE_4KB
- 1)) != 0) {
203 ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER
);
207 // Ensure the required size is aligned on 4KB boundary and not 0
208 if ((*BlockEntrySize
& (SIZE_4KB
- 1)) != 0 || *BlockEntrySize
== 0) {
209 ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER
);
// Read the live T0SZ from TCR so the walk matches the active MMU config
// (ArmConfigureMmu programs TCR before populating the tables).
213 T0SZ
= ArmGetTCR () & TCR_T0SZ_MASK
;
214 // Get the Table info from T0SZ
215 GetRootTranslationTableInfo (T0SZ
, &RootTableLevel
, &RootTableEntryCount
);
217 // If the start address is 0x0 then we use the size of the region to identify the alignment
218 if (RegionStart
== 0) {
219 // Identify the highest possible alignment for the Region Size
220 BaseAddressAlignment
= LowBitSet64 (*BlockEntrySize
);
222 // Identify the highest possible alignment for the Base Address
223 BaseAddressAlignment
= LowBitSet64 (RegionStart
);
226 // Identify the Page Level the RegionStart must belong to. Note that PageLevel
227 // should be at least 1 since block translations are not supported at level 0
// 4KB granule: level 3 covers bits [20:12], each level above adds 9 bits.
228 PageLevel
= MAX (3 - ((BaseAddressAlignment
- 12) / 9), 1);
230 // If the required size is smaller than the current block size then we need to go to the page below.
231 // The PageLevel was calculated on the Base Address alignment but did not take in account the alignment
232 // of the allocation size
233 while (*BlockEntrySize
< TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel
)) {
234 // It does not fit so we need to go a page level above
239 // Get the Table Descriptor for the corresponding PageLevel. We need to decompose RegionStart to get appropriate entries
242 TranslationTable
= RootTable
;
// Descend from the root level down to the target PageLevel.
243 for (IndexLevel
= RootTableLevel
; IndexLevel
<= PageLevel
; IndexLevel
++) {
244 BlockEntry
= (UINT64
*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable
, IndexLevel
, RegionStart
);
// Case 1: the entry already points to a lower-level table — follow it.
246 if ((IndexLevel
!= 3) && ((*BlockEntry
& TT_TYPE_MASK
) == TT_TYPE_TABLE_ENTRY
)) {
247 // Go to the next table
248 TranslationTable
= (UINT64
*)(*BlockEntry
& TT_ADDRESS_MASK_DESCRIPTION_TABLE
);
250 // If we are at the last level then update the last level to next level
251 if (IndexLevel
== PageLevel
) {
252 // Enter the next level
// Case 2: the entry is an existing block mapping.
255 } else if ((*BlockEntry
& TT_TYPE_MASK
) == TT_TYPE_BLOCK_ENTRY
) {
256 // If we are not at the last level then we need to split this BlockEntry
257 if (IndexLevel
!= PageLevel
) {
258 // Retrieve the attributes from the block entry
259 Attributes
= *BlockEntry
& TT_ATTRIBUTES_MASK
;
261 // Convert the block entry attributes into Table descriptor attributes
262 TableAttributes
= TT_TABLE_AP_NO_PERMISSION
;
263 if (Attributes
& TT_NS
) {
264 TableAttributes
= TT_TABLE_NS
;
267 // Get the address corresponding at this entry
// Shift down then back up to clear the offset bits below this level,
// leaving the block-aligned base address of the region this entry maps.
268 BlockEntryAddress
= RegionStart
;
269 BlockEntryAddress
= BlockEntryAddress
>> TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel
);
270 // Shift back to right to set zero before the effective address
271 BlockEntryAddress
= BlockEntryAddress
<< TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel
);
273 // Set the correct entry type for the next page level
274 if ((IndexLevel
+ 1) == 3) {
275 Attributes
|= TT_TYPE_BLOCK_ENTRY_LEVEL3
;
277 Attributes
|= TT_TYPE_BLOCK_ENTRY
;
280 // Create a new translation table
281 TranslationTable
= AllocatePages (1);
282 if (TranslationTable
== NULL
) {
286 // Populate the newly created lower level table
// Replicate the old block mapping as TT_ENTRY_COUNT smaller blocks so
// the split is transparent to already-mapped addresses.
287 SubTableBlockEntry
= TranslationTable
;
288 for (Index
= 0; Index
< TT_ENTRY_COUNT
; Index
++) {
289 *SubTableBlockEntry
= Attributes
| (BlockEntryAddress
+ (Index
<< TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel
+ 1)));
290 SubTableBlockEntry
++;
293 // Fill the BlockEntry with the new TranslationTable
// ReplaceLiveEntry handles the case where this entry is in active use.
294 ReplaceLiveEntry (BlockEntry
,
295 ((UINTN
)TranslationTable
& TT_ADDRESS_MASK_DESCRIPTION_TABLE
) | TableAttributes
| TT_TYPE_TABLE_ENTRY
);
// Case 3: invalid entry above the target level — allocate a fresh,
// zeroed table and link it in.
298 if (IndexLevel
!= PageLevel
) {
300 // Case when we have an Invalid Entry and we are at a page level above of the one targetted.
303 // Create a new translation table
304 TranslationTable
= AllocatePages (1);
305 if (TranslationTable
== NULL
) {
// Zero-fill: all entries start invalid.
309 ZeroMem (TranslationTable
, TT_ENTRY_COUNT
* sizeof(UINT64
));
311 // Fill the new BlockEntry with the TranslationTable
312 *BlockEntry
= ((UINTN
)TranslationTable
& TT_ADDRESS_MASK_DESCRIPTION_TABLE
) | TT_TYPE_TABLE_ENTRY
;
317 // Expose the found PageLevel to the caller
318 *TableLevel
= PageLevel
;
320 // Now, we have the Table Level we can get the Block Size associated to this table
321 *BlockEntrySize
= TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel
);
323 // The last block of the root table depends on the number of entry in this table,
324 // otherwise it is always the (TT_ENTRY_COUNT - 1)th entry in the table.
325 *LastBlockEntry
= TT_LAST_BLOCK_ADDRESS(TranslationTable
,
326 (PageLevel
== RootTableLevel
) ? RootTableEntryCount
: TT_ENTRY_COUNT
);
// Map (or re-map) the region [RegionStart, RegionStart + RegionLength) in the
// tables rooted at RootTable, applying Attributes to each entry.
// BlockEntryMask selects which bits of an existing entry are preserved
// (pass ~0 via the zero mask convention of SetMemoryRegionAttribute callers,
// or a narrower mask to update only permission bits in place).
//
// Returns EFI_SUCCESS, EFI_INVALID_PARAMETER on a misaligned/zero length,
// or EFI_OUT_OF_RESOURCES when table allocation fails.
// NOTE(review): the outer/inner `do {` headers, the `BlockEntry++` advance,
// the inner `break`, the final return, and the declarations of
// TableLevel/BlockEntry/Type are elided in this extraction.
333 UpdateRegionMapping (
334 IN UINT64
*RootTable
,
335 IN UINT64 RegionStart
,
336 IN UINT64 RegionLength
,
337 IN UINT64 Attributes
,
338 IN UINT64 BlockEntryMask
343 UINT64
*LastBlockEntry
;
344 UINT64 BlockEntrySize
;
347 // Ensure the Length is aligned on 4KB boundary
348 if ((RegionLength
== 0) || ((RegionLength
& (SIZE_4KB
- 1)) != 0)) {
349 ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER
);
350 return EFI_INVALID_PARAMETER
;
354 // Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor
355 // such as the the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor
// BlockEntrySize is in/out: pass the remaining length, receive the block
// size at the level that was actually selected.
356 BlockEntrySize
= RegionLength
;
357 BlockEntry
= GetBlockEntryListFromAddress (RootTable
, RegionStart
, &TableLevel
, &BlockEntrySize
, &LastBlockEntry
);
358 if (BlockEntry
== NULL
) {
359 // GetBlockEntryListFromAddress() return NULL when it fails to allocate new pages from the Translation Tables
360 return EFI_OUT_OF_RESOURCES
;
// Level-3 entries use a distinct descriptor type encoding.
363 if (TableLevel
!= 3) {
364 Type
= TT_TYPE_BLOCK_ENTRY
;
366 Type
= TT_TYPE_BLOCK_ENTRY_LEVEL3
;
370 // Fill the Block Entry with attribute and output block address
// Keep only the bits selected by BlockEntryMask, then merge the new
// output address, attributes and descriptor type.
371 *BlockEntry
&= BlockEntryMask
;
372 *BlockEntry
|= (RegionStart
& TT_ADDRESS_MASK_BLOCK_ENTRY
) | Attributes
| Type
;
374 // Go to the next BlockEntry
375 RegionStart
+= BlockEntrySize
;
376 RegionLength
-= BlockEntrySize
;
379 // Break the inner loop when next block is a table
380 // Rerun GetBlockEntryListFromAddress to avoid page table memory leak
381 if (TableLevel
!= 3 &&
382 (*BlockEntry
& TT_TYPE_MASK
) == TT_TYPE_TABLE_ENTRY
) {
// Inner loop: keep filling entries at this level while the region is
// large enough and we have not run off the end of the current table.
385 } while ((RegionLength
>= BlockEntrySize
) && (BlockEntry
<= LastBlockEntry
));
386 } while (RegionLength
!= 0);
// Map one ARM_MEMORY_REGION_DESCRIPTOR into the tables rooted at RootTable:
// thin wrapper over UpdateRegionMapping that converts the descriptor's
// attributes to page-table bits and sets the Access Flag (TT_AF) so the
// first access does not fault.
// NOTE(review): the RootTable argument line and the trailing mask argument
// of the UpdateRegionMapping call are elided in this extraction.
393 FillTranslationTable (
394 IN UINT64
*RootTable
,
395 IN ARM_MEMORY_REGION_DESCRIPTOR
*MemoryRegion
398 return UpdateRegionMapping (
400 MemoryRegion
->VirtualBase
,
401 MemoryRegion
->Length
,
402 ArmMemoryAttributeToPageAttribute (MemoryRegion
->Attributes
) | TT_AF
,
// Convert UEFI GCD attributes (EFI_MEMORY_*) into VMSAv8-64 page-table
// attribute bits. Inverse direction of PageAttributeToGcdAttribute above.
// NOTE(review): the case labels (EFI_MEMORY_UC/WC/WT/WB), default label and
// break statements of the switch are elided in this extraction.
409 GcdAttributeToPageAttribute (
410 IN UINT64 GcdAttributes
413 UINT64 PageAttributes
;
415 switch (GcdAttributes
& EFI_MEMORY_CACHETYPE_MASK
) {
417 PageAttributes
= TT_ATTR_INDX_DEVICE_MEMORY
;
420 PageAttributes
= TT_ATTR_INDX_MEMORY_NON_CACHEABLE
;
423 PageAttributes
= TT_ATTR_INDX_MEMORY_WRITE_THROUGH
| TT_SH_INNER_SHAREABLE
;
426 PageAttributes
= TT_ATTR_INDX_MEMORY_WRITE_BACK
| TT_SH_INNER_SHAREABLE
;
// No recognized cache type: use the all-ones index as a sentinel.
429 PageAttributes
= TT_ATTR_INDX_MASK
;
// Execute-never is applied when requested explicitly (EFI_MEMORY_XP) or
// implicitly for device (UC) memory. EL2 has one XN bit; EL1&0 has UXN/PXN.
433 if ((GcdAttributes
& EFI_MEMORY_XP
) != 0 ||
434 (GcdAttributes
& EFI_MEMORY_CACHETYPE_MASK
) == EFI_MEMORY_UC
) {
435 if (ArmReadCurrentEL () == AARCH64_EL2
) {
436 PageAttributes
|= TT_XN_MASK
;
438 PageAttributes
|= TT_UXN_MASK
| TT_PXN_MASK
;
442 if ((GcdAttributes
& EFI_MEMORY_RO
) != 0) {
443 PageAttributes
|= TT_AP_RO_RO
;
// Always set the Access Flag so mapped pages do not take an access fault.
446 return PageAttributes
| TT_AF
;
// Apply GCD-style attributes to [BaseAddress, BaseAddress + Length) in the
// active TTBR0 translation tables. When Attributes carries no cache type,
// only the permission bits (AP/UXN/PXN) are updated and the existing memory
// type is preserved via PageAttributeMask.
// NOTE(review): the Length parameter, the UpdateRegionMapping argument list,
// the error-path return and the final TLB-invalidate/return are elided in
// this extraction. VirtualMask appears unused in the visible lines.
450 SetMemoryAttributes (
451 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
453 IN UINT64 Attributes
,
454 IN EFI_PHYSICAL_ADDRESS VirtualMask
458 UINT64
*TranslationTable
;
459 UINT64 PageAttributes
;
460 UINT64 PageAttributeMask
;
462 PageAttributes
= GcdAttributeToPageAttribute (Attributes
);
// Mask of 0 means "replace the whole entry" in UpdateRegionMapping.
463 PageAttributeMask
= 0;
465 if ((Attributes
& EFI_MEMORY_CACHETYPE_MASK
) == 0) {
467 // No memory type was set in Attributes, so we are going to update the
// permissions only: strip everything but AP/UXN/PXN from the new value,
// and preserve all non-permission bits of the existing entry.
470 PageAttributes
&= TT_AP_MASK
| TT_UXN_MASK
| TT_PXN_MASK
;
471 PageAttributeMask
= ~(TT_ADDRESS_MASK_BLOCK_ENTRY
| TT_AP_MASK
|
472 TT_PXN_MASK
| TT_XN_MASK
);
475 TranslationTable
= ArmGetTTBR0BaseAddress ();
477 Status
= UpdateRegionMapping (
483 if (EFI_ERROR (Status
)) {
487 // Invalidate all TLB entries so changes are synced
// Internal helper shared by the ArmSet/ClearMemoryRegion* entry points:
// applies raw page-table Attributes with a preservation BlockEntryMask to a
// region in the active TTBR0 tables, then (per the trailing comment) flushes
// the TLB so the change takes effect.
// NOTE(review): the Length parameter, RootTable declaration, error-path
// return, and the TLB invalidation call itself are elided in this extraction.
495 SetMemoryRegionAttribute (
496 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
498 IN UINT64 Attributes
,
499 IN UINT64 BlockEntryMask
505 RootTable
= ArmGetTTBR0BaseAddress ();
507 Status
= UpdateRegionMapping (RootTable
, BaseAddress
, Length
, Attributes
, BlockEntryMask
);
508 if (EFI_ERROR (Status
)) {
512 // Invalidate all TLB entries so changes are synced
// Public API: mark a memory region execute-never. At EL1 both UXN and PXN
// are set; the elided else branch presumably sets TT_XN_MASK for EL2.
// The mask preserves everything except the output-address field.
// NOTE(review): the Length parameter, the EL2 branch, and the middle
// arguments of the SetMemoryRegionAttribute call are elided here.
519 ArmSetMemoryRegionNoExec (
520 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
526 if (ArmReadCurrentEL () == AARCH64_EL1
) {
527 Val
= TT_PXN_MASK
| TT_UXN_MASK
;
532 return SetMemoryRegionAttribute (
536 ~TT_ADDRESS_MASK_BLOCK_ENTRY
);
// Public API: clear the execute-never markings on a region. The mask drops
// the address field plus PXN and XN; since XN (EL2) aliases UXN (EL1&0) in
// descriptor bit position, one mask covers both translation regimes.
// NOTE(review): the Length parameter and the arguments of the
// SetMemoryRegionAttribute call are elided in this extraction.
540 ArmClearMemoryRegionNoExec (
541 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
547 // XN maps to UXN in the EL1&0 translation regime
548 Mask
= ~(TT_ADDRESS_MASK_BLOCK_ENTRY
| TT_PXN_MASK
| TT_XN_MASK
);
550 return SetMemoryRegionAttribute (
// Public API: mark a region read-only. Delegates to SetMemoryRegionAttribute
// with a mask that preserves everything but the output-address field; the
// elided middle arguments presumably pass TT_AP_RO_RO as the attribute.
558 ArmSetMemoryRegionReadOnly (
559 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
563 return SetMemoryRegionAttribute (
567 ~TT_ADDRESS_MASK_BLOCK_ENTRY
);
// Public API: make a region writable again. The mask clears the AP
// (access-permission) field in addition to the address field, so the elided
// attribute argument (presumably TT_AP_RW_RW) fully replaces the old
// permissions.
571 ArmClearMemoryRegionReadOnly (
572 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
576 return SetMemoryRegionAttribute (
580 ~(TT_ADDRESS_MASK_BLOCK_ENTRY
| TT_AP_MASK
));
// ArmConfigureMmu: build the translation tables for the regions described by
// MemoryTable, program TCR/MAIR/TTBR0, and (in elided trailing code)
// enable the MMU. Optionally returns the table base and size to the caller.
//
// Flow visible here:
//   1. Derive T0SZ and root-table entry count from PcdPrePiCpuMemorySize.
//   2. Build TCR for the current EL (EL2 or EL1), selecting an IPS/PS field
//      from MaxAddress; >48-bit spaces and EL3 are rejected.
//   3. Force cacheable, inner-shareable table walks.
//   4. Allocate + zero the root table, publish it via ArmSetTTBR0 early so
//      helper functions can retrieve it.
//   5. Disable MMU/caches, populate tables via FillTranslationTable, and
//      assert the table itself ends up in write-back memory (walker and CPU
//      attributes must match to avoid loss of coherency).
//   6. Program MAIR with the four attribute indices used by this library.
//
// NOTE(review): the function name line, declarations (MaxAddress, T0SZ, TCR,
// Status), the IPS/PS assignment lines inside the EL2 size ladder, the
// ArmSetTCR/ArmDisableMmu/ArmEnableMmu calls, loop increments, and the final
// return are elided in this extraction.
586 IN ARM_MEMORY_REGION_DESCRIPTOR
*MemoryTable
,
587 OUT VOID
**TranslationTableBase OPTIONAL
,
588 OUT UINTN
*TranslationTableSize OPTIONAL
591 VOID
* TranslationTable
;
592 UINT32 TranslationTableAttribute
;
595 UINTN RootTableEntryCount
;
599 if(MemoryTable
== NULL
) {
600 ASSERT (MemoryTable
!= NULL
);
601 return EFI_INVALID_PARAMETER
;
604 // Cover the entire GCD memory space
605 MaxAddress
= (1UL << PcdGet8 (PcdPrePiCpuMemorySize
)) - 1;
607 // Lookup the Table Level to get the information
608 LookupAddresstoRootTable (MaxAddress
, &T0SZ
, &RootTableEntryCount
);
611 // Set TCR that allows us to retrieve T0SZ in the subsequent functions
613 // Ideally we will be running at EL2, but should support EL1 as well.
614 // UEFI should not run at EL3.
615 if (ArmReadCurrentEL () == AARCH64_EL2
) {
616 //Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2
617 TCR
= T0SZ
| (1UL << 31) | (1UL << 23) | TCR_TG0_4KB
;
619 // Set the Physical Address Size using MaxAddress
// Size ladder: each branch (bodies elided) ORs in the matching TCR_PS_*
// physical-address-size encoding.
620 if (MaxAddress
< SIZE_4GB
) {
622 } else if (MaxAddress
< SIZE_64GB
) {
624 } else if (MaxAddress
< SIZE_1TB
) {
626 } else if (MaxAddress
< SIZE_4TB
) {
628 } else if (MaxAddress
< SIZE_16TB
) {
630 } else if (MaxAddress
< SIZE_256TB
) {
633 DEBUG ((EFI_D_ERROR
, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress
));
634 ASSERT (0); // Bigger than 48-bit memory space are not supported
635 return EFI_UNSUPPORTED
;
637 } else if (ArmReadCurrentEL () == AARCH64_EL1
) {
638 // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
639 TCR
= T0SZ
| TCR_TG0_4KB
| TCR_TG1_4KB
| TCR_EPD1
;
641 // Set the Physical Address Size using MaxAddress
// Same ladder for EL1, using TCR_IPS_* encodings (only 256TB visible).
642 if (MaxAddress
< SIZE_4GB
) {
644 } else if (MaxAddress
< SIZE_64GB
) {
646 } else if (MaxAddress
< SIZE_1TB
) {
648 } else if (MaxAddress
< SIZE_4TB
) {
650 } else if (MaxAddress
< SIZE_16TB
) {
652 } else if (MaxAddress
< SIZE_256TB
) {
653 TCR
|= TCR_IPS_256TB
;
655 DEBUG ((EFI_D_ERROR
, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress
));
656 ASSERT (0); // Bigger than 48-bit memory space are not supported
657 return EFI_UNSUPPORTED
;
660 ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
661 return EFI_UNSUPPORTED
;
665 // Translation table walks are always cache coherent on ARMv8-A, so cache
666 // maintenance on page tables is never needed. Since there is a risk of
667 // loss of coherency when using mismatched attributes, and given that memory
668 // is mapped cacheable except for extraordinary cases (such as non-coherent
669 // DMA), have the page table walker perform cached accesses as well, and
670 // assert below that that matches the attributes we use for CPU accesses to
673 TCR
|= TCR_SH_INNER_SHAREABLE
|
674 TCR_RGN_OUTER_WRITE_BACK_ALLOC
|
675 TCR_RGN_INNER_WRITE_BACK_ALLOC
;
680 // Allocate pages for translation table
681 TranslationTable
= AllocatePages (1);
682 if (TranslationTable
== NULL
) {
683 return EFI_OUT_OF_RESOURCES
;
685 // We set TTBR0 just after allocating the table to retrieve its location from the subsequent
686 // functions without needing to pass this value across the functions. The MMU is only enabled
687 // after the translation tables are populated.
688 ArmSetTTBR0 (TranslationTable
);
690 if (TranslationTableBase
!= NULL
) {
691 *TranslationTableBase
= TranslationTable
;
694 if (TranslationTableSize
!= NULL
) {
// Size reported is only the used portion of the root table.
695 *TranslationTableSize
= RootTableEntryCount
* sizeof(UINT64
);
698 ZeroMem (TranslationTable
, RootTableEntryCount
* sizeof(UINT64
));
700 // Disable MMU and caches. ArmDisableMmu() also invalidates the TLBs
702 ArmDisableDataCache ();
703 ArmDisableInstructionCache ();
705 // Make sure nothing sneaked into the cache
706 ArmCleanInvalidateDataCache ();
707 ArmInvalidateInstructionCache ();
// Walk the descriptor list, mapping each region and remembering the
// attribute of whichever region contains the translation table itself.
709 TranslationTableAttribute
= TT_ATTR_INDX_INVALID
;
710 while (MemoryTable
->Length
!= 0) {
713 // Find the memory attribute for the Translation Table
714 if ((UINTN
)TranslationTable
>= MemoryTable
->PhysicalBase
&&
715 (UINTN
)TranslationTable
+ EFI_PAGE_SIZE
<= MemoryTable
->PhysicalBase
+
716 MemoryTable
->Length
) {
717 TranslationTableAttribute
= MemoryTable
->Attributes
;
721 Status
= FillTranslationTable (TranslationTable
, MemoryTable
);
722 if (EFI_ERROR (Status
)) {
723 goto FREE_TRANSLATION_TABLE
;
// The walker was configured (above) for write-back cached accesses, so the
// table pages must be mapped write-back too — mismatched attributes risk
// loss of coherency.
728 ASSERT (TranslationTableAttribute
== ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK
||
729 TranslationTableAttribute
== ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK
);
// Program MAIR with the four attribute slots this library's indices select.
731 ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY
, MAIR_ATTR_DEVICE_MEMORY
) | // mapped to EFI_MEMORY_UC
732 MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE
, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE
) | // mapped to EFI_MEMORY_WC
733 MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH
, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH
) | // mapped to EFI_MEMORY_WT
734 MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK
, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
)); // mapped to EFI_MEMORY_WB
736 ArmDisableAlignmentCheck ();
737 ArmEnableStackAlignmentCheck ();
738 ArmEnableInstructionCache ();
739 ArmEnableDataCache ();
// Error path: release the root table allocated above.
744 FREE_TRANSLATION_TABLE
:
745 FreePages (TranslationTable
, 1);
// Library constructor: clean the ArmReplaceLiveTranslationEntry helper's
// code to the Point of Coherency, because that routine may later run with
// the MMU (and therefore the data cache) disabled and must fetch coherent
// instructions. The helper's size is exported by the assembler source.
751 ArmMmuBaseLibConstructor (
755 extern UINT32 ArmReplaceLiveTranslationEntrySize
;
758 // The ArmReplaceLiveTranslationEntry () helper function may be invoked
759 // with the MMU off so we have to ensure that it gets cleaned to the PoC
761 WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry
,
762 ArmReplaceLiveTranslationEntrySize
);
764 return RETURN_SUCCESS
;