2 * File managing the MMU for ARMv8 architecture
4 * Copyright (c) 2011-2014, ARM Limited. All rights reserved.
5 * Copyright (c) 2016, Linaro Limited. All rights reserved.
6 * Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
8 * This program and the accompanying materials
9 * are licensed and made available under the terms and conditions of the BSD License
10 * which accompanies this distribution. The full text of the license may be found at
11 * http://opensource.org/licenses/bsd-license.php
13 * THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
19 #include <Chipset/AArch64.h>
20 #include <Library/BaseMemoryLib.h>
21 #include <Library/CacheMaintenanceLib.h>
22 #include <Library/MemoryAllocationLib.h>
23 #include <Library/ArmLib.h>
24 #include <Library/ArmMmuLib.h>
25 #include <Library/BaseLib.h>
26 #include <Library/DebugLib.h>
28 // We use this index definition to define an invalid block entry
29 #define TT_ATTR_INDX_INVALID ((UINT32)~0)
33 ArmMemoryAttributeToPageAttribute (
34 IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
38 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE
:
39 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE
:
40 return TT_ATTR_INDX_MEMORY_WRITE_BACK
;
42 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK
:
43 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK
:
44 return TT_ATTR_INDX_MEMORY_WRITE_BACK
| TT_SH_INNER_SHAREABLE
;
46 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH
:
47 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH
:
48 return TT_ATTR_INDX_MEMORY_WRITE_THROUGH
| TT_SH_INNER_SHAREABLE
;
50 // Uncached and device mappings are treated as outer shareable by default,
51 case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED
:
52 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED
:
53 return TT_ATTR_INDX_MEMORY_NON_CACHEABLE
;
57 case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE
:
58 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE
:
59 if (ArmReadCurrentEL () == AARCH64_EL2
)
60 return TT_ATTR_INDX_DEVICE_MEMORY
| TT_XN_MASK
;
62 return TT_ATTR_INDX_DEVICE_MEMORY
| TT_UXN_MASK
| TT_PXN_MASK
;
67 PageAttributeToGcdAttribute (
68 IN UINT64 PageAttributes
73 switch (PageAttributes
& TT_ATTR_INDX_MASK
) {
74 case TT_ATTR_INDX_DEVICE_MEMORY
:
75 GcdAttributes
= EFI_MEMORY_UC
;
77 case TT_ATTR_INDX_MEMORY_NON_CACHEABLE
:
78 GcdAttributes
= EFI_MEMORY_WC
;
80 case TT_ATTR_INDX_MEMORY_WRITE_THROUGH
:
81 GcdAttributes
= EFI_MEMORY_WT
;
83 case TT_ATTR_INDX_MEMORY_WRITE_BACK
:
84 GcdAttributes
= EFI_MEMORY_WB
;
87 DEBUG ((EFI_D_ERROR
, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes
));
89 // The Global Coherency Domain (GCD) value is defined as a bit set.
90 // Returning 0 means no attribute has been set.
94 // Determine protection attributes
95 if (((PageAttributes
& TT_AP_MASK
) == TT_AP_NO_RO
) || ((PageAttributes
& TT_AP_MASK
) == TT_AP_RO_RO
)) {
96 // Read only cases map to write-protect
97 GcdAttributes
|= EFI_MEMORY_RO
;
100 // Process eXecute Never attribute
101 if ((PageAttributes
& (TT_PXN_MASK
| TT_UXN_MASK
)) != 0 ) {
102 GcdAttributes
|= EFI_MEMORY_XP
;
105 return GcdAttributes
;
109 #define BITS_PER_LEVEL 9
112 GetRootTranslationTableInfo (
114 OUT UINTN
*TableLevel
,
115 OUT UINTN
*TableEntryCount
118 // Get the level of the root table
120 *TableLevel
= (T0SZ
- MIN_T0SZ
) / BITS_PER_LEVEL
;
123 if (TableEntryCount
) {
124 *TableEntryCount
= 1UL << (BITS_PER_LEVEL
- (T0SZ
- MIN_T0SZ
) % BITS_PER_LEVEL
);
135 if (!ArmMmuEnabled ()) {
138 ArmReplaceLiveTranslationEntry (Entry
, Value
);
144 LookupAddresstoRootTable (
145 IN UINT64 MaxAddress
,
147 OUT UINTN
*TableEntryCount
152 // Check the parameters are not NULL
153 ASSERT ((T0SZ
!= NULL
) && (TableEntryCount
!= NULL
));
155 // Look for the highest bit set in MaxAddress
156 for (TopBit
= 63; TopBit
!= 0; TopBit
--) {
157 if ((1ULL << TopBit
) & MaxAddress
) {
158 // MaxAddress top bit is found
163 ASSERT (TopBit
!= 0);
165 // Calculate T0SZ from the top bit of the MaxAddress
168 // Get the Table info from T0SZ
169 GetRootTranslationTableInfo (*T0SZ
, NULL
, TableEntryCount
);
//
// Walk (and, where necessary, extend) the translation tables to find the
// block/page entry covering RegionStart.
//
// @param RootTable       Root translation table (TTBR0 contents).
// @param RegionStart     Start of the region; must be 4 KB aligned.
// @param TableLevel      Receives the level at which the entry was found.
// @param BlockEntrySize  In: requested region size; out: the block size
//                        covered by one entry at the returned level.
// @param LastBlockEntry  Receives the address of the last entry of the
//                        table holding the returned entry.
//
// @return Pointer to the matching entry, or NULL on bad parameters or
//         when a new table page cannot be allocated.
//
STATIC
UINT64*
GetBlockEntryListFromAddress (
  IN  UINT64       *RootTable,
  IN  UINT64        RegionStart,
  OUT UINTN        *TableLevel,
  IN OUT UINT64    *BlockEntrySize,
  OUT UINT64      **LastBlockEntry
  )
{
  UINTN   RootTableLevel;
  UINTN   RootTableEntryCount;
  UINT64 *TranslationTable;
  UINT64 *BlockEntry;
  UINT64 *SubTableBlockEntry;
  UINT64  BlockEntryAddress;
  UINTN   BaseAddressAlignment;
  UINTN   PageLevel;
  UINTN   Index;
  UINTN   IndexLevel;
  UINTN   T0SZ;
  UINT64  Attributes;
  UINT64  TableAttributes;

  // Initialize variable
  BlockEntry = NULL;

  // Ensure the parameters are valid
  if (!(TableLevel && BlockEntrySize && LastBlockEntry)) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // Ensure the Region is aligned on 4KB boundary
  if ((RegionStart & (SIZE_4KB - 1)) != 0) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // Ensure the required size is aligned on 4KB boundary and not 0
  if ((*BlockEntrySize & (SIZE_4KB - 1)) != 0 || *BlockEntrySize == 0) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // The root table geometry is recovered from the live TCR rather than
  // passed around as a parameter.
  T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
  // Get the Table info from T0SZ
  GetRootTranslationTableInfo (T0SZ, &RootTableLevel, &RootTableEntryCount);

  // If the start address is 0x0 then we use the size of the region to identify the alignment
  if (RegionStart == 0) {
    // Identify the highest possible alignment for the Region Size
    BaseAddressAlignment = LowBitSet64 (*BlockEntrySize);
  } else {
    // Identify the highest possible alignment for the Base Address
    BaseAddressAlignment = LowBitSet64 (RegionStart);
  }

  // Identify the Page Level the RegionStart must belong to. Note that PageLevel
  // should be at least 1 since block translations are not supported at level 0
  PageLevel = MAX (3 - ((BaseAddressAlignment - 12) / 9), 1);

  // If the required size is smaller than the current block size then we need to go to the page below.
  // The PageLevel was calculated on the Base Address alignment but did not take in account the alignment
  // of the allocation size
  while (*BlockEntrySize < TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel)) {
    // It does not fit so we need to go a page level above
    PageLevel++;
  }

  //
  // Get the Table Descriptor for the corresponding PageLevel. We need to decompose RegionStart to get appropriate entries
  //

  TranslationTable = RootTable;
  for (IndexLevel = RootTableLevel; IndexLevel <= PageLevel; IndexLevel++) {
    BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel, RegionStart);

    if ((IndexLevel != 3) && ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY)) {
      // Go to the next table
      TranslationTable = (UINT64*)(*BlockEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE);

      // If we are at the last level then update the last level to next level
      if (IndexLevel == PageLevel) {
        // Enter the next level
        PageLevel++;
      }
    } else if ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
      // If we are not at the last level then we need to split this BlockEntry
      if (IndexLevel != PageLevel) {
        // Retrieve the attributes from the block entry
        Attributes = *BlockEntry & TT_ATTRIBUTES_MASK;

        // Convert the block entry attributes into Table descriptor attributes
        TableAttributes = TT_TABLE_AP_NO_PERMISSION;
        if (Attributes & TT_NS) {
          TableAttributes = TT_TABLE_NS;
        }

        // Get the address corresponding at this entry
        BlockEntryAddress = RegionStart;
        BlockEntryAddress = BlockEntryAddress >> TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
        // Shift back to right to set zero before the effective address
        BlockEntryAddress = BlockEntryAddress << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);

        // Set the correct entry type for the next page level
        if ((IndexLevel + 1) == 3) {
          Attributes |= TT_TYPE_BLOCK_ENTRY_LEVEL3;
        } else {
          Attributes |= TT_TYPE_BLOCK_ENTRY;
        }

        // Create a new translation table
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return NULL;
        }

        // Populate the newly created lower level table so it describes
        // exactly the same mapping the split block entry used to.
        SubTableBlockEntry = TranslationTable;
        for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
          *SubTableBlockEntry = Attributes | (BlockEntryAddress + (Index << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel + 1)));
          SubTableBlockEntry++;
        }

        // Fill the BlockEntry with the new TranslationTable
        ReplaceLiveEntry (BlockEntry,
          ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TableAttributes | TT_TYPE_TABLE_ENTRY);
      }
    } else {
      if (IndexLevel != PageLevel) {
        //
        // Case when we have an Invalid Entry and we are at a page level above of the one targetted.
        //

        // Create a new translation table
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return NULL;
        }

        ZeroMem (TranslationTable, TT_ENTRY_COUNT * sizeof(UINT64));

        // Fill the new BlockEntry with the TranslationTable
        *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TT_TYPE_TABLE_ENTRY;
      }
    }
  }

  // Expose the found PageLevel to the caller
  *TableLevel = PageLevel;

  // Now, we have the Table Level we can get the Block Size associated to this table
  *BlockEntrySize = TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel);

  // The last block of the root table depends on the number of entry in this table,
  // otherwise it is always the (TT_ENTRY_COUNT - 1)th entry in the table.
  *LastBlockEntry = TT_LAST_BLOCK_ADDRESS(TranslationTable,
      (PageLevel == RootTableLevel) ? RootTableEntryCount : TT_ENTRY_COUNT);

  return BlockEntry;
}
//
// Map [RegionStart, RegionStart + RegionLength) with the given descriptor
// attributes, splitting larger blocks as needed.
//
// @param RootTable       Root translation table (TTBR0 contents).
// @param RegionStart     Start of the region; must be 4 KB aligned.
// @param RegionLength    Length of the region; must be 4 KB aligned, non-zero.
// @param Attributes      Descriptor attribute bits to OR into each entry.
// @param BlockEntryMask  Mask of descriptor bits to PRESERVE in each entry
//                        (the entry is first AND-ed with this mask).
//
// @retval EFI_SUCCESS            Region mapped.
// @retval EFI_INVALID_PARAMETER  Bad alignment or zero length.
// @retval EFI_OUT_OF_RESOURCES   Could not allocate a new table page.
//
STATIC
EFI_STATUS
UpdateRegionMapping (
  IN  UINT64  *RootTable,
  IN  UINT64   RegionStart,
  IN  UINT64   RegionLength,
  IN  UINT64   Attributes,
  IN  UINT64   BlockEntryMask
  )
{
  UINT32   Type;
  UINT64  *BlockEntry;
  UINT64  *LastBlockEntry;
  UINT64   BlockEntrySize;
  UINTN    TableLevel;

  // Ensure the Length is aligned on 4KB boundary
  if ((RegionLength == 0) || ((RegionLength & (SIZE_4KB - 1)) != 0)) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return EFI_INVALID_PARAMETER;
  }

  do {
    // Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor
    // such as the the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor
    BlockEntrySize = RegionLength;
    BlockEntry = GetBlockEntryListFromAddress (RootTable, RegionStart, &TableLevel, &BlockEntrySize, &LastBlockEntry);
    if (BlockEntry == NULL) {
      // GetBlockEntryListFromAddress() return NULL when it fails to allocate new pages from the Translation Tables
      return EFI_OUT_OF_RESOURCES;
    }

    // Level 3 uses a distinct "page" descriptor encoding; levels 1-2 use
    // the block encoding.
    if (TableLevel != 3) {
      Type = TT_TYPE_BLOCK_ENTRY;
    } else {
      Type = TT_TYPE_BLOCK_ENTRY_LEVEL3;
    }

    do {
      // Fill the Block Entry with attribute and output block address
      *BlockEntry &= BlockEntryMask;
      *BlockEntry |= (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;

      // Go to the next BlockEntry
      RegionStart += BlockEntrySize;
      RegionLength -= BlockEntrySize;
      BlockEntry++;

      // Break the inner loop when next block is a table
      // Rerun GetBlockEntryListFromAddress to avoid page table memory leak
      if (TableLevel != 3 &&
          (*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
        break;
      }
    } while ((RegionLength >= BlockEntrySize) && (BlockEntry <= LastBlockEntry));
  } while (RegionLength != 0);

  return EFI_SUCCESS;
}
397 FillTranslationTable (
398 IN UINT64
*RootTable
,
399 IN ARM_MEMORY_REGION_DESCRIPTOR
*MemoryRegion
402 return UpdateRegionMapping (
404 MemoryRegion
->VirtualBase
,
405 MemoryRegion
->Length
,
406 ArmMemoryAttributeToPageAttribute (MemoryRegion
->Attributes
) | TT_AF
,
413 GcdAttributeToPageAttribute (
414 IN UINT64 GcdAttributes
417 UINT64 PageAttributes
;
419 switch (GcdAttributes
& EFI_MEMORY_CACHETYPE_MASK
) {
421 PageAttributes
= TT_ATTR_INDX_DEVICE_MEMORY
;
424 PageAttributes
= TT_ATTR_INDX_MEMORY_NON_CACHEABLE
;
427 PageAttributes
= TT_ATTR_INDX_MEMORY_WRITE_THROUGH
| TT_SH_INNER_SHAREABLE
;
430 PageAttributes
= TT_ATTR_INDX_MEMORY_WRITE_BACK
| TT_SH_INNER_SHAREABLE
;
433 PageAttributes
= TT_ATTR_INDX_MASK
;
437 if ((GcdAttributes
& EFI_MEMORY_XP
) != 0 ||
438 (GcdAttributes
& EFI_MEMORY_CACHETYPE_MASK
) == EFI_MEMORY_UC
) {
439 if (ArmReadCurrentEL () == AARCH64_EL2
) {
440 PageAttributes
|= TT_XN_MASK
;
442 PageAttributes
|= TT_UXN_MASK
| TT_PXN_MASK
;
446 if ((GcdAttributes
& EFI_MEMORY_RO
) != 0) {
447 PageAttributes
|= TT_AP_RO_RO
;
450 return PageAttributes
| TT_AF
;
//
// Apply EFI_MEMORY_xx attributes to [BaseAddress, BaseAddress + Length).
// When no cacheability bits are present in Attributes, only the permission
// bits of the existing mappings are updated and the memory type preserved.
//
EFI_STATUS
ArmSetMemoryAttributes (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length,
  IN UINT64                Attributes
  )
{
  EFI_STATUS  Status;
  UINT64     *TranslationTable;
  UINT64      PageAttributes;
  UINT64      PageAttributeMask;

  PageAttributes = GcdAttributeToPageAttribute (Attributes);
  PageAttributeMask = 0;

  if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {
    //
    // No memory type was set in Attributes, so we are going to update the
    // permissions only: keep just the AP/XN bits of the new attributes and
    // preserve everything except address and permissions in each entry.
    //
    PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;
    PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |
                          TT_PXN_MASK | TT_XN_MASK);
  }

  TranslationTable = ArmGetTTBR0BaseAddress ();

  Status = UpdateRegionMapping (
             TranslationTable,
             BaseAddress,
             Length,
             PageAttributes,
             PageAttributeMask);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  // Invalidate all TLB entries so changes are synced
  ArmInvalidateTlb ();

  return EFI_SUCCESS;
}
498 SetMemoryRegionAttribute (
499 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
501 IN UINT64 Attributes
,
502 IN UINT64 BlockEntryMask
508 RootTable
= ArmGetTTBR0BaseAddress ();
510 Status
= UpdateRegionMapping (RootTable
, BaseAddress
, Length
, Attributes
, BlockEntryMask
);
511 if (EFI_ERROR (Status
)) {
515 // Invalidate all TLB entries so changes are synced
522 ArmSetMemoryRegionNoExec (
523 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
529 if (ArmReadCurrentEL () == AARCH64_EL1
) {
530 Val
= TT_PXN_MASK
| TT_UXN_MASK
;
535 return SetMemoryRegionAttribute (
539 ~TT_ADDRESS_MASK_BLOCK_ENTRY
);
543 ArmClearMemoryRegionNoExec (
544 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
550 // XN maps to UXN in the EL1&0 translation regime
551 Mask
= ~(TT_ADDRESS_MASK_BLOCK_ENTRY
| TT_PXN_MASK
| TT_XN_MASK
);
553 return SetMemoryRegionAttribute (
561 ArmSetMemoryRegionReadOnly (
562 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
566 return SetMemoryRegionAttribute (
570 ~TT_ADDRESS_MASK_BLOCK_ENTRY
);
574 ArmClearMemoryRegionReadOnly (
575 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
579 return SetMemoryRegionAttribute (
583 ~(TT_ADDRESS_MASK_BLOCK_ENTRY
| TT_AP_MASK
));
//
// Build the initial translation tables from MemoryTable, program TCR/MAIR/
// TTBR0 and enable the MMU and caches.
//
// @param MemoryTable           NULL-length-terminated array of region
//                              descriptors to map.
// @param TranslationTableBase  Optional: receives the root table address.
// @param TranslationTableSize  Optional: receives the root table size.
//
EFI_STATUS
ArmConfigureMmu (
  IN  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryTable,
  OUT VOID                         **TranslationTableBase OPTIONAL,
  OUT UINTN                         *TranslationTableSize OPTIONAL
  )
{
  VOID*       TranslationTable;
  UINT32      TranslationTableAttribute;
  UINT64      MaxAddress;
  UINTN       T0SZ;
  UINTN       RootTableEntryCount;
  UINT64      TCR;
  EFI_STATUS  Status;

  if(MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return EFI_INVALID_PARAMETER;
  }

  // Cover the entire GCD memory space
  MaxAddress = (1UL << PcdGet8 (PcdPrePiCpuMemorySize)) - 1;

  // Lookup the Table Level to get the information
  LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);

  //
  // Set TCR that allows us to retrieve T0SZ in the subsequent functions
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    //Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return EFI_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return EFI_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return EFI_UNSUPPORTED;
  }

  //
  // Translation table walks are always cache coherent on ARMv8-A, so cache
  // maintenance on page tables is never needed. Since there is a risk of
  // loss of coherency when using mismatched attributes, and given that memory
  // is mapped cacheable except for extraordinary cases (such as non-coherent
  // DMA), have the page table walker perform cached accesses as well, and
  // assert below that that matches the attributes we use for CPU accesses to
  // the region.
  //
  TCR |= TCR_SH_INNER_SHAREABLE |
         TCR_RGN_OUTER_WRITE_BACK_ALLOC |
         TCR_RGN_INNER_WRITE_BACK_ALLOC;

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for translation table
  TranslationTable = AllocatePages (1);
  if (TranslationTable == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }
  // We set TTBR0 just after allocating the table to retrieve its location from the subsequent
  // functions without needing to pass this value across the functions. The MMU is only enabled
  // after the translation tables are populated.
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof(UINT64);
  }

  ZeroMem (TranslationTable, RootTableEntryCount * sizeof(UINT64));

  // Disable MMU and caches. ArmDisableMmu() also invalidates the TLBs
  ArmDisableMmu ();
  ArmDisableDataCache ();
  ArmDisableInstructionCache ();

  // Make sure nothing sneaked into the cache
  ArmCleanInvalidateDataCache ();
  ArmInvalidateInstructionCache ();

  TranslationTableAttribute = TT_ATTR_INDX_INVALID;
  while (MemoryTable->Length != 0) {

    // Find the memory attribute for the Translation Table
    if ((UINTN)TranslationTable >= MemoryTable->PhysicalBase &&
        (UINTN)TranslationTable + EFI_PAGE_SIZE <= MemoryTable->PhysicalBase +
                                                        MemoryTable->Length) {
      TranslationTableAttribute = MemoryTable->Attributes;
    }

    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (EFI_ERROR (Status)) {
      goto FREE_TRANSLATION_TABLE;
    }
    MemoryTable++;
  }

  // The table itself must be mapped write-back, matching the cached walks
  // configured in TCR above (see comment there).
  ASSERT (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK ||
          TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK);

  ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |                      // mapped to EFI_MEMORY_UC
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) | // mapped to EFI_MEMORY_WC
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) | // mapped to EFI_MEMORY_WT
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK));       // mapped to EFI_MEMORY_WB

  ArmDisableAlignmentCheck ();
  ArmEnableStackAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return EFI_SUCCESS;

FREE_TRANSLATION_TABLE:
  FreePages (TranslationTable, 1);
  return Status;
}
//
// Library constructor: clean the live-entry replacement helper to the
// Point of Coherency so it can safely run with the MMU disabled.
//
RETURN_STATUS
EFIAPI
ArmMmuBaseLibConstructor (
  VOID
  )
{
  // Size symbol exported by the assembly implementation.
  extern UINT32 ArmReplaceLiveTranslationEntrySize;

  //
  // The ArmReplaceLiveTranslationEntry () helper function may be invoked
  // with the MMU off so we have to ensure that it gets cleaned to the PoC
  //
  WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry,
    ArmReplaceLiveTranslationEntrySize);

  return RETURN_SUCCESS;
}