/** @file
*  File managing the MMU for ARMv8 architecture
*
*  Copyright (c) 2011-2014, ARM Limited. All rights reserved.
*  Copyright (c) 2016, Linaro Limited. All rights reserved.
*  Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
*
*  This program and the accompanying materials
*  are licensed and made available under the terms and conditions of the BSD License
*  which accompanies this distribution.  The full text of the license may be found at
*  http://opensource.org/licenses/bsd-license.php
*
*  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
*  WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
*
**/
19 #include <Chipset/AArch64.h>
20 #include <Library/BaseMemoryLib.h>
21 #include <Library/CacheMaintenanceLib.h>
22 #include <Library/MemoryAllocationLib.h>
23 #include <Library/ArmLib.h>
24 #include <Library/ArmMmuLib.h>
25 #include <Library/BaseLib.h>
26 #include <Library/DebugLib.h>
// We use this index definition to define an invalid block entry
// (no architectural MAIR index uses all bits set, so it cannot collide
// with a real TT_ATTR_INDX_* value).
#define TT_ATTR_INDX_INVALID    ((UINT32)~0)
33 ArmMemoryAttributeToPageAttribute (
34 IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
38 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK
:
39 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK
:
40 return TT_ATTR_INDX_MEMORY_WRITE_BACK
| TT_SH_INNER_SHAREABLE
;
42 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH
:
43 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH
:
44 return TT_ATTR_INDX_MEMORY_WRITE_THROUGH
| TT_SH_INNER_SHAREABLE
;
46 // Uncached and device mappings are treated as outer shareable by default,
47 case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED
:
48 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED
:
49 return TT_ATTR_INDX_MEMORY_NON_CACHEABLE
;
53 case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE
:
54 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE
:
55 if (ArmReadCurrentEL () == AARCH64_EL2
)
56 return TT_ATTR_INDX_DEVICE_MEMORY
| TT_XN_MASK
;
58 return TT_ATTR_INDX_DEVICE_MEMORY
| TT_UXN_MASK
| TT_PXN_MASK
;
63 PageAttributeToGcdAttribute (
64 IN UINT64 PageAttributes
69 switch (PageAttributes
& TT_ATTR_INDX_MASK
) {
70 case TT_ATTR_INDX_DEVICE_MEMORY
:
71 GcdAttributes
= EFI_MEMORY_UC
;
73 case TT_ATTR_INDX_MEMORY_NON_CACHEABLE
:
74 GcdAttributes
= EFI_MEMORY_WC
;
76 case TT_ATTR_INDX_MEMORY_WRITE_THROUGH
:
77 GcdAttributes
= EFI_MEMORY_WT
;
79 case TT_ATTR_INDX_MEMORY_WRITE_BACK
:
80 GcdAttributes
= EFI_MEMORY_WB
;
83 DEBUG ((EFI_D_ERROR
, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes
));
85 // The Global Coherency Domain (GCD) value is defined as a bit set.
86 // Returning 0 means no attribute has been set.
90 // Determine protection attributes
91 if (((PageAttributes
& TT_AP_MASK
) == TT_AP_NO_RO
) || ((PageAttributes
& TT_AP_MASK
) == TT_AP_RO_RO
)) {
92 // Read only cases map to write-protect
93 GcdAttributes
|= EFI_MEMORY_RO
;
96 // Process eXecute Never attribute
97 if ((PageAttributes
& (TT_PXN_MASK
| TT_UXN_MASK
)) != 0 ) {
98 GcdAttributes
|= EFI_MEMORY_XP
;
101 return GcdAttributes
;
104 ARM_MEMORY_REGION_ATTRIBUTES
105 GcdAttributeToArmAttribute (
106 IN UINT64 GcdAttributes
109 switch (GcdAttributes
& 0xFF) {
111 return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE
;
113 return ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED
;
115 return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH
;
117 return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK
;
119 DEBUG ((EFI_D_ERROR
, "GcdAttributeToArmAttribute: 0x%lX attributes is not supported.\n", GcdAttributes
));
121 return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE
;
126 #define BITS_PER_LEVEL 9
129 GetRootTranslationTableInfo (
131 OUT UINTN
*TableLevel
,
132 OUT UINTN
*TableEntryCount
135 // Get the level of the root table
137 *TableLevel
= (T0SZ
- MIN_T0SZ
) / BITS_PER_LEVEL
;
140 if (TableEntryCount
) {
141 *TableEntryCount
= 1UL << (BITS_PER_LEVEL
- (T0SZ
- MIN_T0SZ
) % BITS_PER_LEVEL
);
152 if (!ArmMmuEnabled ()) {
155 ArmReplaceLiveTranslationEntry (Entry
, Value
);
161 LookupAddresstoRootTable (
162 IN UINT64 MaxAddress
,
164 OUT UINTN
*TableEntryCount
169 // Check the parameters are not NULL
170 ASSERT ((T0SZ
!= NULL
) && (TableEntryCount
!= NULL
));
172 // Look for the highest bit set in MaxAddress
173 for (TopBit
= 63; TopBit
!= 0; TopBit
--) {
174 if ((1ULL << TopBit
) & MaxAddress
) {
175 // MaxAddress top bit is found
180 ASSERT (TopBit
!= 0);
182 // Calculate T0SZ from the top bit of the MaxAddress
185 // Get the Table info from T0SZ
186 GetRootTranslationTableInfo (*T0SZ
, NULL
, TableEntryCount
);
191 GetBlockEntryListFromAddress (
192 IN UINT64
*RootTable
,
193 IN UINT64 RegionStart
,
194 OUT UINTN
*TableLevel
,
195 IN OUT UINT64
*BlockEntrySize
,
196 OUT UINT64
**LastBlockEntry
199 UINTN RootTableLevel
;
200 UINTN RootTableEntryCount
;
201 UINT64
*TranslationTable
;
203 UINT64
*SubTableBlockEntry
;
204 UINT64 BlockEntryAddress
;
205 UINTN BaseAddressAlignment
;
211 UINT64 TableAttributes
;
213 // Initialize variable
216 // Ensure the parameters are valid
217 if (!(TableLevel
&& BlockEntrySize
&& LastBlockEntry
)) {
218 ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER
);
222 // Ensure the Region is aligned on 4KB boundary
223 if ((RegionStart
& (SIZE_4KB
- 1)) != 0) {
224 ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER
);
228 // Ensure the required size is aligned on 4KB boundary and not 0
229 if ((*BlockEntrySize
& (SIZE_4KB
- 1)) != 0 || *BlockEntrySize
== 0) {
230 ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER
);
234 T0SZ
= ArmGetTCR () & TCR_T0SZ_MASK
;
235 // Get the Table info from T0SZ
236 GetRootTranslationTableInfo (T0SZ
, &RootTableLevel
, &RootTableEntryCount
);
238 // If the start address is 0x0 then we use the size of the region to identify the alignment
239 if (RegionStart
== 0) {
240 // Identify the highest possible alignment for the Region Size
241 BaseAddressAlignment
= LowBitSet64 (*BlockEntrySize
);
243 // Identify the highest possible alignment for the Base Address
244 BaseAddressAlignment
= LowBitSet64 (RegionStart
);
247 // Identify the Page Level the RegionStart must belong to. Note that PageLevel
248 // should be at least 1 since block translations are not supported at level 0
249 PageLevel
= MAX (3 - ((BaseAddressAlignment
- 12) / 9), 1);
251 // If the required size is smaller than the current block size then we need to go to the page below.
252 // The PageLevel was calculated on the Base Address alignment but did not take in account the alignment
253 // of the allocation size
254 while (*BlockEntrySize
< TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel
)) {
255 // It does not fit so we need to go a page level above
260 // Get the Table Descriptor for the corresponding PageLevel. We need to decompose RegionStart to get appropriate entries
263 TranslationTable
= RootTable
;
264 for (IndexLevel
= RootTableLevel
; IndexLevel
<= PageLevel
; IndexLevel
++) {
265 BlockEntry
= (UINT64
*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable
, IndexLevel
, RegionStart
);
267 if ((IndexLevel
!= 3) && ((*BlockEntry
& TT_TYPE_MASK
) == TT_TYPE_TABLE_ENTRY
)) {
268 // Go to the next table
269 TranslationTable
= (UINT64
*)(*BlockEntry
& TT_ADDRESS_MASK_DESCRIPTION_TABLE
);
271 // If we are at the last level then update the last level to next level
272 if (IndexLevel
== PageLevel
) {
273 // Enter the next level
276 } else if ((*BlockEntry
& TT_TYPE_MASK
) == TT_TYPE_BLOCK_ENTRY
) {
277 // If we are not at the last level then we need to split this BlockEntry
278 if (IndexLevel
!= PageLevel
) {
279 // Retrieve the attributes from the block entry
280 Attributes
= *BlockEntry
& TT_ATTRIBUTES_MASK
;
282 // Convert the block entry attributes into Table descriptor attributes
283 TableAttributes
= TT_TABLE_AP_NO_PERMISSION
;
284 if (Attributes
& TT_NS
) {
285 TableAttributes
= TT_TABLE_NS
;
288 // Get the address corresponding at this entry
289 BlockEntryAddress
= RegionStart
;
290 BlockEntryAddress
= BlockEntryAddress
>> TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel
);
291 // Shift back to right to set zero before the effective address
292 BlockEntryAddress
= BlockEntryAddress
<< TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel
);
294 // Set the correct entry type for the next page level
295 if ((IndexLevel
+ 1) == 3) {
296 Attributes
|= TT_TYPE_BLOCK_ENTRY_LEVEL3
;
298 Attributes
|= TT_TYPE_BLOCK_ENTRY
;
301 // Create a new translation table
302 TranslationTable
= AllocatePages (1);
303 if (TranslationTable
== NULL
) {
307 // Populate the newly created lower level table
308 SubTableBlockEntry
= TranslationTable
;
309 for (Index
= 0; Index
< TT_ENTRY_COUNT
; Index
++) {
310 *SubTableBlockEntry
= Attributes
| (BlockEntryAddress
+ (Index
<< TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel
+ 1)));
311 SubTableBlockEntry
++;
314 // Fill the BlockEntry with the new TranslationTable
315 ReplaceLiveEntry (BlockEntry
,
316 ((UINTN
)TranslationTable
& TT_ADDRESS_MASK_DESCRIPTION_TABLE
) | TableAttributes
| TT_TYPE_TABLE_ENTRY
);
319 if (IndexLevel
!= PageLevel
) {
321 // Case when we have an Invalid Entry and we are at a page level above of the one targetted.
324 // Create a new translation table
325 TranslationTable
= AllocatePages (1);
326 if (TranslationTable
== NULL
) {
330 ZeroMem (TranslationTable
, TT_ENTRY_COUNT
* sizeof(UINT64
));
332 // Fill the new BlockEntry with the TranslationTable
333 *BlockEntry
= ((UINTN
)TranslationTable
& TT_ADDRESS_MASK_DESCRIPTION_TABLE
) | TT_TYPE_TABLE_ENTRY
;
338 // Expose the found PageLevel to the caller
339 *TableLevel
= PageLevel
;
341 // Now, we have the Table Level we can get the Block Size associated to this table
342 *BlockEntrySize
= TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel
);
344 // The last block of the root table depends on the number of entry in this table,
345 // otherwise it is always the (TT_ENTRY_COUNT - 1)th entry in the table.
346 *LastBlockEntry
= TT_LAST_BLOCK_ADDRESS(TranslationTable
,
347 (PageLevel
== RootTableLevel
) ? RootTableEntryCount
: TT_ENTRY_COUNT
);
354 UpdateRegionMapping (
355 IN UINT64
*RootTable
,
356 IN UINT64 RegionStart
,
357 IN UINT64 RegionLength
,
358 IN UINT64 Attributes
,
359 IN UINT64 BlockEntryMask
364 UINT64
*LastBlockEntry
;
365 UINT64 BlockEntrySize
;
368 // Ensure the Length is aligned on 4KB boundary
369 if ((RegionLength
== 0) || ((RegionLength
& (SIZE_4KB
- 1)) != 0)) {
370 ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER
);
371 return RETURN_INVALID_PARAMETER
;
375 // Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor
376 // such as the the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor
377 BlockEntrySize
= RegionLength
;
378 BlockEntry
= GetBlockEntryListFromAddress (RootTable
, RegionStart
, &TableLevel
, &BlockEntrySize
, &LastBlockEntry
);
379 if (BlockEntry
== NULL
) {
380 // GetBlockEntryListFromAddress() return NULL when it fails to allocate new pages from the Translation Tables
381 return RETURN_OUT_OF_RESOURCES
;
384 if (TableLevel
!= 3) {
385 Type
= TT_TYPE_BLOCK_ENTRY
;
387 Type
= TT_TYPE_BLOCK_ENTRY_LEVEL3
;
391 // Fill the Block Entry with attribute and output block address
392 *BlockEntry
&= BlockEntryMask
;
393 *BlockEntry
|= (RegionStart
& TT_ADDRESS_MASK_BLOCK_ENTRY
) | Attributes
| Type
;
395 // Go to the next BlockEntry
396 RegionStart
+= BlockEntrySize
;
397 RegionLength
-= BlockEntrySize
;
400 // Break the inner loop when next block is a table
401 // Rerun GetBlockEntryListFromAddress to avoid page table memory leak
402 if (TableLevel
!= 3 &&
403 (*BlockEntry
& TT_TYPE_MASK
) == TT_TYPE_TABLE_ENTRY
) {
406 } while ((RegionLength
>= BlockEntrySize
) && (BlockEntry
<= LastBlockEntry
));
407 } while (RegionLength
!= 0);
409 return RETURN_SUCCESS
;
414 FillTranslationTable (
415 IN UINT64
*RootTable
,
416 IN ARM_MEMORY_REGION_DESCRIPTOR
*MemoryRegion
419 return UpdateRegionMapping (
421 MemoryRegion
->VirtualBase
,
422 MemoryRegion
->Length
,
423 ArmMemoryAttributeToPageAttribute (MemoryRegion
->Attributes
) | TT_AF
,
429 SetMemoryAttributes (
430 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
432 IN UINT64 Attributes
,
433 IN EFI_PHYSICAL_ADDRESS VirtualMask
436 RETURN_STATUS Status
;
437 ARM_MEMORY_REGION_DESCRIPTOR MemoryRegion
;
438 UINT64
*TranslationTable
;
440 MemoryRegion
.PhysicalBase
= BaseAddress
;
441 MemoryRegion
.VirtualBase
= BaseAddress
;
442 MemoryRegion
.Length
= Length
;
443 MemoryRegion
.Attributes
= GcdAttributeToArmAttribute (Attributes
);
445 TranslationTable
= ArmGetTTBR0BaseAddress ();
447 Status
= FillTranslationTable (TranslationTable
, &MemoryRegion
);
448 if (RETURN_ERROR (Status
)) {
452 // Invalidate all TLB entries so changes are synced
455 return RETURN_SUCCESS
;
460 SetMemoryRegionAttribute (
461 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
463 IN UINT64 Attributes
,
464 IN UINT64 BlockEntryMask
467 RETURN_STATUS Status
;
470 RootTable
= ArmGetTTBR0BaseAddress ();
472 Status
= UpdateRegionMapping (RootTable
, BaseAddress
, Length
, Attributes
, BlockEntryMask
);
473 if (RETURN_ERROR (Status
)) {
477 // Invalidate all TLB entries so changes are synced
480 return RETURN_SUCCESS
;
484 ArmSetMemoryRegionNoExec (
485 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
491 if (ArmReadCurrentEL () == AARCH64_EL1
) {
492 Val
= TT_PXN_MASK
| TT_UXN_MASK
;
497 return SetMemoryRegionAttribute (
501 ~TT_ADDRESS_MASK_BLOCK_ENTRY
);
505 ArmClearMemoryRegionNoExec (
506 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
512 // XN maps to UXN in the EL1&0 translation regime
513 Mask
= ~(TT_ADDRESS_MASK_BLOCK_ENTRY
| TT_PXN_MASK
| TT_XN_MASK
);
515 return SetMemoryRegionAttribute (
523 ArmSetMemoryRegionReadOnly (
524 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
528 return SetMemoryRegionAttribute (
532 ~TT_ADDRESS_MASK_BLOCK_ENTRY
);
536 ArmClearMemoryRegionReadOnly (
537 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
541 return SetMemoryRegionAttribute (
545 ~(TT_ADDRESS_MASK_BLOCK_ENTRY
| TT_AP_MASK
));
551 IN ARM_MEMORY_REGION_DESCRIPTOR
*MemoryTable
,
552 OUT VOID
**TranslationTableBase OPTIONAL
,
553 OUT UINTN
*TranslationTableSize OPTIONAL
556 VOID
* TranslationTable
;
557 UINT32 TranslationTableAttribute
;
560 UINTN RootTableEntryCount
;
562 RETURN_STATUS Status
;
564 if(MemoryTable
== NULL
) {
565 ASSERT (MemoryTable
!= NULL
);
566 return RETURN_INVALID_PARAMETER
;
569 // Cover the entire GCD memory space
570 MaxAddress
= (1UL << PcdGet8 (PcdPrePiCpuMemorySize
)) - 1;
572 // Lookup the Table Level to get the information
573 LookupAddresstoRootTable (MaxAddress
, &T0SZ
, &RootTableEntryCount
);
576 // Set TCR that allows us to retrieve T0SZ in the subsequent functions
578 // Ideally we will be running at EL2, but should support EL1 as well.
579 // UEFI should not run at EL3.
580 if (ArmReadCurrentEL () == AARCH64_EL2
) {
581 //Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2
582 TCR
= T0SZ
| (1UL << 31) | (1UL << 23) | TCR_TG0_4KB
;
584 // Set the Physical Address Size using MaxAddress
585 if (MaxAddress
< SIZE_4GB
) {
587 } else if (MaxAddress
< SIZE_64GB
) {
589 } else if (MaxAddress
< SIZE_1TB
) {
591 } else if (MaxAddress
< SIZE_4TB
) {
593 } else if (MaxAddress
< SIZE_16TB
) {
595 } else if (MaxAddress
< SIZE_256TB
) {
598 DEBUG ((EFI_D_ERROR
, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress
));
599 ASSERT (0); // Bigger than 48-bit memory space are not supported
600 return RETURN_UNSUPPORTED
;
602 } else if (ArmReadCurrentEL () == AARCH64_EL1
) {
603 // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
604 TCR
= T0SZ
| TCR_TG0_4KB
| TCR_TG1_4KB
| TCR_EPD1
;
606 // Set the Physical Address Size using MaxAddress
607 if (MaxAddress
< SIZE_4GB
) {
609 } else if (MaxAddress
< SIZE_64GB
) {
611 } else if (MaxAddress
< SIZE_1TB
) {
613 } else if (MaxAddress
< SIZE_4TB
) {
615 } else if (MaxAddress
< SIZE_16TB
) {
617 } else if (MaxAddress
< SIZE_256TB
) {
618 TCR
|= TCR_IPS_256TB
;
620 DEBUG ((EFI_D_ERROR
, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress
));
621 ASSERT (0); // Bigger than 48-bit memory space are not supported
622 return RETURN_UNSUPPORTED
;
625 ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
626 return RETURN_UNSUPPORTED
;
630 // Translation table walks are always cache coherent on ARMv8-A, so cache
631 // maintenance on page tables is never needed. Since there is a risk of
632 // loss of coherency when using mismatched attributes, and given that memory
633 // is mapped cacheable except for extraordinary cases (such as non-coherent
634 // DMA), have the page table walker perform cached accesses as well, and
635 // assert below that that matches the attributes we use for CPU accesses to
638 TCR
|= TCR_SH_INNER_SHAREABLE
|
639 TCR_RGN_OUTER_WRITE_BACK_ALLOC
|
640 TCR_RGN_INNER_WRITE_BACK_ALLOC
;
645 // Allocate pages for translation table
646 TranslationTable
= AllocatePages (1);
647 if (TranslationTable
== NULL
) {
648 return RETURN_OUT_OF_RESOURCES
;
650 // We set TTBR0 just after allocating the table to retrieve its location from the subsequent
651 // functions without needing to pass this value across the functions. The MMU is only enabled
652 // after the translation tables are populated.
653 ArmSetTTBR0 (TranslationTable
);
655 if (TranslationTableBase
!= NULL
) {
656 *TranslationTableBase
= TranslationTable
;
659 if (TranslationTableSize
!= NULL
) {
660 *TranslationTableSize
= RootTableEntryCount
* sizeof(UINT64
);
663 ZeroMem (TranslationTable
, RootTableEntryCount
* sizeof(UINT64
));
665 // Disable MMU and caches. ArmDisableMmu() also invalidates the TLBs
667 ArmDisableDataCache ();
668 ArmDisableInstructionCache ();
670 // Make sure nothing sneaked into the cache
671 ArmCleanInvalidateDataCache ();
672 ArmInvalidateInstructionCache ();
674 TranslationTableAttribute
= TT_ATTR_INDX_INVALID
;
675 while (MemoryTable
->Length
!= 0) {
678 // Find the memory attribute for the Translation Table
679 if ((UINTN
)TranslationTable
>= MemoryTable
->PhysicalBase
&&
680 (UINTN
)TranslationTable
+ EFI_PAGE_SIZE
<= MemoryTable
->PhysicalBase
+
681 MemoryTable
->Length
) {
682 TranslationTableAttribute
= MemoryTable
->Attributes
;
686 Status
= FillTranslationTable (TranslationTable
, MemoryTable
);
687 if (RETURN_ERROR (Status
)) {
688 goto FREE_TRANSLATION_TABLE
;
693 ASSERT (TranslationTableAttribute
== ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK
||
694 TranslationTableAttribute
== ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK
);
696 ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY
, MAIR_ATTR_DEVICE_MEMORY
) | // mapped to EFI_MEMORY_UC
697 MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE
, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE
) | // mapped to EFI_MEMORY_WC
698 MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH
, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH
) | // mapped to EFI_MEMORY_WT
699 MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK
, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
)); // mapped to EFI_MEMORY_WB
701 ArmDisableAlignmentCheck ();
702 ArmEnableInstructionCache ();
703 ArmEnableDataCache ();
706 return RETURN_SUCCESS
;
708 FREE_TRANSLATION_TABLE
:
709 FreePages (TranslationTable
, 1);
715 ArmMmuBaseLibConstructor (
719 extern UINT32 ArmReplaceLiveTranslationEntrySize
;
722 // The ArmReplaceLiveTranslationEntry () helper function may be invoked
723 // with the MMU off so we have to ensure that it gets cleaned to the PoC
725 WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry
,
726 ArmReplaceLiveTranslationEntrySize
);
728 return RETURN_SUCCESS
;