/** @file
*  File managing the MMU for ARMv8 architecture
*
*  Copyright (c) 2011-2020, ARM Limited. All rights reserved.
*  Copyright (c) 2016, Linaro Limited. All rights reserved.
*  Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
*
*  SPDX-License-Identifier: BSD-2-Clause-Patent
*
**/
13 #include <Chipset/AArch64.h>
14 #include <Library/BaseMemoryLib.h>
15 #include <Library/CacheMaintenanceLib.h>
16 #include <Library/MemoryAllocationLib.h>
17 #include <Library/ArmLib.h>
18 #include <Library/ArmMmuLib.h>
19 #include <Library/BaseLib.h>
20 #include <Library/DebugLib.h>
22 // We use this index definition to define an invalid block entry
23 #define TT_ATTR_INDX_INVALID ((UINT32)~0)
27 ArmMemoryAttributeToPageAttribute (
28 IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
32 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE
:
33 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE
:
34 return TT_ATTR_INDX_MEMORY_WRITE_BACK
;
36 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK
:
37 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK
:
38 return TT_ATTR_INDX_MEMORY_WRITE_BACK
| TT_SH_INNER_SHAREABLE
;
40 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH
:
41 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH
:
42 return TT_ATTR_INDX_MEMORY_WRITE_THROUGH
| TT_SH_INNER_SHAREABLE
;
44 // Uncached and device mappings are treated as outer shareable by default,
45 case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED
:
46 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED
:
47 return TT_ATTR_INDX_MEMORY_NON_CACHEABLE
;
51 case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE
:
52 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE
:
53 if (ArmReadCurrentEL () == AARCH64_EL2
)
54 return TT_ATTR_INDX_DEVICE_MEMORY
| TT_XN_MASK
;
56 return TT_ATTR_INDX_DEVICE_MEMORY
| TT_UXN_MASK
| TT_PXN_MASK
;
61 PageAttributeToGcdAttribute (
62 IN UINT64 PageAttributes
67 switch (PageAttributes
& TT_ATTR_INDX_MASK
) {
68 case TT_ATTR_INDX_DEVICE_MEMORY
:
69 GcdAttributes
= EFI_MEMORY_UC
;
71 case TT_ATTR_INDX_MEMORY_NON_CACHEABLE
:
72 GcdAttributes
= EFI_MEMORY_WC
;
74 case TT_ATTR_INDX_MEMORY_WRITE_THROUGH
:
75 GcdAttributes
= EFI_MEMORY_WT
;
77 case TT_ATTR_INDX_MEMORY_WRITE_BACK
:
78 GcdAttributes
= EFI_MEMORY_WB
;
81 DEBUG ((EFI_D_ERROR
, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes
));
83 // The Global Coherency Domain (GCD) value is defined as a bit set.
84 // Returning 0 means no attribute has been set.
88 // Determine protection attributes
89 if (((PageAttributes
& TT_AP_MASK
) == TT_AP_NO_RO
) || ((PageAttributes
& TT_AP_MASK
) == TT_AP_RO_RO
)) {
90 // Read only cases map to write-protect
91 GcdAttributes
|= EFI_MEMORY_RO
;
94 // Process eXecute Never attribute
95 if ((PageAttributes
& (TT_PXN_MASK
| TT_UXN_MASK
)) != 0 ) {
96 GcdAttributes
|= EFI_MEMORY_XP
;
103 #define BITS_PER_LEVEL 9
106 GetRootTranslationTableInfo (
108 OUT UINTN
*TableLevel
,
109 OUT UINTN
*TableEntryCount
112 // Get the level of the root table
114 *TableLevel
= (T0SZ
- MIN_T0SZ
) / BITS_PER_LEVEL
;
117 if (TableEntryCount
) {
118 *TableEntryCount
= 1UL << (BITS_PER_LEVEL
- (T0SZ
- MIN_T0SZ
) % BITS_PER_LEVEL
);
127 IN UINT64 RegionStart
,
128 IN BOOLEAN IsLiveBlockMapping
131 if (!ArmMmuEnabled () || !IsLiveBlockMapping
) {
133 ArmUpdateTranslationTableEntry (Entry
, (VOID
*)(UINTN
)RegionStart
);
135 ArmReplaceLiveTranslationEntry (Entry
, Value
, RegionStart
);
141 FreePageTablesRecursive (
142 IN UINT64
*TranslationTable
147 for (Index
= 0; Index
< TT_ENTRY_COUNT
; Index
++) {
148 if ((TranslationTable
[Index
] & TT_TYPE_MASK
) == TT_TYPE_TABLE_ENTRY
) {
149 FreePageTablesRecursive ((VOID
*)(UINTN
)(TranslationTable
[Index
] &
150 TT_ADDRESS_MASK_BLOCK_ENTRY
));
153 FreePages (TranslationTable
, 1);
158 UpdateRegionMappingRecursive (
159 IN UINT64 RegionStart
,
161 IN UINT64 AttributeSetMask
,
162 IN UINT64 AttributeClearMask
,
163 IN UINT64
*PageTable
,
172 VOID
*TranslationTable
;
175 ASSERT (((RegionStart
| RegionEnd
) & EFI_PAGE_MASK
) == 0);
177 BlockShift
= (Level
+ 1) * BITS_PER_LEVEL
+ MIN_T0SZ
;
178 BlockMask
= MAX_UINT64
>> BlockShift
;
180 DEBUG ((DEBUG_VERBOSE
, "%a(%d): %llx - %llx set %lx clr %lx\n", __FUNCTION__
,
181 Level
, RegionStart
, RegionEnd
, AttributeSetMask
, AttributeClearMask
));
183 for (; RegionStart
< RegionEnd
; RegionStart
= BlockEnd
) {
184 BlockEnd
= MIN (RegionEnd
, (RegionStart
| BlockMask
) + 1);
185 Entry
= &PageTable
[(RegionStart
>> (64 - BlockShift
)) & (TT_ENTRY_COUNT
- 1)];
188 // If RegionStart or BlockEnd is not aligned to the block size at this
189 // level, we will have to create a table mapping in order to map less
190 // than a block, and recurse to create the block or page entries at
191 // the next level. No block mappings are allowed at all at level 0,
192 // so in that case, we have to recurse unconditionally.
194 if (Level
== 0 || ((RegionStart
| BlockEnd
) & BlockMask
) != 0) {
197 if ((*Entry
& TT_TYPE_MASK
) != TT_TYPE_TABLE_ENTRY
) {
199 // No table entry exists yet, so we need to allocate a page table
200 // for the next level.
202 TranslationTable
= AllocatePages (1);
203 if (TranslationTable
== NULL
) {
204 return EFI_OUT_OF_RESOURCES
;
207 if ((*Entry
& TT_TYPE_MASK
) == TT_TYPE_BLOCK_ENTRY
) {
209 // We are splitting an existing block entry, so we have to populate
210 // the new table with the attributes of the block entry it replaces.
212 Status
= UpdateRegionMappingRecursive (RegionStart
& ~BlockMask
,
213 (RegionStart
| BlockMask
) + 1, *Entry
& TT_ATTRIBUTES_MASK
,
214 0, TranslationTable
, Level
+ 1);
215 if (EFI_ERROR (Status
)) {
217 // The range we passed to UpdateRegionMappingRecursive () is block
218 // aligned, so it is guaranteed that no further pages were allocated
219 // by it, and so we only have to free the page we allocated here.
221 FreePages (TranslationTable
, 1);
225 ZeroMem (TranslationTable
, EFI_PAGE_SIZE
);
228 TranslationTable
= (VOID
*)(UINTN
)(*Entry
& TT_ADDRESS_MASK_BLOCK_ENTRY
);
232 // Recurse to the next level
234 Status
= UpdateRegionMappingRecursive (RegionStart
, BlockEnd
,
235 AttributeSetMask
, AttributeClearMask
, TranslationTable
,
237 if (EFI_ERROR (Status
)) {
238 if ((*Entry
& TT_TYPE_MASK
) != TT_TYPE_TABLE_ENTRY
) {
240 // We are creating a new table entry, so on failure, we can free all
241 // allocations we made recursively, given that the whole subhierarchy
242 // has not been wired into the live page tables yet. (This is not
243 // possible for existing table entries, since we cannot revert the
244 // modifications we made to the subhierarchy it represents.)
246 FreePageTablesRecursive (TranslationTable
);
251 if ((*Entry
& TT_TYPE_MASK
) != TT_TYPE_TABLE_ENTRY
) {
252 EntryValue
= (UINTN
)TranslationTable
| TT_TYPE_TABLE_ENTRY
;
253 ReplaceTableEntry (Entry
, EntryValue
, RegionStart
,
254 (*Entry
& TT_TYPE_MASK
) == TT_TYPE_BLOCK_ENTRY
);
257 EntryValue
= (*Entry
& AttributeClearMask
) | AttributeSetMask
;
258 EntryValue
|= RegionStart
;
259 EntryValue
|= (Level
== 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
260 : TT_TYPE_BLOCK_ENTRY
;
262 ReplaceTableEntry (Entry
, EntryValue
, RegionStart
, FALSE
);
270 LookupAddresstoRootTable (
271 IN UINT64 MaxAddress
,
273 OUT UINTN
*TableEntryCount
278 // Check the parameters are not NULL
279 ASSERT ((T0SZ
!= NULL
) && (TableEntryCount
!= NULL
));
281 // Look for the highest bit set in MaxAddress
282 for (TopBit
= 63; TopBit
!= 0; TopBit
--) {
283 if ((1ULL << TopBit
) & MaxAddress
) {
284 // MaxAddress top bit is found
289 ASSERT (TopBit
!= 0);
291 // Calculate T0SZ from the top bit of the MaxAddress
294 // Get the Table info from T0SZ
295 GetRootTranslationTableInfo (*T0SZ
, NULL
, TableEntryCount
);
300 UpdateRegionMapping (
301 IN UINT64 RegionStart
,
302 IN UINT64 RegionLength
,
303 IN UINT64 AttributeSetMask
,
304 IN UINT64 AttributeClearMask
307 UINTN RootTableLevel
;
310 if (((RegionStart
| RegionLength
) & EFI_PAGE_MASK
)) {
311 return EFI_INVALID_PARAMETER
;
314 T0SZ
= ArmGetTCR () & TCR_T0SZ_MASK
;
315 GetRootTranslationTableInfo (T0SZ
, &RootTableLevel
, NULL
);
317 return UpdateRegionMappingRecursive (RegionStart
, RegionStart
+ RegionLength
,
318 AttributeSetMask
, AttributeClearMask
, ArmGetTTBR0BaseAddress (),
324 FillTranslationTable (
325 IN UINT64
*RootTable
,
326 IN ARM_MEMORY_REGION_DESCRIPTOR
*MemoryRegion
329 return UpdateRegionMapping (
330 MemoryRegion
->VirtualBase
,
331 MemoryRegion
->Length
,
332 ArmMemoryAttributeToPageAttribute (MemoryRegion
->Attributes
) | TT_AF
,
339 GcdAttributeToPageAttribute (
340 IN UINT64 GcdAttributes
343 UINT64 PageAttributes
;
345 switch (GcdAttributes
& EFI_MEMORY_CACHETYPE_MASK
) {
347 PageAttributes
= TT_ATTR_INDX_DEVICE_MEMORY
;
350 PageAttributes
= TT_ATTR_INDX_MEMORY_NON_CACHEABLE
;
353 PageAttributes
= TT_ATTR_INDX_MEMORY_WRITE_THROUGH
| TT_SH_INNER_SHAREABLE
;
356 PageAttributes
= TT_ATTR_INDX_MEMORY_WRITE_BACK
| TT_SH_INNER_SHAREABLE
;
359 PageAttributes
= TT_ATTR_INDX_MASK
;
363 if ((GcdAttributes
& EFI_MEMORY_XP
) != 0 ||
364 (GcdAttributes
& EFI_MEMORY_CACHETYPE_MASK
) == EFI_MEMORY_UC
) {
365 if (ArmReadCurrentEL () == AARCH64_EL2
) {
366 PageAttributes
|= TT_XN_MASK
;
368 PageAttributes
|= TT_UXN_MASK
| TT_PXN_MASK
;
372 if ((GcdAttributes
& EFI_MEMORY_RO
) != 0) {
373 PageAttributes
|= TT_AP_RO_RO
;
376 return PageAttributes
| TT_AF
;
380 ArmSetMemoryAttributes (
381 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
386 UINT64 PageAttributes
;
387 UINT64 PageAttributeMask
;
389 PageAttributes
= GcdAttributeToPageAttribute (Attributes
);
390 PageAttributeMask
= 0;
392 if ((Attributes
& EFI_MEMORY_CACHETYPE_MASK
) == 0) {
394 // No memory type was set in Attributes, so we are going to update the
397 PageAttributes
&= TT_AP_MASK
| TT_UXN_MASK
| TT_PXN_MASK
;
398 PageAttributeMask
= ~(TT_ADDRESS_MASK_BLOCK_ENTRY
| TT_AP_MASK
|
399 TT_PXN_MASK
| TT_XN_MASK
);
402 return UpdateRegionMapping (BaseAddress
, Length
, PageAttributes
,
408 SetMemoryRegionAttribute (
409 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
411 IN UINT64 Attributes
,
412 IN UINT64 BlockEntryMask
415 return UpdateRegionMapping (BaseAddress
, Length
, Attributes
, BlockEntryMask
);
419 ArmSetMemoryRegionNoExec (
420 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
426 if (ArmReadCurrentEL () == AARCH64_EL1
) {
427 Val
= TT_PXN_MASK
| TT_UXN_MASK
;
432 return SetMemoryRegionAttribute (
436 ~TT_ADDRESS_MASK_BLOCK_ENTRY
);
440 ArmClearMemoryRegionNoExec (
441 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
447 // XN maps to UXN in the EL1&0 translation regime
448 Mask
= ~(TT_ADDRESS_MASK_BLOCK_ENTRY
| TT_PXN_MASK
| TT_XN_MASK
);
450 return SetMemoryRegionAttribute (
458 ArmSetMemoryRegionReadOnly (
459 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
463 return SetMemoryRegionAttribute (
467 ~TT_ADDRESS_MASK_BLOCK_ENTRY
);
471 ArmClearMemoryRegionReadOnly (
472 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
476 return SetMemoryRegionAttribute (
480 ~(TT_ADDRESS_MASK_BLOCK_ENTRY
| TT_AP_MASK
));
486 IN ARM_MEMORY_REGION_DESCRIPTOR
*MemoryTable
,
487 OUT VOID
**TranslationTableBase OPTIONAL
,
488 OUT UINTN
*TranslationTableSize OPTIONAL
491 VOID
* TranslationTable
;
492 UINT32 TranslationTableAttribute
;
495 UINTN RootTableEntryCount
;
499 if(MemoryTable
== NULL
) {
500 ASSERT (MemoryTable
!= NULL
);
501 return EFI_INVALID_PARAMETER
;
505 // Limit the virtual address space to what we can actually use: UEFI
506 // mandates a 1:1 mapping, so no point in making the virtual address
507 // space larger than the physical address space. We also have to take
508 // into account the architectural limitations that result from UEFI's
509 // use of 4 KB pages.
511 MaxAddress
= MIN (LShiftU64 (1ULL, ArmGetPhysicalAddressBits ()) - 1,
514 // Lookup the Table Level to get the information
515 LookupAddresstoRootTable (MaxAddress
, &T0SZ
, &RootTableEntryCount
);
518 // Set TCR that allows us to retrieve T0SZ in the subsequent functions
520 // Ideally we will be running at EL2, but should support EL1 as well.
521 // UEFI should not run at EL3.
522 if (ArmReadCurrentEL () == AARCH64_EL2
) {
523 //Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2
524 TCR
= T0SZ
| (1UL << 31) | (1UL << 23) | TCR_TG0_4KB
;
526 // Set the Physical Address Size using MaxAddress
527 if (MaxAddress
< SIZE_4GB
) {
529 } else if (MaxAddress
< SIZE_64GB
) {
531 } else if (MaxAddress
< SIZE_1TB
) {
533 } else if (MaxAddress
< SIZE_4TB
) {
535 } else if (MaxAddress
< SIZE_16TB
) {
537 } else if (MaxAddress
< SIZE_256TB
) {
540 DEBUG ((EFI_D_ERROR
, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress
));
541 ASSERT (0); // Bigger than 48-bit memory space are not supported
542 return EFI_UNSUPPORTED
;
544 } else if (ArmReadCurrentEL () == AARCH64_EL1
) {
545 // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
546 TCR
= T0SZ
| TCR_TG0_4KB
| TCR_TG1_4KB
| TCR_EPD1
;
548 // Set the Physical Address Size using MaxAddress
549 if (MaxAddress
< SIZE_4GB
) {
551 } else if (MaxAddress
< SIZE_64GB
) {
553 } else if (MaxAddress
< SIZE_1TB
) {
555 } else if (MaxAddress
< SIZE_4TB
) {
557 } else if (MaxAddress
< SIZE_16TB
) {
559 } else if (MaxAddress
< SIZE_256TB
) {
560 TCR
|= TCR_IPS_256TB
;
562 DEBUG ((EFI_D_ERROR
, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress
));
563 ASSERT (0); // Bigger than 48-bit memory space are not supported
564 return EFI_UNSUPPORTED
;
567 ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
568 return EFI_UNSUPPORTED
;
572 // Translation table walks are always cache coherent on ARMv8-A, so cache
573 // maintenance on page tables is never needed. Since there is a risk of
574 // loss of coherency when using mismatched attributes, and given that memory
575 // is mapped cacheable except for extraordinary cases (such as non-coherent
576 // DMA), have the page table walker perform cached accesses as well, and
577 // assert below that that matches the attributes we use for CPU accesses to
580 TCR
|= TCR_SH_INNER_SHAREABLE
|
581 TCR_RGN_OUTER_WRITE_BACK_ALLOC
|
582 TCR_RGN_INNER_WRITE_BACK_ALLOC
;
587 // Allocate pages for translation table
588 TranslationTable
= AllocatePages (1);
589 if (TranslationTable
== NULL
) {
590 return EFI_OUT_OF_RESOURCES
;
592 // We set TTBR0 just after allocating the table to retrieve its location from the subsequent
593 // functions without needing to pass this value across the functions. The MMU is only enabled
594 // after the translation tables are populated.
595 ArmSetTTBR0 (TranslationTable
);
597 if (TranslationTableBase
!= NULL
) {
598 *TranslationTableBase
= TranslationTable
;
601 if (TranslationTableSize
!= NULL
) {
602 *TranslationTableSize
= RootTableEntryCount
* sizeof(UINT64
);
605 ZeroMem (TranslationTable
, RootTableEntryCount
* sizeof(UINT64
));
607 TranslationTableAttribute
= TT_ATTR_INDX_INVALID
;
608 while (MemoryTable
->Length
!= 0) {
611 // Find the memory attribute for the Translation Table
612 if ((UINTN
)TranslationTable
>= MemoryTable
->PhysicalBase
&&
613 (UINTN
)TranslationTable
+ EFI_PAGE_SIZE
<= MemoryTable
->PhysicalBase
+
614 MemoryTable
->Length
) {
615 TranslationTableAttribute
= MemoryTable
->Attributes
;
619 Status
= FillTranslationTable (TranslationTable
, MemoryTable
);
620 if (EFI_ERROR (Status
)) {
621 goto FREE_TRANSLATION_TABLE
;
626 ASSERT (TranslationTableAttribute
== ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK
||
627 TranslationTableAttribute
== ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK
);
629 ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY
, MAIR_ATTR_DEVICE_MEMORY
) | // mapped to EFI_MEMORY_UC
630 MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE
, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE
) | // mapped to EFI_MEMORY_WC
631 MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH
, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH
) | // mapped to EFI_MEMORY_WT
632 MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK
, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
)); // mapped to EFI_MEMORY_WB
634 ArmDisableAlignmentCheck ();
635 ArmEnableStackAlignmentCheck ();
636 ArmEnableInstructionCache ();
637 ArmEnableDataCache ();
642 FREE_TRANSLATION_TABLE
:
643 FreePages (TranslationTable
, 1);
649 ArmMmuBaseLibConstructor (
653 extern UINT32 ArmReplaceLiveTranslationEntrySize
;
656 // The ArmReplaceLiveTranslationEntry () helper function may be invoked
657 // with the MMU off so we have to ensure that it gets cleaned to the PoC
659 WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry
,
660 ArmReplaceLiveTranslationEntrySize
);
662 return RETURN_SUCCESS
;