/** @file
*  File managing the MMU for ARMv8 architecture
*
*  Copyright (c) 2011-2020, ARM Limited. All rights reserved.
*  Copyright (c) 2016, Linaro Limited. All rights reserved.
*  Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
*
*  SPDX-License-Identifier: BSD-2-Clause-Patent
*
**/
#include <Uefi.h>
#include <Pi/PiMultiPhase.h>
#include <Chipset/AArch64.h>
#include <Library/ArmLib.h>
#include <Library/ArmMmuLib.h>
#include <Library/BaseLib.h>
#include <Library/BaseMemoryLib.h>
#include <Library/CacheMaintenanceLib.h>
#include <Library/DebugLib.h>
#include <Library/HobLib.h>
#include <Library/MemoryAllocationLib.h>
26 EFIAPI
*mReplaceLiveEntryFunc
30 IN UINT64 RegionStart
,
32 ) = ArmReplaceLiveTranslationEntry
;
36 ArmMemoryAttributeToPageAttribute (
37 IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
41 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE
:
42 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE
:
43 return TT_ATTR_INDX_MEMORY_WRITE_BACK
;
45 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK
:
46 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK
:
47 return TT_ATTR_INDX_MEMORY_WRITE_BACK
| TT_SH_INNER_SHAREABLE
;
49 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH
:
50 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH
:
51 return TT_ATTR_INDX_MEMORY_WRITE_THROUGH
| TT_SH_INNER_SHAREABLE
;
53 // Uncached and device mappings are treated as outer shareable by default,
54 case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED
:
55 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED
:
56 return TT_ATTR_INDX_MEMORY_NON_CACHEABLE
;
60 case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE
:
61 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE
:
62 if (ArmReadCurrentEL () == AARCH64_EL2
) {
63 return TT_ATTR_INDX_DEVICE_MEMORY
| TT_XN_MASK
;
65 return TT_ATTR_INDX_DEVICE_MEMORY
| TT_UXN_MASK
| TT_PXN_MASK
;
// Smallest supported T0SZ (i.e. largest VA space: 64 - 16 = 48 bits).
#define MIN_T0SZ        16
// Each 4 KB table level resolves 9 VA bits (512 entries per table).
#define BITS_PER_LEVEL  9
// UEFI mandates 4 KB pages, which caps the usable VA width at 48 bits.
#define MAX_VA_BITS     48
76 GetRootTableEntryCount (
80 return TT_ENTRY_COUNT
>> (T0SZ
- MIN_T0SZ
) % BITS_PER_LEVEL
;
89 return (T0SZ
- MIN_T0SZ
) / BITS_PER_LEVEL
;
97 IN UINT64 RegionStart
,
99 IN BOOLEAN IsLiveBlockMapping
105 // Replacing a live block entry with a table entry (or vice versa) requires a
106 // break-before-make sequence as per the architecture. This means the mapping
107 // must be made invalid and cleaned from the TLBs first, and this is a bit of
108 // a hassle if the mapping in question covers the code that is actually doing
109 // the mapping and the unmapping, and so we only bother with this if actually
113 if (!IsLiveBlockMapping
|| !ArmMmuEnabled ()) {
114 // If the mapping is not a live block mapping, or the MMU is not on yet, we
115 // can simply overwrite the entry.
117 ArmUpdateTranslationTableEntry (Entry
, (VOID
*)(UINTN
)RegionStart
);
119 // If the mapping in question does not cover the code that updates the
120 // entry in memory, or the entry that we are intending to update, we can
121 // use an ordinary break before make. Otherwise, we will need to
122 // temporarily disable the MMU.
124 if ((((RegionStart
^ (UINTN
)mReplaceLiveEntryFunc
) & ~BlockMask
) == 0) ||
125 (((RegionStart
^ (UINTN
)Entry
) & ~BlockMask
) == 0))
128 DEBUG ((DEBUG_WARN
, "%a: splitting block entry with MMU disabled\n", __FUNCTION__
));
131 mReplaceLiveEntryFunc (Entry
, Value
, RegionStart
, DisableMmu
);
137 FreePageTablesRecursive (
138 IN UINT64
*TranslationTable
,
147 for (Index
= 0; Index
< TT_ENTRY_COUNT
; Index
++) {
148 if ((TranslationTable
[Index
] & TT_TYPE_MASK
) == TT_TYPE_TABLE_ENTRY
) {
149 FreePageTablesRecursive (
150 (VOID
*)(UINTN
)(TranslationTable
[Index
] &
151 TT_ADDRESS_MASK_BLOCK_ENTRY
),
158 FreePages (TranslationTable
, 1);
169 return (Entry
& TT_TYPE_MASK
) == TT_TYPE_BLOCK_ENTRY_LEVEL3
;
172 return (Entry
& TT_TYPE_MASK
) == TT_TYPE_BLOCK_ENTRY
;
184 // TT_TYPE_TABLE_ENTRY aliases TT_TYPE_BLOCK_ENTRY_LEVEL3
185 // so we need to take the level into account as well.
190 return (Entry
& TT_TYPE_MASK
) == TT_TYPE_TABLE_ENTRY
;
195 UpdateRegionMappingRecursive (
196 IN UINT64 RegionStart
,
198 IN UINT64 AttributeSetMask
,
199 IN UINT64 AttributeClearMask
,
200 IN UINT64
*PageTable
,
202 IN BOOLEAN TableIsLive
210 VOID
*TranslationTable
;
212 BOOLEAN NextTableIsLive
;
214 ASSERT (((RegionStart
| RegionEnd
) & EFI_PAGE_MASK
) == 0);
216 BlockShift
= (Level
+ 1) * BITS_PER_LEVEL
+ MIN_T0SZ
;
217 BlockMask
= MAX_UINT64
>> BlockShift
;
221 "%a(%d): %llx - %llx set %lx clr %lx\n",
230 for ( ; RegionStart
< RegionEnd
; RegionStart
= BlockEnd
) {
231 BlockEnd
= MIN (RegionEnd
, (RegionStart
| BlockMask
) + 1);
232 Entry
= &PageTable
[(RegionStart
>> (64 - BlockShift
)) & (TT_ENTRY_COUNT
- 1)];
235 // If RegionStart or BlockEnd is not aligned to the block size at this
236 // level, we will have to create a table mapping in order to map less
237 // than a block, and recurse to create the block or page entries at
238 // the next level. No block mappings are allowed at all at level 0,
239 // so in that case, we have to recurse unconditionally.
241 // One special case to take into account is any region that covers the page
242 // table itself: if we'd cover such a region with block mappings, we are
243 // more likely to end up in the situation later where we need to disable
244 // the MMU in order to update page table entries safely, so prefer page
245 // mappings in that particular case.
247 if ((Level
== 0) || (((RegionStart
| BlockEnd
) & BlockMask
) != 0) ||
248 ((Level
< 3) && (((UINT64
)PageTable
& ~BlockMask
) == RegionStart
)) ||
249 IsTableEntry (*Entry
, Level
))
253 if (!IsTableEntry (*Entry
, Level
)) {
255 // No table entry exists yet, so we need to allocate a page table
256 // for the next level.
258 TranslationTable
= AllocatePages (1);
259 if (TranslationTable
== NULL
) {
260 return EFI_OUT_OF_RESOURCES
;
263 if (!ArmMmuEnabled ()) {
265 // Make sure we are not inadvertently hitting in the caches
266 // when populating the page tables.
268 InvalidateDataCacheRange (TranslationTable
, EFI_PAGE_SIZE
);
271 ZeroMem (TranslationTable
, EFI_PAGE_SIZE
);
273 if (IsBlockEntry (*Entry
, Level
)) {
275 // We are splitting an existing block entry, so we have to populate
276 // the new table with the attributes of the block entry it replaces.
278 Status
= UpdateRegionMappingRecursive (
279 RegionStart
& ~BlockMask
,
280 (RegionStart
| BlockMask
) + 1,
281 *Entry
& TT_ATTRIBUTES_MASK
,
287 if (EFI_ERROR (Status
)) {
289 // The range we passed to UpdateRegionMappingRecursive () is block
290 // aligned, so it is guaranteed that no further pages were allocated
291 // by it, and so we only have to free the page we allocated here.
293 FreePages (TranslationTable
, 1);
298 NextTableIsLive
= FALSE
;
300 TranslationTable
= (VOID
*)(UINTN
)(*Entry
& TT_ADDRESS_MASK_BLOCK_ENTRY
);
301 NextTableIsLive
= TableIsLive
;
305 // Recurse to the next level
307 Status
= UpdateRegionMappingRecursive (
316 if (EFI_ERROR (Status
)) {
317 if (!IsTableEntry (*Entry
, Level
)) {
319 // We are creating a new table entry, so on failure, we can free all
320 // allocations we made recursively, given that the whole subhierarchy
321 // has not been wired into the live page tables yet. (This is not
322 // possible for existing table entries, since we cannot revert the
323 // modifications we made to the subhierarchy it represents.)
325 FreePageTablesRecursive (TranslationTable
, Level
+ 1);
331 if (!IsTableEntry (*Entry
, Level
)) {
332 EntryValue
= (UINTN
)TranslationTable
| TT_TYPE_TABLE_ENTRY
;
338 TableIsLive
&& IsBlockEntry (*Entry
, Level
)
342 EntryValue
= (*Entry
& AttributeClearMask
) | AttributeSetMask
;
343 EntryValue
|= RegionStart
;
344 EntryValue
|= (Level
== 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
345 : TT_TYPE_BLOCK_ENTRY
;
347 ReplaceTableEntry (Entry
, EntryValue
, RegionStart
, BlockMask
, FALSE
);
356 UpdateRegionMapping (
357 IN UINT64 RegionStart
,
358 IN UINT64 RegionLength
,
359 IN UINT64 AttributeSetMask
,
360 IN UINT64 AttributeClearMask
,
361 IN UINT64
*RootTable
,
362 IN BOOLEAN TableIsLive
367 if (((RegionStart
| RegionLength
) & EFI_PAGE_MASK
) != 0) {
368 return EFI_INVALID_PARAMETER
;
371 T0SZ
= ArmGetTCR () & TCR_T0SZ_MASK
;
373 return UpdateRegionMappingRecursive (
375 RegionStart
+ RegionLength
,
379 GetRootTableLevel (T0SZ
),
386 FillTranslationTable (
387 IN UINT64
*RootTable
,
388 IN ARM_MEMORY_REGION_DESCRIPTOR
*MemoryRegion
391 return UpdateRegionMapping (
392 MemoryRegion
->VirtualBase
,
393 MemoryRegion
->Length
,
394 ArmMemoryAttributeToPageAttribute (MemoryRegion
->Attributes
) | TT_AF
,
403 GcdAttributeToPageAttribute (
404 IN UINT64 GcdAttributes
407 UINT64 PageAttributes
;
409 switch (GcdAttributes
& EFI_MEMORY_CACHETYPE_MASK
) {
411 PageAttributes
= TT_ATTR_INDX_DEVICE_MEMORY
;
414 PageAttributes
= TT_ATTR_INDX_MEMORY_NON_CACHEABLE
;
417 PageAttributes
= TT_ATTR_INDX_MEMORY_WRITE_THROUGH
| TT_SH_INNER_SHAREABLE
;
420 PageAttributes
= TT_ATTR_INDX_MEMORY_WRITE_BACK
| TT_SH_INNER_SHAREABLE
;
423 PageAttributes
= TT_ATTR_INDX_MASK
;
427 if (((GcdAttributes
& EFI_MEMORY_XP
) != 0) ||
428 ((GcdAttributes
& EFI_MEMORY_CACHETYPE_MASK
) == EFI_MEMORY_UC
))
430 if (ArmReadCurrentEL () == AARCH64_EL2
) {
431 PageAttributes
|= TT_XN_MASK
;
433 PageAttributes
|= TT_UXN_MASK
| TT_PXN_MASK
;
437 if ((GcdAttributes
& EFI_MEMORY_RO
) != 0) {
438 PageAttributes
|= TT_AP_NO_RO
;
441 return PageAttributes
| TT_AF
;
445 ArmSetMemoryAttributes (
446 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
451 UINT64 PageAttributes
;
452 UINT64 PageAttributeMask
;
454 PageAttributes
= GcdAttributeToPageAttribute (Attributes
);
455 PageAttributeMask
= 0;
457 if ((Attributes
& EFI_MEMORY_CACHETYPE_MASK
) == 0) {
459 // No memory type was set in Attributes, so we are going to update the
462 PageAttributes
&= TT_AP_MASK
| TT_UXN_MASK
| TT_PXN_MASK
;
463 PageAttributeMask
= ~(TT_ADDRESS_MASK_BLOCK_ENTRY
| TT_AP_MASK
|
464 TT_PXN_MASK
| TT_XN_MASK
);
467 return UpdateRegionMapping (
472 ArmGetTTBR0BaseAddress (),
479 SetMemoryRegionAttribute (
480 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
482 IN UINT64 Attributes
,
483 IN UINT64 BlockEntryMask
486 return UpdateRegionMapping (
491 ArmGetTTBR0BaseAddress (),
497 ArmSetMemoryRegionNoExec (
498 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
504 if (ArmReadCurrentEL () == AARCH64_EL1
) {
505 Val
= TT_PXN_MASK
| TT_UXN_MASK
;
510 return SetMemoryRegionAttribute (
514 ~TT_ADDRESS_MASK_BLOCK_ENTRY
519 ArmClearMemoryRegionNoExec (
520 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
526 // XN maps to UXN in the EL1&0 translation regime
527 Mask
= ~(TT_ADDRESS_MASK_BLOCK_ENTRY
| TT_PXN_MASK
| TT_XN_MASK
);
529 return SetMemoryRegionAttribute (
538 ArmSetMemoryRegionReadOnly (
539 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
543 return SetMemoryRegionAttribute (
547 ~TT_ADDRESS_MASK_BLOCK_ENTRY
552 ArmClearMemoryRegionReadOnly (
553 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
557 return SetMemoryRegionAttribute (
561 ~(TT_ADDRESS_MASK_BLOCK_ENTRY
| TT_AP_MASK
)
568 IN ARM_MEMORY_REGION_DESCRIPTOR
*MemoryTable
,
569 OUT VOID
**TranslationTableBase OPTIONAL
,
570 OUT UINTN
*TranslationTableSize OPTIONAL
573 VOID
*TranslationTable
;
574 UINTN MaxAddressBits
;
577 UINTN RootTableEntryCount
;
581 if (MemoryTable
== NULL
) {
582 ASSERT (MemoryTable
!= NULL
);
583 return EFI_INVALID_PARAMETER
;
587 // Limit the virtual address space to what we can actually use: UEFI
588 // mandates a 1:1 mapping, so no point in making the virtual address
589 // space larger than the physical address space. We also have to take
590 // into account the architectural limitations that result from UEFI's
591 // use of 4 KB pages.
593 MaxAddressBits
= MIN (ArmGetPhysicalAddressBits (), MAX_VA_BITS
);
594 MaxAddress
= LShiftU64 (1ULL, MaxAddressBits
) - 1;
596 T0SZ
= 64 - MaxAddressBits
;
597 RootTableEntryCount
= GetRootTableEntryCount (T0SZ
);
600 // Set TCR that allows us to retrieve T0SZ in the subsequent functions
602 // Ideally we will be running at EL2, but should support EL1 as well.
603 // UEFI should not run at EL3.
604 if (ArmReadCurrentEL () == AARCH64_EL2
) {
605 // Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2
606 TCR
= T0SZ
| (1UL << 31) | (1UL << 23) | TCR_TG0_4KB
;
608 // Set the Physical Address Size using MaxAddress
609 if (MaxAddress
< SIZE_4GB
) {
611 } else if (MaxAddress
< SIZE_64GB
) {
613 } else if (MaxAddress
< SIZE_1TB
) {
615 } else if (MaxAddress
< SIZE_4TB
) {
617 } else if (MaxAddress
< SIZE_16TB
) {
619 } else if (MaxAddress
< SIZE_256TB
) {
624 "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
627 ASSERT (0); // Bigger than 48-bit memory space are not supported
628 return EFI_UNSUPPORTED
;
630 } else if (ArmReadCurrentEL () == AARCH64_EL1
) {
631 // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
632 TCR
= T0SZ
| TCR_TG0_4KB
| TCR_TG1_4KB
| TCR_EPD1
;
634 // Set the Physical Address Size using MaxAddress
635 if (MaxAddress
< SIZE_4GB
) {
637 } else if (MaxAddress
< SIZE_64GB
) {
639 } else if (MaxAddress
< SIZE_1TB
) {
641 } else if (MaxAddress
< SIZE_4TB
) {
643 } else if (MaxAddress
< SIZE_16TB
) {
645 } else if (MaxAddress
< SIZE_256TB
) {
646 TCR
|= TCR_IPS_256TB
;
650 "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
653 ASSERT (0); // Bigger than 48-bit memory space are not supported
654 return EFI_UNSUPPORTED
;
657 ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
658 return EFI_UNSUPPORTED
;
662 // Translation table walks are always cache coherent on ARMv8-A, so cache
663 // maintenance on page tables is never needed. Since there is a risk of
664 // loss of coherency when using mismatched attributes, and given that memory
665 // is mapped cacheable except for extraordinary cases (such as non-coherent
666 // DMA), have the page table walker perform cached accesses as well, and
667 // assert below that matches the attributes we use for CPU accesses to
670 TCR
|= TCR_SH_INNER_SHAREABLE
|
671 TCR_RGN_OUTER_WRITE_BACK_ALLOC
|
672 TCR_RGN_INNER_WRITE_BACK_ALLOC
;
677 // Allocate pages for translation table
678 TranslationTable
= AllocatePages (1);
679 if (TranslationTable
== NULL
) {
680 return EFI_OUT_OF_RESOURCES
;
683 if (TranslationTableBase
!= NULL
) {
684 *TranslationTableBase
= TranslationTable
;
687 if (TranslationTableSize
!= NULL
) {
688 *TranslationTableSize
= RootTableEntryCount
* sizeof (UINT64
);
691 if (!ArmMmuEnabled ()) {
693 // Make sure we are not inadvertently hitting in the caches
694 // when populating the page tables.
696 InvalidateDataCacheRange (
698 RootTableEntryCount
* sizeof (UINT64
)
702 ZeroMem (TranslationTable
, RootTableEntryCount
* sizeof (UINT64
));
704 while (MemoryTable
->Length
!= 0) {
705 Status
= FillTranslationTable (TranslationTable
, MemoryTable
);
706 if (EFI_ERROR (Status
)) {
707 goto FreeTranslationTable
;
714 // EFI_MEMORY_UC ==> MAIR_ATTR_DEVICE_MEMORY
715 // EFI_MEMORY_WC ==> MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE
716 // EFI_MEMORY_WT ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH
717 // EFI_MEMORY_WB ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
720 MAIR_ATTR (TT_ATTR_INDX_DEVICE_MEMORY
, MAIR_ATTR_DEVICE_MEMORY
) |
721 MAIR_ATTR (TT_ATTR_INDX_MEMORY_NON_CACHEABLE
, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE
) |
722 MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_THROUGH
, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH
) |
723 MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_BACK
, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
)
726 ArmSetTTBR0 (TranslationTable
);
728 if (!ArmMmuEnabled ()) {
729 ArmDisableAlignmentCheck ();
730 ArmEnableStackAlignmentCheck ();
731 ArmEnableInstructionCache ();
732 ArmEnableDataCache ();
739 FreeTranslationTable
:
740 FreePages (TranslationTable
, 1);
746 ArmMmuBaseLibConstructor (
750 extern UINT32 ArmReplaceLiveTranslationEntrySize
;
753 Hob
= GetFirstGuidHob (&gArmMmuReplaceLiveTranslationEntryFuncGuid
);
755 mReplaceLiveEntryFunc
= *(VOID
**)GET_GUID_HOB_DATA (Hob
);
758 // The ArmReplaceLiveTranslationEntry () helper function may be invoked
759 // with the MMU off so we have to ensure that it gets cleaned to the PoC
761 WriteBackDataCacheRange (
762 (VOID
*)(UINTN
)ArmReplaceLiveTranslationEntry
,
763 ArmReplaceLiveTranslationEntrySize
767 return RETURN_SUCCESS
;