/** @file
*  File managing the MMU for ARMv8 architecture
*
*  Copyright (c) 2011-2020, ARM Limited. All rights reserved.
*  Copyright (c) 2016, Linaro Limited. All rights reserved.
*  Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
*
*  SPDX-License-Identifier: BSD-2-Clause-Patent
*
**/
13 #include <Chipset/AArch64.h>
14 #include <Library/BaseMemoryLib.h>
15 #include <Library/CacheMaintenanceLib.h>
16 #include <Library/MemoryAllocationLib.h>
17 #include <Library/ArmLib.h>
18 #include <Library/ArmMmuLib.h>
19 #include <Library/BaseLib.h>
20 #include <Library/DebugLib.h>
22 // We use this index definition to define an invalid block entry
23 #define TT_ATTR_INDX_INVALID ((UINT32)~0)
27 ArmMemoryAttributeToPageAttribute (
28 IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
32 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE
:
33 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE
:
34 return TT_ATTR_INDX_MEMORY_WRITE_BACK
;
36 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK
:
37 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK
:
38 return TT_ATTR_INDX_MEMORY_WRITE_BACK
| TT_SH_INNER_SHAREABLE
;
40 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH
:
41 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH
:
42 return TT_ATTR_INDX_MEMORY_WRITE_THROUGH
| TT_SH_INNER_SHAREABLE
;
44 // Uncached and device mappings are treated as outer shareable by default,
45 case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED
:
46 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED
:
47 return TT_ATTR_INDX_MEMORY_NON_CACHEABLE
;
51 case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE
:
52 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE
:
53 if (ArmReadCurrentEL () == AARCH64_EL2
)
54 return TT_ATTR_INDX_DEVICE_MEMORY
| TT_XN_MASK
;
56 return TT_ATTR_INDX_DEVICE_MEMORY
| TT_UXN_MASK
| TT_PXN_MASK
;
61 #define BITS_PER_LEVEL 9
64 GetRootTranslationTableInfo (
66 OUT UINTN
*TableLevel
,
67 OUT UINTN
*TableEntryCount
70 // Get the level of the root table
72 *TableLevel
= (T0SZ
- MIN_T0SZ
) / BITS_PER_LEVEL
;
75 if (TableEntryCount
) {
76 *TableEntryCount
= 1UL << (BITS_PER_LEVEL
- (T0SZ
- MIN_T0SZ
) % BITS_PER_LEVEL
);
85 IN UINT64 RegionStart
,
86 IN BOOLEAN IsLiveBlockMapping
89 if (!ArmMmuEnabled () || !IsLiveBlockMapping
) {
91 ArmUpdateTranslationTableEntry (Entry
, (VOID
*)(UINTN
)RegionStart
);
93 ArmReplaceLiveTranslationEntry (Entry
, Value
, RegionStart
);
99 FreePageTablesRecursive (
100 IN UINT64
*TranslationTable
,
109 for (Index
= 0; Index
< TT_ENTRY_COUNT
; Index
++) {
110 if ((TranslationTable
[Index
] & TT_TYPE_MASK
) == TT_TYPE_TABLE_ENTRY
) {
111 FreePageTablesRecursive ((VOID
*)(UINTN
)(TranslationTable
[Index
] &
112 TT_ADDRESS_MASK_BLOCK_ENTRY
),
117 FreePages (TranslationTable
, 1);
128 return (Entry
& TT_TYPE_MASK
) == TT_TYPE_BLOCK_ENTRY_LEVEL3
;
130 return (Entry
& TT_TYPE_MASK
) == TT_TYPE_BLOCK_ENTRY
;
142 // TT_TYPE_TABLE_ENTRY aliases TT_TYPE_BLOCK_ENTRY_LEVEL3
143 // so we need to take the level into account as well.
147 return (Entry
& TT_TYPE_MASK
) == TT_TYPE_TABLE_ENTRY
;
152 UpdateRegionMappingRecursive (
153 IN UINT64 RegionStart
,
155 IN UINT64 AttributeSetMask
,
156 IN UINT64 AttributeClearMask
,
157 IN UINT64
*PageTable
,
166 VOID
*TranslationTable
;
169 ASSERT (((RegionStart
| RegionEnd
) & EFI_PAGE_MASK
) == 0);
171 BlockShift
= (Level
+ 1) * BITS_PER_LEVEL
+ MIN_T0SZ
;
172 BlockMask
= MAX_UINT64
>> BlockShift
;
174 DEBUG ((DEBUG_VERBOSE
, "%a(%d): %llx - %llx set %lx clr %lx\n", __FUNCTION__
,
175 Level
, RegionStart
, RegionEnd
, AttributeSetMask
, AttributeClearMask
));
177 for (; RegionStart
< RegionEnd
; RegionStart
= BlockEnd
) {
178 BlockEnd
= MIN (RegionEnd
, (RegionStart
| BlockMask
) + 1);
179 Entry
= &PageTable
[(RegionStart
>> (64 - BlockShift
)) & (TT_ENTRY_COUNT
- 1)];
182 // If RegionStart or BlockEnd is not aligned to the block size at this
183 // level, we will have to create a table mapping in order to map less
184 // than a block, and recurse to create the block or page entries at
185 // the next level. No block mappings are allowed at all at level 0,
186 // so in that case, we have to recurse unconditionally.
187 // If we are changing a table entry and the AttributeClearMask is non-zero,
188 // we cannot replace it with a block entry without potentially losing
189 // attribute information, so keep the table entry in that case.
191 if (Level
== 0 || ((RegionStart
| BlockEnd
) & BlockMask
) != 0 ||
192 (IsTableEntry (*Entry
, Level
) && AttributeClearMask
!= 0)) {
195 if (!IsTableEntry (*Entry
, Level
)) {
197 // No table entry exists yet, so we need to allocate a page table
198 // for the next level.
200 TranslationTable
= AllocatePages (1);
201 if (TranslationTable
== NULL
) {
202 return EFI_OUT_OF_RESOURCES
;
205 if (!ArmMmuEnabled ()) {
207 // Make sure we are not inadvertently hitting in the caches
208 // when populating the page tables.
210 InvalidateDataCacheRange (TranslationTable
, EFI_PAGE_SIZE
);
213 ZeroMem (TranslationTable
, EFI_PAGE_SIZE
);
215 if (IsBlockEntry (*Entry
, Level
)) {
217 // We are splitting an existing block entry, so we have to populate
218 // the new table with the attributes of the block entry it replaces.
220 Status
= UpdateRegionMappingRecursive (RegionStart
& ~BlockMask
,
221 (RegionStart
| BlockMask
) + 1, *Entry
& TT_ATTRIBUTES_MASK
,
222 0, TranslationTable
, Level
+ 1);
223 if (EFI_ERROR (Status
)) {
225 // The range we passed to UpdateRegionMappingRecursive () is block
226 // aligned, so it is guaranteed that no further pages were allocated
227 // by it, and so we only have to free the page we allocated here.
229 FreePages (TranslationTable
, 1);
234 TranslationTable
= (VOID
*)(UINTN
)(*Entry
& TT_ADDRESS_MASK_BLOCK_ENTRY
);
238 // Recurse to the next level
240 Status
= UpdateRegionMappingRecursive (RegionStart
, BlockEnd
,
241 AttributeSetMask
, AttributeClearMask
, TranslationTable
,
243 if (EFI_ERROR (Status
)) {
244 if (!IsTableEntry (*Entry
, Level
)) {
246 // We are creating a new table entry, so on failure, we can free all
247 // allocations we made recursively, given that the whole subhierarchy
248 // has not been wired into the live page tables yet. (This is not
249 // possible for existing table entries, since we cannot revert the
250 // modifications we made to the subhierarchy it represents.)
252 FreePageTablesRecursive (TranslationTable
, Level
+ 1);
257 if (!IsTableEntry (*Entry
, Level
)) {
258 EntryValue
= (UINTN
)TranslationTable
| TT_TYPE_TABLE_ENTRY
;
259 ReplaceTableEntry (Entry
, EntryValue
, RegionStart
,
260 IsBlockEntry (*Entry
, Level
));
263 EntryValue
= (*Entry
& AttributeClearMask
) | AttributeSetMask
;
264 EntryValue
|= RegionStart
;
265 EntryValue
|= (Level
== 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
266 : TT_TYPE_BLOCK_ENTRY
;
268 if (IsTableEntry (*Entry
, Level
)) {
270 // We are replacing a table entry with a block entry. This is only
271 // possible if we are keeping none of the original attributes.
272 // We can free the table entry's page table, and all the ones below
273 // it, since we are dropping the only possible reference to it.
275 ASSERT (AttributeClearMask
== 0);
276 TranslationTable
= (VOID
*)(UINTN
)(*Entry
& TT_ADDRESS_MASK_BLOCK_ENTRY
);
277 ReplaceTableEntry (Entry
, EntryValue
, RegionStart
, TRUE
);
278 FreePageTablesRecursive (TranslationTable
, Level
+ 1);
280 ReplaceTableEntry (Entry
, EntryValue
, RegionStart
, FALSE
);
289 LookupAddresstoRootTable (
290 IN UINT64 MaxAddress
,
292 OUT UINTN
*TableEntryCount
297 // Check the parameters are not NULL
298 ASSERT ((T0SZ
!= NULL
) && (TableEntryCount
!= NULL
));
300 // Look for the highest bit set in MaxAddress
301 for (TopBit
= 63; TopBit
!= 0; TopBit
--) {
302 if ((1ULL << TopBit
) & MaxAddress
) {
303 // MaxAddress top bit is found
308 ASSERT (TopBit
!= 0);
310 // Calculate T0SZ from the top bit of the MaxAddress
313 // Get the Table info from T0SZ
314 GetRootTranslationTableInfo (*T0SZ
, NULL
, TableEntryCount
);
319 UpdateRegionMapping (
320 IN UINT64 RegionStart
,
321 IN UINT64 RegionLength
,
322 IN UINT64 AttributeSetMask
,
323 IN UINT64 AttributeClearMask
326 UINTN RootTableLevel
;
329 if (((RegionStart
| RegionLength
) & EFI_PAGE_MASK
)) {
330 return EFI_INVALID_PARAMETER
;
333 T0SZ
= ArmGetTCR () & TCR_T0SZ_MASK
;
334 GetRootTranslationTableInfo (T0SZ
, &RootTableLevel
, NULL
);
336 return UpdateRegionMappingRecursive (RegionStart
, RegionStart
+ RegionLength
,
337 AttributeSetMask
, AttributeClearMask
, ArmGetTTBR0BaseAddress (),
343 FillTranslationTable (
344 IN UINT64
*RootTable
,
345 IN ARM_MEMORY_REGION_DESCRIPTOR
*MemoryRegion
348 return UpdateRegionMapping (
349 MemoryRegion
->VirtualBase
,
350 MemoryRegion
->Length
,
351 ArmMemoryAttributeToPageAttribute (MemoryRegion
->Attributes
) | TT_AF
,
358 GcdAttributeToPageAttribute (
359 IN UINT64 GcdAttributes
362 UINT64 PageAttributes
;
364 switch (GcdAttributes
& EFI_MEMORY_CACHETYPE_MASK
) {
366 PageAttributes
= TT_ATTR_INDX_DEVICE_MEMORY
;
369 PageAttributes
= TT_ATTR_INDX_MEMORY_NON_CACHEABLE
;
372 PageAttributes
= TT_ATTR_INDX_MEMORY_WRITE_THROUGH
| TT_SH_INNER_SHAREABLE
;
375 PageAttributes
= TT_ATTR_INDX_MEMORY_WRITE_BACK
| TT_SH_INNER_SHAREABLE
;
378 PageAttributes
= TT_ATTR_INDX_MASK
;
382 if ((GcdAttributes
& EFI_MEMORY_XP
) != 0 ||
383 (GcdAttributes
& EFI_MEMORY_CACHETYPE_MASK
) == EFI_MEMORY_UC
) {
384 if (ArmReadCurrentEL () == AARCH64_EL2
) {
385 PageAttributes
|= TT_XN_MASK
;
387 PageAttributes
|= TT_UXN_MASK
| TT_PXN_MASK
;
391 if ((GcdAttributes
& EFI_MEMORY_RO
) != 0) {
392 PageAttributes
|= TT_AP_RO_RO
;
395 return PageAttributes
| TT_AF
;
399 ArmSetMemoryAttributes (
400 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
405 UINT64 PageAttributes
;
406 UINT64 PageAttributeMask
;
408 PageAttributes
= GcdAttributeToPageAttribute (Attributes
);
409 PageAttributeMask
= 0;
411 if ((Attributes
& EFI_MEMORY_CACHETYPE_MASK
) == 0) {
413 // No memory type was set in Attributes, so we are going to update the
416 PageAttributes
&= TT_AP_MASK
| TT_UXN_MASK
| TT_PXN_MASK
;
417 PageAttributeMask
= ~(TT_ADDRESS_MASK_BLOCK_ENTRY
| TT_AP_MASK
|
418 TT_PXN_MASK
| TT_XN_MASK
);
421 return UpdateRegionMapping (BaseAddress
, Length
, PageAttributes
,
427 SetMemoryRegionAttribute (
428 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
430 IN UINT64 Attributes
,
431 IN UINT64 BlockEntryMask
434 return UpdateRegionMapping (BaseAddress
, Length
, Attributes
, BlockEntryMask
);
438 ArmSetMemoryRegionNoExec (
439 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
445 if (ArmReadCurrentEL () == AARCH64_EL1
) {
446 Val
= TT_PXN_MASK
| TT_UXN_MASK
;
451 return SetMemoryRegionAttribute (
455 ~TT_ADDRESS_MASK_BLOCK_ENTRY
);
459 ArmClearMemoryRegionNoExec (
460 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
466 // XN maps to UXN in the EL1&0 translation regime
467 Mask
= ~(TT_ADDRESS_MASK_BLOCK_ENTRY
| TT_PXN_MASK
| TT_XN_MASK
);
469 return SetMemoryRegionAttribute (
477 ArmSetMemoryRegionReadOnly (
478 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
482 return SetMemoryRegionAttribute (
486 ~TT_ADDRESS_MASK_BLOCK_ENTRY
);
490 ArmClearMemoryRegionReadOnly (
491 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
495 return SetMemoryRegionAttribute (
499 ~(TT_ADDRESS_MASK_BLOCK_ENTRY
| TT_AP_MASK
));
505 IN ARM_MEMORY_REGION_DESCRIPTOR
*MemoryTable
,
506 OUT VOID
**TranslationTableBase OPTIONAL
,
507 OUT UINTN
*TranslationTableSize OPTIONAL
510 VOID
* TranslationTable
;
513 UINTN RootTableEntryCount
;
517 if (MemoryTable
== NULL
) {
518 ASSERT (MemoryTable
!= NULL
);
519 return EFI_INVALID_PARAMETER
;
523 // Limit the virtual address space to what we can actually use: UEFI
524 // mandates a 1:1 mapping, so no point in making the virtual address
525 // space larger than the physical address space. We also have to take
526 // into account the architectural limitations that result from UEFI's
527 // use of 4 KB pages.
529 MaxAddress
= MIN (LShiftU64 (1ULL, ArmGetPhysicalAddressBits ()) - 1,
532 // Lookup the Table Level to get the information
533 LookupAddresstoRootTable (MaxAddress
, &T0SZ
, &RootTableEntryCount
);
536 // Set TCR that allows us to retrieve T0SZ in the subsequent functions
538 // Ideally we will be running at EL2, but should support EL1 as well.
539 // UEFI should not run at EL3.
540 if (ArmReadCurrentEL () == AARCH64_EL2
) {
541 //Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2
542 TCR
= T0SZ
| (1UL << 31) | (1UL << 23) | TCR_TG0_4KB
;
544 // Set the Physical Address Size using MaxAddress
545 if (MaxAddress
< SIZE_4GB
) {
547 } else if (MaxAddress
< SIZE_64GB
) {
549 } else if (MaxAddress
< SIZE_1TB
) {
551 } else if (MaxAddress
< SIZE_4TB
) {
553 } else if (MaxAddress
< SIZE_16TB
) {
555 } else if (MaxAddress
< SIZE_256TB
) {
559 "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
561 ASSERT (0); // Bigger than 48-bit memory space are not supported
562 return EFI_UNSUPPORTED
;
564 } else if (ArmReadCurrentEL () == AARCH64_EL1
) {
565 // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
566 TCR
= T0SZ
| TCR_TG0_4KB
| TCR_TG1_4KB
| TCR_EPD1
;
568 // Set the Physical Address Size using MaxAddress
569 if (MaxAddress
< SIZE_4GB
) {
571 } else if (MaxAddress
< SIZE_64GB
) {
573 } else if (MaxAddress
< SIZE_1TB
) {
575 } else if (MaxAddress
< SIZE_4TB
) {
577 } else if (MaxAddress
< SIZE_16TB
) {
579 } else if (MaxAddress
< SIZE_256TB
) {
580 TCR
|= TCR_IPS_256TB
;
583 "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
585 ASSERT (0); // Bigger than 48-bit memory space are not supported
586 return EFI_UNSUPPORTED
;
589 ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
590 return EFI_UNSUPPORTED
;
594 // Translation table walks are always cache coherent on ARMv8-A, so cache
595 // maintenance on page tables is never needed. Since there is a risk of
596 // loss of coherency when using mismatched attributes, and given that memory
597 // is mapped cacheable except for extraordinary cases (such as non-coherent
598 // DMA), have the page table walker perform cached accesses as well, and
599 // assert below that that matches the attributes we use for CPU accesses to
602 TCR
|= TCR_SH_INNER_SHAREABLE
|
603 TCR_RGN_OUTER_WRITE_BACK_ALLOC
|
604 TCR_RGN_INNER_WRITE_BACK_ALLOC
;
609 // Allocate pages for translation table
610 TranslationTable
= AllocatePages (1);
611 if (TranslationTable
== NULL
) {
612 return EFI_OUT_OF_RESOURCES
;
615 // We set TTBR0 just after allocating the table to retrieve its location from
616 // the subsequent functions without needing to pass this value across the
617 // functions. The MMU is only enabled after the translation tables are
620 ArmSetTTBR0 (TranslationTable
);
622 if (TranslationTableBase
!= NULL
) {
623 *TranslationTableBase
= TranslationTable
;
626 if (TranslationTableSize
!= NULL
) {
627 *TranslationTableSize
= RootTableEntryCount
* sizeof (UINT64
);
631 // Make sure we are not inadvertently hitting in the caches
632 // when populating the page tables.
634 InvalidateDataCacheRange (TranslationTable
,
635 RootTableEntryCount
* sizeof (UINT64
));
636 ZeroMem (TranslationTable
, RootTableEntryCount
* sizeof (UINT64
));
638 while (MemoryTable
->Length
!= 0) {
639 Status
= FillTranslationTable (TranslationTable
, MemoryTable
);
640 if (EFI_ERROR (Status
)) {
641 goto FreeTranslationTable
;
647 // EFI_MEMORY_UC ==> MAIR_ATTR_DEVICE_MEMORY
648 // EFI_MEMORY_WC ==> MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE
649 // EFI_MEMORY_WT ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH
650 // EFI_MEMORY_WB ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
653 MAIR_ATTR (TT_ATTR_INDX_DEVICE_MEMORY
, MAIR_ATTR_DEVICE_MEMORY
) |
654 MAIR_ATTR (TT_ATTR_INDX_MEMORY_NON_CACHEABLE
, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE
) |
655 MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_THROUGH
, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH
) |
656 MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_BACK
, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
)
659 ArmDisableAlignmentCheck ();
660 ArmEnableStackAlignmentCheck ();
661 ArmEnableInstructionCache ();
662 ArmEnableDataCache ();
667 FreeTranslationTable
:
668 FreePages (TranslationTable
, 1);
674 ArmMmuBaseLibConstructor (
678 extern UINT32 ArmReplaceLiveTranslationEntrySize
;
681 // The ArmReplaceLiveTranslationEntry () helper function may be invoked
682 // with the MMU off so we have to ensure that it gets cleaned to the PoC
684 WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry
,
685 ArmReplaceLiveTranslationEntrySize
);
687 return RETURN_SUCCESS
;