/** @file
*  File managing the MMU for ARMv8 architecture
*
*  Copyright (c) 2011-2020, ARM Limited. All rights reserved.
*  Copyright (c) 2016, Linaro Limited. All rights reserved.
*  Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
*
*  SPDX-License-Identifier: BSD-2-Clause-Patent
*
**/
13 #include <Chipset/AArch64.h>
14 #include <Library/BaseMemoryLib.h>
15 #include <Library/CacheMaintenanceLib.h>
16 #include <Library/MemoryAllocationLib.h>
17 #include <Library/ArmLib.h>
18 #include <Library/ArmMmuLib.h>
19 #include <Library/BaseLib.h>
20 #include <Library/DebugLib.h>
/**
  Convert an ARM_MEMORY_REGION_ATTRIBUTES value into the corresponding
  VMSAv8-64 translation table attribute bits (MAIR index, shareability,
  and execute-never bits).

  @param[in] Attributes   The ARM memory region attribute to convert.

  @return The translation table descriptor attribute bits for the region.
**/
STATIC
UINT64
ArmMemoryAttributeToPageAttribute (
  IN ARM_MEMORY_REGION_ATTRIBUTES  Attributes
  )
{
  switch (Attributes) {
  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
    return TT_ATTR_INDX_MEMORY_WRITE_BACK;

  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
    return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;

  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
    return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;

  // Uncached and device mappings are treated as outer shareable by default,
  case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
    return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;

  default:
    ASSERT (0);
    // Deliberate fallthrough: unknown attributes are mapped as device memory.
  case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
    // Device mappings are always non-executable: the EL2 translation regime
    // has a single XN bit, the EL1&0 regime has separate UXN/PXN bits.
    if (ArmReadCurrentEL () == AARCH64_EL2) {
      return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
    } else {
      return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
    }
  }
}
//
// With UEFI's 4 KB translation granule, each translation table level
// resolves BITS_PER_LEVEL bits of virtual address.
//
#define BITS_PER_LEVEL 9
//
// Maximum virtual address width this implementation supports.
//
#define MAX_VA_BITS 48
63 GetRootTableEntryCount (
67 return TT_ENTRY_COUNT
>> (T0SZ
- MIN_T0SZ
) % BITS_PER_LEVEL
;
76 return (T0SZ
- MIN_T0SZ
) / BITS_PER_LEVEL
;
/**
  Install a new value into a translation table descriptor.

  If the MMU is off, or the descriptor being replaced is not a live block
  mapping, the entry is written directly and
  ArmUpdateTranslationTableEntry () is invoked for the covered region.
  Otherwise the update is delegated to ArmReplaceLiveTranslationEntry ()
  (implemented in assembly elsewhere; NOTE(review): presumably performs a
  safe break-before-make sequence — confirm against its implementation).

  @param[in] Entry               Pointer to the descriptor to update.
  @param[in] Value               New descriptor value.
  @param[in] RegionStart         Start VA of the region the descriptor maps.
  @param[in] IsLiveBlockMapping  TRUE if an active block mapping is being
                                 replaced.
**/
STATIC
VOID
ReplaceTableEntry (
  IN  UINT64  *Entry,
  IN  UINT64  Value,
  IN  UINT64  RegionStart,
  IN  BOOLEAN IsLiveBlockMapping
  )
{
  if (!ArmMmuEnabled () || !IsLiveBlockMapping) {
    *Entry = Value;
    ArmUpdateTranslationTableEntry (Entry, (VOID *)(UINTN)RegionStart);
  } else {
    ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart);
  }
}
/**
  Recursively free a translation table and every next-level table it
  refers to.

  For levels 0..2, each table-type descriptor is followed and the
  subordinate table is freed first; the table itself is freed last.
  Level 3 tables contain no table entries, so their page is freed directly.

  @param[in] TranslationTable  The translation table to free.
  @param[in] Level             The level of TranslationTable (0..3).
**/
STATIC
VOID
FreePageTablesRecursive (
  IN  UINT64  *TranslationTable,
  IN  UINTN   Level
  )
{
  UINTN   Index;

  ASSERT (Level <= 3);

  if (Level < 3) {
    for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
      if ((TranslationTable[Index] & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
        FreePageTablesRecursive ((VOID *)(UINTN)(TranslationTable[Index] &
                                                 TT_ADDRESS_MASK_BLOCK_ENTRY),
                                 Level + 1);
      }
    }
  }

  FreePages (TranslationTable, 1);
}
127 return (Entry
& TT_TYPE_MASK
) == TT_TYPE_BLOCK_ENTRY_LEVEL3
;
129 return (Entry
& TT_TYPE_MASK
) == TT_TYPE_BLOCK_ENTRY
;
141 // TT_TYPE_TABLE_ENTRY aliases TT_TYPE_BLOCK_ENTRY_LEVEL3
142 // so we need to take the level into account as well.
146 return (Entry
& TT_TYPE_MASK
) == TT_TYPE_TABLE_ENTRY
;
/**
  Recursively create or update the mappings covering a virtual address range.

  Walks PageTable at the given level; for each naturally aligned chunk of
  [RegionStart, RegionEnd) it either rewrites the block/page descriptor in
  place, or descends into a (possibly newly allocated) next-level table when
  finer granularity is required.

  New descriptor attributes are computed as
    (Entry & AttributeClearMask) | AttributeSetMask
  so AttributeClearMask is the mask of EXISTING bits to retain; a value of 0
  replaces the attributes entirely.

  @param[in] RegionStart         Start of the range (page aligned).
  @param[in] RegionEnd           Exclusive end of the range (page aligned).
  @param[in] AttributeSetMask    Descriptor attribute bits to set.
  @param[in] AttributeClearMask  Mask of existing descriptor bits to keep.
  @param[in] PageTable           Translation table to update at this level.
  @param[in] Level               Level of PageTable (0..3).

  @retval EFI_SUCCESS            Range mapped/updated successfully.
  @retval EFI_OUT_OF_RESOURCES   A next-level table allocation failed.
**/
STATIC
EFI_STATUS
UpdateRegionMappingRecursive (
  IN  UINT64      RegionStart,
  IN  UINT64      RegionEnd,
  IN  UINT64      AttributeSetMask,
  IN  UINT64      AttributeClearMask,
  IN  UINT64      *PageTable,
  IN  UINTN       Level
  )
{
  UINTN           BlockShift;
  UINT64          BlockMask;
  UINT64          BlockEnd;
  UINT64          *Entry;
  UINT64          EntryValue;
  VOID            *TranslationTable;
  EFI_STATUS      Status;

  ASSERT (((RegionStart | RegionEnd) & EFI_PAGE_MASK) == 0);

  // Number of leading VA bits resolved down to (and including) this level;
  // BlockMask covers the offset within one block at this level.
  BlockShift = (Level + 1) * BITS_PER_LEVEL + MIN_T0SZ;
  BlockMask = MAX_UINT64 >> BlockShift;

  DEBUG ((DEBUG_VERBOSE, "%a(%d): %llx - %llx set %lx clr %lx\n", __FUNCTION__,
    Level, RegionStart, RegionEnd, AttributeSetMask, AttributeClearMask));

  for (; RegionStart < RegionEnd; RegionStart = BlockEnd) {
    BlockEnd = MIN (RegionEnd, (RegionStart | BlockMask) + 1);
    Entry = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)];

    //
    // If RegionStart or BlockEnd is not aligned to the block size at this
    // level, we will have to create a table mapping in order to map less
    // than a block, and recurse to create the block or page entries at
    // the next level. No block mappings are allowed at all at level 0,
    // so in that case, we have to recurse unconditionally.
    // If we are changing a table entry and the AttributeClearMask is non-zero,
    // we cannot replace it with a block entry without potentially losing
    // attribute information, so keep the table entry in that case.
    //
    if (Level == 0 || ((RegionStart | BlockEnd) & BlockMask) != 0 ||
        (IsTableEntry (*Entry, Level) && AttributeClearMask != 0)) {

      if (!IsTableEntry (*Entry, Level)) {
        //
        // No table entry exists yet, so we need to allocate a page table
        // for the next level.
        //
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return EFI_OUT_OF_RESOURCES;
        }

        if (!ArmMmuEnabled ()) {
          //
          // Make sure we are not inadvertently hitting in the caches
          // when populating the page tables.
          //
          InvalidateDataCacheRange (TranslationTable, EFI_PAGE_SIZE);
        }

        ZeroMem (TranslationTable, EFI_PAGE_SIZE);

        if (IsBlockEntry (*Entry, Level)) {
          //
          // We are splitting an existing block entry, so we have to populate
          // the new table with the attributes of the block entry it replaces.
          //
          Status = UpdateRegionMappingRecursive (RegionStart & ~BlockMask,
                     (RegionStart | BlockMask) + 1, *Entry & TT_ATTRIBUTES_MASK,
                     0, TranslationTable, Level + 1);
          if (EFI_ERROR (Status)) {
            //
            // The range we passed to UpdateRegionMappingRecursive () is block
            // aligned, so it is guaranteed that no further pages were allocated
            // by it, and so we only have to free the page we allocated here.
            //
            FreePages (TranslationTable, 1);
            return Status;
          }
        }
      } else {
        TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
      }

      //
      // Recurse to the next level
      //
      Status = UpdateRegionMappingRecursive (RegionStart, BlockEnd,
                 AttributeSetMask, AttributeClearMask, TranslationTable,
                 Level + 1);
      if (EFI_ERROR (Status)) {
        if (!IsTableEntry (*Entry, Level)) {
          //
          // We are creating a new table entry, so on failure, we can free all
          // allocations we made recursively, given that the whole subhierarchy
          // has not been wired into the live page tables yet. (This is not
          // possible for existing table entries, since we cannot revert the
          // modifications we made to the subhierarchy it represents.)
          //
          FreePageTablesRecursive (TranslationTable, Level + 1);
        }
        return Status;
      }

      if (!IsTableEntry (*Entry, Level)) {
        // Wire the newly populated table into the hierarchy only after it is
        // fully built, so a partially constructed subtree is never live.
        EntryValue = (UINTN)TranslationTable | TT_TYPE_TABLE_ENTRY;
        ReplaceTableEntry (Entry, EntryValue, RegionStart,
          IsBlockEntry (*Entry, Level));
      }
    } else {
      // Block-aligned chunk: rewrite the descriptor in place, keeping the
      // existing bits selected by AttributeClearMask.
      EntryValue = (*Entry & AttributeClearMask) | AttributeSetMask;
      EntryValue |= RegionStart;
      EntryValue |= (Level == 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
                                 : TT_TYPE_BLOCK_ENTRY;

      if (IsTableEntry (*Entry, Level)) {
        //
        // We are replacing a table entry with a block entry. This is only
        // possible if we are keeping none of the original attributes.
        // We can free the table entry's page table, and all the ones below
        // it, since we are dropping the only possible reference to it.
        //
        ASSERT (AttributeClearMask == 0);
        TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
        ReplaceTableEntry (Entry, EntryValue, RegionStart, TRUE);
        FreePageTablesRecursive (TranslationTable, Level + 1);
      } else {
        ReplaceTableEntry (Entry, EntryValue, RegionStart, FALSE);
      }
    }
  }
  return EFI_SUCCESS;
}
/**
  Update the mappings for a region of the current (TTBR0) address space.

  Derives the root table level from the live TCR value and delegates to
  UpdateRegionMappingRecursive ().

  @param[in] RegionStart         Start of the region (page aligned).
  @param[in] RegionLength        Length of the region (page aligned).
  @param[in] AttributeSetMask    Descriptor attribute bits to set.
  @param[in] AttributeClearMask  Mask of existing descriptor bits to keep.

  @retval EFI_SUCCESS            The region was updated successfully.
  @retval EFI_INVALID_PARAMETER  Start or length is not page aligned.
  @return Errors propagated from UpdateRegionMappingRecursive ().
**/
STATIC
EFI_STATUS
UpdateRegionMapping (
  IN  UINT64  RegionStart,
  IN  UINT64  RegionLength,
  IN  UINT64  AttributeSetMask,
  IN  UINT64  AttributeClearMask
  )
{
  UINTN     T0SZ;

  if (((RegionStart | RegionLength) & EFI_PAGE_MASK) != 0) {
    return EFI_INVALID_PARAMETER;
  }

  // T0SZ was programmed by ArmConfigureMmu (), so reading it back here
  // recovers the root table geometry without passing it around.
  T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;

  return UpdateRegionMappingRecursive (RegionStart, RegionStart + RegionLength,
           AttributeSetMask, AttributeClearMask, ArmGetTTBR0BaseAddress (),
           GetRootTableLevel (T0SZ));
}
310 FillTranslationTable (
311 IN UINT64
*RootTable
,
312 IN ARM_MEMORY_REGION_DESCRIPTOR
*MemoryRegion
315 return UpdateRegionMapping (
316 MemoryRegion
->VirtualBase
,
317 MemoryRegion
->Length
,
318 ArmMemoryAttributeToPageAttribute (MemoryRegion
->Attributes
) | TT_AF
,
/**
  Convert GCD memory attributes (EFI_MEMORY_xx) into VMSAv8-64 page table
  descriptor attribute bits.

  @param[in] GcdAttributes  The EFI_MEMORY_xx attribute mask to convert.

  @return The descriptor attribute bits, including TT_AF.
**/
STATIC
UINT64
GcdAttributeToPageAttribute (
  IN UINT64 GcdAttributes
  )
{
  UINT64 PageAttributes;

  switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) {
  case EFI_MEMORY_UC:
    PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
    break;
  case EFI_MEMORY_WC:
    PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
    break;
  case EFI_MEMORY_WT:
    PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
    break;
  case EFI_MEMORY_WB:
    PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
    break;
  default:
    PageAttributes = TT_ATTR_INDX_MASK;
    break;
  }

  // Execute-never applies to regions marked XP, and always to device (UC)
  // memory. At EL2 a single XN bit exists; EL1&0 has separate UXN/PXN bits.
  if ((GcdAttributes & EFI_MEMORY_XP) != 0 ||
      (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC) {
    if (ArmReadCurrentEL () == AARCH64_EL2) {
      PageAttributes |= TT_XN_MASK;
    } else {
      PageAttributes |= TT_UXN_MASK | TT_PXN_MASK;
    }
  }

  if ((GcdAttributes & EFI_MEMORY_RO) != 0) {
    PageAttributes |= TT_AP_RO_RO;
  }

  return PageAttributes | TT_AF;
}
/**
  Apply GCD memory attributes to a region of the address space.

  When Attributes contains no cacheability type, only the permission bits
  (AP/UXN/PXN) are updated and the existing memory type is preserved;
  otherwise the full attribute set is replaced.

  @param[in] BaseAddress  Start of the region (page aligned).
  @param[in] Length       Length of the region (page aligned).
  @param[in] Attributes   EFI_MEMORY_xx attributes to apply.

  @return Status returned by UpdateRegionMapping ().
**/
EFI_STATUS
ArmSetMemoryAttributes (
  IN EFI_PHYSICAL_ADDRESS      BaseAddress,
  IN UINT64                    Length,
  IN UINT64                    Attributes
  )
{
  UINT64                       PageAttributes;
  UINT64                       PageAttributeMask;

  PageAttributes = GcdAttributeToPageAttribute (Attributes);
  PageAttributeMask = 0;

  if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {
    //
    // No memory type was set in Attributes, so we are going to update the
    // permissions only.
    //
    PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;
    PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |
                          TT_PXN_MASK | TT_XN_MASK);
  }

  return UpdateRegionMapping (BaseAddress, Length, PageAttributes,
           PageAttributeMask);
}
/**
  Helper: apply descriptor attribute bits to a region.

  @param[in] BaseAddress     Start of the region (page aligned).
  @param[in] Length          Length of the region (page aligned).
  @param[in] Attributes      Descriptor attribute bits to set.
  @param[in] BlockEntryMask  Mask of existing descriptor bits to keep.

  @return Status returned by UpdateRegionMapping ().
**/
STATIC
EFI_STATUS
SetMemoryRegionAttribute (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress,
  IN  UINT64                    Length,
  IN  UINT64                    Attributes,
  IN  UINT64                    BlockEntryMask
  )
{
  return UpdateRegionMapping (BaseAddress, Length, Attributes, BlockEntryMask);
}
/**
  Mark a region of memory as non-executable.

  At EL1 both UXN and PXN are set; at other ELs the single XN bit is used.
  All other descriptor bits are preserved.

  @param[in] BaseAddress  Start of the region (page aligned).
  @param[in] Length       Length of the region (page aligned).

  @return Status returned by SetMemoryRegionAttribute ().
**/
EFI_STATUS
ArmSetMemoryRegionNoExec (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress,
  IN  UINT64                    Length
  )
{
  UINT64    Val;

  if (ArmReadCurrentEL () == AARCH64_EL1) {
    Val = TT_PXN_MASK | TT_UXN_MASK;
  } else {
    Val = TT_XN_MASK;
  }

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           Val,
           ~TT_ADDRESS_MASK_BLOCK_ENTRY);
}
/**
  Clear the non-executable attribute from a region of memory.

  @param[in] BaseAddress  Start of the region (page aligned).
  @param[in] Length       Length of the region (page aligned).

  @return Status returned by SetMemoryRegionAttribute ().
**/
EFI_STATUS
ArmClearMemoryRegionNoExec (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress,
  IN  UINT64                    Length
  )
{
  UINT64 Mask;

  // XN maps to UXN in the EL1&0 translation regime
  Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           0,
           Mask);
}
444 ArmSetMemoryRegionReadOnly (
445 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
449 return SetMemoryRegionAttribute (
453 ~TT_ADDRESS_MASK_BLOCK_ENTRY
);
457 ArmClearMemoryRegionReadOnly (
458 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
462 return SetMemoryRegionAttribute (
466 ~(TT_ADDRESS_MASK_BLOCK_ENTRY
| TT_AP_MASK
));
/**
  Configure the MMU: build translation tables describing MemoryTable,
  program TCR, MAIR and TTBR0, and enable the MMU and caches.

  @param[in]  MemoryTable           Array of memory region descriptors,
                                    terminated by an entry with Length == 0.
  @param[out] TranslationTableBase  Optional; returns the root table address.
  @param[out] TranslationTableSize  Optional; returns the root table size in
                                    bytes.

  @retval EFI_SUCCESS            MMU configured and enabled.
  @retval EFI_INVALID_PARAMETER  MemoryTable is NULL.
  @retval EFI_UNSUPPORTED        Running at an unsupported EL, or the address
                                 space exceeds what this configuration allows.
  @retval EFI_OUT_OF_RESOURCES   Root table allocation failed.
  @return Errors propagated from FillTranslationTable ().
**/
EFI_STATUS
EFIAPI
ArmConfigureMmu (
  IN  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryTable,
  OUT VOID                          **TranslationTableBase OPTIONAL,
  OUT UINTN                         *TranslationTableSize OPTIONAL
  )
{
  VOID*                         TranslationTable;
  UINTN                         MaxAddressBits;
  UINT64                        MaxAddress;
  UINTN                         T0SZ;
  UINTN                         RootTableEntryCount;
  UINT64                        TCR;
  EFI_STATUS                    Status;

  if (MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return EFI_INVALID_PARAMETER;
  }

  //
  // Limit the virtual address space to what we can actually use: UEFI
  // mandates a 1:1 mapping, so no point in making the virtual address
  // space larger than the physical address space. We also have to take
  // into account the architectural limitations that result from UEFI's
  // use of 4 KB pages.
  //
  MaxAddressBits = MIN (ArmGetPhysicalAddressBits (), MAX_VA_BITS);
  MaxAddress = LShiftU64 (1ULL, MaxAddressBits) - 1;

  T0SZ = 64 - MaxAddressBits;
  RootTableEntryCount = GetRootTableEntryCount (T0SZ);

  //
  // Set TCR that allows us to retrieve T0SZ in the subsequent functions
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    //Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return EFI_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return EFI_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return EFI_UNSUPPORTED;
  }

  //
  // Translation table walks are always cache coherent on ARMv8-A, so cache
  // maintenance on page tables is never needed. Since there is a risk of
  // loss of coherency when using mismatched attributes, and given that memory
  // is mapped cacheable except for extraordinary cases (such as non-coherent
  // DMA), have the page table walker perform cached accesses as well, and
  // assert below that that matches the attributes we use for CPU accesses to
  // the region.
  //
  TCR |= TCR_SH_INNER_SHAREABLE |
         TCR_RGN_OUTER_WRITE_BACK_ALLOC |
         TCR_RGN_INNER_WRITE_BACK_ALLOC;

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for translation table
  TranslationTable = AllocatePages (1);
  if (TranslationTable == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }
  //
  // We set TTBR0 just after allocating the table to retrieve its location from
  // the subsequent functions without needing to pass this value across the
  // functions. The MMU is only enabled after the translation tables are
  // populated.
  //
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof (UINT64);
  }

  //
  // Make sure we are not inadvertently hitting in the caches
  // when populating the page tables.
  //
  InvalidateDataCacheRange (TranslationTable,
    RootTableEntryCount * sizeof (UINT64));
  ZeroMem (TranslationTable, RootTableEntryCount * sizeof (UINT64));

  while (MemoryTable->Length != 0) {
    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (EFI_ERROR (Status)) {
      goto FreeTranslationTable;
    }
    MemoryTable++;
  }

  //
  // EFI_MEMORY_UC ==> MAIR_ATTR_DEVICE_MEMORY
  // EFI_MEMORY_WC ==> MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE
  // EFI_MEMORY_WT ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH
  // EFI_MEMORY_WB ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
  //
  ArmSetMAIR (
    MAIR_ATTR (TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)
    );

  ArmDisableAlignmentCheck ();
  ArmEnableStackAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return EFI_SUCCESS;

FreeTranslationTable:
  FreePages (TranslationTable, 1);
  return Status;
}
/**
  Library constructor.

  Cleans the ArmReplaceLiveTranslationEntry () helper routine to the point
  of coherency, since it may later be invoked with the MMU off and must be
  fetchable from memory at that point.

  @retval RETURN_SUCCESS  Always.
**/
RETURN_STATUS
EFIAPI
ArmMmuBaseLibConstructor (
  VOID
  )
{
  // Size of the assembly helper, exported by the implementation alongside it.
  extern UINT32 ArmReplaceLiveTranslationEntrySize;

  //
  // The ArmReplaceLiveTranslationEntry () helper function may be invoked
  // with the MMU off so we have to ensure that it gets cleaned to the PoC
  //
  WriteBackDataCacheRange ((VOID *)(UINTN)ArmReplaceLiveTranslationEntry,
    ArmReplaceLiveTranslationEntrySize);

  return RETURN_SUCCESS;
}