2 * File managing the MMU for ARMv8 architecture
4 * Copyright (c) 2011-2020, ARM Limited. All rights reserved.
5 * Copyright (c) 2016, Linaro Limited. All rights reserved.
6 * Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
8 * SPDX-License-Identifier: BSD-2-Clause-Patent
13 #include <Chipset/AArch64.h>
14 #include <Library/BaseMemoryLib.h>
15 #include <Library/CacheMaintenanceLib.h>
16 #include <Library/MemoryAllocationLib.h>
17 #include <Library/ArmLib.h>
18 #include <Library/ArmMmuLib.h>
19 #include <Library/BaseLib.h>
20 #include <Library/DebugLib.h>
//
// Sentinel attribute-index value used to flag an invalid/unsupported
// block entry.
// NOTE(review): no reference to this constant is visible in this chunk —
// confirm it is still used elsewhere in the file before removing.
//
#define TT_ATTR_INDX_INVALID    ((UINT32)~0)
27 ArmMemoryAttributeToPageAttribute (
28 IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
32 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE
:
33 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE
:
34 return TT_ATTR_INDX_MEMORY_WRITE_BACK
;
36 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK
:
37 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK
:
38 return TT_ATTR_INDX_MEMORY_WRITE_BACK
| TT_SH_INNER_SHAREABLE
;
40 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH
:
41 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH
:
42 return TT_ATTR_INDX_MEMORY_WRITE_THROUGH
| TT_SH_INNER_SHAREABLE
;
44 // Uncached and device mappings are treated as outer shareable by default,
45 case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED
:
46 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED
:
47 return TT_ATTR_INDX_MEMORY_NON_CACHEABLE
;
51 case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE
:
52 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE
:
53 if (ArmReadCurrentEL () == AARCH64_EL2
)
54 return TT_ATTR_INDX_DEVICE_MEMORY
| TT_XN_MASK
;
56 return TT_ATTR_INDX_DEVICE_MEMORY
| TT_UXN_MASK
| TT_PXN_MASK
;
61 PageAttributeToGcdAttribute (
62 IN UINT64 PageAttributes
67 switch (PageAttributes
& TT_ATTR_INDX_MASK
) {
68 case TT_ATTR_INDX_DEVICE_MEMORY
:
69 GcdAttributes
= EFI_MEMORY_UC
;
71 case TT_ATTR_INDX_MEMORY_NON_CACHEABLE
:
72 GcdAttributes
= EFI_MEMORY_WC
;
74 case TT_ATTR_INDX_MEMORY_WRITE_THROUGH
:
75 GcdAttributes
= EFI_MEMORY_WT
;
77 case TT_ATTR_INDX_MEMORY_WRITE_BACK
:
78 GcdAttributes
= EFI_MEMORY_WB
;
82 "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n",
85 // The Global Coherency Domain (GCD) value is defined as a bit set.
86 // Returning 0 means no attribute has been set.
90 // Determine protection attributes
91 if (((PageAttributes
& TT_AP_MASK
) == TT_AP_NO_RO
) ||
92 ((PageAttributes
& TT_AP_MASK
) == TT_AP_RO_RO
)) {
93 // Read only cases map to write-protect
94 GcdAttributes
|= EFI_MEMORY_RO
;
97 // Process eXecute Never attribute
98 if ((PageAttributes
& (TT_PXN_MASK
| TT_UXN_MASK
)) != 0) {
99 GcdAttributes
|= EFI_MEMORY_XP
;
102 return GcdAttributes
;
106 #define BITS_PER_LEVEL 9
109 GetRootTranslationTableInfo (
111 OUT UINTN
*TableLevel
,
112 OUT UINTN
*TableEntryCount
115 // Get the level of the root table
117 *TableLevel
= (T0SZ
- MIN_T0SZ
) / BITS_PER_LEVEL
;
120 if (TableEntryCount
) {
121 *TableEntryCount
= 1UL << (BITS_PER_LEVEL
- (T0SZ
- MIN_T0SZ
) % BITS_PER_LEVEL
);
130 IN UINT64 RegionStart
,
131 IN BOOLEAN IsLiveBlockMapping
134 if (!ArmMmuEnabled () || !IsLiveBlockMapping
) {
136 ArmUpdateTranslationTableEntry (Entry
, (VOID
*)(UINTN
)RegionStart
);
138 ArmReplaceLiveTranslationEntry (Entry
, Value
, RegionStart
);
144 FreePageTablesRecursive (
145 IN UINT64
*TranslationTable
,
154 for (Index
= 0; Index
< TT_ENTRY_COUNT
; Index
++) {
155 if ((TranslationTable
[Index
] & TT_TYPE_MASK
) == TT_TYPE_TABLE_ENTRY
) {
156 FreePageTablesRecursive ((VOID
*)(UINTN
)(TranslationTable
[Index
] &
157 TT_ADDRESS_MASK_BLOCK_ENTRY
),
162 FreePages (TranslationTable
, 1);
167 UpdateRegionMappingRecursive (
168 IN UINT64 RegionStart
,
170 IN UINT64 AttributeSetMask
,
171 IN UINT64 AttributeClearMask
,
172 IN UINT64
*PageTable
,
181 VOID
*TranslationTable
;
184 ASSERT (((RegionStart
| RegionEnd
) & EFI_PAGE_MASK
) == 0);
186 BlockShift
= (Level
+ 1) * BITS_PER_LEVEL
+ MIN_T0SZ
;
187 BlockMask
= MAX_UINT64
>> BlockShift
;
189 DEBUG ((DEBUG_VERBOSE
, "%a(%d): %llx - %llx set %lx clr %lx\n", __FUNCTION__
,
190 Level
, RegionStart
, RegionEnd
, AttributeSetMask
, AttributeClearMask
));
192 for (; RegionStart
< RegionEnd
; RegionStart
= BlockEnd
) {
193 BlockEnd
= MIN (RegionEnd
, (RegionStart
| BlockMask
) + 1);
194 Entry
= &PageTable
[(RegionStart
>> (64 - BlockShift
)) & (TT_ENTRY_COUNT
- 1)];
197 // If RegionStart or BlockEnd is not aligned to the block size at this
198 // level, we will have to create a table mapping in order to map less
199 // than a block, and recurse to create the block or page entries at
200 // the next level. No block mappings are allowed at all at level 0,
201 // so in that case, we have to recurse unconditionally.
203 if (Level
== 0 || ((RegionStart
| BlockEnd
) & BlockMask
) != 0) {
206 if ((*Entry
& TT_TYPE_MASK
) != TT_TYPE_TABLE_ENTRY
) {
208 // No table entry exists yet, so we need to allocate a page table
209 // for the next level.
211 TranslationTable
= AllocatePages (1);
212 if (TranslationTable
== NULL
) {
213 return EFI_OUT_OF_RESOURCES
;
216 if (!ArmMmuEnabled ()) {
218 // Make sure we are not inadvertently hitting in the caches
219 // when populating the page tables.
221 InvalidateDataCacheRange (TranslationTable
, EFI_PAGE_SIZE
);
224 if ((*Entry
& TT_TYPE_MASK
) == TT_TYPE_BLOCK_ENTRY
) {
226 // We are splitting an existing block entry, so we have to populate
227 // the new table with the attributes of the block entry it replaces.
229 Status
= UpdateRegionMappingRecursive (RegionStart
& ~BlockMask
,
230 (RegionStart
| BlockMask
) + 1, *Entry
& TT_ATTRIBUTES_MASK
,
231 0, TranslationTable
, Level
+ 1);
232 if (EFI_ERROR (Status
)) {
234 // The range we passed to UpdateRegionMappingRecursive () is block
235 // aligned, so it is guaranteed that no further pages were allocated
236 // by it, and so we only have to free the page we allocated here.
238 FreePages (TranslationTable
, 1);
242 ZeroMem (TranslationTable
, EFI_PAGE_SIZE
);
245 TranslationTable
= (VOID
*)(UINTN
)(*Entry
& TT_ADDRESS_MASK_BLOCK_ENTRY
);
249 // Recurse to the next level
251 Status
= UpdateRegionMappingRecursive (RegionStart
, BlockEnd
,
252 AttributeSetMask
, AttributeClearMask
, TranslationTable
,
254 if (EFI_ERROR (Status
)) {
255 if ((*Entry
& TT_TYPE_MASK
) != TT_TYPE_TABLE_ENTRY
) {
257 // We are creating a new table entry, so on failure, we can free all
258 // allocations we made recursively, given that the whole subhierarchy
259 // has not been wired into the live page tables yet. (This is not
260 // possible for existing table entries, since we cannot revert the
261 // modifications we made to the subhierarchy it represents.)
263 FreePageTablesRecursive (TranslationTable
, Level
+ 1);
268 if ((*Entry
& TT_TYPE_MASK
) != TT_TYPE_TABLE_ENTRY
) {
269 EntryValue
= (UINTN
)TranslationTable
| TT_TYPE_TABLE_ENTRY
;
270 ReplaceTableEntry (Entry
, EntryValue
, RegionStart
,
271 (*Entry
& TT_TYPE_MASK
) == TT_TYPE_BLOCK_ENTRY
);
274 EntryValue
= (*Entry
& AttributeClearMask
) | AttributeSetMask
;
275 EntryValue
|= RegionStart
;
276 EntryValue
|= (Level
== 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
277 : TT_TYPE_BLOCK_ENTRY
;
279 ReplaceTableEntry (Entry
, EntryValue
, RegionStart
, FALSE
);
287 LookupAddresstoRootTable (
288 IN UINT64 MaxAddress
,
290 OUT UINTN
*TableEntryCount
295 // Check the parameters are not NULL
296 ASSERT ((T0SZ
!= NULL
) && (TableEntryCount
!= NULL
));
298 // Look for the highest bit set in MaxAddress
299 for (TopBit
= 63; TopBit
!= 0; TopBit
--) {
300 if ((1ULL << TopBit
) & MaxAddress
) {
301 // MaxAddress top bit is found
306 ASSERT (TopBit
!= 0);
308 // Calculate T0SZ from the top bit of the MaxAddress
311 // Get the Table info from T0SZ
312 GetRootTranslationTableInfo (*T0SZ
, NULL
, TableEntryCount
);
317 UpdateRegionMapping (
318 IN UINT64 RegionStart
,
319 IN UINT64 RegionLength
,
320 IN UINT64 AttributeSetMask
,
321 IN UINT64 AttributeClearMask
324 UINTN RootTableLevel
;
327 if (((RegionStart
| RegionLength
) & EFI_PAGE_MASK
)) {
328 return EFI_INVALID_PARAMETER
;
331 T0SZ
= ArmGetTCR () & TCR_T0SZ_MASK
;
332 GetRootTranslationTableInfo (T0SZ
, &RootTableLevel
, NULL
);
334 return UpdateRegionMappingRecursive (RegionStart
, RegionStart
+ RegionLength
,
335 AttributeSetMask
, AttributeClearMask
, ArmGetTTBR0BaseAddress (),
341 FillTranslationTable (
342 IN UINT64
*RootTable
,
343 IN ARM_MEMORY_REGION_DESCRIPTOR
*MemoryRegion
346 return UpdateRegionMapping (
347 MemoryRegion
->VirtualBase
,
348 MemoryRegion
->Length
,
349 ArmMemoryAttributeToPageAttribute (MemoryRegion
->Attributes
) | TT_AF
,
356 GcdAttributeToPageAttribute (
357 IN UINT64 GcdAttributes
360 UINT64 PageAttributes
;
362 switch (GcdAttributes
& EFI_MEMORY_CACHETYPE_MASK
) {
364 PageAttributes
= TT_ATTR_INDX_DEVICE_MEMORY
;
367 PageAttributes
= TT_ATTR_INDX_MEMORY_NON_CACHEABLE
;
370 PageAttributes
= TT_ATTR_INDX_MEMORY_WRITE_THROUGH
| TT_SH_INNER_SHAREABLE
;
373 PageAttributes
= TT_ATTR_INDX_MEMORY_WRITE_BACK
| TT_SH_INNER_SHAREABLE
;
376 PageAttributes
= TT_ATTR_INDX_MASK
;
380 if ((GcdAttributes
& EFI_MEMORY_XP
) != 0 ||
381 (GcdAttributes
& EFI_MEMORY_CACHETYPE_MASK
) == EFI_MEMORY_UC
) {
382 if (ArmReadCurrentEL () == AARCH64_EL2
) {
383 PageAttributes
|= TT_XN_MASK
;
385 PageAttributes
|= TT_UXN_MASK
| TT_PXN_MASK
;
389 if ((GcdAttributes
& EFI_MEMORY_RO
) != 0) {
390 PageAttributes
|= TT_AP_RO_RO
;
393 return PageAttributes
| TT_AF
;
397 ArmSetMemoryAttributes (
398 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
403 UINT64 PageAttributes
;
404 UINT64 PageAttributeMask
;
406 PageAttributes
= GcdAttributeToPageAttribute (Attributes
);
407 PageAttributeMask
= 0;
409 if ((Attributes
& EFI_MEMORY_CACHETYPE_MASK
) == 0) {
411 // No memory type was set in Attributes, so we are going to update the
414 PageAttributes
&= TT_AP_MASK
| TT_UXN_MASK
| TT_PXN_MASK
;
415 PageAttributeMask
= ~(TT_ADDRESS_MASK_BLOCK_ENTRY
| TT_AP_MASK
|
416 TT_PXN_MASK
| TT_XN_MASK
);
419 return UpdateRegionMapping (BaseAddress
, Length
, PageAttributes
,
425 SetMemoryRegionAttribute (
426 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
428 IN UINT64 Attributes
,
429 IN UINT64 BlockEntryMask
432 return UpdateRegionMapping (BaseAddress
, Length
, Attributes
, BlockEntryMask
);
436 ArmSetMemoryRegionNoExec (
437 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
443 if (ArmReadCurrentEL () == AARCH64_EL1
) {
444 Val
= TT_PXN_MASK
| TT_UXN_MASK
;
449 return SetMemoryRegionAttribute (
453 ~TT_ADDRESS_MASK_BLOCK_ENTRY
);
457 ArmClearMemoryRegionNoExec (
458 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
464 // XN maps to UXN in the EL1&0 translation regime
465 Mask
= ~(TT_ADDRESS_MASK_BLOCK_ENTRY
| TT_PXN_MASK
| TT_XN_MASK
);
467 return SetMemoryRegionAttribute (
475 ArmSetMemoryRegionReadOnly (
476 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
480 return SetMemoryRegionAttribute (
484 ~TT_ADDRESS_MASK_BLOCK_ENTRY
);
488 ArmClearMemoryRegionReadOnly (
489 IN EFI_PHYSICAL_ADDRESS BaseAddress
,
493 return SetMemoryRegionAttribute (
497 ~(TT_ADDRESS_MASK_BLOCK_ENTRY
| TT_AP_MASK
));
503 IN ARM_MEMORY_REGION_DESCRIPTOR
*MemoryTable
,
504 OUT VOID
**TranslationTableBase OPTIONAL
,
505 OUT UINTN
*TranslationTableSize OPTIONAL
508 VOID
* TranslationTable
;
511 UINTN RootTableEntryCount
;
515 if (MemoryTable
== NULL
) {
516 ASSERT (MemoryTable
!= NULL
);
517 return EFI_INVALID_PARAMETER
;
521 // Limit the virtual address space to what we can actually use: UEFI
522 // mandates a 1:1 mapping, so no point in making the virtual address
523 // space larger than the physical address space. We also have to take
524 // into account the architectural limitations that result from UEFI's
525 // use of 4 KB pages.
527 MaxAddress
= MIN (LShiftU64 (1ULL, ArmGetPhysicalAddressBits ()) - 1,
530 // Lookup the Table Level to get the information
531 LookupAddresstoRootTable (MaxAddress
, &T0SZ
, &RootTableEntryCount
);
534 // Set TCR that allows us to retrieve T0SZ in the subsequent functions
536 // Ideally we will be running at EL2, but should support EL1 as well.
537 // UEFI should not run at EL3.
538 if (ArmReadCurrentEL () == AARCH64_EL2
) {
539 //Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2
540 TCR
= T0SZ
| (1UL << 31) | (1UL << 23) | TCR_TG0_4KB
;
542 // Set the Physical Address Size using MaxAddress
543 if (MaxAddress
< SIZE_4GB
) {
545 } else if (MaxAddress
< SIZE_64GB
) {
547 } else if (MaxAddress
< SIZE_1TB
) {
549 } else if (MaxAddress
< SIZE_4TB
) {
551 } else if (MaxAddress
< SIZE_16TB
) {
553 } else if (MaxAddress
< SIZE_256TB
) {
557 "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
559 ASSERT (0); // Bigger than 48-bit memory space are not supported
560 return EFI_UNSUPPORTED
;
562 } else if (ArmReadCurrentEL () == AARCH64_EL1
) {
563 // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
564 TCR
= T0SZ
| TCR_TG0_4KB
| TCR_TG1_4KB
| TCR_EPD1
;
566 // Set the Physical Address Size using MaxAddress
567 if (MaxAddress
< SIZE_4GB
) {
569 } else if (MaxAddress
< SIZE_64GB
) {
571 } else if (MaxAddress
< SIZE_1TB
) {
573 } else if (MaxAddress
< SIZE_4TB
) {
575 } else if (MaxAddress
< SIZE_16TB
) {
577 } else if (MaxAddress
< SIZE_256TB
) {
578 TCR
|= TCR_IPS_256TB
;
581 "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
583 ASSERT (0); // Bigger than 48-bit memory space are not supported
584 return EFI_UNSUPPORTED
;
587 ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
588 return EFI_UNSUPPORTED
;
592 // Translation table walks are always cache coherent on ARMv8-A, so cache
593 // maintenance on page tables is never needed. Since there is a risk of
594 // loss of coherency when using mismatched attributes, and given that memory
595 // is mapped cacheable except for extraordinary cases (such as non-coherent
596 // DMA), have the page table walker perform cached accesses as well, and
597 // assert below that that matches the attributes we use for CPU accesses to
600 TCR
|= TCR_SH_INNER_SHAREABLE
|
601 TCR_RGN_OUTER_WRITE_BACK_ALLOC
|
602 TCR_RGN_INNER_WRITE_BACK_ALLOC
;
607 // Allocate pages for translation table
608 TranslationTable
= AllocatePages (1);
609 if (TranslationTable
== NULL
) {
610 return EFI_OUT_OF_RESOURCES
;
613 // We set TTBR0 just after allocating the table to retrieve its location from
614 // the subsequent functions without needing to pass this value across the
615 // functions. The MMU is only enabled after the translation tables are
618 ArmSetTTBR0 (TranslationTable
);
620 if (TranslationTableBase
!= NULL
) {
621 *TranslationTableBase
= TranslationTable
;
624 if (TranslationTableSize
!= NULL
) {
625 *TranslationTableSize
= RootTableEntryCount
* sizeof (UINT64
);
629 // Make sure we are not inadvertently hitting in the caches
630 // when populating the page tables.
632 InvalidateDataCacheRange (TranslationTable
,
633 RootTableEntryCount
* sizeof (UINT64
));
634 ZeroMem (TranslationTable
, RootTableEntryCount
* sizeof (UINT64
));
636 while (MemoryTable
->Length
!= 0) {
637 Status
= FillTranslationTable (TranslationTable
, MemoryTable
);
638 if (EFI_ERROR (Status
)) {
639 goto FreeTranslationTable
;
645 // EFI_MEMORY_UC ==> MAIR_ATTR_DEVICE_MEMORY
646 // EFI_MEMORY_WC ==> MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE
647 // EFI_MEMORY_WT ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH
648 // EFI_MEMORY_WB ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
651 MAIR_ATTR (TT_ATTR_INDX_DEVICE_MEMORY
, MAIR_ATTR_DEVICE_MEMORY
) |
652 MAIR_ATTR (TT_ATTR_INDX_MEMORY_NON_CACHEABLE
, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE
) |
653 MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_THROUGH
, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH
) |
654 MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_BACK
, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
)
657 ArmDisableAlignmentCheck ();
658 ArmEnableStackAlignmentCheck ();
659 ArmEnableInstructionCache ();
660 ArmEnableDataCache ();
665 FreeTranslationTable
:
666 FreePages (TranslationTable
, 1);
672 ArmMmuBaseLibConstructor (
676 extern UINT32 ArmReplaceLiveTranslationEntrySize
;
679 // The ArmReplaceLiveTranslationEntry () helper function may be invoked
680 // with the MMU off so we have to ensure that it gets cleaned to the PoC
682 WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry
,
683 ArmReplaceLiveTranslationEntrySize
);
685 return RETURN_SUCCESS
;