]> git.proxmox.com Git - mirror_edk2.git/blob - ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
ArmPkg/ArmMmuLib AARCH64: preserve attributes when replacing a table entry
[mirror_edk2.git] / ArmPkg / Library / ArmMmuLib / AArch64 / ArmMmuLibCore.c
1 /** @file
2 * File managing the MMU for ARMv8 architecture
3 *
4 * Copyright (c) 2011-2020, ARM Limited. All rights reserved.
5 * Copyright (c) 2016, Linaro Limited. All rights reserved.
6 * Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
7 *
8 * SPDX-License-Identifier: BSD-2-Clause-Patent
9 *
10 **/
11
12 #include <Uefi.h>
13 #include <Chipset/AArch64.h>
14 #include <Library/BaseMemoryLib.h>
15 #include <Library/CacheMaintenanceLib.h>
16 #include <Library/MemoryAllocationLib.h>
17 #include <Library/ArmLib.h>
18 #include <Library/ArmMmuLib.h>
19 #include <Library/BaseLib.h>
20 #include <Library/DebugLib.h>
21
22 // We use this index definition to define an invalid block entry
23 #define TT_ATTR_INDX_INVALID ((UINT32)~0)
24
25 STATIC
26 UINT64
27 ArmMemoryAttributeToPageAttribute (
28 IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
29 )
30 {
31 switch (Attributes) {
32 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
33 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
34 return TT_ATTR_INDX_MEMORY_WRITE_BACK;
35
36 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
37 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
38 return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
39
40 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
41 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
42 return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
43
44 // Uncached and device mappings are treated as outer shareable by default,
45 case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
46 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
47 return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
48
49 default:
50 ASSERT (0);
51 case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
52 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
53 if (ArmReadCurrentEL () == AARCH64_EL2)
54 return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
55 else
56 return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
57 }
58 }
59
60 UINT64
61 PageAttributeToGcdAttribute (
62 IN UINT64 PageAttributes
63 )
64 {
65 UINT64 GcdAttributes;
66
67 switch (PageAttributes & TT_ATTR_INDX_MASK) {
68 case TT_ATTR_INDX_DEVICE_MEMORY:
69 GcdAttributes = EFI_MEMORY_UC;
70 break;
71 case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:
72 GcdAttributes = EFI_MEMORY_WC;
73 break;
74 case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:
75 GcdAttributes = EFI_MEMORY_WT;
76 break;
77 case TT_ATTR_INDX_MEMORY_WRITE_BACK:
78 GcdAttributes = EFI_MEMORY_WB;
79 break;
80 default:
81 DEBUG ((DEBUG_ERROR,
82 "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n",
83 PageAttributes));
84 ASSERT (0);
85 // The Global Coherency Domain (GCD) value is defined as a bit set.
86 // Returning 0 means no attribute has been set.
87 GcdAttributes = 0;
88 }
89
90 // Determine protection attributes
91 if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) ||
92 ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {
93 // Read only cases map to write-protect
94 GcdAttributes |= EFI_MEMORY_RO;
95 }
96
97 // Process eXecute Never attribute
98 if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0) {
99 GcdAttributes |= EFI_MEMORY_XP;
100 }
101
102 return GcdAttributes;
103 }
104
105 #define MIN_T0SZ 16
106 #define BITS_PER_LEVEL 9
107
108 VOID
109 GetRootTranslationTableInfo (
110 IN UINTN T0SZ,
111 OUT UINTN *TableLevel,
112 OUT UINTN *TableEntryCount
113 )
114 {
115 // Get the level of the root table
116 if (TableLevel) {
117 *TableLevel = (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;
118 }
119
120 if (TableEntryCount) {
121 *TableEntryCount = 1UL << (BITS_PER_LEVEL - (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL);
122 }
123 }
124
125 STATIC
126 VOID
127 ReplaceTableEntry (
128 IN UINT64 *Entry,
129 IN UINT64 Value,
130 IN UINT64 RegionStart,
131 IN BOOLEAN IsLiveBlockMapping
132 )
133 {
134 if (!ArmMmuEnabled () || !IsLiveBlockMapping) {
135 *Entry = Value;
136 ArmUpdateTranslationTableEntry (Entry, (VOID *)(UINTN)RegionStart);
137 } else {
138 ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart);
139 }
140 }
141
142 STATIC
143 VOID
144 FreePageTablesRecursive (
145 IN UINT64 *TranslationTable,
146 IN UINTN Level
147 )
148 {
149 UINTN Index;
150
151 ASSERT (Level <= 3);
152
153 if (Level < 3) {
154 for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
155 if ((TranslationTable[Index] & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
156 FreePageTablesRecursive ((VOID *)(UINTN)(TranslationTable[Index] &
157 TT_ADDRESS_MASK_BLOCK_ENTRY),
158 Level + 1);
159 }
160 }
161 }
162 FreePages (TranslationTable, 1);
163 }
164
165 STATIC
166 BOOLEAN
167 IsBlockEntry (
168 IN UINT64 Entry,
169 IN UINTN Level
170 )
171 {
172 if (Level == 3) {
173 return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY_LEVEL3;
174 }
175 return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY;
176 }
177
178 STATIC
179 BOOLEAN
180 IsTableEntry (
181 IN UINT64 Entry,
182 IN UINTN Level
183 )
184 {
185 if (Level == 3) {
186 //
187 // TT_TYPE_TABLE_ENTRY aliases TT_TYPE_BLOCK_ENTRY_LEVEL3
188 // so we need to take the level into account as well.
189 //
190 return FALSE;
191 }
192 return (Entry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY;
193 }
194
/**
  Recursively update the mapping of [RegionStart, RegionEnd) in the page
  table hierarchy rooted at PageTable, which covers translation level Level.

  Each affected descriptor is rewritten as
  (OldEntry & AttributeClearMask) | AttributeSetMask — so, despite its name,
  AttributeClearMask holds the existing attribute bits that are PRESERVED;
  callers pass the complement of the fields they want replaced, or 0 when
  creating a mapping from scratch.

  @param[in] RegionStart         Page aligned start of the region.
  @param[in] RegionEnd           Page aligned, exclusive end of the region.
  @param[in] AttributeSetMask    Descriptor bits to OR into each entry.
  @param[in] AttributeClearMask  Mask of existing descriptor bits to keep.
  @param[in] PageTable           Table covering the region at this level.
  @param[in] Level               Translation level (0..3) of PageTable.

  @retval EFI_SUCCESS            The region was mapped/updated successfully.
  @retval EFI_OUT_OF_RESOURCES   A page table allocation failed.
**/
STATIC
EFI_STATUS
UpdateRegionMappingRecursive (
  IN UINT64 RegionStart,
  IN UINT64 RegionEnd,
  IN UINT64 AttributeSetMask,
  IN UINT64 AttributeClearMask,
  IN UINT64 *PageTable,
  IN UINTN Level
  )
{
  UINTN BlockShift;
  UINT64 BlockMask;
  UINT64 BlockEnd;
  UINT64 *Entry;
  UINT64 EntryValue;
  VOID *TranslationTable;
  EFI_STATUS Status;

  ASSERT (((RegionStart | RegionEnd) & EFI_PAGE_MASK) == 0);

  // With a 4 KB granule (9 bits/level, minimum T0SZ of 16), an entry at
  // this level maps 1 << (64 - BlockShift) bytes; BlockMask covers the
  // offset bits within one such block.
  BlockShift = (Level + 1) * BITS_PER_LEVEL + MIN_T0SZ;
  BlockMask = MAX_UINT64 >> BlockShift;

  DEBUG ((DEBUG_VERBOSE, "%a(%d): %llx - %llx set %lx clr %lx\n", __FUNCTION__,
    Level, RegionStart, RegionEnd, AttributeSetMask, AttributeClearMask));

  for (; RegionStart < RegionEnd; RegionStart = BlockEnd) {
    // End of the current block, capped to the end of the region
    BlockEnd = MIN (RegionEnd, (RegionStart | BlockMask) + 1);
    // Descriptor covering RegionStart at this level
    Entry = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)];

    //
    // If RegionStart or BlockEnd is not aligned to the block size at this
    // level, we will have to create a table mapping in order to map less
    // than a block, and recurse to create the block or page entries at
    // the next level. No block mappings are allowed at all at level 0,
    // so in that case, we have to recurse unconditionally.
    // If we are changing a table entry and the AttributeClearMask is non-zero,
    // we cannot replace it with a block entry without potentially losing
    // attribute information, so keep the table entry in that case.
    //
    if (Level == 0 || ((RegionStart | BlockEnd) & BlockMask) != 0 ||
        (IsTableEntry (*Entry, Level) && AttributeClearMask != 0)) {
      ASSERT (Level < 3);

      if (!IsTableEntry (*Entry, Level)) {
        //
        // No table entry exists yet, so we need to allocate a page table
        // for the next level.
        //
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return EFI_OUT_OF_RESOURCES;
        }

        if (!ArmMmuEnabled ()) {
          //
          // Make sure we are not inadvertently hitting in the caches
          // when populating the page tables.
          //
          InvalidateDataCacheRange (TranslationTable, EFI_PAGE_SIZE);
        }

        ZeroMem (TranslationTable, EFI_PAGE_SIZE);

        if (IsBlockEntry (*Entry, Level)) {
          //
          // We are splitting an existing block entry, so we have to populate
          // the new table with the attributes of the block entry it replaces.
          //
          Status = UpdateRegionMappingRecursive (RegionStart & ~BlockMask,
                     (RegionStart | BlockMask) + 1, *Entry & TT_ATTRIBUTES_MASK,
                     0, TranslationTable, Level + 1);
          if (EFI_ERROR (Status)) {
            //
            // The range we passed to UpdateRegionMappingRecursive () is block
            // aligned, so it is guaranteed that no further pages were allocated
            // by it, and so we only have to free the page we allocated here.
            //
            FreePages (TranslationTable, 1);
            return Status;
          }
        }
      } else {
        // A table entry already exists: descend into the existing subtable
        TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
      }

      //
      // Recurse to the next level
      //
      Status = UpdateRegionMappingRecursive (RegionStart, BlockEnd,
                 AttributeSetMask, AttributeClearMask, TranslationTable,
                 Level + 1);
      if (EFI_ERROR (Status)) {
        if (!IsTableEntry (*Entry, Level)) {
          //
          // We are creating a new table entry, so on failure, we can free all
          // allocations we made recursively, given that the whole subhierarchy
          // has not been wired into the live page tables yet. (This is not
          // possible for existing table entries, since we cannot revert the
          // modifications we made to the subhierarchy it represents.)
          //
          FreePageTablesRecursive (TranslationTable, Level + 1);
        }
        return Status;
      }

      if (!IsTableEntry (*Entry, Level)) {
        // Wire the freshly populated subtable into the hierarchy; if we are
        // replacing a live block mapping, use the safe replacement path.
        EntryValue = (UINTN)TranslationTable | TT_TYPE_TABLE_ENTRY;
        ReplaceTableEntry (Entry, EntryValue, RegionStart,
          IsBlockEntry (*Entry, Level));
      }
    } else {
      // The range is block aligned at this level: write a block descriptor
      // (or, at level 3, a page descriptor) directly.
      EntryValue = (*Entry & AttributeClearMask) | AttributeSetMask;
      EntryValue |= RegionStart;
      EntryValue |= (Level == 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
                                 : TT_TYPE_BLOCK_ENTRY;

      if (IsTableEntry (*Entry, Level)) {
        //
        // We are replacing a table entry with a block entry. This is only
        // possible if we are keeping none of the original attributes.
        // We can free the table entry's page table, and all the ones below
        // it, since we are dropping the only possible reference to it.
        //
        ASSERT (AttributeClearMask == 0);
        TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
        ReplaceTableEntry (Entry, EntryValue, RegionStart, TRUE);
        FreePageTablesRecursive (TranslationTable, Level + 1);
      } else {
        ReplaceTableEntry (Entry, EntryValue, RegionStart, FALSE);
      }
    }
  }
  return EFI_SUCCESS;
}
331
332 STATIC
333 VOID
334 LookupAddresstoRootTable (
335 IN UINT64 MaxAddress,
336 OUT UINTN *T0SZ,
337 OUT UINTN *TableEntryCount
338 )
339 {
340 UINTN TopBit;
341
342 // Check the parameters are not NULL
343 ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));
344
345 // Look for the highest bit set in MaxAddress
346 for (TopBit = 63; TopBit != 0; TopBit--) {
347 if ((1ULL << TopBit) & MaxAddress) {
348 // MaxAddress top bit is found
349 TopBit = TopBit + 1;
350 break;
351 }
352 }
353 ASSERT (TopBit != 0);
354
355 // Calculate T0SZ from the top bit of the MaxAddress
356 *T0SZ = 64 - TopBit;
357
358 // Get the Table info from T0SZ
359 GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);
360 }
361
362 STATIC
363 EFI_STATUS
364 UpdateRegionMapping (
365 IN UINT64 RegionStart,
366 IN UINT64 RegionLength,
367 IN UINT64 AttributeSetMask,
368 IN UINT64 AttributeClearMask
369 )
370 {
371 UINTN RootTableLevel;
372 UINTN T0SZ;
373
374 if (((RegionStart | RegionLength) & EFI_PAGE_MASK)) {
375 return EFI_INVALID_PARAMETER;
376 }
377
378 T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
379 GetRootTranslationTableInfo (T0SZ, &RootTableLevel, NULL);
380
381 return UpdateRegionMappingRecursive (RegionStart, RegionStart + RegionLength,
382 AttributeSetMask, AttributeClearMask, ArmGetTTBR0BaseAddress (),
383 RootTableLevel);
384 }
385
386 STATIC
387 EFI_STATUS
388 FillTranslationTable (
389 IN UINT64 *RootTable,
390 IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion
391 )
392 {
393 return UpdateRegionMapping (
394 MemoryRegion->VirtualBase,
395 MemoryRegion->Length,
396 ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
397 0
398 );
399 }
400
401 STATIC
402 UINT64
403 GcdAttributeToPageAttribute (
404 IN UINT64 GcdAttributes
405 )
406 {
407 UINT64 PageAttributes;
408
409 switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) {
410 case EFI_MEMORY_UC:
411 PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
412 break;
413 case EFI_MEMORY_WC:
414 PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
415 break;
416 case EFI_MEMORY_WT:
417 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
418 break;
419 case EFI_MEMORY_WB:
420 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
421 break;
422 default:
423 PageAttributes = TT_ATTR_INDX_MASK;
424 break;
425 }
426
427 if ((GcdAttributes & EFI_MEMORY_XP) != 0 ||
428 (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC) {
429 if (ArmReadCurrentEL () == AARCH64_EL2) {
430 PageAttributes |= TT_XN_MASK;
431 } else {
432 PageAttributes |= TT_UXN_MASK | TT_PXN_MASK;
433 }
434 }
435
436 if ((GcdAttributes & EFI_MEMORY_RO) != 0) {
437 PageAttributes |= TT_AP_RO_RO;
438 }
439
440 return PageAttributes | TT_AF;
441 }
442
443 EFI_STATUS
444 ArmSetMemoryAttributes (
445 IN EFI_PHYSICAL_ADDRESS BaseAddress,
446 IN UINT64 Length,
447 IN UINT64 Attributes
448 )
449 {
450 UINT64 PageAttributes;
451 UINT64 PageAttributeMask;
452
453 PageAttributes = GcdAttributeToPageAttribute (Attributes);
454 PageAttributeMask = 0;
455
456 if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {
457 //
458 // No memory type was set in Attributes, so we are going to update the
459 // permissions only.
460 //
461 PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;
462 PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |
463 TT_PXN_MASK | TT_XN_MASK);
464 }
465
466 return UpdateRegionMapping (BaseAddress, Length, PageAttributes,
467 PageAttributeMask);
468 }
469
470 STATIC
471 EFI_STATUS
472 SetMemoryRegionAttribute (
473 IN EFI_PHYSICAL_ADDRESS BaseAddress,
474 IN UINT64 Length,
475 IN UINT64 Attributes,
476 IN UINT64 BlockEntryMask
477 )
478 {
479 return UpdateRegionMapping (BaseAddress, Length, Attributes, BlockEntryMask);
480 }
481
482 EFI_STATUS
483 ArmSetMemoryRegionNoExec (
484 IN EFI_PHYSICAL_ADDRESS BaseAddress,
485 IN UINT64 Length
486 )
487 {
488 UINT64 Val;
489
490 if (ArmReadCurrentEL () == AARCH64_EL1) {
491 Val = TT_PXN_MASK | TT_UXN_MASK;
492 } else {
493 Val = TT_XN_MASK;
494 }
495
496 return SetMemoryRegionAttribute (
497 BaseAddress,
498 Length,
499 Val,
500 ~TT_ADDRESS_MASK_BLOCK_ENTRY);
501 }
502
503 EFI_STATUS
504 ArmClearMemoryRegionNoExec (
505 IN EFI_PHYSICAL_ADDRESS BaseAddress,
506 IN UINT64 Length
507 )
508 {
509 UINT64 Mask;
510
511 // XN maps to UXN in the EL1&0 translation regime
512 Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);
513
514 return SetMemoryRegionAttribute (
515 BaseAddress,
516 Length,
517 0,
518 Mask);
519 }
520
521 EFI_STATUS
522 ArmSetMemoryRegionReadOnly (
523 IN EFI_PHYSICAL_ADDRESS BaseAddress,
524 IN UINT64 Length
525 )
526 {
527 return SetMemoryRegionAttribute (
528 BaseAddress,
529 Length,
530 TT_AP_RO_RO,
531 ~TT_ADDRESS_MASK_BLOCK_ENTRY);
532 }
533
534 EFI_STATUS
535 ArmClearMemoryRegionReadOnly (
536 IN EFI_PHYSICAL_ADDRESS BaseAddress,
537 IN UINT64 Length
538 )
539 {
540 return SetMemoryRegionAttribute (
541 BaseAddress,
542 Length,
543 TT_AP_RW_RW,
544 ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));
545 }
546
/**
  Build the translation tables described by MemoryTable, program TCR, MAIR
  and TTBR0 for the current exception level (EL1 or EL2), and enable the
  MMU, caches and stack alignment checking.

  @param[in]  MemoryTable           Array of memory region descriptors,
                                    terminated by an entry with Length == 0.
  @param[out] TranslationTableBase  If not NULL, receives the address of the
                                    root translation table.
  @param[out] TranslationTableSize  If not NULL, receives the size in bytes
                                    of the root translation table.

  @retval EFI_SUCCESS            The MMU was configured and enabled.
  @retval EFI_INVALID_PARAMETER  MemoryTable is NULL.
  @retval EFI_UNSUPPORTED        The address space size exceeds 48 bits, or
                                 the code is running at an unsupported EL.
  @retval EFI_OUT_OF_RESOURCES   A translation table allocation failed.
**/
EFI_STATUS
EFIAPI
ArmConfigureMmu (
  IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable,
  OUT VOID **TranslationTableBase OPTIONAL,
  OUT UINTN *TranslationTableSize OPTIONAL
  )
{
  VOID* TranslationTable;
  UINT64 MaxAddress;
  UINTN T0SZ;
  UINTN RootTableEntryCount;
  UINT64 TCR;
  EFI_STATUS Status;

  if (MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return EFI_INVALID_PARAMETER;
  }

  //
  // Limit the virtual address space to what we can actually use: UEFI
  // mandates a 1:1 mapping, so no point in making the virtual address
  // space larger than the physical address space. We also have to take
  // into account the architectural limitations that result from UEFI's
  // use of 4 KB pages.
  //
  MaxAddress = MIN (LShiftU64 (1ULL, ArmGetPhysicalAddressBits ()) - 1,
                    MAX_ALLOC_ADDRESS);

  // Lookup the Table Level to get the information
  LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);

  //
  // Set TCR that allows us to retrieve T0SZ in the subsequent functions
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    //Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return EFI_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return EFI_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return EFI_UNSUPPORTED;
  }

  //
  // Translation table walks are always cache coherent on ARMv8-A, so cache
  // maintenance on page tables is never needed. Since there is a risk of
  // loss of coherency when using mismatched attributes, and given that memory
  // is mapped cacheable except for extraordinary cases (such as non-coherent
  // DMA), have the page table walker perform cached accesses as well, and
  // assert below that that matches the attributes we use for CPU accesses to
  // the region.
  //
  TCR |= TCR_SH_INNER_SHAREABLE |
         TCR_RGN_OUTER_WRITE_BACK_ALLOC |
         TCR_RGN_INNER_WRITE_BACK_ALLOC;

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for translation table
  TranslationTable = AllocatePages (1);
  if (TranslationTable == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }
  //
  // We set TTBR0 just after allocating the table to retrieve its location from
  // the subsequent functions without needing to pass this value across the
  // functions. The MMU is only enabled after the translation tables are
  // populated.
  //
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof (UINT64);
  }

  //
  // Make sure we are not inadvertently hitting in the caches
  // when populating the page tables.
  //
  InvalidateDataCacheRange (TranslationTable,
    RootTableEntryCount * sizeof (UINT64));
  ZeroMem (TranslationTable, RootTableEntryCount * sizeof (UINT64));

  // Map each region in turn; the list is terminated by a zero-length entry
  while (MemoryTable->Length != 0) {
    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (EFI_ERROR (Status)) {
      goto FreeTranslationTable;
    }
    MemoryTable++;
  }

  //
  // EFI_MEMORY_UC ==> MAIR_ATTR_DEVICE_MEMORY
  // EFI_MEMORY_WC ==> MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE
  // EFI_MEMORY_WT ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH
  // EFI_MEMORY_WB ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
  //
  ArmSetMAIR (
    MAIR_ATTR (TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)
    );

  ArmDisableAlignmentCheck ();
  ArmEnableStackAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return EFI_SUCCESS;

FreeTranslationTable:
  // Only the root table is freed here; subordinate tables allocated by a
  // partially completed FillTranslationTable () call are not reclaimed.
  FreePages (TranslationTable, 1);
  return Status;
}
716
/**
  Library constructor: write back the code of the
  ArmReplaceLiveTranslationEntry () assembly helper to the point of
  coherency so it remains executable when called with the MMU disabled.

  @retval RETURN_SUCCESS  Always.
**/
RETURN_STATUS
EFIAPI
ArmMmuBaseLibConstructor (
  VOID
  )
{
  // Size of the helper routine, exported by its assembly implementation
  extern UINT32 ArmReplaceLiveTranslationEntrySize;

  //
  // The ArmReplaceLiveTranslationEntry () helper function may be invoked
  // with the MMU off so we have to ensure that it gets cleaned to the PoC
  //
  WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry,
    ArmReplaceLiveTranslationEntrySize);

  return RETURN_SUCCESS;
}