/** @file
*  File managing the MMU for ARMv8 architecture
*
*  Copyright (c) 2011-2020, ARM Limited. All rights reserved.
*  Copyright (c) 2016, Linaro Limited. All rights reserved.
*  Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
*
*  SPDX-License-Identifier: BSD-2-Clause-Patent
*
**/

#include <Uefi.h>
#include <Pi/PiMultiPhase.h>
#include <Chipset/AArch64.h>
#include <Library/BaseMemoryLib.h>
#include <Library/CacheMaintenanceLib.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/ArmLib.h>
#include <Library/ArmMmuLib.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>
#include <Library/HobLib.h>

STATIC
VOID (
  EFIAPI *mReplaceLiveEntryFunc
  )(
  IN  UINT64   *Entry,
  IN  UINT64   Value,
  IN  UINT64   RegionStart,
  IN  BOOLEAN  DisableMmu
  ) = ArmReplaceLiveTranslationEntry;

STATIC
UINT64
ArmMemoryAttributeToPageAttribute (
  IN ARM_MEMORY_REGION_ATTRIBUTES  Attributes
  )
{
  switch (Attributes) {
    case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
      return TT_ATTR_INDX_MEMORY_WRITE_BACK;

    case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
      return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;

    case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
      return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;

    // Uncached and device mappings are treated as outer shareable by default.
    case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
      return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;

    default:
      ASSERT (0);
    case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
      if (ArmReadCurrentEL () == AARCH64_EL2) {
        return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
      } else {
        return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
      }
  }
}

#define MIN_T0SZ        16
#define BITS_PER_LEVEL  9
#define MAX_VA_BITS     48

STATIC
UINTN
GetRootTableEntryCount (
  IN UINTN  T0SZ
  )
{
  return TT_ENTRY_COUNT >> (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL;
}

STATIC
UINTN
GetRootTableLevel (
  IN UINTN  T0SZ
  )
{
  return (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;
}
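
//
// Worked example (for illustration only): with the 4 KB granule, each
// translation level resolves BITS_PER_LEVEL == 9 VA bits. A 40-bit VA
// space gives T0SZ == 24, so GetRootTableLevel () == (24 - 16) / 9 == 0
// and GetRootTableEntryCount () == 512 >> ((24 - 16) % 9) == 2: a level 0
// root table with two entries, each covering 512 GB. A 39-bit VA space
// (T0SZ == 25) instead starts at level 1 with a full 512-entry root table
// of 1 GB entries.
//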

STATIC
VOID
ReplaceTableEntry (
  IN UINT64   *Entry,
  IN UINT64   Value,
  IN UINT64   RegionStart,
  IN UINT64   BlockMask,
  IN BOOLEAN  IsLiveBlockMapping
  )
{
  BOOLEAN  DisableMmu;

  //
  // Replacing a live block entry with a table entry (or vice versa) requires a
  // break-before-make sequence as per the architecture. This means the mapping
  // must be made invalid and cleaned from the TLBs first, and this is a bit of
  // a hassle if the mapping in question covers the code that is actually doing
  // the mapping and the unmapping, and so we only bother with this if actually
  // necessary.
  //

  if (!IsLiveBlockMapping || !ArmMmuEnabled ()) {
    // If the mapping is not a live block mapping, or the MMU is not on yet, we
    // can simply overwrite the entry.
    *Entry = Value;
    ArmUpdateTranslationTableEntry (Entry, (VOID *)(UINTN)RegionStart);
  } else {
    // If the mapping in question does not cover the code that updates the
    // entry in memory, or the entry that we are intending to update, we can
    // use an ordinary break before make. Otherwise, we will need to
    // temporarily disable the MMU.
    DisableMmu = FALSE;
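    //
    // Note on the check below: (A ^ B) & ~BlockMask is zero iff A and B
    // agree in every bit above the block offset, i.e. iff both addresses
    // fall inside the same block. It detects whether the block being
    // replaced covers the live-entry helper code or the entry itself.
    //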
    if ((((RegionStart ^ (UINTN)mReplaceLiveEntryFunc) & ~BlockMask) == 0) ||
        (((RegionStart ^ (UINTN)Entry) & ~BlockMask) == 0))
    {
      DisableMmu = TRUE;
      DEBUG ((DEBUG_WARN, "%a: splitting block entry with MMU disabled\n", __FUNCTION__));
    }

    mReplaceLiveEntryFunc (Entry, Value, RegionStart, DisableMmu);
  }
}

STATIC
VOID
FreePageTablesRecursive (
  IN UINT64  *TranslationTable,
  IN UINTN   Level
  )
{
  UINTN  Index;

  ASSERT (Level <= 3);

  if (Level < 3) {
    for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
      if ((TranslationTable[Index] & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
        FreePageTablesRecursive (
          (VOID *)(UINTN)(TranslationTable[Index] &
                          TT_ADDRESS_MASK_BLOCK_ENTRY),
          Level + 1
          );
      }
    }
  }

  FreePages (TranslationTable, 1);
}

STATIC
BOOLEAN
IsBlockEntry (
  IN UINT64  Entry,
  IN UINTN   Level
  )
{
  if (Level == 3) {
    return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY_LEVEL3;
  }

  return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY;
}

STATIC
BOOLEAN
IsTableEntry (
  IN UINT64  Entry,
  IN UINTN   Level
  )
{
  if (Level == 3) {
    //
    // TT_TYPE_TABLE_ENTRY aliases TT_TYPE_BLOCK_ENTRY_LEVEL3,
    // so we need to take the level into account as well.
    //
    return FALSE;
  }

  return (Entry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY;
}

STATIC
EFI_STATUS
UpdateRegionMappingRecursive (
  IN UINT64   RegionStart,
  IN UINT64   RegionEnd,
  IN UINT64   AttributeSetMask,
  IN UINT64   AttributeClearMask,
  IN UINT64   *PageTable,
  IN UINTN    Level,
  IN BOOLEAN  TableIsLive
  )
{
  UINTN       BlockShift;
  UINT64      BlockMask;
  UINT64      BlockEnd;
  UINT64      *Entry;
  UINT64      EntryValue;
  VOID        *TranslationTable;
  EFI_STATUS  Status;
  BOOLEAN     NextTableIsLive;

  ASSERT (((RegionStart | RegionEnd) & EFI_PAGE_MASK) == 0);

  BlockShift = (Level + 1) * BITS_PER_LEVEL + MIN_T0SZ;
  BlockMask  = MAX_UINT64 >> BlockShift;
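
  //
  // For illustration: with the 4 KB granule, level 1 gives BlockShift ==
  // 2 * 9 + 16 == 34 and BlockMask == 0x3FFFFFFF (1 GB blocks), level 2
  // gives 0x1FFFFF (2 MB blocks), and level 3 gives 0xFFF (4 KB pages).
  // The table index computed below is simply the 9 VA bits immediately
  // above that block offset.
  //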

  DEBUG ((
    DEBUG_VERBOSE,
    "%a(%d): %llx - %llx set %lx clr %lx\n",
    __FUNCTION__,
    Level,
    RegionStart,
    RegionEnd,
    AttributeSetMask,
    AttributeClearMask
    ));

  for ( ; RegionStart < RegionEnd; RegionStart = BlockEnd) {
    BlockEnd = MIN (RegionEnd, (RegionStart | BlockMask) + 1);
    Entry    = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)];

    //
    // If RegionStart or BlockEnd is not aligned to the block size at this
    // level, we will have to create a table mapping in order to map less
    // than a block, and recurse to create the block or page entries at
    // the next level. No block mappings are allowed at all at level 0,
    // so in that case, we have to recurse unconditionally.
    //
    // One special case to take into account is any region that covers the page
    // table itself: if we'd cover such a region with block mappings, we are
    // more likely to end up in the situation later where we need to disable
    // the MMU in order to update page table entries safely, so prefer page
    // mappings in that particular case.
    //
    if ((Level == 0) || (((RegionStart | BlockEnd) & BlockMask) != 0) ||
        ((Level < 3) && (((UINT64)PageTable & ~BlockMask) == RegionStart)) ||
        IsTableEntry (*Entry, Level))
    {
      ASSERT (Level < 3);

      if (!IsTableEntry (*Entry, Level)) {
        //
        // No table entry exists yet, so we need to allocate a page table
        // for the next level.
        //
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return EFI_OUT_OF_RESOURCES;
        }

        if (!ArmMmuEnabled ()) {
          //
          // Make sure we are not inadvertently hitting in the caches
          // when populating the page tables.
          //
          InvalidateDataCacheRange (TranslationTable, EFI_PAGE_SIZE);
        }

        ZeroMem (TranslationTable, EFI_PAGE_SIZE);

        if (IsBlockEntry (*Entry, Level)) {
          //
          // We are splitting an existing block entry, so we have to populate
          // the new table with the attributes of the block entry it replaces.
          //
          Status = UpdateRegionMappingRecursive (
                     RegionStart & ~BlockMask,
                     (RegionStart | BlockMask) + 1,
                     *Entry & TT_ATTRIBUTES_MASK,
                     0,
                     TranslationTable,
                     Level + 1,
                     FALSE
                     );
          if (EFI_ERROR (Status)) {
            //
            // The range we passed to UpdateRegionMappingRecursive () is block
            // aligned, so it is guaranteed that no further pages were allocated
            // by it, and so we only have to free the page we allocated here.
            //
            FreePages (TranslationTable, 1);
            return Status;
          }
        }

        NextTableIsLive = FALSE;
      } else {
        TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
        NextTableIsLive  = TableIsLive;
      }

      //
      // Recurse to the next level
      //
      Status = UpdateRegionMappingRecursive (
                 RegionStart,
                 BlockEnd,
                 AttributeSetMask,
                 AttributeClearMask,
                 TranslationTable,
                 Level + 1,
                 NextTableIsLive
                 );
      if (EFI_ERROR (Status)) {
        if (!IsTableEntry (*Entry, Level)) {
          //
          // We are creating a new table entry, so on failure, we can free all
          // allocations we made recursively, given that the whole subhierarchy
          // has not been wired into the live page tables yet. (This is not
          // possible for existing table entries, since we cannot revert the
          // modifications we made to the subhierarchy it represents.)
          //
          FreePageTablesRecursive (TranslationTable, Level + 1);
        }

        return Status;
      }

      if (!IsTableEntry (*Entry, Level)) {
        EntryValue = (UINTN)TranslationTable | TT_TYPE_TABLE_ENTRY;
        ReplaceTableEntry (
          Entry,
          EntryValue,
          RegionStart,
          BlockMask,
          TableIsLive && IsBlockEntry (*Entry, Level)
          );
      }
    } else {
      EntryValue  = (*Entry & AttributeClearMask) | AttributeSetMask;
      EntryValue |= RegionStart;
      EntryValue |= (Level == 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
                                 : TT_TYPE_BLOCK_ENTRY;

      ReplaceTableEntry (Entry, EntryValue, RegionStart, BlockMask, FALSE);
    }
  }

  return EFI_SUCCESS;
}
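
//
// For illustration: remapping a single 4 KB page that currently sits inside
// a live 2 MB block mapping at level 2 takes the table branch above. A fresh
// level 3 table is allocated and populated with 512 page entries inheriting
// the old block attributes, the affected page entry is then updated with the
// new attributes, and only afterwards is the block entry swapped for the
// table entry, using break-before-make since the old mapping is live.
//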

STATIC
EFI_STATUS
UpdateRegionMapping (
  IN UINT64   RegionStart,
  IN UINT64   RegionLength,
  IN UINT64   AttributeSetMask,
  IN UINT64   AttributeClearMask,
  IN UINT64   *RootTable,
  IN BOOLEAN  TableIsLive
  )
{
  UINTN  T0SZ;

  if (((RegionStart | RegionLength) & EFI_PAGE_MASK) != 0) {
    return EFI_INVALID_PARAMETER;
  }

  T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;

  return UpdateRegionMappingRecursive (
           RegionStart,
           RegionStart + RegionLength,
           AttributeSetMask,
           AttributeClearMask,
           RootTable,
           GetRootTableLevel (T0SZ),
           TableIsLive
           );
}

STATIC
EFI_STATUS
FillTranslationTable (
  IN UINT64                        *RootTable,
  IN ARM_MEMORY_REGION_DESCRIPTOR  *MemoryRegion
  )
{
  return UpdateRegionMapping (
           MemoryRegion->VirtualBase,
           MemoryRegion->Length,
           ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
           0,
           RootTable,
           FALSE
           );
}

STATIC
UINT64
GcdAttributeToPageAttribute (
  IN UINT64  GcdAttributes
  )
{
  UINT64  PageAttributes;

  switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) {
    case EFI_MEMORY_UC:
      PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
      break;
    case EFI_MEMORY_WC:
      PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
      break;
    case EFI_MEMORY_WT:
      PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
      break;
    case EFI_MEMORY_WB:
      PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
      break;
    default:
      PageAttributes = TT_ATTR_INDX_MASK;
      break;
  }

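  //
  // Map the region non-executable if EFI_MEMORY_XP was requested, and always
  // for device memory (EFI_MEMORY_UC): instruction fetches from device
  // memory are never wanted, as they may be performed speculatively.
  //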
  if (((GcdAttributes & EFI_MEMORY_XP) != 0) ||
      ((GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC))
  {
    if (ArmReadCurrentEL () == AARCH64_EL2) {
      PageAttributes |= TT_XN_MASK;
    } else {
      PageAttributes |= TT_UXN_MASK | TT_PXN_MASK;
    }
  }

  if ((GcdAttributes & EFI_MEMORY_RO) != 0) {
    PageAttributes |= TT_AP_NO_RO;
  }

  return PageAttributes | TT_AF;
}

EFI_STATUS
ArmSetMemoryAttributes (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length,
  IN UINT64                Attributes
  )
{
  UINT64  PageAttributes;
  UINT64  PageAttributeMask;

  PageAttributes    = GcdAttributeToPageAttribute (Attributes);
  PageAttributeMask = 0;

  if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {
    //
    // No memory type was set in Attributes, so we are going to update the
    // permissions only.
    //
    PageAttributes   &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;
    PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |
                          TT_PXN_MASK | TT_XN_MASK);
  }

  return UpdateRegionMapping (
           BaseAddress,
           Length,
           PageAttributes,
           PageAttributeMask,
           ArmGetTTBR0BaseAddress (),
           TRUE
           );
}
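
//
// Illustrative usage (hypothetical Base and Length, not part of this
// library): marking a region non-executable while leaving its memory type
// unchanged could look like
//
//   Status = ArmSetMemoryAttributes (Base, Length, EFI_MEMORY_XP);
//
// Since no cache-type bit is set, the mask logic above strips everything
// but the permission bits from the computed attributes, so only the
// AP/UXN/PXN fields of the affected entries are rewritten.
//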

STATIC
EFI_STATUS
SetMemoryRegionAttribute (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length,
  IN UINT64                Attributes,
  IN UINT64                BlockEntryMask
  )
{
  return UpdateRegionMapping (
           BaseAddress,
           Length,
           Attributes,
           BlockEntryMask,
           ArmGetTTBR0BaseAddress (),
           TRUE
           );
}

EFI_STATUS
ArmSetMemoryRegionNoExec (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  UINT64  Val;

  if (ArmReadCurrentEL () == AARCH64_EL1) {
    Val = TT_PXN_MASK | TT_UXN_MASK;
  } else {
    Val = TT_XN_MASK;
  }

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           Val,
           ~TT_ADDRESS_MASK_BLOCK_ENTRY
           );
}

EFI_STATUS
ArmClearMemoryRegionNoExec (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  UINT64  Mask;

  // XN maps to UXN in the EL1&0 translation regime
  Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           0,
           Mask
           );
}

EFI_STATUS
ArmSetMemoryRegionReadOnly (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           TT_AP_NO_RO,
           ~TT_ADDRESS_MASK_BLOCK_ENTRY
           );
}

EFI_STATUS
ArmClearMemoryRegionReadOnly (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           TT_AP_NO_RW,
           ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK)
           );
}

EFI_STATUS
EFIAPI
ArmConfigureMmu (
  IN  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryTable,
  OUT VOID                          **TranslationTableBase OPTIONAL,
  OUT UINTN                         *TranslationTableSize OPTIONAL
  )
{
  VOID        *TranslationTable;
  UINTN       MaxAddressBits;
  UINT64      MaxAddress;
  UINTN       T0SZ;
  UINTN       RootTableEntryCount;
  UINT64      TCR;
  EFI_STATUS  Status;

  if (MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return EFI_INVALID_PARAMETER;
  }

  //
  // Limit the virtual address space to what we can actually use: UEFI
  // mandates a 1:1 mapping, so no point in making the virtual address
  // space larger than the physical address space. We also have to take
  // into account the architectural limitations that result from UEFI's
  // use of 4 KB pages.
  //
  MaxAddressBits = MIN (ArmGetPhysicalAddressBits (), MAX_VA_BITS);
  MaxAddress     = LShiftU64 (1ULL, MaxAddressBits) - 1;

  T0SZ                = 64 - MaxAddressBits;
  RootTableEntryCount = GetRootTableEntryCount (T0SZ);
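
  //
  // For example, a CPU reporting 40 physical address bits yields
  // T0SZ == 24 and a two-entry level 0 root table (see the worked
  // example following GetRootTableLevel () above).
  //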

  //
  // Set TCR that allows us to retrieve T0SZ in the subsequent functions
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    // Note: Bits 23 and 31 are reserved (RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((
        DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress
        ));
      ASSERT (0); // Address spaces wider than 48 bits are not supported
      return EFI_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((
        DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress
        ));
      ASSERT (0); // Address spaces wider than 48 bits are not supported
      return EFI_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return EFI_UNSUPPORTED;
  }

  //
  // Translation table walks are always cache coherent on ARMv8-A, so cache
  // maintenance on page tables is never needed. Since there is a risk of
  // loss of coherency when using mismatched attributes, and given that memory
  // is mapped cacheable except for extraordinary cases (such as non-coherent
  // DMA), have the page table walker perform cached accesses as well, and
  // assert below that this matches the attributes we use for CPU accesses to
  // the region.
  //
  TCR |= TCR_SH_INNER_SHAREABLE |
         TCR_RGN_OUTER_WRITE_BACK_ALLOC |
         TCR_RGN_INNER_WRITE_BACK_ALLOC;

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for translation table
  TranslationTable = AllocatePages (1);
  if (TranslationTable == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof (UINT64);
  }

  if (!ArmMmuEnabled ()) {
    //
    // Make sure we are not inadvertently hitting in the caches
    // when populating the page tables.
    //
    InvalidateDataCacheRange (
      TranslationTable,
      RootTableEntryCount * sizeof (UINT64)
      );
  }

  ZeroMem (TranslationTable, RootTableEntryCount * sizeof (UINT64));

  while (MemoryTable->Length != 0) {
    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (EFI_ERROR (Status)) {
      goto FreeTranslationTable;
    }

    MemoryTable++;
  }

  //
  // EFI_MEMORY_UC ==> MAIR_ATTR_DEVICE_MEMORY
  // EFI_MEMORY_WC ==> MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE
  // EFI_MEMORY_WT ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH
  // EFI_MEMORY_WB ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
  //
  ArmSetMAIR (
    MAIR_ATTR (TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)
    );

  ArmSetTTBR0 (TranslationTable);

  if (!ArmMmuEnabled ()) {
    ArmDisableAlignmentCheck ();
    ArmEnableStackAlignmentCheck ();
    ArmEnableInstructionCache ();
    ArmEnableDataCache ();

    ArmEnableMmu ();
  }

  return EFI_SUCCESS;

FreeTranslationTable:
  FreePages (TranslationTable, 1);
  return Status;
}
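
//
// Illustrative caller sketch (hypothetical addresses and sizes, not part of
// this library): a platform typically passes a zero-terminated array of 1:1
// region descriptors, e.g.
//
//   STATIC ARM_MEMORY_REGION_DESCRIPTOR  mVirtualMemoryTable[] = {
//     // System DRAM, mapped write-back cacheable
//     { 0x40000000, 0x40000000, SIZE_1GB,
//       ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK },
//     // SoC peripherals, mapped as device memory
//     { 0x08000000, 0x08000000, SIZE_64MB,
//       ARM_MEMORY_REGION_ATTRIBUTE_DEVICE },
//     { 0, 0, 0, 0 }  // terminator: the loop above stops at Length == 0
//   };
//
//   Status = ArmConfigureMmu (mVirtualMemoryTable, NULL, NULL);
//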

RETURN_STATUS
EFIAPI
ArmMmuBaseLibConstructor (
  VOID
  )
{
  extern UINT32  ArmReplaceLiveTranslationEntrySize;

  VOID  *Hob;

  Hob = GetFirstGuidHob (&gArmMmuReplaceLiveTranslationEntryFuncGuid);
  if (Hob != NULL) {
    mReplaceLiveEntryFunc = *(VOID **)GET_GUID_HOB_DATA (Hob);
  } else {
    //
    // The ArmReplaceLiveTranslationEntry () helper function may be invoked
    // with the MMU off, so we have to ensure that it gets cleaned to the PoC.
    //
    WriteBackDataCacheRange (
      (VOID *)(UINTN)ArmReplaceLiveTranslationEntry,
      ArmReplaceLiveTranslationEntrySize
      );
  }

  return RETURN_SUCCESS;
}