ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
/** @file
*  File managing the MMU for ARMv8 architecture
*
*  Copyright (c) 2011-2020, ARM Limited. All rights reserved.
*  Copyright (c) 2016, Linaro Limited. All rights reserved.
*  Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
*
*  SPDX-License-Identifier: BSD-2-Clause-Patent
*
**/

#include <Uefi.h>
#include <Chipset/AArch64.h>
#include <Library/BaseMemoryLib.h>
#include <Library/CacheMaintenanceLib.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/ArmLib.h>
#include <Library/ArmMmuLib.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>

STATIC
UINT64
ArmMemoryAttributeToPageAttribute (
  IN ARM_MEMORY_REGION_ATTRIBUTES  Attributes
  )
{
  switch (Attributes) {
    case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
      return TT_ATTR_INDX_MEMORY_WRITE_BACK;

    case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
      return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;

    case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
      return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;

    // Uncached and device mappings are treated as outer shareable by default.
    case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
      return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;

    default:
      ASSERT (0);
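      //
      // Deliberate fall-through: unknown attribute types are mapped as
      // device memory, which is the safest default.
      //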
    case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
      if (ArmReadCurrentEL () == AARCH64_EL2) {
        return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
      } else {
        return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
      }
  }
}

#define MIN_T0SZ        16
#define BITS_PER_LEVEL  9
#define MAX_VA_BITS     48

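//
// Translation table geometry for the 4 KB granule: each level of the walk
// resolves BITS_PER_LEVEL (9) bits of the virtual address, and T0SZ equals
// 64 minus the number of virtual address bits. For example, T0SZ == 25
// (a 39-bit VA space) yields a level 1 root table with 512 entries, while
// T0SZ == 28 (36-bit VA) yields a level 1 root table with only 64 entries.
//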
STATIC
UINTN
GetRootTableEntryCount (
  IN UINTN  T0SZ
  )
{
  return TT_ENTRY_COUNT >> (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL;
}

STATIC
UINTN
GetRootTableLevel (
  IN UINTN  T0SZ
  )
{
  return (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;
}

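//
// Write a new value into a translation table entry. If the MMU is still off,
// or if the entry does not describe a live block mapping, a plain store
// followed by TLB maintenance is sufficient. Otherwise, the update is
// delegated to ArmReplaceLiveTranslationEntry (), which carries out the
// break-before-make sequence the architecture requires when changing live
// mappings.
//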
STATIC
VOID
ReplaceTableEntry (
  IN UINT64   *Entry,
  IN UINT64   Value,
  IN UINT64   RegionStart,
  IN BOOLEAN  IsLiveBlockMapping
  )
{
  if (!ArmMmuEnabled () || !IsLiveBlockMapping) {
    *Entry = Value;
    ArmUpdateTranslationTableEntry (Entry, (VOID *)(UINTN)RegionStart);
  } else {
    ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart);
  }
}

STATIC
VOID
FreePageTablesRecursive (
  IN UINT64  *TranslationTable,
  IN UINTN   Level
  )
{
  UINTN  Index;

  ASSERT (Level <= 3);

  if (Level < 3) {
    for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
      if ((TranslationTable[Index] & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
        FreePageTablesRecursive (
          (VOID *)(UINTN)(TranslationTable[Index] &
                          TT_ADDRESS_MASK_BLOCK_ENTRY),
          Level + 1
          );
      }
    }
  }

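  //
  // Every table in the hierarchy occupies exactly one 4 KB page (it was
  // allocated with AllocatePages (1)), so freeing a single page per level
  // is sufficient.
  //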
  FreePages (TranslationTable, 1);
}

STATIC
BOOLEAN
IsBlockEntry (
  IN UINT64  Entry,
  IN UINTN   Level
  )
{
  if (Level == 3) {
    return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY_LEVEL3;
  }

  return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY;
}

STATIC
BOOLEAN
IsTableEntry (
  IN UINT64  Entry,
  IN UINTN   Level
  )
{
  if (Level == 3) {
    //
    // TT_TYPE_TABLE_ENTRY aliases TT_TYPE_BLOCK_ENTRY_LEVEL3
    // so we need to take the level into account as well.
    //
    return FALSE;
  }

  return (Entry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY;
}

STATIC
EFI_STATUS
UpdateRegionMappingRecursive (
  IN UINT64  RegionStart,
  IN UINT64  RegionEnd,
  IN UINT64  AttributeSetMask,
  IN UINT64  AttributeClearMask,
  IN UINT64  *PageTable,
  IN UINTN   Level
  )
{
  UINTN       BlockShift;
  UINT64      BlockMask;
  UINT64      BlockEnd;
  UINT64      *Entry;
  UINT64      EntryValue;
  VOID        *TranslationTable;
  EFI_STATUS  Status;

  ASSERT (((RegionStart | RegionEnd) & EFI_PAGE_MASK) == 0);

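  //
  // With the 4 KB granule, BlockShift is the number of leading VA bits that
  // select an entry at this level, and BlockMask covers the offset within
  // one block: level 0 -> 512 GB blocks, level 1 -> 1 GB, level 2 -> 2 MB,
  // level 3 -> 4 KB pages.
  //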
  BlockShift = (Level + 1) * BITS_PER_LEVEL + MIN_T0SZ;
  BlockMask  = MAX_UINT64 >> BlockShift;

  DEBUG ((
    DEBUG_VERBOSE,
    "%a(%d): %llx - %llx set %lx clr %lx\n",
    __FUNCTION__,
    Level,
    RegionStart,
    RegionEnd,
    AttributeSetMask,
    AttributeClearMask
    ));

  for ( ; RegionStart < RegionEnd; RegionStart = BlockEnd) {
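    //
    // Process one block per iteration: BlockEnd is the next block boundary
    // above RegionStart (or RegionEnd, whichever comes first), and Entry
    // points to the descriptor this level's table uses for RegionStart,
    // e.g., VA bits [38:30] at level 1.
    //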
    BlockEnd = MIN (RegionEnd, (RegionStart | BlockMask) + 1);
    Entry    = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)];

    //
    // If RegionStart or BlockEnd is not aligned to the block size at this
    // level, we will have to create a table mapping in order to map less
    // than a block, and recurse to create the block or page entries at
    // the next level. No block mappings are allowed at all at level 0,
    // so in that case, we have to recurse unconditionally.
    // If we are changing a table entry and the AttributeClearMask is non-zero,
    // we cannot replace it with a block entry without potentially losing
    // attribute information, so keep the table entry in that case.
    //
    if ((Level == 0) || (((RegionStart | BlockEnd) & BlockMask) != 0) ||
        (IsTableEntry (*Entry, Level) && (AttributeClearMask != 0)))
    {
      ASSERT (Level < 3);

      if (!IsTableEntry (*Entry, Level)) {
        //
        // No table entry exists yet, so we need to allocate a page table
        // for the next level.
        //
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return EFI_OUT_OF_RESOURCES;
        }

        if (!ArmMmuEnabled ()) {
          //
          // Make sure we are not inadvertently hitting in the caches
          // when populating the page tables.
          //
          InvalidateDataCacheRange (TranslationTable, EFI_PAGE_SIZE);
        }

        ZeroMem (TranslationTable, EFI_PAGE_SIZE);

        if (IsBlockEntry (*Entry, Level)) {
          //
          // We are splitting an existing block entry, so we have to populate
          // the new table with the attributes of the block entry it replaces.
          //
          Status = UpdateRegionMappingRecursive (
                     RegionStart & ~BlockMask,
                     (RegionStart | BlockMask) + 1,
                     *Entry & TT_ATTRIBUTES_MASK,
                     0,
                     TranslationTable,
                     Level + 1
                     );
          if (EFI_ERROR (Status)) {
            //
            // The range we passed to UpdateRegionMappingRecursive () is block
            // aligned, so it is guaranteed that no further pages were allocated
            // by it, and so we only have to free the page we allocated here.
            //
            FreePages (TranslationTable, 1);
            return Status;
          }
        }
      } else {
        TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
      }

      //
      // Recurse to the next level
      //
      Status = UpdateRegionMappingRecursive (
                 RegionStart,
                 BlockEnd,
                 AttributeSetMask,
                 AttributeClearMask,
                 TranslationTable,
                 Level + 1
                 );
      if (EFI_ERROR (Status)) {
        if (!IsTableEntry (*Entry, Level)) {
          //
          // We are creating a new table entry, so on failure, we can free all
          // allocations we made recursively, given that the whole subhierarchy
          // has not been wired into the live page tables yet. (This is not
          // possible for existing table entries, since we cannot revert the
          // modifications we made to the subhierarchy it represents.)
          //
          FreePageTablesRecursive (TranslationTable, Level + 1);
        }

        return Status;
      }

      if (!IsTableEntry (*Entry, Level)) {
        EntryValue = (UINTN)TranslationTable | TT_TYPE_TABLE_ENTRY;
        ReplaceTableEntry (
          Entry,
          EntryValue,
          RegionStart,
          IsBlockEntry (*Entry, Level)
          );
      }
    } else {
      EntryValue  = (*Entry & AttributeClearMask) | AttributeSetMask;
      EntryValue |= RegionStart;
      EntryValue |= (Level == 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
                                 : TT_TYPE_BLOCK_ENTRY;

      if (IsTableEntry (*Entry, Level)) {
        //
        // We are replacing a table entry with a block entry. This is only
        // possible if we are keeping none of the original attributes.
        // We can free the table entry's page table, and all the ones below
        // it, since we are dropping the only possible reference to it.
        //
        ASSERT (AttributeClearMask == 0);
        TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
        ReplaceTableEntry (Entry, EntryValue, RegionStart, TRUE);
        FreePageTablesRecursive (TranslationTable, Level + 1);
      } else {
        ReplaceTableEntry (Entry, EntryValue, RegionStart, FALSE);
      }
    }
  }

  return EFI_SUCCESS;
}

STATIC
EFI_STATUS
UpdateRegionMapping (
  IN UINT64  RegionStart,
  IN UINT64  RegionLength,
  IN UINT64  AttributeSetMask,
  IN UINT64  AttributeClearMask
  )
{
  UINTN  T0SZ;

  if (((RegionStart | RegionLength) & EFI_PAGE_MASK) != 0) {
    return EFI_INVALID_PARAMETER;
  }

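  //
  // Derive the active VA space size from the live TCR value, so the walk
  // below starts at the correct root table level.
  //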
  T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;

  return UpdateRegionMappingRecursive (
           RegionStart,
           RegionStart + RegionLength,
           AttributeSetMask,
           AttributeClearMask,
           ArmGetTTBR0BaseAddress (),
           GetRootTableLevel (T0SZ)
           );
}

STATIC
EFI_STATUS
FillTranslationTable (
  IN UINT64                        *RootTable,
  IN ARM_MEMORY_REGION_DESCRIPTOR  *MemoryRegion
  )
{
  return UpdateRegionMapping (
           MemoryRegion->VirtualBase,
           MemoryRegion->Length,
           ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
           0
           );
}

STATIC
UINT64
GcdAttributeToPageAttribute (
  IN UINT64  GcdAttributes
  )
{
  UINT64  PageAttributes;

  switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) {
    case EFI_MEMORY_UC:
      PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
      break;
    case EFI_MEMORY_WC:
      PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
      break;
    case EFI_MEMORY_WT:
      PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
      break;
    case EFI_MEMORY_WB:
      PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
      break;
    default:
      PageAttributes = TT_ATTR_INDX_MASK;
      break;
  }

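  //
  // Mark the region non-executable when requested, and also when it is
  // mapped as device memory, where speculative instruction fetches could
  // have side effects.
  //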
  if (((GcdAttributes & EFI_MEMORY_XP) != 0) ||
      ((GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC))
  {
    if (ArmReadCurrentEL () == AARCH64_EL2) {
      PageAttributes |= TT_XN_MASK;
    } else {
      PageAttributes |= TT_UXN_MASK | TT_PXN_MASK;
    }
  }

  if ((GcdAttributes & EFI_MEMORY_RO) != 0) {
    PageAttributes |= TT_AP_NO_RO;
  }

  return PageAttributes | TT_AF;
}

EFI_STATUS
ArmSetMemoryAttributes (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length,
  IN UINT64                Attributes
  )
{
  UINT64  PageAttributes;
  UINT64  PageAttributeMask;

  PageAttributes    = GcdAttributeToPageAttribute (Attributes);
  PageAttributeMask = 0;

  if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {
    //
    // No memory type was set in Attributes, so we are going to update the
    // permissions only.
    //
    PageAttributes   &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;
    PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |
                          TT_PXN_MASK | TT_XN_MASK);
  }
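
  //
  // Note that UpdateRegionMappingRecursive () applies the 'clear' mask using
  // a bitwise AND, so the bits set in PageAttributeMask are the ones that
  // are preserved from the existing entry.
  //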

  return UpdateRegionMapping (
           BaseAddress,
           Length,
           PageAttributes,
           PageAttributeMask
           );
}
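
//
// As an illustration (hypothetical caller), write access to a loaded image's
// code pages could be revoked without disturbing their cacheability
// attributes:
//
//   Status = ArmSetMemoryAttributes (ImageBase, ImageCodeSize, EFI_MEMORY_RO);
//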

STATIC
EFI_STATUS
SetMemoryRegionAttribute (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length,
  IN UINT64                Attributes,
  IN UINT64                BlockEntryMask
  )
{
  return UpdateRegionMapping (BaseAddress, Length, Attributes, BlockEntryMask);
}

EFI_STATUS
ArmSetMemoryRegionNoExec (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  UINT64  Val;

  if (ArmReadCurrentEL () == AARCH64_EL1) {
    Val = TT_PXN_MASK | TT_UXN_MASK;
  } else {
    Val = TT_XN_MASK;
  }

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           Val,
           ~TT_ADDRESS_MASK_BLOCK_ENTRY
           );
}
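
//
// As an illustration (hypothetical caller), the DXE stack could be marked
// non-executable like this:
//
//   Status = ArmSetMemoryRegionNoExec (StackBase, StackSize);
//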

EFI_STATUS
ArmClearMemoryRegionNoExec (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  UINT64  Mask;

  // XN maps to UXN in the EL1&0 translation regime
  Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           0,
           Mask
           );
}

EFI_STATUS
ArmSetMemoryRegionReadOnly (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           TT_AP_NO_RO,
           ~TT_ADDRESS_MASK_BLOCK_ENTRY
           );
}

EFI_STATUS
ArmClearMemoryRegionReadOnly (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           TT_AP_NO_RW,
           ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK)
           );
}

EFI_STATUS
EFIAPI
ArmConfigureMmu (
  IN  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryTable,
  OUT VOID                          **TranslationTableBase OPTIONAL,
  OUT UINTN                         *TranslationTableSize OPTIONAL
  )
{
  VOID        *TranslationTable;
  UINTN       MaxAddressBits;
  UINT64      MaxAddress;
  UINTN       T0SZ;
  UINTN       RootTableEntryCount;
  UINT64      TCR;
  EFI_STATUS  Status;

  if (MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return EFI_INVALID_PARAMETER;
  }

  //
  // Limit the virtual address space to what we can actually use: UEFI
  // mandates a 1:1 mapping, so no point in making the virtual address
  // space larger than the physical address space. We also have to take
  // into account the architectural limitations that result from UEFI's
  // use of 4 KB pages.
  //
  MaxAddressBits = MIN (ArmGetPhysicalAddressBits (), MAX_VA_BITS);
  MaxAddress     = LShiftU64 (1ULL, MaxAddressBits) - 1;

  T0SZ                = 64 - MaxAddressBits;
  RootTableEntryCount = GetRootTableEntryCount (T0SZ);
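  //
  // For example, a platform with 40 physical address bits gets T0SZ == 24,
  // which translates to a level 0 root table with just two entries, each
  // covering 512 GB.
  //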

  //
  // Set up a TCR value that allows T0SZ to be retrieved by the functions
  // called later.
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    // Note: Bits 23 and 31 are reserved (RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((
        DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress
        ));
      ASSERT (0); // Address spaces wider than 48 bits are not supported
      return EFI_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((
        DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress
        ));
      ASSERT (0); // Address spaces wider than 48 bits are not supported
      return EFI_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return EFI_UNSUPPORTED;
  }

  //
  // Translation table walks are always cache coherent on ARMv8-A, so cache
  // maintenance on page tables is never needed. Since there is a risk of
  // loss of coherency when using mismatched attributes, and given that memory
  // is mapped cacheable except for extraordinary cases (such as non-coherent
  // DMA), have the page table walker perform cached accesses as well, and
  // assert below that this matches the attributes we use for CPU accesses to
  // the region.
  //
  TCR |= TCR_SH_INNER_SHAREABLE |
         TCR_RGN_OUTER_WRITE_BACK_ALLOC |
         TCR_RGN_INNER_WRITE_BACK_ALLOC;

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for translation table
  TranslationTable = AllocatePages (1);
  if (TranslationTable == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }

  //
  // We set TTBR0 just after allocating the table so that its location can be
  // retrieved by the subsequent functions without having to pass the value
  // around explicitly. The MMU is only enabled after the translation tables
  // are populated.
  //
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof (UINT64);
  }

  //
  // Make sure we are not inadvertently hitting in the caches
  // when populating the page tables.
  //
  InvalidateDataCacheRange (
    TranslationTable,
    RootTableEntryCount * sizeof (UINT64)
    );
  ZeroMem (TranslationTable, RootTableEntryCount * sizeof (UINT64));

  while (MemoryTable->Length != 0) {
    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (EFI_ERROR (Status)) {
      goto FreeTranslationTable;
    }

    MemoryTable++;
  }

  //
  // EFI_MEMORY_UC ==> MAIR_ATTR_DEVICE_MEMORY
  // EFI_MEMORY_WC ==> MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE
  // EFI_MEMORY_WT ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH
  // EFI_MEMORY_WB ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
  //
  ArmSetMAIR (
    MAIR_ATTR (TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)
    );

  ArmDisableAlignmentCheck ();
  ArmEnableStackAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return EFI_SUCCESS;

FreeTranslationTable:
  FreePages (TranslationTable, 1);
  return Status;
}

RETURN_STATUS
EFIAPI
ArmMmuBaseLibConstructor (
  VOID
  )
{
  extern UINT32  ArmReplaceLiveTranslationEntrySize;

  //
  // The ArmReplaceLiveTranslationEntry () helper function may be invoked
  // with the MMU off, so we have to ensure that it gets cleaned to the PoC.
  //
  WriteBackDataCacheRange (
    (VOID *)(UINTN)ArmReplaceLiveTranslationEntry,
    ArmReplaceLiveTranslationEntrySize
    );

  return RETURN_SUCCESS;
}