/** @file
*  File managing the MMU for ARMv8 architecture
*
*  Copyright (c) 2011-2020, ARM Limited. All rights reserved.
*  Copyright (c) 2016, Linaro Limited. All rights reserved.
*  Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
*
*  SPDX-License-Identifier: BSD-2-Clause-Patent
*
**/

#include <Uefi.h>
#include <Chipset/AArch64.h>
#include <Library/BaseMemoryLib.h>
#include <Library/CacheMaintenanceLib.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/ArmLib.h>
#include <Library/ArmMmuLib.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>

// We use this attribute index to denote an invalid block entry
#define TT_ATTR_INDX_INVALID    ((UINT32)~0)

STATIC
UINT64
ArmMemoryAttributeToPageAttribute (
  IN ARM_MEMORY_REGION_ATTRIBUTES  Attributes
  )
{
  switch (Attributes) {
  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
    return TT_ATTR_INDX_MEMORY_WRITE_BACK;

  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
    return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;

  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
    return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;

  // Uncached and device mappings are treated as outer shareable by default.
  case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
    return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;

  default:
    ASSERT (0);
    // fall through
  case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
    if (ArmReadCurrentEL () == AARCH64_EL2) {
      return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
    } else {
      return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
    }
  }
}

#define MIN_T0SZ        16
#define BITS_PER_LEVEL  9
#define MAX_VA_BITS     48

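//
// With the 4 KB translation granule, each table level resolves BITS_PER_LEVEL
// (9) bits of virtual address and a full table holds TT_ENTRY_COUNT (512)
// entries. The helpers below derive the root table geometry from T0SZ. As an
// illustrative example (not from the original source): T0SZ == 24 (40-bit VA)
// yields a level 0 root with 512 >> 8 == 2 entries, whereas T0SZ == 25
// (39-bit VA) yields a full 512-entry level 1 root.
//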
STATIC
UINTN
GetRootTableEntryCount (
  IN  UINTN  T0SZ
  )
{
  return TT_ENTRY_COUNT >> (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL;
}

STATIC
UINTN
GetRootTableLevel (
  IN  UINTN  T0SZ
  )
{
  return (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;
}

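//
// Helper that updates a single translation table entry. When the MMU is off,
// or when the entry is not a live block mapping, a plain store followed by
// TLB maintenance is sufficient. Replacing a live block mapping in place,
// however, is delegated to ArmReplaceLiveTranslationEntry (), an assembly
// helper that (in the upstream implementation) performs the break-before-make
// sequence safely with the MMU and interrupts temporarily disabled.
//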
STATIC
VOID
ReplaceTableEntry (
  IN  UINT64  *Entry,
  IN  UINT64  Value,
  IN  UINT64  RegionStart,
  IN  BOOLEAN IsLiveBlockMapping
  )
{
  if (!ArmMmuEnabled () || !IsLiveBlockMapping) {
    *Entry = Value;
    ArmUpdateTranslationTableEntry (Entry, (VOID *)(UINTN)RegionStart);
  } else {
    ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart);
  }
}

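//
// Free a translation table and, below level 3, recursively free any
// next-level tables it refers to. Each table occupies exactly one 4 KB page,
// hence the FreePages (..., 1) at the end.
//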
STATIC
VOID
FreePageTablesRecursive (
  IN  UINT64  *TranslationTable,
  IN  UINTN   Level
  )
{
  UINTN   Index;

  ASSERT (Level <= 3);

  if (Level < 3) {
    for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
      if ((TranslationTable[Index] & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
        FreePageTablesRecursive ((VOID *)(UINTN)(TranslationTable[Index] &
                                                 TT_ADDRESS_MASK_BLOCK_ENTRY),
                                 Level + 1);
      }
    }
  }
  FreePages (TranslationTable, 1);
}

STATIC
BOOLEAN
IsBlockEntry (
  IN  UINT64  Entry,
  IN  UINTN   Level
  )
{
  if (Level == 3) {
    return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY_LEVEL3;
  }
  return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY;
}

STATIC
BOOLEAN
IsTableEntry (
  IN  UINT64  Entry,
  IN  UINTN   Level
  )
{
  if (Level == 3) {
    //
    // TT_TYPE_TABLE_ENTRY aliases TT_TYPE_BLOCK_ENTRY_LEVEL3
    // so we need to take the level into account as well.
    //
    return FALSE;
  }
  return (Entry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY;
}

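//
// Note on the masks handled below: AttributeSetMask is ORed into each
// affected entry, while AttributeClearMask is applied as an AND mask, i.e.
// attribute bits that are zero in it are cleared and bits that are set are
// preserved. The function recurses into the next level whenever the region
// is not aligned to this level's block size, and also when an existing table
// entry must be preserved to avoid losing attribute information.
//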
STATIC
EFI_STATUS
UpdateRegionMappingRecursive (
  IN  UINT64  RegionStart,
  IN  UINT64  RegionEnd,
  IN  UINT64  AttributeSetMask,
  IN  UINT64  AttributeClearMask,
  IN  UINT64  *PageTable,
  IN  UINTN   Level
  )
{
  UINTN       BlockShift;
  UINT64      BlockMask;
  UINT64      BlockEnd;
  UINT64      *Entry;
  UINT64      EntryValue;
  VOID        *TranslationTable;
  EFI_STATUS  Status;

  ASSERT (((RegionStart | RegionEnd) & EFI_PAGE_MASK) == 0);

  BlockShift = (Level + 1) * BITS_PER_LEVEL + MIN_T0SZ;
  BlockMask = MAX_UINT64 >> BlockShift;
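  //
  // For illustration (4 KB granule): at level 1, BlockShift is 34 and
  // BlockMask covers 2^30 - 1, so block entries map 1 GB each; at level 2
  // they map 2 MB, and at level 3 each entry maps a single 4 KB page.
  //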

  DEBUG ((DEBUG_VERBOSE, "%a(%d): %llx - %llx set %lx clr %lx\n", __FUNCTION__,
    Level, RegionStart, RegionEnd, AttributeSetMask, AttributeClearMask));

  for (; RegionStart < RegionEnd; RegionStart = BlockEnd) {
    BlockEnd = MIN (RegionEnd, (RegionStart | BlockMask) + 1);
    Entry = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)];

    //
    // If RegionStart or BlockEnd is not aligned to the block size at this
    // level, we will have to create a table mapping in order to map less
    // than a block, and recurse to create the block or page entries at
    // the next level. No block mappings are allowed at all at level 0,
    // so in that case, we have to recurse unconditionally.
    // If we are changing a table entry and the AttributeClearMask is non-zero,
    // we cannot replace it with a block entry without potentially losing
    // attribute information, so keep the table entry in that case.
    //
    if (Level == 0 || ((RegionStart | BlockEnd) & BlockMask) != 0 ||
        (IsTableEntry (*Entry, Level) && AttributeClearMask != 0)) {
      ASSERT (Level < 3);

      if (!IsTableEntry (*Entry, Level)) {
        //
        // No table entry exists yet, so we need to allocate a page table
        // for the next level.
        //
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return EFI_OUT_OF_RESOURCES;
        }

        if (!ArmMmuEnabled ()) {
          //
          // Make sure we are not inadvertently hitting in the caches
          // when populating the page tables.
          //
          InvalidateDataCacheRange (TranslationTable, EFI_PAGE_SIZE);
        }

        ZeroMem (TranslationTable, EFI_PAGE_SIZE);

        if (IsBlockEntry (*Entry, Level)) {
          //
          // We are splitting an existing block entry, so we have to populate
          // the new table with the attributes of the block entry it replaces.
          //
          Status = UpdateRegionMappingRecursive (RegionStart & ~BlockMask,
                     (RegionStart | BlockMask) + 1, *Entry & TT_ATTRIBUTES_MASK,
                     0, TranslationTable, Level + 1);
          if (EFI_ERROR (Status)) {
            //
            // The range we passed to UpdateRegionMappingRecursive () is block
            // aligned, so it is guaranteed that no further pages were allocated
            // by it, and so we only have to free the page we allocated here.
            //
            FreePages (TranslationTable, 1);
            return Status;
          }
        }
      } else {
        TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
      }

      //
      // Recurse to the next level
      //
      Status = UpdateRegionMappingRecursive (RegionStart, BlockEnd,
                 AttributeSetMask, AttributeClearMask, TranslationTable,
                 Level + 1);
      if (EFI_ERROR (Status)) {
        if (!IsTableEntry (*Entry, Level)) {
          //
          // We are creating a new table entry, so on failure, we can free all
          // allocations we made recursively, given that the whole subhierarchy
          // has not been wired into the live page tables yet. (This is not
          // possible for existing table entries, since we cannot revert the
          // modifications we made to the subhierarchy it represents.)
          //
          FreePageTablesRecursive (TranslationTable, Level + 1);
        }
        return Status;
      }

      if (!IsTableEntry (*Entry, Level)) {
        EntryValue = (UINTN)TranslationTable | TT_TYPE_TABLE_ENTRY;
        ReplaceTableEntry (Entry, EntryValue, RegionStart,
          IsBlockEntry (*Entry, Level));
      }
    } else {
      EntryValue = (*Entry & AttributeClearMask) | AttributeSetMask;
      EntryValue |= RegionStart;
      EntryValue |= (Level == 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
                                 : TT_TYPE_BLOCK_ENTRY;

      if (IsTableEntry (*Entry, Level)) {
        //
        // We are replacing a table entry with a block entry. This is only
        // possible if we are keeping none of the original attributes.
        // We can free the table entry's page table, and all the ones below
        // it, since we are dropping the only possible reference to it.
        //
        ASSERT (AttributeClearMask == 0);
        TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
        ReplaceTableEntry (Entry, EntryValue, RegionStart, TRUE);
        FreePageTablesRecursive (TranslationTable, Level + 1);
      } else {
        ReplaceTableEntry (Entry, EntryValue, RegionStart, FALSE);
      }
    }
  }
  return EFI_SUCCESS;
}

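//
// Non-recursive front end: check that the region is page aligned, then start
// the recursion at the root table, whose level is derived from the T0SZ
// field that ArmConfigureMmu () programmed into TCR earlier.
//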
STATIC
EFI_STATUS
UpdateRegionMapping (
  IN  UINT64  RegionStart,
  IN  UINT64  RegionLength,
  IN  UINT64  AttributeSetMask,
  IN  UINT64  AttributeClearMask
  )
{
  UINTN   T0SZ;

  if (((RegionStart | RegionLength) & EFI_PAGE_MASK) != 0) {
    return EFI_INVALID_PARAMETER;
  }

  T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;

  return UpdateRegionMappingRecursive (RegionStart, RegionStart + RegionLength,
           AttributeSetMask, AttributeClearMask, ArmGetTTBR0BaseAddress (),
           GetRootTableLevel (T0SZ));
}

STATIC
EFI_STATUS
FillTranslationTable (
  IN  UINT64                        *RootTable,
  IN  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryRegion
  )
{
  return UpdateRegionMapping (
           MemoryRegion->VirtualBase,
           MemoryRegion->Length,
           ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
           0
           );
}

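//
// Translate EFI_MEMORY_xx GCD attributes into AArch64 page table attribute
// bits. Note that EFI_MEMORY_UC regions are mapped as device memory, and
// that device mappings are always made non-executable as well.
//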
STATIC
UINT64
GcdAttributeToPageAttribute (
  IN UINT64  GcdAttributes
  )
{
  UINT64  PageAttributes;

  switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) {
  case EFI_MEMORY_UC:
    PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
    break;
  case EFI_MEMORY_WC:
    PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
    break;
  case EFI_MEMORY_WT:
    PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
    break;
  case EFI_MEMORY_WB:
    PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
    break;
  default:
    PageAttributes = TT_ATTR_INDX_MASK;
    break;
  }

  //
  // Mark XP regions and device memory (EFI_MEMORY_UC) as execute-never.
  //
  if ((GcdAttributes & EFI_MEMORY_XP) != 0 ||
      (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC) {
    if (ArmReadCurrentEL () == AARCH64_EL2) {
      PageAttributes |= TT_XN_MASK;
    } else {
      PageAttributes |= TT_UXN_MASK | TT_PXN_MASK;
    }
  }

  if ((GcdAttributes & EFI_MEMORY_RO) != 0) {
    PageAttributes |= TT_AP_RO_RO;
  }

  return PageAttributes | TT_AF;
}

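//
// Usage note (illustrative example, not from the original source): a caller
// that only wants to revoke execute permission can invoke
//
//   ArmSetMemoryAttributes (BaseAddress, Length, EFI_MEMORY_XP);
//
// Since no cacheability bits are set in that case, the code below restricts
// the update to the permission fields and leaves the memory type intact.
//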
EFI_STATUS
ArmSetMemoryAttributes (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length,
  IN UINT64                Attributes
  )
{
  UINT64  PageAttributes;
  UINT64  PageAttributeMask;

  PageAttributes = GcdAttributeToPageAttribute (Attributes);
  PageAttributeMask = 0;

  if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {
    //
    // No memory type was set in Attributes, so we are going to update the
    // permissions only.
    //
    PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;
    PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |
                          TT_PXN_MASK | TT_XN_MASK);
  }

  return UpdateRegionMapping (BaseAddress, Length, PageAttributes,
           PageAttributeMask);
}

STATIC
EFI_STATUS
SetMemoryRegionAttribute (
  IN  EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN  UINT64                Length,
  IN  UINT64                Attributes,
  IN  UINT64                BlockEntryMask
  )
{
  return UpdateRegionMapping (BaseAddress, Length, Attributes, BlockEntryMask);
}

EFI_STATUS
ArmSetMemoryRegionNoExec (
  IN  EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN  UINT64                Length
  )
{
  UINT64  Val;

  if (ArmReadCurrentEL () == AARCH64_EL1) {
    Val = TT_PXN_MASK | TT_UXN_MASK;
  } else {
    Val = TT_XN_MASK;
  }

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           Val,
           ~TT_ADDRESS_MASK_BLOCK_ENTRY);
}

EFI_STATUS
ArmClearMemoryRegionNoExec (
  IN  EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN  UINT64                Length
  )
{
  UINT64  Mask;

  // XN maps to UXN in the EL1&0 translation regime
  Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           0,
           Mask);
}

EFI_STATUS
ArmSetMemoryRegionReadOnly (
  IN  EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN  UINT64                Length
  )
{
  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           TT_AP_RO_RO,
           ~TT_ADDRESS_MASK_BLOCK_ENTRY);
}

EFI_STATUS
ArmClearMemoryRegionReadOnly (
  IN  EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN  UINT64                Length
  )
{
  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           TT_AP_RW_RW,
           ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));
}

EFI_STATUS
EFIAPI
ArmConfigureMmu (
  IN  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryTable,
  OUT VOID                          **TranslationTableBase OPTIONAL,
  OUT UINTN                         *TranslationTableSize OPTIONAL
  )
{
  VOID        *TranslationTable;
  UINTN       MaxAddressBits;
  UINT64      MaxAddress;
  UINTN       T0SZ;
  UINTN       RootTableEntryCount;
  UINT64      TCR;
  EFI_STATUS  Status;

  if (MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return EFI_INVALID_PARAMETER;
  }

  //
  // Limit the virtual address space to what we can actually use: UEFI
  // mandates a 1:1 mapping, so no point in making the virtual address
  // space larger than the physical address space. We also have to take
  // into account the architectural limitations that result from UEFI's
  // use of 4 KB pages.
  //
  MaxAddressBits = MIN (ArmGetPhysicalAddressBits (), MAX_VA_BITS);
  MaxAddress = LShiftU64 (1ULL, MaxAddressBits) - 1;

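  //
  // Worked example (illustrative, not from the original source): on a CPU
  // with 40 physical address bits, T0SZ becomes 64 - 40 == 24, which yields
  // a level 0 root table with just two entries (see GetRootTableEntryCount).
  //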
  T0SZ = 64 - MaxAddressBits;
  RootTableEntryCount = GetRootTableEntryCount (T0SZ);

  //
  // Set TCR so that T0SZ can be retrieved by the subsequent functions.
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  //
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    // Note: Bits 23 and 31 are reserved (RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress));
      ASSERT (0); // Address spaces wider than 48 bits are not supported
      return EFI_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress));
      ASSERT (0); // Address spaces wider than 48 bits are not supported
      return EFI_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return EFI_UNSUPPORTED;
  }

  //
  // Translation table walks are always cache coherent on ARMv8-A, so cache
  // maintenance on page tables is never needed. Since there is a risk of
  // loss of coherency when using mismatched attributes, and given that memory
  // is mapped cacheable except for extraordinary cases (such as non-coherent
  // DMA), have the page table walker perform cached accesses as well, and
  // assert below that this matches the attributes we use for CPU accesses to
  // the region.
  //
  TCR |= TCR_SH_INNER_SHAREABLE |
         TCR_RGN_OUTER_WRITE_BACK_ALLOC |
         TCR_RGN_INNER_WRITE_BACK_ALLOC;

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for translation table
  TranslationTable = AllocatePages (1);
  if (TranslationTable == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }
  //
  // We set TTBR0 just after allocating the table so that subsequent functions
  // can retrieve its location without the value having to be passed around.
  // The MMU is only enabled after the translation tables are populated.
  //
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof (UINT64);
  }

  //
  // Make sure we are not inadvertently hitting in the caches
  // when populating the page tables.
  //
  InvalidateDataCacheRange (TranslationTable,
    RootTableEntryCount * sizeof (UINT64));
  ZeroMem (TranslationTable, RootTableEntryCount * sizeof (UINT64));

  while (MemoryTable->Length != 0) {
    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (EFI_ERROR (Status)) {
      goto FreeTranslationTable;
    }
    MemoryTable++;
  }

  //
  // EFI_MEMORY_UC ==> MAIR_ATTR_DEVICE_MEMORY
  // EFI_MEMORY_WC ==> MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE
  // EFI_MEMORY_WT ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH
  // EFI_MEMORY_WB ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
  //
  ArmSetMAIR (
    MAIR_ATTR (TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)
    );

  ArmDisableAlignmentCheck ();
  ArmEnableStackAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return EFI_SUCCESS;

FreeTranslationTable:
  FreePages (TranslationTable, 1);
  return Status;
}

RETURN_STATUS
EFIAPI
ArmMmuBaseLibConstructor (
  VOID
  )
{
  extern UINT32 ArmReplaceLiveTranslationEntrySize;

  //
  // The ArmReplaceLiveTranslationEntry () helper function may be invoked
  // with the MMU off so we have to ensure that it gets cleaned to the PoC
  //
  WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry,
    ArmReplaceLiveTranslationEntrySize);

  return RETURN_SUCCESS;
}