/** @file
* File managing the MMU for ARMv8 architecture
*
* Copyright (c) 2011-2020, ARM Limited. All rights reserved.
* Copyright (c) 2016, Linaro Limited. All rights reserved.
* Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
*
* SPDX-License-Identifier: BSD-2-Clause-Patent
*
**/

#include <Uefi.h>
#include <Chipset/AArch64.h>
#include <Library/BaseMemoryLib.h>
#include <Library/CacheMaintenanceLib.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/ArmLib.h>
#include <Library/ArmMmuLib.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>

STATIC
UINT64
ArmMemoryAttributeToPageAttribute (
  IN ARM_MEMORY_REGION_ATTRIBUTES  Attributes
  )
{
  switch (Attributes) {
  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
    return TT_ATTR_INDX_MEMORY_WRITE_BACK;

  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
    return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;

  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
    return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;

  // Uncached and device mappings are treated as outer shareable by default.
  case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
    return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;

  default:
    ASSERT (0);
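    //
    // In RELEASE builds, where ASSERT() is compiled out, unknown attributes
    // deliberately fall through to the device memory mapping below.
    //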
  case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
    if (ArmReadCurrentEL () == AARCH64_EL2) {
      return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
    } else {
      return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
    }
  }
}

#define MIN_T0SZ        16
#define BITS_PER_LEVEL  9
#define MAX_VA_BITS     48

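//
// With the 4 KB translation granule, each level of lookup resolves
// BITS_PER_LEVEL (9) bits of the virtual address, and T0SZ == MIN_T0SZ
// selects the maximum 48-bit input address range. The helpers below derive
// the number of entries in the root table and the starting lookup level
// from T0SZ: e.g., T0SZ == 16 yields a level 0 root with 512 entries, while
// T0SZ == 28 (a 36-bit address space) yields a level 1 root with 64 entries.
//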
STATIC
UINTN
GetRootTableEntryCount (
  IN UINTN  T0SZ
  )
{
  return TT_ENTRY_COUNT >> (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL;
}

STATIC
UINTN
GetRootTableLevel (
  IN UINTN  T0SZ
  )
{
  return (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;
}

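//
// Install a new translation table entry. If the entry being replaced is a
// valid block mapping that may be in active use ("live"), rewriting it in
// place while the MMU is on risks TLB conflicts, so in that case the update
// is delegated to the ArmReplaceLiveTranslationEntry () assembly helper,
// which is written to perform the replacement safely.
//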
STATIC
VOID
ReplaceTableEntry (
  IN UINT64   *Entry,
  IN UINT64   Value,
  IN UINT64   RegionStart,
  IN BOOLEAN  IsLiveBlockMapping
  )
{
  if (!ArmMmuEnabled () || !IsLiveBlockMapping) {
    *Entry = Value;
    ArmUpdateTranslationTableEntry (Entry, (VOID *)(UINTN)RegionStart);
  } else {
    ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart);
  }
}

STATIC
VOID
FreePageTablesRecursive (
  IN UINT64  *TranslationTable,
  IN UINTN   Level
  )
{
  UINTN  Index;

  ASSERT (Level <= 3);

  if (Level < 3) {
    for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
      if ((TranslationTable[Index] & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
        FreePageTablesRecursive ((VOID *)(UINTN)(TranslationTable[Index] &
                                                 TT_ADDRESS_MASK_BLOCK_ENTRY),
                                 Level + 1);
      }
    }
  }
  FreePages (TranslationTable, 1);
}

STATIC
BOOLEAN
IsBlockEntry (
  IN UINT64  Entry,
  IN UINTN   Level
  )
{
  if (Level == 3) {
    return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY_LEVEL3;
  }
  return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY;
}

STATIC
BOOLEAN
IsTableEntry (
  IN UINT64  Entry,
  IN UINTN   Level
  )
{
  if (Level == 3) {
    //
    // TT_TYPE_TABLE_ENTRY aliases TT_TYPE_BLOCK_ENTRY_LEVEL3
    // so we need to take the level into account as well.
    //
    return FALSE;
  }
  return (Entry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY;
}

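//
// Map or remap [RegionStart, RegionEnd) with the given attributes.
// AttributeSetMask is OR'ed into each descriptor. AttributeClearMask is
// applied to the existing descriptor as an AND mask first, so attribute
// bits that are zero in it are cleared and all other bits are preserved;
// passing 0 therefore discards all previous attributes. For example,
// ArmSetMemoryRegionReadOnly () below passes ~TT_ADDRESS_MASK_BLOCK_ENTRY,
// which keeps everything except the address bits, and ORs in TT_AP_RO_RO.
//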
STATIC
EFI_STATUS
UpdateRegionMappingRecursive (
  IN UINT64  RegionStart,
  IN UINT64  RegionEnd,
  IN UINT64  AttributeSetMask,
  IN UINT64  AttributeClearMask,
  IN UINT64  *PageTable,
  IN UINTN   Level
  )
{
  UINTN       BlockShift;
  UINT64      BlockMask;
  UINT64      BlockEnd;
  UINT64      *Entry;
  UINT64      EntryValue;
  VOID        *TranslationTable;
  EFI_STATUS  Status;

  ASSERT (((RegionStart | RegionEnd) & EFI_PAGE_MASK) == 0);

  BlockShift = (Level + 1) * BITS_PER_LEVEL + MIN_T0SZ;
  BlockMask = MAX_UINT64 >> BlockShift;
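  //
  // A block descriptor at this level covers 1 << (64 - BlockShift) bytes:
  // 1 GB at level 1, 2 MB at level 2 and 4 KB (a single page) at level 3.
  //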

  DEBUG ((DEBUG_VERBOSE, "%a(%d): %llx - %llx set %lx clr %lx\n", __FUNCTION__,
    Level, RegionStart, RegionEnd, AttributeSetMask, AttributeClearMask));

  for (; RegionStart < RegionEnd; RegionStart = BlockEnd) {
    BlockEnd = MIN (RegionEnd, (RegionStart | BlockMask) + 1);
    Entry = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)];

    //
    // If RegionStart or BlockEnd is not aligned to the block size at this
    // level, we will have to create a table mapping in order to map less
    // than a block, and recurse to create the block or page entries at
    // the next level. No block mappings are allowed at all at level 0,
    // so in that case, we have to recurse unconditionally.
    // If we are changing a table entry and the AttributeClearMask is non-zero,
    // we cannot replace it with a block entry without potentially losing
    // attribute information, so keep the table entry in that case.
    //
    if (Level == 0 || ((RegionStart | BlockEnd) & BlockMask) != 0 ||
        (IsTableEntry (*Entry, Level) && AttributeClearMask != 0)) {
      ASSERT (Level < 3);

      if (!IsTableEntry (*Entry, Level)) {
        //
        // No table entry exists yet, so we need to allocate a page table
        // for the next level.
        //
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return EFI_OUT_OF_RESOURCES;
        }

        if (!ArmMmuEnabled ()) {
          //
          // Make sure we are not inadvertently hitting in the caches
          // when populating the page tables.
          //
          InvalidateDataCacheRange (TranslationTable, EFI_PAGE_SIZE);
        }

        ZeroMem (TranslationTable, EFI_PAGE_SIZE);

        if (IsBlockEntry (*Entry, Level)) {
          //
          // We are splitting an existing block entry, so we have to populate
          // the new table with the attributes of the block entry it replaces.
          //
          Status = UpdateRegionMappingRecursive (RegionStart & ~BlockMask,
                     (RegionStart | BlockMask) + 1, *Entry & TT_ATTRIBUTES_MASK,
                     0, TranslationTable, Level + 1);
          if (EFI_ERROR (Status)) {
            //
            // The range we passed to UpdateRegionMappingRecursive () is block
            // aligned, so it is guaranteed that no further pages were allocated
            // by it, and so we only have to free the page we allocated here.
            //
            FreePages (TranslationTable, 1);
            return Status;
          }
        }
      } else {
        TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
      }

      //
      // Recurse to the next level
      //
      Status = UpdateRegionMappingRecursive (RegionStart, BlockEnd,
                 AttributeSetMask, AttributeClearMask, TranslationTable,
                 Level + 1);
      if (EFI_ERROR (Status)) {
        if (!IsTableEntry (*Entry, Level)) {
          //
          // We are creating a new table entry, so on failure, we can free all
          // allocations we made recursively, given that the whole subhierarchy
          // has not been wired into the live page tables yet. (This is not
          // possible for existing table entries, since we cannot revert the
          // modifications we made to the subhierarchy it represents.)
          //
          FreePageTablesRecursive (TranslationTable, Level + 1);
        }
        return Status;
      }

      if (!IsTableEntry (*Entry, Level)) {
        EntryValue = (UINTN)TranslationTable | TT_TYPE_TABLE_ENTRY;
        ReplaceTableEntry (Entry, EntryValue, RegionStart,
          IsBlockEntry (*Entry, Level));
      }
    } else {
      EntryValue = (*Entry & AttributeClearMask) | AttributeSetMask;
      EntryValue |= RegionStart;
      EntryValue |= (Level == 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
                                 : TT_TYPE_BLOCK_ENTRY;

      if (IsTableEntry (*Entry, Level)) {
        //
        // We are replacing a table entry with a block entry. This is only
        // possible if we are keeping none of the original attributes.
        // We can free the table entry's page table, and all the ones below
        // it, since we are dropping the only possible reference to it.
        //
        ASSERT (AttributeClearMask == 0);
        TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
        ReplaceTableEntry (Entry, EntryValue, RegionStart, TRUE);
        FreePageTablesRecursive (TranslationTable, Level + 1);
      } else {
        ReplaceTableEntry (Entry, EntryValue, RegionStart, FALSE);
      }
    }
  }
  return EFI_SUCCESS;
}

STATIC
EFI_STATUS
UpdateRegionMapping (
  IN UINT64  RegionStart,
  IN UINT64  RegionLength,
  IN UINT64  AttributeSetMask,
  IN UINT64  AttributeClearMask
  )
{
  UINTN  T0SZ;

  if (((RegionStart | RegionLength) & EFI_PAGE_MASK) != 0) {
    return EFI_INVALID_PARAMETER;
  }

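  //
  // Read back the T0SZ value that ArmConfigureMmu () programmed into the TCR,
  // so that we descend from the same root table level that was used when the
  // translation tables were first populated.
  //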
  T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;

  return UpdateRegionMappingRecursive (RegionStart, RegionStart + RegionLength,
           AttributeSetMask, AttributeClearMask, ArmGetTTBR0BaseAddress (),
           GetRootTableLevel (T0SZ));
}

STATIC
EFI_STATUS
FillTranslationTable (
  IN UINT64                        *RootTable,
  IN ARM_MEMORY_REGION_DESCRIPTOR  *MemoryRegion
  )
{
  return UpdateRegionMapping (
           MemoryRegion->VirtualBase,
           MemoryRegion->Length,
           ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
           0
           );
}

STATIC
UINT64
GcdAttributeToPageAttribute (
  IN UINT64  GcdAttributes
  )
{
  UINT64  PageAttributes;

  switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) {
  case EFI_MEMORY_UC:
    PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
    break;
  case EFI_MEMORY_WC:
    PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
    break;
  case EFI_MEMORY_WT:
    PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
    break;
  case EFI_MEMORY_WB:
    PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
    break;
  default:
    PageAttributes = TT_ATTR_INDX_MASK;
    break;
  }

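  //
  // Set the execute-never attributes for EFI_MEMORY_XP regions, and for
  // device memory (EFI_MEMORY_UC) regions, which should never contain
  // executable code.
  //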
  if ((GcdAttributes & EFI_MEMORY_XP) != 0 ||
      (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC) {
    if (ArmReadCurrentEL () == AARCH64_EL2) {
      PageAttributes |= TT_XN_MASK;
    } else {
      PageAttributes |= TT_UXN_MASK | TT_PXN_MASK;
    }
  }

  if ((GcdAttributes & EFI_MEMORY_RO) != 0) {
    PageAttributes |= TT_AP_RO_RO;
  }

  return PageAttributes | TT_AF;
}

EFI_STATUS
ArmSetMemoryAttributes (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length,
  IN UINT64                Attributes
  )
{
  UINT64  PageAttributes;
  UINT64  PageAttributeMask;

  PageAttributes = GcdAttributeToPageAttribute (Attributes);
  PageAttributeMask = 0;

  if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {
    //
    // No memory type was set in Attributes, so we are going to update the
    // permissions only.
    //
    PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;
    PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |
                          TT_PXN_MASK | TT_XN_MASK);
  }

  return UpdateRegionMapping (BaseAddress, Length, PageAttributes,
           PageAttributeMask);
}

STATIC
EFI_STATUS
SetMemoryRegionAttribute (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length,
  IN UINT64                Attributes,
  IN UINT64                BlockEntryMask
  )
{
  return UpdateRegionMapping (BaseAddress, Length, Attributes, BlockEntryMask);
}

EFI_STATUS
ArmSetMemoryRegionNoExec (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  UINT64  Val;

  if (ArmReadCurrentEL () == AARCH64_EL1) {
    Val = TT_PXN_MASK | TT_UXN_MASK;
  } else {
    Val = TT_XN_MASK;
  }

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           Val,
           ~TT_ADDRESS_MASK_BLOCK_ENTRY);
}

EFI_STATUS
ArmClearMemoryRegionNoExec (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  UINT64  Mask;

  // XN maps to UXN in the EL1&0 translation regime
  Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           0,
           Mask);
}

EFI_STATUS
ArmSetMemoryRegionReadOnly (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           TT_AP_RO_RO,
           ~TT_ADDRESS_MASK_BLOCK_ENTRY);
}

EFI_STATUS
ArmClearMemoryRegionReadOnly (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           TT_AP_RW_RW,
           ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));
}

EFI_STATUS
EFIAPI
ArmConfigureMmu (
  IN  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryTable,
  OUT VOID                          **TranslationTableBase OPTIONAL,
  OUT UINTN                         *TranslationTableSize OPTIONAL
  )
{
  VOID        *TranslationTable;
  UINTN       MaxAddressBits;
  UINT64      MaxAddress;
  UINTN       T0SZ;
  UINTN       RootTableEntryCount;
  UINT64      TCR;
  EFI_STATUS  Status;

  if (MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return EFI_INVALID_PARAMETER;
  }

  //
  // Limit the virtual address space to what we can actually use: UEFI
  // mandates a 1:1 mapping, so no point in making the virtual address
  // space larger than the physical address space. We also have to take
  // into account the architectural limitations that result from UEFI's
  // use of 4 KB pages.
  //
  MaxAddressBits = MIN (ArmGetPhysicalAddressBits (), MAX_VA_BITS);
  MaxAddress = LShiftU64 (1ULL, MaxAddressBits) - 1;

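  //
  // T0SZ is the number of leading virtual address bits that are not
  // translated: e.g., T0SZ == 16 selects a 48-bit input address space.
  //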
  T0SZ = 64 - MaxAddressBits;
  RootTableEntryCount = GetRootTableEntryCount (T0SZ);

  //
  // Set up the TCR now, so that T0SZ can be retrieved from it by the
  // subsequent functions.
  //
  // Ideally we will be running at EL2, but we should support EL1 as well.
  // UEFI should not run at EL3.
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    // Note: Bits 23 and 31 are reserved (RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress));
      ASSERT (0); // Address spaces wider than 48 bits are not supported
      return EFI_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress));
      ASSERT (0); // Address spaces wider than 48 bits are not supported
      return EFI_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return EFI_UNSUPPORTED;
  }

  //
  // Translation table walks are always cache coherent on ARMv8-A, so cache
  // maintenance on page tables is never needed. Since there is a risk of
  // loss of coherency when using mismatched attributes, and given that memory
  // is mapped cacheable except for extraordinary cases (such as non-coherent
  // DMA), have the page table walker perform cached accesses as well, and
  // assert below that this matches the attributes we use for CPU accesses to
  // the region.
  //
  TCR |= TCR_SH_INNER_SHAREABLE |
         TCR_RGN_OUTER_WRITE_BACK_ALLOC |
         TCR_RGN_INNER_WRITE_BACK_ALLOC;

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for translation table
  TranslationTable = AllocatePages (1);
  if (TranslationTable == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }
  //
  // We set TTBR0 just after allocating the table so that its location can be
  // retrieved by the subsequent functions without needing to pass this value
  // around. The MMU is only enabled after the translation tables are
  // populated.
  //
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof (UINT64);
  }

  //
  // Make sure we are not inadvertently hitting in the caches
  // when populating the page tables.
  //
  InvalidateDataCacheRange (TranslationTable,
    RootTableEntryCount * sizeof (UINT64));
  ZeroMem (TranslationTable, RootTableEntryCount * sizeof (UINT64));

  while (MemoryTable->Length != 0) {
    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (EFI_ERROR (Status)) {
      goto FreeTranslationTable;
    }
    MemoryTable++;
  }

  //
  // EFI_MEMORY_UC ==> MAIR_ATTR_DEVICE_MEMORY
  // EFI_MEMORY_WC ==> MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE
  // EFI_MEMORY_WT ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH
  // EFI_MEMORY_WB ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
  //
  ArmSetMAIR (
    MAIR_ATTR (TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)
    );

  ArmDisableAlignmentCheck ();
  ArmEnableStackAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return EFI_SUCCESS;

FreeTranslationTable:
  FreePages (TranslationTable, 1);
  return Status;
}

RETURN_STATUS
EFIAPI
ArmMmuBaseLibConstructor (
  VOID
  )
{
  extern UINT32 ArmReplaceLiveTranslationEntrySize;

  //
  // The ArmReplaceLiveTranslationEntry () helper function may be invoked
  // with the MMU off so we have to ensure that it gets cleaned to the PoC
  //
  WriteBackDataCacheRange ((VOID *)(UINTN)ArmReplaceLiveTranslationEntry,
    ArmReplaceLiveTranslationEntrySize);

  return RETURN_SUCCESS;
}