]> git.proxmox.com Git - mirror_edk2.git/blob - ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
b6f3ef54aa26aa72bb98e7c7843ca9389e3bbf66
[mirror_edk2.git] / ArmPkg / Library / ArmMmuLib / AArch64 / ArmMmuLibCore.c
1 /** @file
2 * File managing the MMU for ARMv8 architecture
3 *
4 * Copyright (c) 2011-2020, ARM Limited. All rights reserved.
5 * Copyright (c) 2016, Linaro Limited. All rights reserved.
6 * Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
7 *
8 * SPDX-License-Identifier: BSD-2-Clause-Patent
9 *
10 **/
11
12 #include <Uefi.h>
13 #include <Chipset/AArch64.h>
14 #include <Library/BaseMemoryLib.h>
15 #include <Library/CacheMaintenanceLib.h>
16 #include <Library/MemoryAllocationLib.h>
17 #include <Library/ArmLib.h>
18 #include <Library/ArmMmuLib.h>
19 #include <Library/BaseLib.h>
20 #include <Library/DebugLib.h>
21
22 // We use this index definition to define an invalid block entry
23 #define TT_ATTR_INDX_INVALID ((UINT32)~0)
24
25 STATIC
26 UINT64
27 ArmMemoryAttributeToPageAttribute (
28 IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
29 )
30 {
31 switch (Attributes) {
32 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
33 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
34 return TT_ATTR_INDX_MEMORY_WRITE_BACK;
35
36 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
37 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
38 return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
39
40 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
41 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
42 return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
43
44 // Uncached and device mappings are treated as outer shareable by default,
45 case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
46 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
47 return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
48
49 default:
50 ASSERT (0);
51 case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
52 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
53 if (ArmReadCurrentEL () == AARCH64_EL2)
54 return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
55 else
56 return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
57 }
58 }
59
60 #define MIN_T0SZ 16
61 #define BITS_PER_LEVEL 9
62 #define MAX_VA_BITS 48
63
64 STATIC
65 UINTN
66 GetRootTableEntryCount (
67 IN UINTN T0SZ
68 )
69 {
70 return TT_ENTRY_COUNT >> (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL;
71 }
72
73 VOID
74 GetRootTranslationTableInfo (
75 IN UINTN T0SZ,
76 OUT UINTN *TableLevel,
77 OUT UINTN *TableEntryCount
78 )
79 {
80 // Get the level of the root table
81 if (TableLevel) {
82 *TableLevel = (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;
83 }
84
85 if (TableEntryCount) {
86 *TableEntryCount = 1UL << (BITS_PER_LEVEL - (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL);
87 }
88 }
89
/**
  Install a new value into a translation table entry.

  @param[in]  Entry               Pointer to the descriptor to update.
  @param[in]  Value               The new descriptor value.
  @param[in]  RegionStart         The virtual address covered by the entry,
                                  used for TLB maintenance.
  @param[in]  IsLiveBlockMapping  TRUE if the entry being replaced is a block
                                  mapping that may be in active use.
**/
STATIC
VOID
ReplaceTableEntry (
  IN UINT64 *Entry,
  IN UINT64 Value,
  IN UINT64 RegionStart,
  IN BOOLEAN IsLiveBlockMapping
  )
{
  //
  // With the MMU off, or when the entry is not a live block mapping, a plain
  // store followed by table-entry maintenance is sufficient. Otherwise, defer
  // to the dedicated helper, which replaces the live entry safely.
  //
  if (!ArmMmuEnabled () || !IsLiveBlockMapping) {
    *Entry = Value;
    ArmUpdateTranslationTableEntry (Entry, (VOID *)(UINTN)RegionStart);
  } else {
    ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart);
  }
}
106
107 STATIC
108 VOID
109 FreePageTablesRecursive (
110 IN UINT64 *TranslationTable,
111 IN UINTN Level
112 )
113 {
114 UINTN Index;
115
116 ASSERT (Level <= 3);
117
118 if (Level < 3) {
119 for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
120 if ((TranslationTable[Index] & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
121 FreePageTablesRecursive ((VOID *)(UINTN)(TranslationTable[Index] &
122 TT_ADDRESS_MASK_BLOCK_ENTRY),
123 Level + 1);
124 }
125 }
126 }
127 FreePages (TranslationTable, 1);
128 }
129
130 STATIC
131 BOOLEAN
132 IsBlockEntry (
133 IN UINT64 Entry,
134 IN UINTN Level
135 )
136 {
137 if (Level == 3) {
138 return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY_LEVEL3;
139 }
140 return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY;
141 }
142
143 STATIC
144 BOOLEAN
145 IsTableEntry (
146 IN UINT64 Entry,
147 IN UINTN Level
148 )
149 {
150 if (Level == 3) {
151 //
152 // TT_TYPE_TABLE_ENTRY aliases TT_TYPE_BLOCK_ENTRY_LEVEL3
153 // so we need to take the level into account as well.
154 //
155 return FALSE;
156 }
157 return (Entry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY;
158 }
159
/**
  Recursively update the mapping attributes for [RegionStart, RegionEnd)
  at the given lookup level, creating, splitting or merging table entries
  as required.

  @param[in]  RegionStart         Page-aligned start of the region.
  @param[in]  RegionEnd           Page-aligned (exclusive) end of the region.
  @param[in]  AttributeSetMask    Attribute bits to OR into each descriptor.
  @param[in]  AttributeClearMask  Mask ANDed with the existing descriptor
                                  before OR-ing in AttributeSetMask: bits that
                                  are ZERO here are cleared. Pass 0 to replace
                                  the attributes entirely.
  @param[in]  PageTable           The page table covering this level.
  @param[in]  Level               The lookup level of PageTable (0..3).

  @retval EFI_SUCCESS            The region was mapped/updated.
  @retval EFI_OUT_OF_RESOURCES   A page table allocation failed.
**/
STATIC
EFI_STATUS
UpdateRegionMappingRecursive (
  IN UINT64 RegionStart,
  IN UINT64 RegionEnd,
  IN UINT64 AttributeSetMask,
  IN UINT64 AttributeClearMask,
  IN UINT64 *PageTable,
  IN UINTN Level
  )
{
  UINTN BlockShift;
  UINT64 BlockMask;
  UINT64 BlockEnd;
  UINT64 *Entry;
  UINT64 EntryValue;
  VOID *TranslationTable;
  EFI_STATUS Status;

  ASSERT (((RegionStart | RegionEnd) & EFI_PAGE_MASK) == 0);

  //
  // Number of leading VA bits consumed down to (and including) this level;
  // BlockMask is then (block size at this level) - 1.
  //
  BlockShift = (Level + 1) * BITS_PER_LEVEL + MIN_T0SZ;
  BlockMask = MAX_UINT64 >> BlockShift;

  DEBUG ((DEBUG_VERBOSE, "%a(%d): %llx - %llx set %lx clr %lx\n", __FUNCTION__,
    Level, RegionStart, RegionEnd, AttributeSetMask, AttributeClearMask));

  // Walk the region one block (at this level) at a time.
  for (; RegionStart < RegionEnd; RegionStart = BlockEnd) {
    BlockEnd = MIN (RegionEnd, (RegionStart | BlockMask) + 1);
    Entry = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)];

    //
    // If RegionStart or BlockEnd is not aligned to the block size at this
    // level, we will have to create a table mapping in order to map less
    // than a block, and recurse to create the block or page entries at
    // the next level. No block mappings are allowed at all at level 0,
    // so in that case, we have to recurse unconditionally.
    // If we are changing a table entry and the AttributeClearMask is non-zero,
    // we cannot replace it with a block entry without potentially losing
    // attribute information, so keep the table entry in that case.
    //
    if (Level == 0 || ((RegionStart | BlockEnd) & BlockMask) != 0 ||
        (IsTableEntry (*Entry, Level) && AttributeClearMask != 0)) {
      ASSERT (Level < 3);

      if (!IsTableEntry (*Entry, Level)) {
        //
        // No table entry exists yet, so we need to allocate a page table
        // for the next level.
        //
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return EFI_OUT_OF_RESOURCES;
        }

        if (!ArmMmuEnabled ()) {
          //
          // Make sure we are not inadvertently hitting in the caches
          // when populating the page tables.
          //
          InvalidateDataCacheRange (TranslationTable, EFI_PAGE_SIZE);
        }

        ZeroMem (TranslationTable, EFI_PAGE_SIZE);

        if (IsBlockEntry (*Entry, Level)) {
          //
          // We are splitting an existing block entry, so we have to populate
          // the new table with the attributes of the block entry it replaces.
          //
          Status = UpdateRegionMappingRecursive (RegionStart & ~BlockMask,
                     (RegionStart | BlockMask) + 1, *Entry & TT_ATTRIBUTES_MASK,
                     0, TranslationTable, Level + 1);
          if (EFI_ERROR (Status)) {
            //
            // The range we passed to UpdateRegionMappingRecursive () is block
            // aligned, so it is guaranteed that no further pages were allocated
            // by it, and so we only have to free the page we allocated here.
            //
            FreePages (TranslationTable, 1);
            return Status;
          }
        }
      } else {
        // A table entry already exists: descend into the existing table.
        TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
      }

      //
      // Recurse to the next level
      //
      Status = UpdateRegionMappingRecursive (RegionStart, BlockEnd,
                 AttributeSetMask, AttributeClearMask, TranslationTable,
                 Level + 1);
      if (EFI_ERROR (Status)) {
        if (!IsTableEntry (*Entry, Level)) {
          //
          // We are creating a new table entry, so on failure, we can free all
          // allocations we made recursively, given that the whole subhierarchy
          // has not been wired into the live page tables yet. (This is not
          // possible for existing table entries, since we cannot revert the
          // modifications we made to the subhierarchy it represents.)
          //
          FreePageTablesRecursive (TranslationTable, Level + 1);
        }
        return Status;
      }

      if (!IsTableEntry (*Entry, Level)) {
        //
        // Wire the newly populated subhierarchy into the current table.
        // Replacing a live block entry needs break-before-make handling.
        //
        EntryValue = (UINTN)TranslationTable | TT_TYPE_TABLE_ENTRY;
        ReplaceTableEntry (Entry, EntryValue, RegionStart,
          IsBlockEntry (*Entry, Level));
      }
    } else {
      //
      // The block is fully covered: write a block (or, at level 3, page)
      // descriptor directly. Note that AttributeClearMask is ANDed in, i.e.
      // its zero bits are the ones removed from the existing descriptor.
      //
      EntryValue = (*Entry & AttributeClearMask) | AttributeSetMask;
      EntryValue |= RegionStart;
      EntryValue |= (Level == 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
                                 : TT_TYPE_BLOCK_ENTRY;

      if (IsTableEntry (*Entry, Level)) {
        //
        // We are replacing a table entry with a block entry. This is only
        // possible if we are keeping none of the original attributes.
        // We can free the table entry's page table, and all the ones below
        // it, since we are dropping the only possible reference to it.
        //
        ASSERT (AttributeClearMask == 0);
        TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
        ReplaceTableEntry (Entry, EntryValue, RegionStart, TRUE);
        FreePageTablesRecursive (TranslationTable, Level + 1);
      } else {
        ReplaceTableEntry (Entry, EntryValue, RegionStart, FALSE);
      }
    }
  }
  return EFI_SUCCESS;
}
296
297 STATIC
298 EFI_STATUS
299 UpdateRegionMapping (
300 IN UINT64 RegionStart,
301 IN UINT64 RegionLength,
302 IN UINT64 AttributeSetMask,
303 IN UINT64 AttributeClearMask
304 )
305 {
306 UINTN RootTableLevel;
307 UINTN T0SZ;
308
309 if (((RegionStart | RegionLength) & EFI_PAGE_MASK)) {
310 return EFI_INVALID_PARAMETER;
311 }
312
313 T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
314 GetRootTranslationTableInfo (T0SZ, &RootTableLevel, NULL);
315
316 return UpdateRegionMappingRecursive (RegionStart, RegionStart + RegionLength,
317 AttributeSetMask, AttributeClearMask, ArmGetTTBR0BaseAddress (),
318 RootTableLevel);
319 }
320
/**
  Map a single memory region descriptor into the translation tables.

  @param[in]  RootTable     Unused: UpdateRegionMapping () retrieves the root
                            table from TTBR0 itself.
  @param[in]  MemoryRegion  The region (virtual base, length, attributes)
                            to map; TT_AF is set so accesses do not fault.

  @return The status returned by UpdateRegionMapping ().
**/
STATIC
EFI_STATUS
FillTranslationTable (
  IN UINT64 *RootTable,
  IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion
  )
{
  return UpdateRegionMapping (
           MemoryRegion->VirtualBase,
           MemoryRegion->Length,
           ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
           0
           );
}
335
336 STATIC
337 UINT64
338 GcdAttributeToPageAttribute (
339 IN UINT64 GcdAttributes
340 )
341 {
342 UINT64 PageAttributes;
343
344 switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) {
345 case EFI_MEMORY_UC:
346 PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
347 break;
348 case EFI_MEMORY_WC:
349 PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
350 break;
351 case EFI_MEMORY_WT:
352 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
353 break;
354 case EFI_MEMORY_WB:
355 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
356 break;
357 default:
358 PageAttributes = TT_ATTR_INDX_MASK;
359 break;
360 }
361
362 if ((GcdAttributes & EFI_MEMORY_XP) != 0 ||
363 (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC) {
364 if (ArmReadCurrentEL () == AARCH64_EL2) {
365 PageAttributes |= TT_XN_MASK;
366 } else {
367 PageAttributes |= TT_UXN_MASK | TT_PXN_MASK;
368 }
369 }
370
371 if ((GcdAttributes & EFI_MEMORY_RO) != 0) {
372 PageAttributes |= TT_AP_RO_RO;
373 }
374
375 return PageAttributes | TT_AF;
376 }
377
/**
  Set the memory attributes of a region.

  @param[in]  BaseAddress  Page-aligned base of the region.
  @param[in]  Length       Page-aligned length of the region.
  @param[in]  Attributes   EFI_MEMORY_* attributes to apply. If no cache type
                           bit is set, only the permission bits are updated
                           and the existing memory type is preserved.

  @return The status returned by UpdateRegionMapping ().
**/
EFI_STATUS
ArmSetMemoryAttributes (
  IN EFI_PHYSICAL_ADDRESS BaseAddress,
  IN UINT64 Length,
  IN UINT64 Attributes
  )
{
  UINT64 PageAttributes;
  UINT64 PageAttributeMask;

  PageAttributes = GcdAttributeToPageAttribute (Attributes);
  // Zero means: drop all existing attribute bits before applying new ones.
  PageAttributeMask = 0;

  if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {
    //
    // No memory type was set in Attributes, so we are going to update the
    // permissions only.
    //
    // Restrict the bits we set to the permission fields, and build an AND
    // mask that preserves everything in the existing descriptor except the
    // permission fields (the address bits are re-ORed in by the recursion).
    //
    PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;
    PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |
                          TT_PXN_MASK | TT_XN_MASK);
  }

  return UpdateRegionMapping (BaseAddress, Length, PageAttributes,
           PageAttributeMask);
}
404
405 STATIC
406 EFI_STATUS
407 SetMemoryRegionAttribute (
408 IN EFI_PHYSICAL_ADDRESS BaseAddress,
409 IN UINT64 Length,
410 IN UINT64 Attributes,
411 IN UINT64 BlockEntryMask
412 )
413 {
414 return UpdateRegionMapping (BaseAddress, Length, Attributes, BlockEntryMask);
415 }
416
417 EFI_STATUS
418 ArmSetMemoryRegionNoExec (
419 IN EFI_PHYSICAL_ADDRESS BaseAddress,
420 IN UINT64 Length
421 )
422 {
423 UINT64 Val;
424
425 if (ArmReadCurrentEL () == AARCH64_EL1) {
426 Val = TT_PXN_MASK | TT_UXN_MASK;
427 } else {
428 Val = TT_XN_MASK;
429 }
430
431 return SetMemoryRegionAttribute (
432 BaseAddress,
433 Length,
434 Val,
435 ~TT_ADDRESS_MASK_BLOCK_ENTRY);
436 }
437
438 EFI_STATUS
439 ArmClearMemoryRegionNoExec (
440 IN EFI_PHYSICAL_ADDRESS BaseAddress,
441 IN UINT64 Length
442 )
443 {
444 UINT64 Mask;
445
446 // XN maps to UXN in the EL1&0 translation regime
447 Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);
448
449 return SetMemoryRegionAttribute (
450 BaseAddress,
451 Length,
452 0,
453 Mask);
454 }
455
456 EFI_STATUS
457 ArmSetMemoryRegionReadOnly (
458 IN EFI_PHYSICAL_ADDRESS BaseAddress,
459 IN UINT64 Length
460 )
461 {
462 return SetMemoryRegionAttribute (
463 BaseAddress,
464 Length,
465 TT_AP_RO_RO,
466 ~TT_ADDRESS_MASK_BLOCK_ENTRY);
467 }
468
469 EFI_STATUS
470 ArmClearMemoryRegionReadOnly (
471 IN EFI_PHYSICAL_ADDRESS BaseAddress,
472 IN UINT64 Length
473 )
474 {
475 return SetMemoryRegionAttribute (
476 BaseAddress,
477 Length,
478 TT_AP_RW_RW,
479 ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));
480 }
481
/**
  Configure TCR/MAIR/TTBR0, build translation tables for every region in
  MemoryTable, and enable the MMU, caches and alignment checks.

  @param[in]   MemoryTable           Array of memory region descriptors,
                                     terminated by an entry with Length == 0.
  @param[out]  TranslationTableBase  If non-NULL, receives the root table
                                     address.
  @param[out]  TranslationTableSize  If non-NULL, receives the root table
                                     size in bytes.

  @retval EFI_SUCCESS            The MMU was configured and enabled.
  @retval EFI_INVALID_PARAMETER  MemoryTable is NULL.
  @retval EFI_UNSUPPORTED        The address space size or the current
                                 exception level is not supported.
  @retval EFI_OUT_OF_RESOURCES   A page table allocation failed.
**/
EFI_STATUS
EFIAPI
ArmConfigureMmu (
  IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable,
  OUT VOID **TranslationTableBase OPTIONAL,
  OUT UINTN *TranslationTableSize OPTIONAL
  )
{
  VOID* TranslationTable;
  UINTN MaxAddressBits;
  UINT64 MaxAddress;
  UINTN T0SZ;
  UINTN RootTableEntryCount;
  UINT64 TCR;
  EFI_STATUS Status;

  if (MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return EFI_INVALID_PARAMETER;
  }

  //
  // Limit the virtual address space to what we can actually use: UEFI
  // mandates a 1:1 mapping, so no point in making the virtual address
  // space larger than the physical address space. We also have to take
  // into account the architectural limitations that result from UEFI's
  // use of 4 KB pages.
  //
  MaxAddressBits = MIN (ArmGetPhysicalAddressBits (), MAX_VA_BITS);
  MaxAddress = LShiftU64 (1ULL, MaxAddressBits) - 1;

  T0SZ = 64 - MaxAddressBits;
  RootTableEntryCount = GetRootTableEntryCount (T0SZ);

  //
  // Set TCR that allows us to retrieve T0SZ in the subsequent functions
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    //Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return EFI_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    // (TCR_EL1 uses the IPS field instead of TCR_EL2's PS field)
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return EFI_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return EFI_UNSUPPORTED;
  }

  //
  // Translation table walks are always cache coherent on ARMv8-A, so cache
  // maintenance on page tables is never needed. Since there is a risk of
  // loss of coherency when using mismatched attributes, and given that memory
  // is mapped cacheable except for extraordinary cases (such as non-coherent
  // DMA), have the page table walker perform cached accesses as well, and
  // assert below that that matches the attributes we use for CPU accesses to
  // the region.
  //
  TCR |= TCR_SH_INNER_SHAREABLE |
         TCR_RGN_OUTER_WRITE_BACK_ALLOC |
         TCR_RGN_INNER_WRITE_BACK_ALLOC;

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for translation table
  // (one full page even when the root table needs fewer entries; the actual
  // size is reported via TranslationTableSize below)
  TranslationTable = AllocatePages (1);
  if (TranslationTable == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }
  //
  // We set TTBR0 just after allocating the table to retrieve its location from
  // the subsequent functions without needing to pass this value across the
  // functions. The MMU is only enabled after the translation tables are
  // populated.
  //
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof (UINT64);
  }

  //
  // Make sure we are not inadvertently hitting in the caches
  // when populating the page tables.
  //
  InvalidateDataCacheRange (TranslationTable,
    RootTableEntryCount * sizeof (UINT64));
  ZeroMem (TranslationTable, RootTableEntryCount * sizeof (UINT64));

  // Map every region in the table; the list is terminated by Length == 0.
  while (MemoryTable->Length != 0) {
    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (EFI_ERROR (Status)) {
      goto FreeTranslationTable;
    }
    MemoryTable++;
  }

  //
  // EFI_MEMORY_UC ==> MAIR_ATTR_DEVICE_MEMORY
  // EFI_MEMORY_WC ==> MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE
  // EFI_MEMORY_WT ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH
  // EFI_MEMORY_WB ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
  //
  ArmSetMAIR (
    MAIR_ATTR (TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)
    );

  ArmDisableAlignmentCheck ();
  ArmEnableStackAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return EFI_SUCCESS;

FreeTranslationTable:
  // NOTE(review): only the root page is freed here; sub-tables already
  // allocated for earlier regions are not reclaimed on this path.
  FreePages (TranslationTable, 1);
  return Status;
}
652
/**
  Library constructor: clean the ArmReplaceLiveTranslationEntry () helper
  to the point of coherency so it can execute with the MMU and caches off.

  @retval RETURN_SUCCESS  Always succeeds.
**/
RETURN_STATUS
EFIAPI
ArmMmuBaseLibConstructor (
  VOID
  )
{
  // Size of the helper routine, exported by the assembly implementation.
  extern UINT32 ArmReplaceLiveTranslationEntrySize;

  //
  // The ArmReplaceLiveTranslationEntry () helper function may be invoked
  // with the MMU off so we have to ensure that it gets cleaned to the PoC
  //
  WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry,
    ArmReplaceLiveTranslationEntrySize);

  return RETURN_SUCCESS;
}