//
// Mirror of ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c from the EDK2
// repository (commit: "ArmPkg/ArmMmuLib AARCH64: rewrite page table code").
//
1 /** @file
2 * File managing the MMU for ARMv8 architecture
3 *
4 * Copyright (c) 2011-2020, ARM Limited. All rights reserved.
5 * Copyright (c) 2016, Linaro Limited. All rights reserved.
6 * Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
7 *
8 * SPDX-License-Identifier: BSD-2-Clause-Patent
9 *
10 **/
11
12 #include <Uefi.h>
13 #include <Chipset/AArch64.h>
14 #include <Library/BaseMemoryLib.h>
15 #include <Library/CacheMaintenanceLib.h>
16 #include <Library/MemoryAllocationLib.h>
17 #include <Library/ArmLib.h>
18 #include <Library/ArmMmuLib.h>
19 #include <Library/BaseLib.h>
20 #include <Library/DebugLib.h>
21
22 // We use this index definition to define an invalid block entry
23 #define TT_ATTR_INDX_INVALID ((UINT32)~0)
24
25 STATIC
26 UINT64
27 ArmMemoryAttributeToPageAttribute (
28 IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
29 )
30 {
31 switch (Attributes) {
32 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
33 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
34 return TT_ATTR_INDX_MEMORY_WRITE_BACK;
35
36 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
37 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
38 return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
39
40 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
41 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
42 return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
43
44 // Uncached and device mappings are treated as outer shareable by default,
45 case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
46 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
47 return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
48
49 default:
50 ASSERT(0);
51 case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
52 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
53 if (ArmReadCurrentEL () == AARCH64_EL2)
54 return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
55 else
56 return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
57 }
58 }
59
60 UINT64
61 PageAttributeToGcdAttribute (
62 IN UINT64 PageAttributes
63 )
64 {
65 UINT64 GcdAttributes;
66
67 switch (PageAttributes & TT_ATTR_INDX_MASK) {
68 case TT_ATTR_INDX_DEVICE_MEMORY:
69 GcdAttributes = EFI_MEMORY_UC;
70 break;
71 case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:
72 GcdAttributes = EFI_MEMORY_WC;
73 break;
74 case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:
75 GcdAttributes = EFI_MEMORY_WT;
76 break;
77 case TT_ATTR_INDX_MEMORY_WRITE_BACK:
78 GcdAttributes = EFI_MEMORY_WB;
79 break;
80 default:
81 DEBUG ((EFI_D_ERROR, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes));
82 ASSERT (0);
83 // The Global Coherency Domain (GCD) value is defined as a bit set.
84 // Returning 0 means no attribute has been set.
85 GcdAttributes = 0;
86 }
87
88 // Determine protection attributes
89 if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) || ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {
90 // Read only cases map to write-protect
91 GcdAttributes |= EFI_MEMORY_RO;
92 }
93
94 // Process eXecute Never attribute
95 if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0 ) {
96 GcdAttributes |= EFI_MEMORY_XP;
97 }
98
99 return GcdAttributes;
100 }
101
102 #define MIN_T0SZ 16
103 #define BITS_PER_LEVEL 9
104
105 VOID
106 GetRootTranslationTableInfo (
107 IN UINTN T0SZ,
108 OUT UINTN *TableLevel,
109 OUT UINTN *TableEntryCount
110 )
111 {
112 // Get the level of the root table
113 if (TableLevel) {
114 *TableLevel = (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;
115 }
116
117 if (TableEntryCount) {
118 *TableEntryCount = 1UL << (BITS_PER_LEVEL - (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL);
119 }
120 }
121
122 STATIC
123 VOID
124 ReplaceTableEntry (
125 IN UINT64 *Entry,
126 IN UINT64 Value,
127 IN UINT64 RegionStart,
128 IN BOOLEAN IsLiveBlockMapping
129 )
130 {
131 if (!ArmMmuEnabled () || !IsLiveBlockMapping) {
132 *Entry = Value;
133 ArmUpdateTranslationTableEntry (Entry, (VOID *)(UINTN)RegionStart);
134 } else {
135 ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart);
136 }
137 }
138
139 STATIC
140 VOID
141 FreePageTablesRecursive (
142 IN UINT64 *TranslationTable
143 )
144 {
145 UINTN Index;
146
147 for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
148 if ((TranslationTable[Index] & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
149 FreePageTablesRecursive ((VOID *)(UINTN)(TranslationTable[Index] &
150 TT_ADDRESS_MASK_BLOCK_ENTRY));
151 }
152 }
153 FreePages (TranslationTable, 1);
154 }
155
/**
  Set and/or clear translation table entry attributes for a VA region,
  recursing into (and creating or splitting) next-level tables as needed.

  Note that AttributeClearMask is applied as an AND mask against the
  existing entry: callers pass the *complement* of the bits they want
  cleared, and passing 0 replaces the entry contents entirely (see
  FillTranslationTable () vs ArmSetMemoryAttributes ()).

  @param[in] RegionStart         Start VA of the region (page aligned).
  @param[in] RegionEnd           End VA of the region, exclusive (page aligned).
  @param[in] AttributeSetMask    Attribute bits to OR into each entry.
  @param[in] AttributeClearMask  Mask ANDed with the existing entry before
                                 AttributeSetMask is applied.
  @param[in] PageTable           The page table at the current level.
  @param[in] Level               The current table level (0..3).

  @retval EFI_SUCCESS           The region was mapped/updated.
  @retval EFI_OUT_OF_RESOURCES  A next-level table could not be allocated.
**/
STATIC
EFI_STATUS
UpdateRegionMappingRecursive (
  IN UINT64 RegionStart,
  IN UINT64 RegionEnd,
  IN UINT64 AttributeSetMask,
  IN UINT64 AttributeClearMask,
  IN UINT64 *PageTable,
  IN UINTN Level
  )
{
  UINTN BlockShift;
  UINT64 BlockMask;
  UINT64 BlockEnd;
  UINT64 *Entry;
  UINT64 EntryValue;
  VOID *TranslationTable;
  EFI_STATUS Status;

  ASSERT (((RegionStart | RegionEnd) & EFI_PAGE_MASK) == 0);

  // Number of leading VA bits resolved at this level; BlockMask covers the
  // offset within one block at this level.
  BlockShift = (Level + 1) * BITS_PER_LEVEL + MIN_T0SZ;
  BlockMask = MAX_UINT64 >> BlockShift;

  DEBUG ((DEBUG_VERBOSE, "%a(%d): %llx - %llx set %lx clr %lx\n", __FUNCTION__,
    Level, RegionStart, RegionEnd, AttributeSetMask, AttributeClearMask));

  for (; RegionStart < RegionEnd; RegionStart = BlockEnd) {
    // Advance one block at a time, clamped to the end of the region.
    BlockEnd = MIN (RegionEnd, (RegionStart | BlockMask) + 1);
    Entry = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)];

    //
    // If RegionStart or BlockEnd is not aligned to the block size at this
    // level, we will have to create a table mapping in order to map less
    // than a block, and recurse to create the block or page entries at
    // the next level. No block mappings are allowed at all at level 0,
    // so in that case, we have to recurse unconditionally.
    //
    if (Level == 0 || ((RegionStart | BlockEnd) & BlockMask) != 0) {
      ASSERT (Level < 3);

      if ((*Entry & TT_TYPE_MASK) != TT_TYPE_TABLE_ENTRY) {
        //
        // No table entry exists yet, so we need to allocate a page table
        // for the next level.
        //
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return EFI_OUT_OF_RESOURCES;
        }

        if ((*Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
          //
          // We are splitting an existing block entry, so we have to populate
          // the new table with the attributes of the block entry it replaces.
          //
          Status = UpdateRegionMappingRecursive (RegionStart & ~BlockMask,
            (RegionStart | BlockMask) + 1, *Entry & TT_ATTRIBUTES_MASK,
            0, TranslationTable, Level + 1);
          if (EFI_ERROR (Status)) {
            //
            // The range we passed to UpdateRegionMappingRecursive () is block
            // aligned, so it is guaranteed that no further pages were allocated
            // by it, and so we only have to free the page we allocated here.
            //
            FreePages (TranslationTable, 1);
            return Status;
          }
        } else {
          // Fresh (invalid) entry: the new table starts out empty.
          ZeroMem (TranslationTable, EFI_PAGE_SIZE);
        }
      } else {
        // A next-level table already exists; descend into it.
        TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
      }

      //
      // Recurse to the next level
      //
      Status = UpdateRegionMappingRecursive (RegionStart, BlockEnd,
        AttributeSetMask, AttributeClearMask, TranslationTable,
        Level + 1);
      if (EFI_ERROR (Status)) {
        if ((*Entry & TT_TYPE_MASK) != TT_TYPE_TABLE_ENTRY) {
          //
          // We are creating a new table entry, so on failure, we can free all
          // allocations we made recursively, given that the whole subhierarchy
          // has not been wired into the live page tables yet. (This is not
          // possible for existing table entries, since we cannot revert the
          // modifications we made to the subhierarchy it represents.)
          //
          FreePageTablesRecursive (TranslationTable);
        }
        return Status;
      }

      if ((*Entry & TT_TYPE_MASK) != TT_TYPE_TABLE_ENTRY) {
        // Wire the (now fully populated) new table into this level. If we
        // are replacing a live block entry, break-before-make is required.
        EntryValue = (UINTN)TranslationTable | TT_TYPE_TABLE_ENTRY;
        ReplaceTableEntry (Entry, EntryValue, RegionStart,
          (*Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY);
      }
    } else {
      // Block-aligned at this level: write a block (or, at level 3, page)
      // entry directly, merging attributes per the set/clear masks.
      EntryValue = (*Entry & AttributeClearMask) | AttributeSetMask;
      EntryValue |= RegionStart;
      EntryValue |= (Level == 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
                                 : TT_TYPE_BLOCK_ENTRY;

      ReplaceTableEntry (Entry, EntryValue, RegionStart, FALSE);
    }
  }
  return EFI_SUCCESS;
}
267
268 STATIC
269 VOID
270 LookupAddresstoRootTable (
271 IN UINT64 MaxAddress,
272 OUT UINTN *T0SZ,
273 OUT UINTN *TableEntryCount
274 )
275 {
276 UINTN TopBit;
277
278 // Check the parameters are not NULL
279 ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));
280
281 // Look for the highest bit set in MaxAddress
282 for (TopBit = 63; TopBit != 0; TopBit--) {
283 if ((1ULL << TopBit) & MaxAddress) {
284 // MaxAddress top bit is found
285 TopBit = TopBit + 1;
286 break;
287 }
288 }
289 ASSERT (TopBit != 0);
290
291 // Calculate T0SZ from the top bit of the MaxAddress
292 *T0SZ = 64 - TopBit;
293
294 // Get the Table info from T0SZ
295 GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);
296 }
297
298 STATIC
299 EFI_STATUS
300 UpdateRegionMapping (
301 IN UINT64 RegionStart,
302 IN UINT64 RegionLength,
303 IN UINT64 AttributeSetMask,
304 IN UINT64 AttributeClearMask
305 )
306 {
307 UINTN RootTableLevel;
308 UINTN T0SZ;
309
310 if (((RegionStart | RegionLength) & EFI_PAGE_MASK)) {
311 return EFI_INVALID_PARAMETER;
312 }
313
314 T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
315 GetRootTranslationTableInfo (T0SZ, &RootTableLevel, NULL);
316
317 return UpdateRegionMappingRecursive (RegionStart, RegionStart + RegionLength,
318 AttributeSetMask, AttributeClearMask, ArmGetTTBR0BaseAddress (),
319 RootTableLevel);
320 }
321
322 STATIC
323 EFI_STATUS
324 FillTranslationTable (
325 IN UINT64 *RootTable,
326 IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion
327 )
328 {
329 return UpdateRegionMapping (
330 MemoryRegion->VirtualBase,
331 MemoryRegion->Length,
332 ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
333 0
334 );
335 }
336
337 STATIC
338 UINT64
339 GcdAttributeToPageAttribute (
340 IN UINT64 GcdAttributes
341 )
342 {
343 UINT64 PageAttributes;
344
345 switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) {
346 case EFI_MEMORY_UC:
347 PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
348 break;
349 case EFI_MEMORY_WC:
350 PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
351 break;
352 case EFI_MEMORY_WT:
353 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
354 break;
355 case EFI_MEMORY_WB:
356 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
357 break;
358 default:
359 PageAttributes = TT_ATTR_INDX_MASK;
360 break;
361 }
362
363 if ((GcdAttributes & EFI_MEMORY_XP) != 0 ||
364 (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC) {
365 if (ArmReadCurrentEL () == AARCH64_EL2) {
366 PageAttributes |= TT_XN_MASK;
367 } else {
368 PageAttributes |= TT_UXN_MASK | TT_PXN_MASK;
369 }
370 }
371
372 if ((GcdAttributes & EFI_MEMORY_RO) != 0) {
373 PageAttributes |= TT_AP_RO_RO;
374 }
375
376 return PageAttributes | TT_AF;
377 }
378
379 EFI_STATUS
380 ArmSetMemoryAttributes (
381 IN EFI_PHYSICAL_ADDRESS BaseAddress,
382 IN UINT64 Length,
383 IN UINT64 Attributes
384 )
385 {
386 UINT64 PageAttributes;
387 UINT64 PageAttributeMask;
388
389 PageAttributes = GcdAttributeToPageAttribute (Attributes);
390 PageAttributeMask = 0;
391
392 if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {
393 //
394 // No memory type was set in Attributes, so we are going to update the
395 // permissions only.
396 //
397 PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;
398 PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |
399 TT_PXN_MASK | TT_XN_MASK);
400 }
401
402 return UpdateRegionMapping (BaseAddress, Length, PageAttributes,
403 PageAttributeMask);
404 }
405
/**
  Thin pass-through to UpdateRegionMapping () used by the NoExec/ReadOnly
  helpers below.

  @param[in] BaseAddress     Start of the region (page aligned).
  @param[in] Length          Length of the region (page aligned).
  @param[in] Attributes      Attribute bits to OR into each entry.
  @param[in] BlockEntryMask  Mask ANDed with each existing entry before
                             Attributes are applied (i.e. the complement of
                             the bits to clear).

  @return Status returned by UpdateRegionMapping ().
**/
STATIC
EFI_STATUS
SetMemoryRegionAttribute (
  IN EFI_PHYSICAL_ADDRESS BaseAddress,
  IN UINT64 Length,
  IN UINT64 Attributes,
  IN UINT64 BlockEntryMask
  )
{
  return UpdateRegionMapping (BaseAddress, Length, Attributes, BlockEntryMask);
}
417
418 EFI_STATUS
419 ArmSetMemoryRegionNoExec (
420 IN EFI_PHYSICAL_ADDRESS BaseAddress,
421 IN UINT64 Length
422 )
423 {
424 UINT64 Val;
425
426 if (ArmReadCurrentEL () == AARCH64_EL1) {
427 Val = TT_PXN_MASK | TT_UXN_MASK;
428 } else {
429 Val = TT_XN_MASK;
430 }
431
432 return SetMemoryRegionAttribute (
433 BaseAddress,
434 Length,
435 Val,
436 ~TT_ADDRESS_MASK_BLOCK_ENTRY);
437 }
438
439 EFI_STATUS
440 ArmClearMemoryRegionNoExec (
441 IN EFI_PHYSICAL_ADDRESS BaseAddress,
442 IN UINT64 Length
443 )
444 {
445 UINT64 Mask;
446
447 // XN maps to UXN in the EL1&0 translation regime
448 Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);
449
450 return SetMemoryRegionAttribute (
451 BaseAddress,
452 Length,
453 0,
454 Mask);
455 }
456
457 EFI_STATUS
458 ArmSetMemoryRegionReadOnly (
459 IN EFI_PHYSICAL_ADDRESS BaseAddress,
460 IN UINT64 Length
461 )
462 {
463 return SetMemoryRegionAttribute (
464 BaseAddress,
465 Length,
466 TT_AP_RO_RO,
467 ~TT_ADDRESS_MASK_BLOCK_ENTRY);
468 }
469
470 EFI_STATUS
471 ArmClearMemoryRegionReadOnly (
472 IN EFI_PHYSICAL_ADDRESS BaseAddress,
473 IN UINT64 Length
474 )
475 {
476 return SetMemoryRegionAttribute (
477 BaseAddress,
478 Length,
479 TT_AP_RW_RW,
480 ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));
481 }
482
483 EFI_STATUS
484 EFIAPI
485 ArmConfigureMmu (
486 IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable,
487 OUT VOID **TranslationTableBase OPTIONAL,
488 OUT UINTN *TranslationTableSize OPTIONAL
489 )
490 {
491 VOID* TranslationTable;
492 UINT32 TranslationTableAttribute;
493 UINT64 MaxAddress;
494 UINTN T0SZ;
495 UINTN RootTableEntryCount;
496 UINT64 TCR;
497 EFI_STATUS Status;
498
499 if(MemoryTable == NULL) {
500 ASSERT (MemoryTable != NULL);
501 return EFI_INVALID_PARAMETER;
502 }
503
504 //
505 // Limit the virtual address space to what we can actually use: UEFI
506 // mandates a 1:1 mapping, so no point in making the virtual address
507 // space larger than the physical address space. We also have to take
508 // into account the architectural limitations that result from UEFI's
509 // use of 4 KB pages.
510 //
511 MaxAddress = MIN (LShiftU64 (1ULL, ArmGetPhysicalAddressBits ()) - 1,
512 MAX_ALLOC_ADDRESS);
513
514 // Lookup the Table Level to get the information
515 LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);
516
517 //
518 // Set TCR that allows us to retrieve T0SZ in the subsequent functions
519 //
520 // Ideally we will be running at EL2, but should support EL1 as well.
521 // UEFI should not run at EL3.
522 if (ArmReadCurrentEL () == AARCH64_EL2) {
523 //Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2
524 TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;
525
526 // Set the Physical Address Size using MaxAddress
527 if (MaxAddress < SIZE_4GB) {
528 TCR |= TCR_PS_4GB;
529 } else if (MaxAddress < SIZE_64GB) {
530 TCR |= TCR_PS_64GB;
531 } else if (MaxAddress < SIZE_1TB) {
532 TCR |= TCR_PS_1TB;
533 } else if (MaxAddress < SIZE_4TB) {
534 TCR |= TCR_PS_4TB;
535 } else if (MaxAddress < SIZE_16TB) {
536 TCR |= TCR_PS_16TB;
537 } else if (MaxAddress < SIZE_256TB) {
538 TCR |= TCR_PS_256TB;
539 } else {
540 DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
541 ASSERT (0); // Bigger than 48-bit memory space are not supported
542 return EFI_UNSUPPORTED;
543 }
544 } else if (ArmReadCurrentEL () == AARCH64_EL1) {
545 // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
546 TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;
547
548 // Set the Physical Address Size using MaxAddress
549 if (MaxAddress < SIZE_4GB) {
550 TCR |= TCR_IPS_4GB;
551 } else if (MaxAddress < SIZE_64GB) {
552 TCR |= TCR_IPS_64GB;
553 } else if (MaxAddress < SIZE_1TB) {
554 TCR |= TCR_IPS_1TB;
555 } else if (MaxAddress < SIZE_4TB) {
556 TCR |= TCR_IPS_4TB;
557 } else if (MaxAddress < SIZE_16TB) {
558 TCR |= TCR_IPS_16TB;
559 } else if (MaxAddress < SIZE_256TB) {
560 TCR |= TCR_IPS_256TB;
561 } else {
562 DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
563 ASSERT (0); // Bigger than 48-bit memory space are not supported
564 return EFI_UNSUPPORTED;
565 }
566 } else {
567 ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
568 return EFI_UNSUPPORTED;
569 }
570
571 //
572 // Translation table walks are always cache coherent on ARMv8-A, so cache
573 // maintenance on page tables is never needed. Since there is a risk of
574 // loss of coherency when using mismatched attributes, and given that memory
575 // is mapped cacheable except for extraordinary cases (such as non-coherent
576 // DMA), have the page table walker perform cached accesses as well, and
577 // assert below that that matches the attributes we use for CPU accesses to
578 // the region.
579 //
580 TCR |= TCR_SH_INNER_SHAREABLE |
581 TCR_RGN_OUTER_WRITE_BACK_ALLOC |
582 TCR_RGN_INNER_WRITE_BACK_ALLOC;
583
584 // Set TCR
585 ArmSetTCR (TCR);
586
587 // Allocate pages for translation table
588 TranslationTable = AllocatePages (1);
589 if (TranslationTable == NULL) {
590 return EFI_OUT_OF_RESOURCES;
591 }
592 // We set TTBR0 just after allocating the table to retrieve its location from the subsequent
593 // functions without needing to pass this value across the functions. The MMU is only enabled
594 // after the translation tables are populated.
595 ArmSetTTBR0 (TranslationTable);
596
597 if (TranslationTableBase != NULL) {
598 *TranslationTableBase = TranslationTable;
599 }
600
601 if (TranslationTableSize != NULL) {
602 *TranslationTableSize = RootTableEntryCount * sizeof(UINT64);
603 }
604
605 ZeroMem (TranslationTable, RootTableEntryCount * sizeof(UINT64));
606
607 TranslationTableAttribute = TT_ATTR_INDX_INVALID;
608 while (MemoryTable->Length != 0) {
609
610 DEBUG_CODE_BEGIN ();
611 // Find the memory attribute for the Translation Table
612 if ((UINTN)TranslationTable >= MemoryTable->PhysicalBase &&
613 (UINTN)TranslationTable + EFI_PAGE_SIZE <= MemoryTable->PhysicalBase +
614 MemoryTable->Length) {
615 TranslationTableAttribute = MemoryTable->Attributes;
616 }
617 DEBUG_CODE_END ();
618
619 Status = FillTranslationTable (TranslationTable, MemoryTable);
620 if (EFI_ERROR (Status)) {
621 goto FREE_TRANSLATION_TABLE;
622 }
623 MemoryTable++;
624 }
625
626 ASSERT (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK ||
627 TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK);
628
629 ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) | // mapped to EFI_MEMORY_UC
630 MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) | // mapped to EFI_MEMORY_WC
631 MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) | // mapped to EFI_MEMORY_WT
632 MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)); // mapped to EFI_MEMORY_WB
633
634 ArmDisableAlignmentCheck ();
635 ArmEnableStackAlignmentCheck ();
636 ArmEnableInstructionCache ();
637 ArmEnableDataCache ();
638
639 ArmEnableMmu ();
640 return EFI_SUCCESS;
641
642 FREE_TRANSLATION_TABLE:
643 FreePages (TranslationTable, 1);
644 return Status;
645 }
646
/**
  Library constructor: write back the code of the
  ArmReplaceLiveTranslationEntry () helper to the point of coherency, since
  it may later be executed with the MMU (and hence the caches) off.

  @retval RETURN_SUCCESS  Always.
**/
RETURN_STATUS
EFIAPI
ArmMmuBaseLibConstructor (
  VOID
  )
{
  // Size symbol emitted alongside the assembly implementation of the helper.
  extern UINT32 ArmReplaceLiveTranslationEntrySize;

  //
  // The ArmReplaceLiveTranslationEntry () helper function may be invoked
  // with the MMU off so we have to ensure that it gets cleaned to the PoC
  //
  WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry,
    ArmReplaceLiveTranslationEntrySize);

  return RETURN_SUCCESS;
}