]> git.proxmox.com Git - mirror_edk2.git/blob - ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
f2eec7191328cbbaa4d83476e5935db8889b4fe4
[mirror_edk2.git] / ArmPkg / Library / ArmMmuLib / AArch64 / ArmMmuLibCore.c
1 /** @file
2 * File managing the MMU for ARMv8 architecture
3 *
4 * Copyright (c) 2011-2020, ARM Limited. All rights reserved.
5 * Copyright (c) 2016, Linaro Limited. All rights reserved.
6 * Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
7 *
8 * SPDX-License-Identifier: BSD-2-Clause-Patent
9 *
10 **/
11
12 #include <Uefi.h>
13 #include <Chipset/AArch64.h>
14 #include <Library/BaseMemoryLib.h>
15 #include <Library/CacheMaintenanceLib.h>
16 #include <Library/MemoryAllocationLib.h>
17 #include <Library/ArmLib.h>
18 #include <Library/ArmMmuLib.h>
19 #include <Library/BaseLib.h>
20 #include <Library/DebugLib.h>
21
22 // We use this index definition to define an invalid block entry
23 #define TT_ATTR_INDX_INVALID ((UINT32)~0)
24
25 STATIC
26 UINT64
27 ArmMemoryAttributeToPageAttribute (
28 IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
29 )
30 {
31 switch (Attributes) {
32 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
33 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
34 return TT_ATTR_INDX_MEMORY_WRITE_BACK;
35
36 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
37 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
38 return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
39
40 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
41 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
42 return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
43
44 // Uncached and device mappings are treated as outer shareable by default,
45 case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
46 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
47 return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
48
49 default:
50 ASSERT(0);
51 case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
52 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
53 if (ArmReadCurrentEL () == AARCH64_EL2)
54 return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
55 else
56 return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
57 }
58 }
59
60 UINT64
61 PageAttributeToGcdAttribute (
62 IN UINT64 PageAttributes
63 )
64 {
65 UINT64 GcdAttributes;
66
67 switch (PageAttributes & TT_ATTR_INDX_MASK) {
68 case TT_ATTR_INDX_DEVICE_MEMORY:
69 GcdAttributes = EFI_MEMORY_UC;
70 break;
71 case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:
72 GcdAttributes = EFI_MEMORY_WC;
73 break;
74 case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:
75 GcdAttributes = EFI_MEMORY_WT;
76 break;
77 case TT_ATTR_INDX_MEMORY_WRITE_BACK:
78 GcdAttributes = EFI_MEMORY_WB;
79 break;
80 default:
81 DEBUG ((EFI_D_ERROR, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes));
82 ASSERT (0);
83 // The Global Coherency Domain (GCD) value is defined as a bit set.
84 // Returning 0 means no attribute has been set.
85 GcdAttributes = 0;
86 }
87
88 // Determine protection attributes
89 if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) || ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {
90 // Read only cases map to write-protect
91 GcdAttributes |= EFI_MEMORY_RO;
92 }
93
94 // Process eXecute Never attribute
95 if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0 ) {
96 GcdAttributes |= EFI_MEMORY_XP;
97 }
98
99 return GcdAttributes;
100 }
101
102 #define MIN_T0SZ 16
103 #define BITS_PER_LEVEL 9
104
105 VOID
106 GetRootTranslationTableInfo (
107 IN UINTN T0SZ,
108 OUT UINTN *TableLevel,
109 OUT UINTN *TableEntryCount
110 )
111 {
112 // Get the level of the root table
113 if (TableLevel) {
114 *TableLevel = (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;
115 }
116
117 if (TableEntryCount) {
118 *TableEntryCount = 1UL << (BITS_PER_LEVEL - (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL);
119 }
120 }
121
/**
  Install a new descriptor value into a translation table entry.

  When the MMU is disabled, or the entry being replaced is not a live block
  mapping, the descriptor is written directly and the required TLB/cache
  maintenance is performed by ArmUpdateTranslationTableEntry (). Otherwise the
  entry may be in active use while being modified, so the update is delegated
  to the ArmReplaceLiveTranslationEntry () helper, which is expected to handle
  the replacement safely (see the cache clean of its code in the constructor
  below).

  @param[in]  Entry               Pointer to the descriptor to update.
  @param[in]  Value               New descriptor value to install.
  @param[in]  RegionStart         Base address mapped by the descriptor, used
                                  for TLB maintenance.
  @param[in]  IsLiveBlockMapping  TRUE if the existing entry is a block mapping
                                  that may be live while it is replaced.
**/
STATIC
VOID
ReplaceTableEntry (
  IN UINT64 *Entry,
  IN UINT64 Value,
  IN UINT64 RegionStart,
  IN BOOLEAN IsLiveBlockMapping
  )
{
  if (!ArmMmuEnabled () || !IsLiveBlockMapping) {
    *Entry = Value;
    ArmUpdateTranslationTableEntry (Entry, (VOID *)(UINTN)RegionStart);
  } else {
    ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart);
  }
}
138
/**
  Recursively free a translation table and every next-level table it refers to.

  Entries whose descriptor type is TT_TYPE_TABLE_ENTRY are followed and their
  subordinate tables freed first; the table itself is freed last.

  NOTE(review): at level 3, page descriptors use the same descriptor type
  value as table entries (TT_TYPE_BLOCK_ENTRY_LEVEL3 vs TT_TYPE_TABLE_ENTRY),
  so this function must only ever be handed tables at levels 0..2 — a level-3
  table would cause recursion into arbitrary page frame addresses. Confirm
  that all call sites guarantee this.

  @param[in] TranslationTable   The table to free, one page in size.
**/
STATIC
VOID
FreePageTablesRecursive (
  IN UINT64 *TranslationTable
  )
{
  UINTN Index;

  for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
    if ((TranslationTable[Index] & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
      FreePageTablesRecursive ((VOID *)(UINTN)(TranslationTable[Index] &
                                               TT_ADDRESS_MASK_BLOCK_ENTRY));
    }
  }
  FreePages (TranslationTable, 1);
}
155
/**
  Update the mapping of the region [RegionStart, RegionEnd) in the given
  translation table, recursing into (and creating or splitting) next-level
  tables as needed.

  For each descriptor touched, the new value is computed as
  (old & AttributeClearMask) | AttributeSetMask — i.e. AttributeClearMask is
  the mask of bits to PRESERVE from the existing entry.

  @param[in] RegionStart         Page-aligned start of the region to map.
  @param[in] RegionEnd           Page-aligned exclusive end of the region.
  @param[in] AttributeSetMask    Descriptor bits to set.
  @param[in] AttributeClearMask  Mask of existing descriptor bits to keep.
  @param[in] PageTable           Translation table for this lookup level.
  @param[in] Level               Lookup level of PageTable (0..3).

  @retval EFI_SUCCESS           The region was mapped/updated successfully.
  @retval EFI_OUT_OF_RESOURCES  A next-level table could not be allocated.
**/
STATIC
EFI_STATUS
UpdateRegionMappingRecursive (
  IN UINT64 RegionStart,
  IN UINT64 RegionEnd,
  IN UINT64 AttributeSetMask,
  IN UINT64 AttributeClearMask,
  IN UINT64 *PageTable,
  IN UINTN Level
  )
{
  UINTN BlockShift;
  UINT64 BlockMask;
  UINT64 BlockEnd;
  UINT64 *Entry;
  UINT64 EntryValue;
  VOID *TranslationTable;
  EFI_STATUS Status;

  ASSERT (((RegionStart | RegionEnd) & EFI_PAGE_MASK) == 0);

  // Number of leading address bits consumed up to and including this level;
  // BlockMask covers the address bits translated BELOW this level.
  BlockShift = (Level + 1) * BITS_PER_LEVEL + MIN_T0SZ;
  BlockMask = MAX_UINT64 >> BlockShift;

  DEBUG ((DEBUG_VERBOSE, "%a(%d): %llx - %llx set %lx clr %lx\n", __FUNCTION__,
    Level, RegionStart, RegionEnd, AttributeSetMask, AttributeClearMask));

  // Walk the region one block (at this level) at a time.
  for (; RegionStart < RegionEnd; RegionStart = BlockEnd) {
    BlockEnd = MIN (RegionEnd, (RegionStart | BlockMask) + 1);
    Entry = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)];

    //
    // If RegionStart or BlockEnd is not aligned to the block size at this
    // level, we will have to create a table mapping in order to map less
    // than a block, and recurse to create the block or page entries at
    // the next level. No block mappings are allowed at all at level 0,
    // so in that case, we have to recurse unconditionally.
    //
    if (Level == 0 || ((RegionStart | BlockEnd) & BlockMask) != 0) {
      ASSERT (Level < 3);

      if ((*Entry & TT_TYPE_MASK) != TT_TYPE_TABLE_ENTRY) {
        //
        // No table entry exists yet, so we need to allocate a page table
        // for the next level.
        //
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return EFI_OUT_OF_RESOURCES;
        }

        if (!ArmMmuEnabled ()) {
          //
          // Make sure we are not inadvertently hitting in the caches
          // when populating the page tables.
          //
          InvalidateDataCacheRange (TranslationTable, EFI_PAGE_SIZE);
        }

        if ((*Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
          //
          // We are splitting an existing block entry, so we have to populate
          // the new table with the attributes of the block entry it replaces.
          //
          Status = UpdateRegionMappingRecursive (RegionStart & ~BlockMask,
                     (RegionStart | BlockMask) + 1, *Entry & TT_ATTRIBUTES_MASK,
                     0, TranslationTable, Level + 1);
          if (EFI_ERROR (Status)) {
            //
            // The range we passed to UpdateRegionMappingRecursive () is block
            // aligned, so it is guaranteed that no further pages were allocated
            // by it, and so we only have to free the page we allocated here.
            //
            FreePages (TranslationTable, 1);
            return Status;
          }
        } else {
          ZeroMem (TranslationTable, EFI_PAGE_SIZE);
        }
      } else {
        // A table entry already exists; descend into it in place.
        TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
      }

      //
      // Recurse to the next level
      //
      Status = UpdateRegionMappingRecursive (RegionStart, BlockEnd,
                 AttributeSetMask, AttributeClearMask, TranslationTable,
                 Level + 1);
      if (EFI_ERROR (Status)) {
        if ((*Entry & TT_TYPE_MASK) != TT_TYPE_TABLE_ENTRY) {
          //
          // We are creating a new table entry, so on failure, we can free all
          // allocations we made recursively, given that the whole subhierarchy
          // has not been wired into the live page tables yet. (This is not
          // possible for existing table entries, since we cannot revert the
          // modifications we made to the subhierarchy it represents.)
          //
          FreePageTablesRecursive (TranslationTable);
        }
        return Status;
      }

      // Wire the fully populated subtable into this level only now, so it
      // becomes visible atomically. Replacing a live block entry with a table
      // entry needs the live-replacement path in ReplaceTableEntry ().
      if ((*Entry & TT_TYPE_MASK) != TT_TYPE_TABLE_ENTRY) {
        EntryValue = (UINTN)TranslationTable | TT_TYPE_TABLE_ENTRY;
        ReplaceTableEntry (Entry, EntryValue, RegionStart,
          (*Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY);
      }
    } else {
      // Region is block aligned at this level: write a block (or, at level 3,
      // page) descriptor directly, preserving the bits in AttributeClearMask.
      EntryValue = (*Entry & AttributeClearMask) | AttributeSetMask;
      EntryValue |= RegionStart;
      EntryValue |= (Level == 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
                                 : TT_TYPE_BLOCK_ENTRY;

      ReplaceTableEntry (Entry, EntryValue, RegionStart, FALSE);
    }
  }
  return EFI_SUCCESS;
}
275
276 STATIC
277 VOID
278 LookupAddresstoRootTable (
279 IN UINT64 MaxAddress,
280 OUT UINTN *T0SZ,
281 OUT UINTN *TableEntryCount
282 )
283 {
284 UINTN TopBit;
285
286 // Check the parameters are not NULL
287 ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));
288
289 // Look for the highest bit set in MaxAddress
290 for (TopBit = 63; TopBit != 0; TopBit--) {
291 if ((1ULL << TopBit) & MaxAddress) {
292 // MaxAddress top bit is found
293 TopBit = TopBit + 1;
294 break;
295 }
296 }
297 ASSERT (TopBit != 0);
298
299 // Calculate T0SZ from the top bit of the MaxAddress
300 *T0SZ = 64 - TopBit;
301
302 // Get the Table info from T0SZ
303 GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);
304 }
305
306 STATIC
307 EFI_STATUS
308 UpdateRegionMapping (
309 IN UINT64 RegionStart,
310 IN UINT64 RegionLength,
311 IN UINT64 AttributeSetMask,
312 IN UINT64 AttributeClearMask
313 )
314 {
315 UINTN RootTableLevel;
316 UINTN T0SZ;
317
318 if (((RegionStart | RegionLength) & EFI_PAGE_MASK)) {
319 return EFI_INVALID_PARAMETER;
320 }
321
322 T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
323 GetRootTranslationTableInfo (T0SZ, &RootTableLevel, NULL);
324
325 return UpdateRegionMappingRecursive (RegionStart, RegionStart + RegionLength,
326 AttributeSetMask, AttributeClearMask, ArmGetTTBR0BaseAddress (),
327 RootTableLevel);
328 }
329
330 STATIC
331 EFI_STATUS
332 FillTranslationTable (
333 IN UINT64 *RootTable,
334 IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion
335 )
336 {
337 return UpdateRegionMapping (
338 MemoryRegion->VirtualBase,
339 MemoryRegion->Length,
340 ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
341 0
342 );
343 }
344
345 STATIC
346 UINT64
347 GcdAttributeToPageAttribute (
348 IN UINT64 GcdAttributes
349 )
350 {
351 UINT64 PageAttributes;
352
353 switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) {
354 case EFI_MEMORY_UC:
355 PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
356 break;
357 case EFI_MEMORY_WC:
358 PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
359 break;
360 case EFI_MEMORY_WT:
361 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
362 break;
363 case EFI_MEMORY_WB:
364 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
365 break;
366 default:
367 PageAttributes = TT_ATTR_INDX_MASK;
368 break;
369 }
370
371 if ((GcdAttributes & EFI_MEMORY_XP) != 0 ||
372 (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC) {
373 if (ArmReadCurrentEL () == AARCH64_EL2) {
374 PageAttributes |= TT_XN_MASK;
375 } else {
376 PageAttributes |= TT_UXN_MASK | TT_PXN_MASK;
377 }
378 }
379
380 if ((GcdAttributes & EFI_MEMORY_RO) != 0) {
381 PageAttributes |= TT_AP_RO_RO;
382 }
383
384 return PageAttributes | TT_AF;
385 }
386
/**
  Set the memory attributes of a region in the live translation tables.

  If Attributes contains a cacheability type, the whole descriptor attribute
  field is replaced (the keep-mask passed to UpdateRegionMapping () is zero).
  If it contains no cacheability type, only the access permission and
  execute-never bits are updated and all other descriptor bits are preserved.

  @param[in] BaseAddress   Page-aligned start of the region.
  @param[in] Length        Page-aligned length of the region in bytes.
  @param[in] Attributes    EFI_MEMORY_* attributes to apply.

  @return  Status returned by UpdateRegionMapping ().
**/
EFI_STATUS
ArmSetMemoryAttributes (
  IN EFI_PHYSICAL_ADDRESS BaseAddress,
  IN UINT64 Length,
  IN UINT64 Attributes
  )
{
  UINT64 PageAttributes;
  UINT64 PageAttributeMask;

  PageAttributes = GcdAttributeToPageAttribute (Attributes);
  // A zero mask means the existing descriptor attributes are fully replaced.
  PageAttributeMask = 0;

  if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {
    //
    // No memory type was set in Attributes, so we are going to update the
    // permissions only.
    //
    // Keep only the permission bits of the new value, and preserve every
    // existing descriptor bit except the output address, access permission
    // and execute-never fields. (TT_XN_MASK covers the EL2 XN bit; together
    // with TT_PXN_MASK this clears UXN/PXN in the EL1&0 regime as well.)
    //
    PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;
    PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |
                          TT_PXN_MASK | TT_XN_MASK);
  }

  return UpdateRegionMapping (BaseAddress, Length, PageAttributes,
           PageAttributeMask);
}
413
414 STATIC
415 EFI_STATUS
416 SetMemoryRegionAttribute (
417 IN EFI_PHYSICAL_ADDRESS BaseAddress,
418 IN UINT64 Length,
419 IN UINT64 Attributes,
420 IN UINT64 BlockEntryMask
421 )
422 {
423 return UpdateRegionMapping (BaseAddress, Length, Attributes, BlockEntryMask);
424 }
425
426 EFI_STATUS
427 ArmSetMemoryRegionNoExec (
428 IN EFI_PHYSICAL_ADDRESS BaseAddress,
429 IN UINT64 Length
430 )
431 {
432 UINT64 Val;
433
434 if (ArmReadCurrentEL () == AARCH64_EL1) {
435 Val = TT_PXN_MASK | TT_UXN_MASK;
436 } else {
437 Val = TT_XN_MASK;
438 }
439
440 return SetMemoryRegionAttribute (
441 BaseAddress,
442 Length,
443 Val,
444 ~TT_ADDRESS_MASK_BLOCK_ENTRY);
445 }
446
447 EFI_STATUS
448 ArmClearMemoryRegionNoExec (
449 IN EFI_PHYSICAL_ADDRESS BaseAddress,
450 IN UINT64 Length
451 )
452 {
453 UINT64 Mask;
454
455 // XN maps to UXN in the EL1&0 translation regime
456 Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);
457
458 return SetMemoryRegionAttribute (
459 BaseAddress,
460 Length,
461 0,
462 Mask);
463 }
464
465 EFI_STATUS
466 ArmSetMemoryRegionReadOnly (
467 IN EFI_PHYSICAL_ADDRESS BaseAddress,
468 IN UINT64 Length
469 )
470 {
471 return SetMemoryRegionAttribute (
472 BaseAddress,
473 Length,
474 TT_AP_RO_RO,
475 ~TT_ADDRESS_MASK_BLOCK_ENTRY);
476 }
477
478 EFI_STATUS
479 ArmClearMemoryRegionReadOnly (
480 IN EFI_PHYSICAL_ADDRESS BaseAddress,
481 IN UINT64 Length
482 )
483 {
484 return SetMemoryRegionAttribute (
485 BaseAddress,
486 Length,
487 TT_AP_RW_RW,
488 ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));
489 }
490
/**
  Configure and enable the MMU: program TCR and MAIR, allocate and populate
  the TTBR0 translation tables from MemoryTable, then enable caches and the
  MMU.

  The ordering here matters: TCR is set before the tables are built so that
  UpdateRegionMapping () can read T0SZ back from it, and TTBR0 is set right
  after allocating the root table so ArmGetTTBR0BaseAddress () works during
  population. The MMU itself is only enabled after the tables are complete.

  @param[in]  MemoryTable           Array of region descriptors, terminated by
                                    an entry with Length == 0.
  @param[out] TranslationTableBase  If not NULL, receives the root table base.
  @param[out] TranslationTableSize  If not NULL, receives the root table size
                                    in bytes.

  @retval EFI_SUCCESS            MMU configured and enabled.
  @retval EFI_INVALID_PARAMETER  MemoryTable is NULL.
  @retval EFI_UNSUPPORTED        Address space too large, or not running at
                                 EL1 or EL2.
  @retval EFI_OUT_OF_RESOURCES   A page table allocation failed.
**/
EFI_STATUS
EFIAPI
ArmConfigureMmu (
  IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable,
  OUT VOID **TranslationTableBase OPTIONAL,
  OUT UINTN *TranslationTableSize OPTIONAL
  )
{
  VOID* TranslationTable;
  UINT64 MaxAddress;
  UINTN T0SZ;
  UINTN RootTableEntryCount;
  UINT64 TCR;
  EFI_STATUS Status;

  if(MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return EFI_INVALID_PARAMETER;
  }

  //
  // Limit the virtual address space to what we can actually use: UEFI
  // mandates a 1:1 mapping, so no point in making the virtual address
  // space larger than the physical address space. We also have to take
  // into account the architectural limitations that result from UEFI's
  // use of 4 KB pages.
  //
  MaxAddress = MIN (LShiftU64 (1ULL, ArmGetPhysicalAddressBits ()) - 1,
                    MAX_ALLOC_ADDRESS);

  // Lookup the Table Level to get the information
  LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);

  //
  // Set TCR that allows us to retrieve T0SZ in the subsequent functions
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    //Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return EFI_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return EFI_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return EFI_UNSUPPORTED;
  }

  //
  // Translation table walks are always cache coherent on ARMv8-A, so cache
  // maintenance on page tables is never needed. Since there is a risk of
  // loss of coherency when using mismatched attributes, and given that memory
  // is mapped cacheable except for extraordinary cases (such as non-coherent
  // DMA), have the page table walker perform cached accesses as well, and
  // assert below that that matches the attributes we use for CPU accesses to
  // the region.
  //
  TCR |= TCR_SH_INNER_SHAREABLE |
         TCR_RGN_OUTER_WRITE_BACK_ALLOC |
         TCR_RGN_INNER_WRITE_BACK_ALLOC;

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for translation table
  TranslationTable = AllocatePages (1);
  if (TranslationTable == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }
  // We set TTBR0 just after allocating the table to retrieve its location from the subsequent
  // functions without needing to pass this value across the functions. The MMU is only enabled
  // after the translation tables are populated.
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof(UINT64);
  }

  //
  // Make sure we are not inadvertently hitting in the caches
  // when populating the page tables.
  //
  InvalidateDataCacheRange (TranslationTable,
    RootTableEntryCount * sizeof(UINT64));
  ZeroMem (TranslationTable, RootTableEntryCount * sizeof(UINT64));

  // Map every region in the caller's table; the array is terminated by a
  // descriptor with Length == 0.
  while (MemoryTable->Length != 0) {
    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (EFI_ERROR (Status)) {
      goto FREE_TRANSLATION_TABLE;
    }
    MemoryTable++;
  }

  // Program the MAIR so the TT_ATTR_INDX_* indices used by the mapping code
  // select the intended memory types.
  ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |                      // mapped to EFI_MEMORY_UC
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) | // mapped to EFI_MEMORY_WC
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) | // mapped to EFI_MEMORY_WT
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK));       // mapped to EFI_MEMORY_WB

  ArmDisableAlignmentCheck ();
  ArmEnableStackAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return EFI_SUCCESS;

FREE_TRANSLATION_TABLE:
  // NOTE(review): only the root page is freed here; any next-level tables
  // already allocated by FillTranslationTable () are not reclaimed — confirm
  // whether that leak on this error path is acceptable.
  FreePages (TranslationTable, 1);
  return Status;
}
645
/**
  Library constructor: clean the ArmReplaceLiveTranslationEntry () code to
  the point of coherency so it remains fetchable when executed with the MMU
  (and therefore the caches) disabled.

  @retval RETURN_SUCCESS   Always.
**/
RETURN_STATUS
EFIAPI
ArmMmuBaseLibConstructor (
  VOID
  )
{
  // Size symbol exported alongside the helper by its implementation;
  // together they bound the code range to write back.
  extern UINT32 ArmReplaceLiveTranslationEntrySize;

  //
  // The ArmReplaceLiveTranslationEntry () helper function may be invoked
  // with the MMU off so we have to ensure that it gets cleaned to the PoC
  //
  WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry,
    ArmReplaceLiveTranslationEntrySize);

  return RETURN_SUCCESS;
}