]> git.proxmox.com Git - mirror_edk2.git/blob - ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
a43d468b73ca72adb9af58412a52c32cbbd9e992
[mirror_edk2.git] / ArmPkg / Library / ArmMmuLib / AArch64 / ArmMmuLibCore.c
1 /** @file
2 * File managing the MMU for ARMv8 architecture
3 *
4 * Copyright (c) 2011-2020, ARM Limited. All rights reserved.
5 * Copyright (c) 2016, Linaro Limited. All rights reserved.
6 * Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
7 *
8 * SPDX-License-Identifier: BSD-2-Clause-Patent
9 *
10 **/
11
12 #include <Uefi.h>
13 #include <Chipset/AArch64.h>
14 #include <Library/BaseMemoryLib.h>
15 #include <Library/CacheMaintenanceLib.h>
16 #include <Library/MemoryAllocationLib.h>
17 #include <Library/ArmLib.h>
18 #include <Library/ArmMmuLib.h>
19 #include <Library/BaseLib.h>
20 #include <Library/DebugLib.h>
21
22 // We use this index definition to define an invalid block entry
23 #define TT_ATTR_INDX_INVALID ((UINT32)~0)
24
25 STATIC
26 UINT64
27 ArmMemoryAttributeToPageAttribute (
28 IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
29 )
30 {
31 switch (Attributes) {
32 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
33 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
34 return TT_ATTR_INDX_MEMORY_WRITE_BACK;
35
36 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
37 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
38 return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
39
40 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
41 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
42 return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
43
44 // Uncached and device mappings are treated as outer shareable by default,
45 case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
46 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
47 return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
48
49 default:
50 ASSERT (0);
51 case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
52 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
53 if (ArmReadCurrentEL () == AARCH64_EL2)
54 return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
55 else
56 return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
57 }
58 }
59
60 UINT64
61 PageAttributeToGcdAttribute (
62 IN UINT64 PageAttributes
63 )
64 {
65 UINT64 GcdAttributes;
66
67 switch (PageAttributes & TT_ATTR_INDX_MASK) {
68 case TT_ATTR_INDX_DEVICE_MEMORY:
69 GcdAttributes = EFI_MEMORY_UC;
70 break;
71 case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:
72 GcdAttributes = EFI_MEMORY_WC;
73 break;
74 case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:
75 GcdAttributes = EFI_MEMORY_WT;
76 break;
77 case TT_ATTR_INDX_MEMORY_WRITE_BACK:
78 GcdAttributes = EFI_MEMORY_WB;
79 break;
80 default:
81 DEBUG ((DEBUG_ERROR,
82 "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n",
83 PageAttributes));
84 ASSERT (0);
85 // The Global Coherency Domain (GCD) value is defined as a bit set.
86 // Returning 0 means no attribute has been set.
87 GcdAttributes = 0;
88 }
89
90 // Determine protection attributes
91 if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) ||
92 ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {
93 // Read only cases map to write-protect
94 GcdAttributes |= EFI_MEMORY_RO;
95 }
96
97 // Process eXecute Never attribute
98 if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0) {
99 GcdAttributes |= EFI_MEMORY_XP;
100 }
101
102 return GcdAttributes;
103 }
104
105 #define MIN_T0SZ 16
106 #define BITS_PER_LEVEL 9
107
108 VOID
109 GetRootTranslationTableInfo (
110 IN UINTN T0SZ,
111 OUT UINTN *TableLevel,
112 OUT UINTN *TableEntryCount
113 )
114 {
115 // Get the level of the root table
116 if (TableLevel) {
117 *TableLevel = (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;
118 }
119
120 if (TableEntryCount) {
121 *TableEntryCount = 1UL << (BITS_PER_LEVEL - (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL);
122 }
123 }
124
125 STATIC
126 VOID
127 ReplaceTableEntry (
128 IN UINT64 *Entry,
129 IN UINT64 Value,
130 IN UINT64 RegionStart,
131 IN BOOLEAN IsLiveBlockMapping
132 )
133 {
134 if (!ArmMmuEnabled () || !IsLiveBlockMapping) {
135 *Entry = Value;
136 ArmUpdateTranslationTableEntry (Entry, (VOID *)(UINTN)RegionStart);
137 } else {
138 ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart);
139 }
140 }
141
142 STATIC
143 VOID
144 FreePageTablesRecursive (
145 IN UINT64 *TranslationTable
146 )
147 {
148 UINTN Index;
149
150 for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
151 if ((TranslationTable[Index] & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
152 FreePageTablesRecursive ((VOID *)(UINTN)(TranslationTable[Index] &
153 TT_ADDRESS_MASK_BLOCK_ENTRY));
154 }
155 }
156 FreePages (TranslationTable, 1);
157 }
158
159 STATIC
160 EFI_STATUS
161 UpdateRegionMappingRecursive (
162 IN UINT64 RegionStart,
163 IN UINT64 RegionEnd,
164 IN UINT64 AttributeSetMask,
165 IN UINT64 AttributeClearMask,
166 IN UINT64 *PageTable,
167 IN UINTN Level
168 )
169 {
170 UINTN BlockShift;
171 UINT64 BlockMask;
172 UINT64 BlockEnd;
173 UINT64 *Entry;
174 UINT64 EntryValue;
175 VOID *TranslationTable;
176 EFI_STATUS Status;
177
178 ASSERT (((RegionStart | RegionEnd) & EFI_PAGE_MASK) == 0);
179
180 BlockShift = (Level + 1) * BITS_PER_LEVEL + MIN_T0SZ;
181 BlockMask = MAX_UINT64 >> BlockShift;
182
183 DEBUG ((DEBUG_VERBOSE, "%a(%d): %llx - %llx set %lx clr %lx\n", __FUNCTION__,
184 Level, RegionStart, RegionEnd, AttributeSetMask, AttributeClearMask));
185
186 for (; RegionStart < RegionEnd; RegionStart = BlockEnd) {
187 BlockEnd = MIN (RegionEnd, (RegionStart | BlockMask) + 1);
188 Entry = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)];
189
190 //
191 // If RegionStart or BlockEnd is not aligned to the block size at this
192 // level, we will have to create a table mapping in order to map less
193 // than a block, and recurse to create the block or page entries at
194 // the next level. No block mappings are allowed at all at level 0,
195 // so in that case, we have to recurse unconditionally.
196 //
197 if (Level == 0 || ((RegionStart | BlockEnd) & BlockMask) != 0) {
198 ASSERT (Level < 3);
199
200 if ((*Entry & TT_TYPE_MASK) != TT_TYPE_TABLE_ENTRY) {
201 //
202 // No table entry exists yet, so we need to allocate a page table
203 // for the next level.
204 //
205 TranslationTable = AllocatePages (1);
206 if (TranslationTable == NULL) {
207 return EFI_OUT_OF_RESOURCES;
208 }
209
210 if (!ArmMmuEnabled ()) {
211 //
212 // Make sure we are not inadvertently hitting in the caches
213 // when populating the page tables.
214 //
215 InvalidateDataCacheRange (TranslationTable, EFI_PAGE_SIZE);
216 }
217
218 if ((*Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
219 //
220 // We are splitting an existing block entry, so we have to populate
221 // the new table with the attributes of the block entry it replaces.
222 //
223 Status = UpdateRegionMappingRecursive (RegionStart & ~BlockMask,
224 (RegionStart | BlockMask) + 1, *Entry & TT_ATTRIBUTES_MASK,
225 0, TranslationTable, Level + 1);
226 if (EFI_ERROR (Status)) {
227 //
228 // The range we passed to UpdateRegionMappingRecursive () is block
229 // aligned, so it is guaranteed that no further pages were allocated
230 // by it, and so we only have to free the page we allocated here.
231 //
232 FreePages (TranslationTable, 1);
233 return Status;
234 }
235 } else {
236 ZeroMem (TranslationTable, EFI_PAGE_SIZE);
237 }
238 } else {
239 TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
240 }
241
242 //
243 // Recurse to the next level
244 //
245 Status = UpdateRegionMappingRecursive (RegionStart, BlockEnd,
246 AttributeSetMask, AttributeClearMask, TranslationTable,
247 Level + 1);
248 if (EFI_ERROR (Status)) {
249 if ((*Entry & TT_TYPE_MASK) != TT_TYPE_TABLE_ENTRY) {
250 //
251 // We are creating a new table entry, so on failure, we can free all
252 // allocations we made recursively, given that the whole subhierarchy
253 // has not been wired into the live page tables yet. (This is not
254 // possible for existing table entries, since we cannot revert the
255 // modifications we made to the subhierarchy it represents.)
256 //
257 FreePageTablesRecursive (TranslationTable);
258 }
259 return Status;
260 }
261
262 if ((*Entry & TT_TYPE_MASK) != TT_TYPE_TABLE_ENTRY) {
263 EntryValue = (UINTN)TranslationTable | TT_TYPE_TABLE_ENTRY;
264 ReplaceTableEntry (Entry, EntryValue, RegionStart,
265 (*Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY);
266 }
267 } else {
268 EntryValue = (*Entry & AttributeClearMask) | AttributeSetMask;
269 EntryValue |= RegionStart;
270 EntryValue |= (Level == 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
271 : TT_TYPE_BLOCK_ENTRY;
272
273 ReplaceTableEntry (Entry, EntryValue, RegionStart, FALSE);
274 }
275 }
276 return EFI_SUCCESS;
277 }
278
279 STATIC
280 VOID
281 LookupAddresstoRootTable (
282 IN UINT64 MaxAddress,
283 OUT UINTN *T0SZ,
284 OUT UINTN *TableEntryCount
285 )
286 {
287 UINTN TopBit;
288
289 // Check the parameters are not NULL
290 ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));
291
292 // Look for the highest bit set in MaxAddress
293 for (TopBit = 63; TopBit != 0; TopBit--) {
294 if ((1ULL << TopBit) & MaxAddress) {
295 // MaxAddress top bit is found
296 TopBit = TopBit + 1;
297 break;
298 }
299 }
300 ASSERT (TopBit != 0);
301
302 // Calculate T0SZ from the top bit of the MaxAddress
303 *T0SZ = 64 - TopBit;
304
305 // Get the Table info from T0SZ
306 GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);
307 }
308
309 STATIC
310 EFI_STATUS
311 UpdateRegionMapping (
312 IN UINT64 RegionStart,
313 IN UINT64 RegionLength,
314 IN UINT64 AttributeSetMask,
315 IN UINT64 AttributeClearMask
316 )
317 {
318 UINTN RootTableLevel;
319 UINTN T0SZ;
320
321 if (((RegionStart | RegionLength) & EFI_PAGE_MASK)) {
322 return EFI_INVALID_PARAMETER;
323 }
324
325 T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
326 GetRootTranslationTableInfo (T0SZ, &RootTableLevel, NULL);
327
328 return UpdateRegionMappingRecursive (RegionStart, RegionStart + RegionLength,
329 AttributeSetMask, AttributeClearMask, ArmGetTTBR0BaseAddress (),
330 RootTableLevel);
331 }
332
333 STATIC
334 EFI_STATUS
335 FillTranslationTable (
336 IN UINT64 *RootTable,
337 IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion
338 )
339 {
340 return UpdateRegionMapping (
341 MemoryRegion->VirtualBase,
342 MemoryRegion->Length,
343 ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
344 0
345 );
346 }
347
348 STATIC
349 UINT64
350 GcdAttributeToPageAttribute (
351 IN UINT64 GcdAttributes
352 )
353 {
354 UINT64 PageAttributes;
355
356 switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) {
357 case EFI_MEMORY_UC:
358 PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
359 break;
360 case EFI_MEMORY_WC:
361 PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
362 break;
363 case EFI_MEMORY_WT:
364 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
365 break;
366 case EFI_MEMORY_WB:
367 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
368 break;
369 default:
370 PageAttributes = TT_ATTR_INDX_MASK;
371 break;
372 }
373
374 if ((GcdAttributes & EFI_MEMORY_XP) != 0 ||
375 (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC) {
376 if (ArmReadCurrentEL () == AARCH64_EL2) {
377 PageAttributes |= TT_XN_MASK;
378 } else {
379 PageAttributes |= TT_UXN_MASK | TT_PXN_MASK;
380 }
381 }
382
383 if ((GcdAttributes & EFI_MEMORY_RO) != 0) {
384 PageAttributes |= TT_AP_RO_RO;
385 }
386
387 return PageAttributes | TT_AF;
388 }
389
390 EFI_STATUS
391 ArmSetMemoryAttributes (
392 IN EFI_PHYSICAL_ADDRESS BaseAddress,
393 IN UINT64 Length,
394 IN UINT64 Attributes
395 )
396 {
397 UINT64 PageAttributes;
398 UINT64 PageAttributeMask;
399
400 PageAttributes = GcdAttributeToPageAttribute (Attributes);
401 PageAttributeMask = 0;
402
403 if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {
404 //
405 // No memory type was set in Attributes, so we are going to update the
406 // permissions only.
407 //
408 PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;
409 PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |
410 TT_PXN_MASK | TT_XN_MASK);
411 }
412
413 return UpdateRegionMapping (BaseAddress, Length, PageAttributes,
414 PageAttributeMask);
415 }
416
417 STATIC
418 EFI_STATUS
419 SetMemoryRegionAttribute (
420 IN EFI_PHYSICAL_ADDRESS BaseAddress,
421 IN UINT64 Length,
422 IN UINT64 Attributes,
423 IN UINT64 BlockEntryMask
424 )
425 {
426 return UpdateRegionMapping (BaseAddress, Length, Attributes, BlockEntryMask);
427 }
428
429 EFI_STATUS
430 ArmSetMemoryRegionNoExec (
431 IN EFI_PHYSICAL_ADDRESS BaseAddress,
432 IN UINT64 Length
433 )
434 {
435 UINT64 Val;
436
437 if (ArmReadCurrentEL () == AARCH64_EL1) {
438 Val = TT_PXN_MASK | TT_UXN_MASK;
439 } else {
440 Val = TT_XN_MASK;
441 }
442
443 return SetMemoryRegionAttribute (
444 BaseAddress,
445 Length,
446 Val,
447 ~TT_ADDRESS_MASK_BLOCK_ENTRY);
448 }
449
450 EFI_STATUS
451 ArmClearMemoryRegionNoExec (
452 IN EFI_PHYSICAL_ADDRESS BaseAddress,
453 IN UINT64 Length
454 )
455 {
456 UINT64 Mask;
457
458 // XN maps to UXN in the EL1&0 translation regime
459 Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);
460
461 return SetMemoryRegionAttribute (
462 BaseAddress,
463 Length,
464 0,
465 Mask);
466 }
467
468 EFI_STATUS
469 ArmSetMemoryRegionReadOnly (
470 IN EFI_PHYSICAL_ADDRESS BaseAddress,
471 IN UINT64 Length
472 )
473 {
474 return SetMemoryRegionAttribute (
475 BaseAddress,
476 Length,
477 TT_AP_RO_RO,
478 ~TT_ADDRESS_MASK_BLOCK_ENTRY);
479 }
480
481 EFI_STATUS
482 ArmClearMemoryRegionReadOnly (
483 IN EFI_PHYSICAL_ADDRESS BaseAddress,
484 IN UINT64 Length
485 )
486 {
487 return SetMemoryRegionAttribute (
488 BaseAddress,
489 Length,
490 TT_AP_RW_RW,
491 ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));
492 }
493
/**
  Build the initial 1:1 translation tables for the regions described by
  MemoryTable, program TCR, MAIR and TTBR0 accordingly, and enable the MMU
  together with the instruction and data caches.

  @param[in]  MemoryTable           Array of region descriptors, terminated
                                    by an entry with Length == 0.
  @param[out] TranslationTableBase  If not NULL, receives the address of the
                                    root translation table.
  @param[out] TranslationTableSize  If not NULL, receives the size in bytes
                                    of the root translation table.

  @retval EFI_SUCCESS            MMU configured and enabled.
  @retval EFI_INVALID_PARAMETER  MemoryTable is NULL.
  @retval EFI_UNSUPPORTED        Not running at EL1/EL2, or the physical
                                 address space exceeds what TCR can encode.
  @retval EFI_OUT_OF_RESOURCES   Page table allocation failed.
**/
EFI_STATUS
EFIAPI
ArmConfigureMmu (
  IN  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryTable,
  OUT VOID                          **TranslationTableBase OPTIONAL,
  OUT UINTN                         *TranslationTableSize OPTIONAL
  )
{
  VOID        *TranslationTable;
  UINT64      MaxAddress;
  UINTN       T0SZ;
  UINTN       RootTableEntryCount;
  UINT64      TCR;
  EFI_STATUS  Status;

  if (MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return EFI_INVALID_PARAMETER;
  }

  //
  // Limit the virtual address space to what we can actually use: UEFI
  // mandates a 1:1 mapping, so no point in making the virtual address
  // space larger than the physical address space. We also have to take
  // into account the architectural limitations that result from UEFI's
  // use of 4 KB pages.
  //
  MaxAddress = MIN (LShiftU64 (1ULL, ArmGetPhysicalAddressBits ()) - 1,
                 MAX_ALLOC_ADDRESS);

  // Lookup the Table Level to get the information
  LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);

  //
  // Set TCR that allows us to retrieve T0SZ in the subsequent functions
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    //Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return EFI_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return EFI_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return EFI_UNSUPPORTED;
  }

  //
  // Translation table walks are always cache coherent on ARMv8-A, so cache
  // maintenance on page tables is never needed. Since there is a risk of
  // loss of coherency when using mismatched attributes, and given that memory
  // is mapped cacheable except for extraordinary cases (such as non-coherent
  // DMA), have the page table walker perform cached accesses as well, and
  // assert below that that matches the attributes we use for CPU accesses to
  // the region.
  //
  TCR |= TCR_SH_INNER_SHAREABLE |
         TCR_RGN_OUTER_WRITE_BACK_ALLOC |
         TCR_RGN_INNER_WRITE_BACK_ALLOC;

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for translation table
  TranslationTable = AllocatePages (1);
  if (TranslationTable == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }
  //
  // We set TTBR0 just after allocating the table to retrieve its location from
  // the subsequent functions without needing to pass this value across the
  // functions. The MMU is only enabled after the translation tables are
  // populated.
  //
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof (UINT64);
  }

  //
  // Make sure we are not inadvertently hitting in the caches
  // when populating the page tables.
  //
  InvalidateDataCacheRange (TranslationTable,
    RootTableEntryCount * sizeof (UINT64));
  ZeroMem (TranslationTable, RootTableEntryCount * sizeof (UINT64));

  // Map each region described by the caller; the list is terminated by an
  // entry whose Length is 0.
  while (MemoryTable->Length != 0) {
    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (EFI_ERROR (Status)) {
      goto FreeTranslationTable;
    }
    MemoryTable++;
  }

  //
  // EFI_MEMORY_UC ==> MAIR_ATTR_DEVICE_MEMORY
  // EFI_MEMORY_WC ==> MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE
  // EFI_MEMORY_WT ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH
  // EFI_MEMORY_WB ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
  //
  ArmSetMAIR (
    MAIR_ATTR (TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)
    );

  ArmDisableAlignmentCheck ();
  ArmEnableStackAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return EFI_SUCCESS;

FreeTranslationTable:
  FreePages (TranslationTable, 1);
  return Status;
}
663
/**
  Library constructor.

  Writes the ArmReplaceLiveTranslationEntry () helper routine back to the
  point of coherency, so it remains visible to instruction fetches performed
  while the MMU (and thus the data cache) is disabled.

  @retval RETURN_SUCCESS  Always.
**/
RETURN_STATUS
EFIAPI
ArmMmuBaseLibConstructor (
  VOID
  )
{
  // Size symbol emitted alongside the assembly helper routine.
  extern UINT32 ArmReplaceLiveTranslationEntrySize;

  //
  // The ArmReplaceLiveTranslationEntry () helper function may be invoked
  // with the MMU off so we have to ensure that it gets cleaned to the PoC
  //
  WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry,
    ArmReplaceLiveTranslationEntrySize);

  return RETURN_SUCCESS;
}