]> git.proxmox.com Git - mirror_edk2.git/blob - ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
ArmPkg/ArmMmuLib AARCH64: limit recursion when freeing page tables
[mirror_edk2.git] / ArmPkg / Library / ArmMmuLib / AArch64 / ArmMmuLibCore.c
1 /** @file
2 * File managing the MMU for ARMv8 architecture
3 *
4 * Copyright (c) 2011-2020, ARM Limited. All rights reserved.
5 * Copyright (c) 2016, Linaro Limited. All rights reserved.
6 * Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
7 *
8 * SPDX-License-Identifier: BSD-2-Clause-Patent
9 *
10 **/
11
12 #include <Uefi.h>
13 #include <Chipset/AArch64.h>
14 #include <Library/BaseMemoryLib.h>
15 #include <Library/CacheMaintenanceLib.h>
16 #include <Library/MemoryAllocationLib.h>
17 #include <Library/ArmLib.h>
18 #include <Library/ArmMmuLib.h>
19 #include <Library/BaseLib.h>
20 #include <Library/DebugLib.h>
21
22 // We use this index definition to define an invalid block entry
23 #define TT_ATTR_INDX_INVALID ((UINT32)~0)
24
25 STATIC
26 UINT64
27 ArmMemoryAttributeToPageAttribute (
28 IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
29 )
30 {
31 switch (Attributes) {
32 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
33 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
34 return TT_ATTR_INDX_MEMORY_WRITE_BACK;
35
36 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
37 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
38 return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
39
40 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
41 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
42 return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
43
44 // Uncached and device mappings are treated as outer shareable by default,
45 case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
46 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
47 return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
48
49 default:
50 ASSERT (0);
51 case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
52 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
53 if (ArmReadCurrentEL () == AARCH64_EL2)
54 return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
55 else
56 return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
57 }
58 }
59
60 UINT64
61 PageAttributeToGcdAttribute (
62 IN UINT64 PageAttributes
63 )
64 {
65 UINT64 GcdAttributes;
66
67 switch (PageAttributes & TT_ATTR_INDX_MASK) {
68 case TT_ATTR_INDX_DEVICE_MEMORY:
69 GcdAttributes = EFI_MEMORY_UC;
70 break;
71 case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:
72 GcdAttributes = EFI_MEMORY_WC;
73 break;
74 case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:
75 GcdAttributes = EFI_MEMORY_WT;
76 break;
77 case TT_ATTR_INDX_MEMORY_WRITE_BACK:
78 GcdAttributes = EFI_MEMORY_WB;
79 break;
80 default:
81 DEBUG ((DEBUG_ERROR,
82 "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n",
83 PageAttributes));
84 ASSERT (0);
85 // The Global Coherency Domain (GCD) value is defined as a bit set.
86 // Returning 0 means no attribute has been set.
87 GcdAttributes = 0;
88 }
89
90 // Determine protection attributes
91 if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) ||
92 ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {
93 // Read only cases map to write-protect
94 GcdAttributes |= EFI_MEMORY_RO;
95 }
96
97 // Process eXecute Never attribute
98 if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0) {
99 GcdAttributes |= EFI_MEMORY_XP;
100 }
101
102 return GcdAttributes;
103 }
104
105 #define MIN_T0SZ 16
106 #define BITS_PER_LEVEL 9
107
108 VOID
109 GetRootTranslationTableInfo (
110 IN UINTN T0SZ,
111 OUT UINTN *TableLevel,
112 OUT UINTN *TableEntryCount
113 )
114 {
115 // Get the level of the root table
116 if (TableLevel) {
117 *TableLevel = (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;
118 }
119
120 if (TableEntryCount) {
121 *TableEntryCount = 1UL << (BITS_PER_LEVEL - (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL);
122 }
123 }
124
125 STATIC
126 VOID
127 ReplaceTableEntry (
128 IN UINT64 *Entry,
129 IN UINT64 Value,
130 IN UINT64 RegionStart,
131 IN BOOLEAN IsLiveBlockMapping
132 )
133 {
134 if (!ArmMmuEnabled () || !IsLiveBlockMapping) {
135 *Entry = Value;
136 ArmUpdateTranslationTableEntry (Entry, (VOID *)(UINTN)RegionStart);
137 } else {
138 ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart);
139 }
140 }
141
142 STATIC
143 VOID
144 FreePageTablesRecursive (
145 IN UINT64 *TranslationTable,
146 IN UINTN Level
147 )
148 {
149 UINTN Index;
150
151 ASSERT (Level <= 3);
152
153 if (Level < 3) {
154 for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
155 if ((TranslationTable[Index] & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
156 FreePageTablesRecursive ((VOID *)(UINTN)(TranslationTable[Index] &
157 TT_ADDRESS_MASK_BLOCK_ENTRY),
158 Level + 1);
159 }
160 }
161 }
162 FreePages (TranslationTable, 1);
163 }
164
//
// Map [RegionStart, RegionEnd) in the hierarchy rooted at PageTable (which
// resides at the given Level), ORing AttributeSetMask into each descriptor
// after ANDing it with AttributeClearMask — i.e. the "clear" mask selects
// the bits of an existing entry that are preserved. Block entries are split
// into tables, and missing tables are allocated, as needed.
//
// Returns EFI_OUT_OF_RESOURCES if a table page cannot be allocated,
// EFI_SUCCESS otherwise.
//
STATIC
EFI_STATUS
UpdateRegionMappingRecursive (
  IN UINT64 RegionStart,
  IN UINT64 RegionEnd,
  IN UINT64 AttributeSetMask,
  IN UINT64 AttributeClearMask,
  IN UINT64 *PageTable,
  IN UINTN Level
  )
{
  UINTN BlockShift;
  UINT64 BlockMask;
  UINT64 BlockEnd;
  UINT64 *Entry;
  UINT64 EntryValue;
  VOID *TranslationTable;
  EFI_STATUS Status;

  // Both region bounds must be page aligned
  ASSERT (((RegionStart | RegionEnd) & EFI_PAGE_MASK) == 0);

  // BlockShift is the number of leading address bits translated once this
  // level is resolved; BlockMask covers the offset within one entry's span.
  BlockShift = (Level + 1) * BITS_PER_LEVEL + MIN_T0SZ;
  BlockMask = MAX_UINT64 >> BlockShift;

  DEBUG ((DEBUG_VERBOSE, "%a(%d): %llx - %llx set %lx clr %lx\n", __FUNCTION__,
    Level, RegionStart, RegionEnd, AttributeSetMask, AttributeClearMask));

  // Walk the region one entry span at a time
  for (; RegionStart < RegionEnd; RegionStart = BlockEnd) {
    BlockEnd = MIN (RegionEnd, (RegionStart | BlockMask) + 1);
    Entry = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)];

    //
    // If RegionStart or BlockEnd is not aligned to the block size at this
    // level, we will have to create a table mapping in order to map less
    // than a block, and recurse to create the block or page entries at
    // the next level. No block mappings are allowed at all at level 0,
    // so in that case, we have to recurse unconditionally.
    //
    if (Level == 0 || ((RegionStart | BlockEnd) & BlockMask) != 0) {
      ASSERT (Level < 3);

      if ((*Entry & TT_TYPE_MASK) != TT_TYPE_TABLE_ENTRY) {
        //
        // No table entry exists yet, so we need to allocate a page table
        // for the next level.
        //
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return EFI_OUT_OF_RESOURCES;
        }

        if (!ArmMmuEnabled ()) {
          //
          // Make sure we are not inadvertently hitting in the caches
          // when populating the page tables.
          //
          InvalidateDataCacheRange (TranslationTable, EFI_PAGE_SIZE);
        }

        if ((*Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
          //
          // We are splitting an existing block entry, so we have to populate
          // the new table with the attributes of the block entry it replaces.
          //
          Status = UpdateRegionMappingRecursive (RegionStart & ~BlockMask,
                     (RegionStart | BlockMask) + 1, *Entry & TT_ATTRIBUTES_MASK,
                     0, TranslationTable, Level + 1);
          if (EFI_ERROR (Status)) {
            //
            // The range we passed to UpdateRegionMappingRecursive () is block
            // aligned, so it is guaranteed that no further pages were allocated
            // by it, and so we only have to free the page we allocated here.
            //
            FreePages (TranslationTable, 1);
            return Status;
          }
        } else {
          // Fresh table: start out with all entries invalid
          ZeroMem (TranslationTable, EFI_PAGE_SIZE);
        }
      } else {
        // A next-level table already exists; descend into it
        TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
      }

      //
      // Recurse to the next level
      //
      Status = UpdateRegionMappingRecursive (RegionStart, BlockEnd,
                 AttributeSetMask, AttributeClearMask, TranslationTable,
                 Level + 1);
      if (EFI_ERROR (Status)) {
        if ((*Entry & TT_TYPE_MASK) != TT_TYPE_TABLE_ENTRY) {
          //
          // We are creating a new table entry, so on failure, we can free all
          // allocations we made recursively, given that the whole subhierarchy
          // has not been wired into the live page tables yet. (This is not
          // possible for existing table entries, since we cannot revert the
          // modifications we made to the subhierarchy it represents.)
          //
          FreePageTablesRecursive (TranslationTable, Level + 1);
        }
        return Status;
      }

      // Hook the (new or split-off) table into the current level; a live
      // block entry being demoted needs break-before-make handling.
      if ((*Entry & TT_TYPE_MASK) != TT_TYPE_TABLE_ENTRY) {
        EntryValue = (UINTN)TranslationTable | TT_TYPE_TABLE_ENTRY;
        ReplaceTableEntry (Entry, EntryValue, RegionStart,
          (*Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY);
      }
    } else {
      // The span is block aligned at this level: write a block (or, at
      // level 3, page) entry directly, preserving the bits selected by
      // AttributeClearMask.
      EntryValue = (*Entry & AttributeClearMask) | AttributeSetMask;
      EntryValue |= RegionStart;
      EntryValue |= (Level == 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
                                 : TT_TYPE_BLOCK_ENTRY;

      ReplaceTableEntry (Entry, EntryValue, RegionStart, FALSE);
    }
  }
  return EFI_SUCCESS;
}
284
285 STATIC
286 VOID
287 LookupAddresstoRootTable (
288 IN UINT64 MaxAddress,
289 OUT UINTN *T0SZ,
290 OUT UINTN *TableEntryCount
291 )
292 {
293 UINTN TopBit;
294
295 // Check the parameters are not NULL
296 ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));
297
298 // Look for the highest bit set in MaxAddress
299 for (TopBit = 63; TopBit != 0; TopBit--) {
300 if ((1ULL << TopBit) & MaxAddress) {
301 // MaxAddress top bit is found
302 TopBit = TopBit + 1;
303 break;
304 }
305 }
306 ASSERT (TopBit != 0);
307
308 // Calculate T0SZ from the top bit of the MaxAddress
309 *T0SZ = 64 - TopBit;
310
311 // Get the Table info from T0SZ
312 GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);
313 }
314
315 STATIC
316 EFI_STATUS
317 UpdateRegionMapping (
318 IN UINT64 RegionStart,
319 IN UINT64 RegionLength,
320 IN UINT64 AttributeSetMask,
321 IN UINT64 AttributeClearMask
322 )
323 {
324 UINTN RootTableLevel;
325 UINTN T0SZ;
326
327 if (((RegionStart | RegionLength) & EFI_PAGE_MASK)) {
328 return EFI_INVALID_PARAMETER;
329 }
330
331 T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
332 GetRootTranslationTableInfo (T0SZ, &RootTableLevel, NULL);
333
334 return UpdateRegionMappingRecursive (RegionStart, RegionStart + RegionLength,
335 AttributeSetMask, AttributeClearMask, ArmGetTTBR0BaseAddress (),
336 RootTableLevel);
337 }
338
339 STATIC
340 EFI_STATUS
341 FillTranslationTable (
342 IN UINT64 *RootTable,
343 IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion
344 )
345 {
346 return UpdateRegionMapping (
347 MemoryRegion->VirtualBase,
348 MemoryRegion->Length,
349 ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
350 0
351 );
352 }
353
354 STATIC
355 UINT64
356 GcdAttributeToPageAttribute (
357 IN UINT64 GcdAttributes
358 )
359 {
360 UINT64 PageAttributes;
361
362 switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) {
363 case EFI_MEMORY_UC:
364 PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
365 break;
366 case EFI_MEMORY_WC:
367 PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
368 break;
369 case EFI_MEMORY_WT:
370 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
371 break;
372 case EFI_MEMORY_WB:
373 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
374 break;
375 default:
376 PageAttributes = TT_ATTR_INDX_MASK;
377 break;
378 }
379
380 if ((GcdAttributes & EFI_MEMORY_XP) != 0 ||
381 (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC) {
382 if (ArmReadCurrentEL () == AARCH64_EL2) {
383 PageAttributes |= TT_XN_MASK;
384 } else {
385 PageAttributes |= TT_UXN_MASK | TT_PXN_MASK;
386 }
387 }
388
389 if ((GcdAttributes & EFI_MEMORY_RO) != 0) {
390 PageAttributes |= TT_AP_RO_RO;
391 }
392
393 return PageAttributes | TT_AF;
394 }
395
396 EFI_STATUS
397 ArmSetMemoryAttributes (
398 IN EFI_PHYSICAL_ADDRESS BaseAddress,
399 IN UINT64 Length,
400 IN UINT64 Attributes
401 )
402 {
403 UINT64 PageAttributes;
404 UINT64 PageAttributeMask;
405
406 PageAttributes = GcdAttributeToPageAttribute (Attributes);
407 PageAttributeMask = 0;
408
409 if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {
410 //
411 // No memory type was set in Attributes, so we are going to update the
412 // permissions only.
413 //
414 PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;
415 PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |
416 TT_PXN_MASK | TT_XN_MASK);
417 }
418
419 return UpdateRegionMapping (BaseAddress, Length, PageAttributes,
420 PageAttributeMask);
421 }
422
//
// Thin passthrough to UpdateRegionMapping: Attributes are ORed into each
// descriptor after it is ANDed with BlockEntryMask (i.e. the mask selects
// the bits of the existing entry that are preserved).
//
STATIC
EFI_STATUS
SetMemoryRegionAttribute (
  IN EFI_PHYSICAL_ADDRESS BaseAddress,
  IN UINT64 Length,
  IN UINT64 Attributes,
  IN UINT64 BlockEntryMask
  )
{
  return UpdateRegionMapping (BaseAddress, Length, Attributes, BlockEntryMask);
}
434
435 EFI_STATUS
436 ArmSetMemoryRegionNoExec (
437 IN EFI_PHYSICAL_ADDRESS BaseAddress,
438 IN UINT64 Length
439 )
440 {
441 UINT64 Val;
442
443 if (ArmReadCurrentEL () == AARCH64_EL1) {
444 Val = TT_PXN_MASK | TT_UXN_MASK;
445 } else {
446 Val = TT_XN_MASK;
447 }
448
449 return SetMemoryRegionAttribute (
450 BaseAddress,
451 Length,
452 Val,
453 ~TT_ADDRESS_MASK_BLOCK_ENTRY);
454 }
455
456 EFI_STATUS
457 ArmClearMemoryRegionNoExec (
458 IN EFI_PHYSICAL_ADDRESS BaseAddress,
459 IN UINT64 Length
460 )
461 {
462 UINT64 Mask;
463
464 // XN maps to UXN in the EL1&0 translation regime
465 Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);
466
467 return SetMemoryRegionAttribute (
468 BaseAddress,
469 Length,
470 0,
471 Mask);
472 }
473
474 EFI_STATUS
475 ArmSetMemoryRegionReadOnly (
476 IN EFI_PHYSICAL_ADDRESS BaseAddress,
477 IN UINT64 Length
478 )
479 {
480 return SetMemoryRegionAttribute (
481 BaseAddress,
482 Length,
483 TT_AP_RO_RO,
484 ~TT_ADDRESS_MASK_BLOCK_ENTRY);
485 }
486
487 EFI_STATUS
488 ArmClearMemoryRegionReadOnly (
489 IN EFI_PHYSICAL_ADDRESS BaseAddress,
490 IN UINT64 Length
491 )
492 {
493 return SetMemoryRegionAttribute (
494 BaseAddress,
495 Length,
496 TT_AP_RW_RW,
497 ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));
498 }
499
//
// Configure and enable the MMU: program TCR/MAIR, allocate and populate the
// translation tables from MemoryTable (a zero-Length-terminated array of
// region descriptors), then turn on caches and the MMU.
//
// On success, *TranslationTableBase/*TranslationTableSize (if non-NULL)
// receive the root table's address and its size in bytes.
//
EFI_STATUS
EFIAPI
ArmConfigureMmu (
  IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable,
  OUT VOID **TranslationTableBase OPTIONAL,
  OUT UINTN *TranslationTableSize OPTIONAL
  )
{
  VOID* TranslationTable;
  UINT64 MaxAddress;
  UINTN T0SZ;
  UINTN RootTableEntryCount;
  UINT64 TCR;
  EFI_STATUS Status;

  if (MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return EFI_INVALID_PARAMETER;
  }

  //
  // Limit the virtual address space to what we can actually use: UEFI
  // mandates a 1:1 mapping, so no point in making the virtual address
  // space larger than the physical address space. We also have to take
  // into account the architectural limitations that result from UEFI's
  // use of 4 KB pages.
  //
  MaxAddress = MIN (LShiftU64 (1ULL, ArmGetPhysicalAddressBits ()) - 1,
                    MAX_ALLOC_ADDRESS);

  // Lookup the Table Level to get the information
  LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);

  //
  // Set TCR that allows us to retrieve T0SZ in the subsequent functions
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    //Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return EFI_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    // (TCR_EL1 uses the IPS field where TCR_EL2 uses PS)
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return EFI_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return EFI_UNSUPPORTED;
  }

  //
  // Translation table walks are always cache coherent on ARMv8-A, so cache
  // maintenance on page tables is never needed. Since there is a risk of
  // loss of coherency when using mismatched attributes, and given that memory
  // is mapped cacheable except for extraordinary cases (such as non-coherent
  // DMA), have the page table walker perform cached accesses as well, and
  // assert below that that matches the attributes we use for CPU accesses to
  // the region.
  //
  TCR |= TCR_SH_INNER_SHAREABLE |
         TCR_RGN_OUTER_WRITE_BACK_ALLOC |
         TCR_RGN_INNER_WRITE_BACK_ALLOC;

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for translation table
  // (one 4 KB page, even though the root may hold fewer than 512 entries)
  TranslationTable = AllocatePages (1);
  if (TranslationTable == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }
  //
  // We set TTBR0 just after allocating the table to retrieve its location from
  // the subsequent functions without needing to pass this value across the
  // functions. The MMU is only enabled after the translation tables are
  // populated.
  //
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof (UINT64);
  }

  //
  // Make sure we are not inadvertently hitting in the caches
  // when populating the page tables.
  //
  InvalidateDataCacheRange (TranslationTable,
    RootTableEntryCount * sizeof (UINT64));
  ZeroMem (TranslationTable, RootTableEntryCount * sizeof (UINT64));

  // Map every region in the table; the list is terminated by Length == 0
  while (MemoryTable->Length != 0) {
    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (EFI_ERROR (Status)) {
      goto FreeTranslationTable;
    }
    MemoryTable++;
  }

  //
  // EFI_MEMORY_UC ==> MAIR_ATTR_DEVICE_MEMORY
  // EFI_MEMORY_WC ==> MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE
  // EFI_MEMORY_WT ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH
  // EFI_MEMORY_WB ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
  //
  ArmSetMAIR (
    MAIR_ATTR (TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)
    );

  // Enable caches and finally the MMU itself
  ArmDisableAlignmentCheck ();
  ArmEnableStackAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return EFI_SUCCESS;

FreeTranslationTable:
  FreePages (TranslationTable, 1);
  return Status;
}
669
//
// Library constructor: write the ArmReplaceLiveTranslationEntry () helper
// back to the point of coherency so it is safe to execute with the MMU off.
//
RETURN_STATUS
EFIAPI
ArmMmuBaseLibConstructor (
  VOID
  )
{
  // Size symbol emitted alongside the helper in the assembly source
  extern UINT32 ArmReplaceLiveTranslationEntrySize;

  //
  // The ArmReplaceLiveTranslationEntry () helper function may be invoked
  // with the MMU off so we have to ensure that it gets cleaned to the PoC
  //
  WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry,
    ArmReplaceLiveTranslationEntrySize);

  return RETURN_SUCCESS;
}