/** @file
* File managing the MMU for ARMv8 architecture
*
* Copyright (c) 2011-2020, ARM Limited. All rights reserved.
* Copyright (c) 2016, Linaro Limited. All rights reserved.
* Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
*
* SPDX-License-Identifier: BSD-2-Clause-Patent
*
**/

#include <Uefi.h>
#include <Chipset/AArch64.h>
#include <Library/BaseMemoryLib.h>
#include <Library/CacheMaintenanceLib.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/ArmLib.h>
#include <Library/ArmMmuLib.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>

// We use this index definition to define an invalid block entry
#define TT_ATTR_INDX_INVALID    ((UINT32)~0)

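/**
  Convert an ARM_MEMORY_REGION_ATTRIBUTES value into the attribute index
  and shareability bits of a translation table descriptor.

  Note that the switch below relies on fall-through: an unrecognized value
  asserts and is then treated as a device mapping, which additionally gets
  the execute-never bits of the current translation regime.
**/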
STATIC
UINT64
ArmMemoryAttributeToPageAttribute (
  IN ARM_MEMORY_REGION_ATTRIBUTES  Attributes
  )
{
  switch (Attributes) {
  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
    return TT_ATTR_INDX_MEMORY_WRITE_BACK;

  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
    return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;

  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
    return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;

  // Uncached and device mappings are treated as outer shareable by default.
  case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
    return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;

  default:
    ASSERT (0);
  case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
    if (ArmReadCurrentEL () == AARCH64_EL2) {
      return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
    } else {
      return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
    }
  }
}

#define MIN_T0SZ        16
#define BITS_PER_LEVEL  9

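/**
  Derive the level and the entry count of the root translation table from
  T0SZ, assuming a 4 KB granule where each level resolves BITS_PER_LEVEL
  (9) bits of virtual address and MIN_T0SZ (16) corresponds to a 48-bit
  address space rooted at level 0.

  Worked example, following the formulas below: T0SZ == 24 describes a
  40-bit address space, so the root table sits at level (24 - 16) / 9 == 0
  and holds 1 << (9 - (24 - 16) % 9) == 2 entries.
**/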
VOID
GetRootTranslationTableInfo (
  IN  UINTN  T0SZ,
  OUT UINTN  *TableLevel,
  OUT UINTN  *TableEntryCount
  )
{
  // Get the level of the root table
  if (TableLevel) {
    *TableLevel = (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;
  }

  if (TableEntryCount) {
    *TableEntryCount = 1UL << (BITS_PER_LEVEL - (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL);
  }
}

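/**
  Write a new value into a translation table entry.

  If the MMU is off, or the entry being replaced is not a live block
  mapping, a plain store followed by TLB maintenance via
  ArmUpdateTranslationTableEntry () is sufficient. Otherwise, the update is
  delegated to the ArmReplaceLiveTranslationEntry () assembly helper, which
  is dedicated to replacing entries that may be in active use (see the
  library constructor at the end of this file).
**/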
STATIC
VOID
ReplaceTableEntry (
  IN UINT64   *Entry,
  IN UINT64   Value,
  IN UINT64   RegionStart,
  IN BOOLEAN  IsLiveBlockMapping
  )
{
  if (!ArmMmuEnabled () || !IsLiveBlockMapping) {
    *Entry = Value;
    ArmUpdateTranslationTableEntry (Entry, (VOID *)(UINTN)RegionStart);
  } else {
    ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart);
  }
}

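/**
  Recursively free a page table and, below level 3, every table it refers
  to. Level 3 entries can only describe pages, so the recursion stops
  there, and the page holding the table itself is freed last.
**/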
STATIC
VOID
FreePageTablesRecursive (
  IN UINT64  *TranslationTable,
  IN UINTN   Level
  )
{
  UINTN  Index;

  ASSERT (Level <= 3);

  if (Level < 3) {
    for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
      if ((TranslationTable[Index] & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
        FreePageTablesRecursive ((VOID *)(UINTN)(TranslationTable[Index] &
                                                 TT_ADDRESS_MASK_BLOCK_ENTRY),
                                 Level + 1);
      }
    }
  }
  FreePages (TranslationTable, 1);
}

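//
// IsBlockEntry () and IsTableEntry () decode the descriptor type field of
// an entry. TT_TYPE_TABLE_ENTRY shares its encoding with
// TT_TYPE_BLOCK_ENTRY_LEVEL3, so the level matters: at level 3, an entry
// with this encoding is always a page mapping, never a table.
//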
STATIC
BOOLEAN
IsBlockEntry (
  IN UINT64  Entry,
  IN UINTN   Level
  )
{
  if (Level == 3) {
    return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY_LEVEL3;
  }
  return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY;
}

STATIC
BOOLEAN
IsTableEntry (
  IN UINT64  Entry,
  IN UINTN   Level
  )
{
  if (Level == 3) {
    //
    // TT_TYPE_TABLE_ENTRY aliases TT_TYPE_BLOCK_ENTRY_LEVEL3
    // so we need to take the level into account as well.
    //
    return FALSE;
  }
  return (Entry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY;
}

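/**
  Map or remap the region [RegionStart, RegionEnd) by walking the given
  page table, descending one level per recursive call.

  AttributeSetMask carries the attribute bits to set; AttributeClearMask
  is applied to existing entries as a preserve mask (see the comment at
  its point of use below). At each level, BlockShift and BlockMask
  describe the naturally aligned block covered by a single entry: with a
  4 KB granule, level 1 gives BlockShift == 2 * 9 + 16 == 34 (1 GB
  blocks), level 2 gives 43 (2 MB blocks), and level 3 gives 52 (4 KB
  pages).
**/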
STATIC
EFI_STATUS
UpdateRegionMappingRecursive (
  IN UINT64  RegionStart,
  IN UINT64  RegionEnd,
  IN UINT64  AttributeSetMask,
  IN UINT64  AttributeClearMask,
  IN UINT64  *PageTable,
  IN UINTN   Level
  )
{
  UINTN       BlockShift;
  UINT64      BlockMask;
  UINT64      BlockEnd;
  UINT64      *Entry;
  UINT64      EntryValue;
  VOID        *TranslationTable;
  EFI_STATUS  Status;

  ASSERT (((RegionStart | RegionEnd) & EFI_PAGE_MASK) == 0);

  BlockShift = (Level + 1) * BITS_PER_LEVEL + MIN_T0SZ;
  BlockMask = MAX_UINT64 >> BlockShift;

  DEBUG ((DEBUG_VERBOSE, "%a(%d): %llx - %llx set %lx clr %lx\n", __FUNCTION__,
    Level, RegionStart, RegionEnd, AttributeSetMask, AttributeClearMask));

  for (; RegionStart < RegionEnd; RegionStart = BlockEnd) {
    BlockEnd = MIN (RegionEnd, (RegionStart | BlockMask) + 1);
    Entry = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)];

    //
    // If RegionStart or BlockEnd is not aligned to the block size at this
    // level, we will have to create a table mapping in order to map less
    // than a block, and recurse to create the block or page entries at
    // the next level. No block mappings are allowed at all at level 0,
    // so in that case, we have to recurse unconditionally.
    // If we are changing a table entry and the AttributeClearMask is non-zero,
    // we cannot replace it with a block entry without potentially losing
    // attribute information, so keep the table entry in that case.
    //
    if (Level == 0 || ((RegionStart | BlockEnd) & BlockMask) != 0 ||
        (IsTableEntry (*Entry, Level) && AttributeClearMask != 0)) {
      ASSERT (Level < 3);

      if (!IsTableEntry (*Entry, Level)) {
        //
        // No table entry exists yet, so we need to allocate a page table
        // for the next level.
        //
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return EFI_OUT_OF_RESOURCES;
        }

        if (!ArmMmuEnabled ()) {
          //
          // Make sure we are not inadvertently hitting in the caches
          // when populating the page tables.
          //
          InvalidateDataCacheRange (TranslationTable, EFI_PAGE_SIZE);
        }

        ZeroMem (TranslationTable, EFI_PAGE_SIZE);

        if (IsBlockEntry (*Entry, Level)) {
          //
          // We are splitting an existing block entry, so we have to populate
          // the new table with the attributes of the block entry it replaces.
          //
          Status = UpdateRegionMappingRecursive (RegionStart & ~BlockMask,
                     (RegionStart | BlockMask) + 1, *Entry & TT_ATTRIBUTES_MASK,
                     0, TranslationTable, Level + 1);
          if (EFI_ERROR (Status)) {
            //
            // The range we passed to UpdateRegionMappingRecursive () is block
            // aligned, so it is guaranteed that no further pages were allocated
            // by it, and so we only have to free the page we allocated here.
            //
            FreePages (TranslationTable, 1);
            return Status;
          }
        }
      } else {
        TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
      }

      //
      // Recurse to the next level
      //
      Status = UpdateRegionMappingRecursive (RegionStart, BlockEnd,
                 AttributeSetMask, AttributeClearMask, TranslationTable,
                 Level + 1);
      if (EFI_ERROR (Status)) {
        if (!IsTableEntry (*Entry, Level)) {
          //
          // We are creating a new table entry, so on failure, we can free all
          // allocations we made recursively, given that the whole subhierarchy
          // has not been wired into the live page tables yet. (This is not
          // possible for existing table entries, since we cannot revert the
          // modifications we made to the subhierarchy it represents.)
          //
          FreePageTablesRecursive (TranslationTable, Level + 1);
        }
        return Status;
      }

      if (!IsTableEntry (*Entry, Level)) {
        EntryValue = (UINTN)TranslationTable | TT_TYPE_TABLE_ENTRY;
        ReplaceTableEntry (Entry, EntryValue, RegionStart,
          IsBlockEntry (*Entry, Level));
      }
    } else {
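      //
      // AttributeClearMask is applied as a preserve mask: bits set in it
      // are carried over from the existing entry, and AttributeSetMask is
      // OR-ed in on top. Callers pass 0 to replace the attributes outright.
      //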
      EntryValue = (*Entry & AttributeClearMask) | AttributeSetMask;
      EntryValue |= RegionStart;
      EntryValue |= (Level == 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
                                 : TT_TYPE_BLOCK_ENTRY;

      if (IsTableEntry (*Entry, Level)) {
        //
        // We are replacing a table entry with a block entry. This is only
        // possible if we are keeping none of the original attributes.
        // We can free the table entry's page table, and all the ones below
        // it, since we are dropping the only possible reference to it.
        //
        ASSERT (AttributeClearMask == 0);
        TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
        ReplaceTableEntry (Entry, EntryValue, RegionStart, TRUE);
        FreePageTablesRecursive (TranslationTable, Level + 1);
      } else {
        ReplaceTableEntry (Entry, EntryValue, RegionStart, FALSE);
      }
    }
  }
  return EFI_SUCCESS;
}

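/**
  Derive T0SZ and the root table entry count from the highest address that
  needs to be mapped.

  For example, if MaxAddress has bit 39 as its highest set bit, TopBit
  becomes 40, so T0SZ == 64 - 40 == 24, for which
  GetRootTranslationTableInfo () reports a two-entry level 0 root table.
**/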
STATIC
VOID
LookupAddresstoRootTable (
  IN  UINT64  MaxAddress,
  OUT UINTN   *T0SZ,
  OUT UINTN   *TableEntryCount
  )
{
  UINTN  TopBit;

  // Check the parameters are not NULL
  ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));

  // Look for the highest bit set in MaxAddress
  for (TopBit = 63; TopBit != 0; TopBit--) {
    if ((1ULL << TopBit) & MaxAddress) {
      // MaxAddress top bit is found
      TopBit = TopBit + 1;
      break;
    }
  }
  ASSERT (TopBit != 0);

  // Calculate T0SZ from the top bit of the MaxAddress
  *T0SZ = 64 - TopBit;

  // Get the Table info from T0SZ
  GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);
}

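/**
  Update the mapping of a page-aligned region in the translation tables
  rooted at TTBR0. The root table level is recomputed from the T0SZ field
  of the TCR, so this must only be called once the TCR has been programmed.
**/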
STATIC
EFI_STATUS
UpdateRegionMapping (
  IN UINT64  RegionStart,
  IN UINT64  RegionLength,
  IN UINT64  AttributeSetMask,
  IN UINT64  AttributeClearMask
  )
{
  UINTN  RootTableLevel;
  UINTN  T0SZ;

  if (((RegionStart | RegionLength) & EFI_PAGE_MASK) != 0) {
    return EFI_INVALID_PARAMETER;
  }

  T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
  GetRootTranslationTableInfo (T0SZ, &RootTableLevel, NULL);

  return UpdateRegionMappingRecursive (RegionStart, RegionStart + RegionLength,
           AttributeSetMask, AttributeClearMask, ArmGetTTBR0BaseAddress (),
           RootTableLevel);
}

STATIC
EFI_STATUS
FillTranslationTable (
  IN UINT64                        *RootTable,
  IN ARM_MEMORY_REGION_DESCRIPTOR  *MemoryRegion
  )
{
  return UpdateRegionMapping (
           MemoryRegion->VirtualBase,
           MemoryRegion->Length,
           ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
           0
           );
}

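/**
  Convert GCD attributes (EFI_MEMORY_*) into translation table descriptor
  bits: a cacheability type maps to an attribute index (plus shareability
  for the cacheable types), EFI_MEMORY_XP and uncached mappings get the
  execute-never bits of the current regime, EFI_MEMORY_RO maps to a
  read-only access permission, and the access flag (TT_AF) is always set.
**/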
STATIC
UINT64
GcdAttributeToPageAttribute (
  IN UINT64  GcdAttributes
  )
{
  UINT64  PageAttributes;

  switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) {
  case EFI_MEMORY_UC:
    PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
    break;
  case EFI_MEMORY_WC:
    PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
    break;
  case EFI_MEMORY_WT:
    PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
    break;
  case EFI_MEMORY_WB:
    PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
    break;
  default:
    PageAttributes = TT_ATTR_INDX_MASK;
    break;
  }

  if ((GcdAttributes & EFI_MEMORY_XP) != 0 ||
      (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC) {
    if (ArmReadCurrentEL () == AARCH64_EL2) {
      PageAttributes |= TT_XN_MASK;
    } else {
      PageAttributes |= TT_UXN_MASK | TT_PXN_MASK;
    }
  }

  if ((GcdAttributes & EFI_MEMORY_RO) != 0) {
    PageAttributes |= TT_AP_RO_RO;
  }

  return PageAttributes | TT_AF;
}

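/**
  Apply GCD attributes to a memory region. If Attributes carries no cache
  type, only the permission bits are rewritten: the set mask is reduced to
  the access permission and execute-never bits, and a preserve mask is used
  so the existing memory type of each entry survives.
**/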
EFI_STATUS
ArmSetMemoryAttributes (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length,
  IN UINT64                Attributes
  )
{
  UINT64  PageAttributes;
  UINT64  PageAttributeMask;

  PageAttributes = GcdAttributeToPageAttribute (Attributes);
  PageAttributeMask = 0;

  if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {
    //
    // No memory type was set in Attributes, so we are going to update the
    // permissions only.
    //
    PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;
    PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |
                          TT_PXN_MASK | TT_XN_MASK);
  }

  return UpdateRegionMapping (BaseAddress, Length, PageAttributes,
           PageAttributeMask);
}

STATIC
EFI_STATUS
SetMemoryRegionAttribute (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length,
  IN UINT64                Attributes,
  IN UINT64                BlockEntryMask
  )
{
  return UpdateRegionMapping (BaseAddress, Length, Attributes, BlockEntryMask);
}

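//
// The helpers below set or clear the no-execute and read-only attributes
// of a region. Each preserve mask excludes TT_ADDRESS_MASK_BLOCK_ENTRY,
// since the output address is rewritten from RegionStart anyway, plus any
// attribute bits being cleared. Note that the EL2 regime has a single XN
// bit, while EL1&0 controls UXN and PXN separately.
//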
EFI_STATUS
ArmSetMemoryRegionNoExec (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  UINT64  Val;

  if (ArmReadCurrentEL () == AARCH64_EL1) {
    Val = TT_PXN_MASK | TT_UXN_MASK;
  } else {
    Val = TT_XN_MASK;
  }

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           Val,
           ~TT_ADDRESS_MASK_BLOCK_ENTRY);
}

EFI_STATUS
ArmClearMemoryRegionNoExec (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  UINT64  Mask;

  // XN maps to UXN in the EL1&0 translation regime
  Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           0,
           Mask);
}

EFI_STATUS
ArmSetMemoryRegionReadOnly (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           TT_AP_RO_RO,
           ~TT_ADDRESS_MASK_BLOCK_ENTRY);
}

EFI_STATUS
ArmClearMemoryRegionReadOnly (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           TT_AP_RW_RW,
           ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));
}

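/**
  Configure and enable the MMU: size the address space, program TCR and
  MAIR, allocate the root translation table, populate it from the
  MemoryTable region descriptors, and finally enable the caches and the
  MMU.

  @param[in]   MemoryTable           Array of memory region descriptors,
                                     terminated by an entry with Length 0.
  @param[out]  TranslationTableBase  If not NULL, receives the address of the
                                     root translation table.
  @param[out]  TranslationTableSize  If not NULL, receives the size of the
                                     root translation table, in bytes.

  @retval EFI_SUCCESS            The MMU was configured and enabled.
  @retval EFI_INVALID_PARAMETER  MemoryTable is NULL, or a region is not
                                 page aligned.
  @retval EFI_OUT_OF_RESOURCES   A page table allocation failed.
  @retval EFI_UNSUPPORTED        The current EL or the address space size
                                 is not supported.
**/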
EFI_STATUS
EFIAPI
ArmConfigureMmu (
  IN  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryTable,
  OUT VOID                          **TranslationTableBase OPTIONAL,
  OUT UINTN                         *TranslationTableSize OPTIONAL
  )
{
  VOID        *TranslationTable;
  UINT64      MaxAddress;
  UINTN       T0SZ;
  UINTN       RootTableEntryCount;
  UINT64      TCR;
  EFI_STATUS  Status;

  if (MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return EFI_INVALID_PARAMETER;
  }

  //
  // Limit the virtual address space to what we can actually use: UEFI
  // mandates a 1:1 mapping, so no point in making the virtual address
  // space larger than the physical address space. We also have to take
  // into account the architectural limitations that result from UEFI's
  // use of 4 KB pages.
  //
  MaxAddress = MIN (LShiftU64 (1ULL, ArmGetPhysicalAddressBits ()) - 1,
                    MAX_ALLOC_ADDRESS);

  // Look up T0SZ and the root table entry count for this address space
  LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);

  //
  // Set the TCR first, so that subsequent functions can retrieve T0SZ from it.
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    // Note: Bits 23 and 31 are reserved (RES1) in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress));
      ASSERT (0); // Address spaces wider than 48 bits are not supported
      return EFI_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227, we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress));
      ASSERT (0); // Address spaces wider than 48 bits are not supported
      return EFI_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return EFI_UNSUPPORTED;
  }

  //
  // Translation table walks are always cache coherent on ARMv8-A, so cache
  // maintenance on page tables is never needed. Since there is a risk of
  // loss of coherency when using mismatched attributes, and given that memory
  // is mapped cacheable except for extraordinary cases (such as non-coherent
  // DMA), have the page table walker perform cached accesses as well, which
  // matches the attributes we use for CPU accesses to the region.
  //
  TCR |= TCR_SH_INNER_SHAREABLE |
         TCR_RGN_OUTER_WRITE_BACK_ALLOC |
         TCR_RGN_INNER_WRITE_BACK_ALLOC;

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for translation table
  TranslationTable = AllocatePages (1);
  if (TranslationTable == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }
  //
  // We set TTBR0 just after allocating the table to retrieve its location from
  // the subsequent functions without needing to pass this value across the
  // functions. The MMU is only enabled after the translation tables are
  // populated.
  //
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof (UINT64);
  }

  //
  // Make sure we are not inadvertently hitting in the caches
  // when populating the page tables.
  //
  InvalidateDataCacheRange (TranslationTable,
    RootTableEntryCount * sizeof (UINT64));
  ZeroMem (TranslationTable, RootTableEntryCount * sizeof (UINT64));

  while (MemoryTable->Length != 0) {
    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (EFI_ERROR (Status)) {
      goto FreeTranslationTable;
    }
    MemoryTable++;
  }

  //
  // EFI_MEMORY_UC ==> MAIR_ATTR_DEVICE_MEMORY
  // EFI_MEMORY_WC ==> MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE
  // EFI_MEMORY_WT ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH
  // EFI_MEMORY_WB ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
  //
  ArmSetMAIR (
    MAIR_ATTR (TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)
    );

  ArmDisableAlignmentCheck ();
  ArmEnableStackAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return EFI_SUCCESS;

FreeTranslationTable:
  FreePages (TranslationTable, 1);
  return Status;
}

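/**
  Library constructor: clean the code of ArmReplaceLiveTranslationEntry ()
  to the point of coherency, so that it remains visible if it is later
  executed with the MMU, and therefore the cacheable mappings, disabled.
**/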
RETURN_STATUS
EFIAPI
ArmMmuBaseLibConstructor (
  VOID
  )
{
  extern UINT32 ArmReplaceLiveTranslationEntrySize;

  //
  // The ArmReplaceLiveTranslationEntry () helper function may be invoked
  // with the MMU off, so we have to ensure that it gets cleaned to the PoC.
  //
  WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry,
    ArmReplaceLiveTranslationEntrySize);

  return RETURN_SUCCESS;
}