/** @file
*  File managing the MMU for ARMv8 architecture
*
*  Copyright (c) 2011-2020, ARM Limited. All rights reserved.
*  Copyright (c) 2016, Linaro Limited. All rights reserved.
*  Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
*
*  SPDX-License-Identifier: BSD-2-Clause-Patent
*
**/

#include <Uefi.h>
#include <Chipset/AArch64.h>
#include <Library/BaseMemoryLib.h>
#include <Library/CacheMaintenanceLib.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/ArmLib.h>
#include <Library/ArmMmuLib.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>
#include <Library/HobLib.h>

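//
// ArmReplaceLiveTranslationEntry () implements the break-before-make sequence
// and may run with the MMU disabled. It is invoked through this function
// pointer, presumably so that an alternate (e.g., relocated) copy of the
// routine can be installed where required.
//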
STATIC
VOID (
  EFIAPI  *mReplaceLiveEntryFunc
  )(
  IN  UINT64   *Entry,
  IN  UINT64   Value,
  IN  UINT64   RegionStart,
  IN  BOOLEAN  DisableMmu
  ) = ArmReplaceLiveTranslationEntry;

STATIC
UINT64
ArmMemoryAttributeToPageAttribute (
  IN ARM_MEMORY_REGION_ATTRIBUTES  Attributes
  )
{
  switch (Attributes) {
    case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
      return TT_ATTR_INDX_MEMORY_WRITE_BACK;

    case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
      return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;

    case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
      return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;

    // Uncached and device mappings are treated as outer shareable by default.
    case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
      return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;

    default:
      ASSERT (0);
    case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
      if (ArmReadCurrentEL () == AARCH64_EL2) {
        return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
      } else {
        return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
      }
  }
}

#define MIN_T0SZ        16
#define BITS_PER_LEVEL  9
#define MAX_VA_BITS     48

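//
// With the 4 KB translation granule, each level of table resolves 9 bits of
// virtual address. T0SZ == 16 gives the maximum 48-bit VA space, with a
// 512-entry root table at level 0. Each increment of T0SZ halves the VA
// space and thus the number of entries in the root table; once a full 9 bits
// have been shed, the root moves down a level. E.g., T0SZ == 25 (a 39-bit VA
// space) uses a full 512-entry root table at level 1.
//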
STATIC
UINTN
GetRootTableEntryCount (
  IN UINTN  T0SZ
  )
{
  return TT_ENTRY_COUNT >> ((T0SZ - MIN_T0SZ) % BITS_PER_LEVEL);
}

STATIC
UINTN
GetRootTableLevel (
  IN UINTN  T0SZ
  )
{
  return (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;
}

STATIC
VOID
ReplaceTableEntry (
  IN UINT64   *Entry,
  IN UINT64   Value,
  IN UINT64   RegionStart,
  IN UINT64   BlockMask,
  IN BOOLEAN  IsLiveBlockMapping
  )
{
  BOOLEAN  DisableMmu;

  //
  // Replacing a live block entry with a table entry (or vice versa) requires a
  // break-before-make sequence as per the architecture. This means the mapping
  // must be made invalid and cleaned from the TLBs first, and this is a bit of
  // a hassle if the mapping in question covers the code that is actually doing
  // the mapping and the unmapping, and so we only bother with this if actually
  // necessary.
  //

  if (!IsLiveBlockMapping || !ArmMmuEnabled ()) {
    // If the mapping is not a live block mapping, or the MMU is not on yet, we
    // can simply overwrite the entry.
    *Entry = Value;
    ArmUpdateTranslationTableEntry (Entry, (VOID *)(UINTN)RegionStart);
  } else {
    // If the mapping in question does not cover the code that updates the
    // entry in memory, or the entry that we are intending to update, we can
    // use an ordinary break before make. Otherwise, we will need to
    // temporarily disable the MMU.
    DisableMmu = FALSE;
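    // Two addresses fall into the same naturally aligned block at this level
    // if and only if XORing them leaves no bits set above the block offset.
    // So the test below fires when the block being remapped covers either the
    // break-before-make routine itself or the page table entry being
    // rewritten.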
    if ((((RegionStart ^ (UINTN)ArmReplaceLiveTranslationEntry) & ~BlockMask) == 0) ||
        (((RegionStart ^ (UINTN)Entry) & ~BlockMask) == 0))
    {
      DisableMmu = TRUE;
      DEBUG ((DEBUG_WARN, "%a: splitting block entry with MMU disabled\n", __FUNCTION__));
    }

    mReplaceLiveEntryFunc (Entry, Value, RegionStart, DisableMmu);
  }
}

STATIC
VOID
FreePageTablesRecursive (
  IN UINT64  *TranslationTable,
  IN UINTN   Level
  )
{
  UINTN  Index;

  ASSERT (Level <= 3);

  if (Level < 3) {
    for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
      if ((TranslationTable[Index] & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
        FreePageTablesRecursive (
          (VOID *)(UINTN)(TranslationTable[Index] &
                          TT_ADDRESS_MASK_BLOCK_ENTRY),
          Level + 1
          );
      }
    }
  }

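  // All subordinate tables (if any) have been freed at this point, so the
  // page holding this table can be released as well.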
  FreePages (TranslationTable, 1);
}

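//
// In the AArch64 descriptor format, type bits 0b01 denote a block entry at
// levels 1 and 2, while 0b11 denote a table entry at levels 0-2 but a page
// entry at level 3. The two helpers below take the level into account to
// tell these apart.
//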
STATIC
BOOLEAN
IsBlockEntry (
  IN UINT64  Entry,
  IN UINTN   Level
  )
{
  if (Level == 3) {
    return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY_LEVEL3;
  }

  return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY;
}

STATIC
BOOLEAN
IsTableEntry (
  IN UINT64  Entry,
  IN UINTN   Level
  )
{
  if (Level == 3) {
    //
    // TT_TYPE_TABLE_ENTRY aliases TT_TYPE_BLOCK_ENTRY_LEVEL3
    // so we need to take the level into account as well.
    //
    return FALSE;
  }

  return (Entry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY;
}

STATIC
EFI_STATUS
UpdateRegionMappingRecursive (
  IN UINT64   RegionStart,
  IN UINT64   RegionEnd,
  IN UINT64   AttributeSetMask,
  IN UINT64   AttributeClearMask,
  IN UINT64   *PageTable,
  IN UINTN    Level,
  IN BOOLEAN  TableIsLive
  )
{
  UINTN       BlockShift;
  UINT64      BlockMask;
  UINT64      BlockEnd;
  UINT64      *Entry;
  UINT64      EntryValue;
  VOID        *TranslationTable;
  EFI_STATUS  Status;
  BOOLEAN     NextTableIsLive;

  ASSERT (((RegionStart | RegionEnd) & EFI_PAGE_MASK) == 0);

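  // The block size at this level is 2^(64 - BlockShift) bytes. With the 4 KB
  // granule, BlockShift evaluates to 34 at level 1 and 43 at level 2, i.e.,
  // 1 GB and 2 MB blocks respectively, and 52 at level 3 (4 KB pages). The
  // index of a VA's entry within this table is given by the 9 bits of the
  // address directly above the block offset, which is what the expression
  // assigning Entry below extracts.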
  BlockShift = (Level + 1) * BITS_PER_LEVEL + MIN_T0SZ;
  BlockMask  = MAX_UINT64 >> BlockShift;

  DEBUG ((
    DEBUG_VERBOSE,
    "%a(%d): %llx - %llx set %lx clr %lx\n",
    __FUNCTION__,
    Level,
    RegionStart,
    RegionEnd,
    AttributeSetMask,
    AttributeClearMask
    ));

  for ( ; RegionStart < RegionEnd; RegionStart = BlockEnd) {
    BlockEnd = MIN (RegionEnd, (RegionStart | BlockMask) + 1);
    Entry    = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)];

    //
    // If RegionStart or BlockEnd is not aligned to the block size at this
    // level, we will have to create a table mapping in order to map less
    // than a block, and recurse to create the block or page entries at
    // the next level. No block mappings are allowed at all at level 0,
    // so in that case, we have to recurse unconditionally.
    //
    // One special case to take into account is any region that covers the page
    // table itself: if we'd cover such a region with block mappings, we are
    // more likely to end up in the situation later where we need to disable
    // the MMU in order to update page table entries safely, so prefer page
    // mappings in that particular case.
    //
    if ((Level == 0) || (((RegionStart | BlockEnd) & BlockMask) != 0) ||
        ((Level < 3) && (((UINT64)PageTable & ~BlockMask) == RegionStart)) ||
        IsTableEntry (*Entry, Level))
    {
      ASSERT (Level < 3);

      if (!IsTableEntry (*Entry, Level)) {
        //
        // No table entry exists yet, so we need to allocate a page table
        // for the next level.
        //
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return EFI_OUT_OF_RESOURCES;
        }

        if (!ArmMmuEnabled ()) {
          //
          // Make sure we are not inadvertently hitting in the caches
          // when populating the page tables.
          //
          InvalidateDataCacheRange (TranslationTable, EFI_PAGE_SIZE);
        }

        ZeroMem (TranslationTable, EFI_PAGE_SIZE);

        if (IsBlockEntry (*Entry, Level)) {
          //
          // We are splitting an existing block entry, so we have to populate
          // the new table with the attributes of the block entry it replaces.
          //
          Status = UpdateRegionMappingRecursive (
                     RegionStart & ~BlockMask,
                     (RegionStart | BlockMask) + 1,
                     *Entry & TT_ATTRIBUTES_MASK,
                     0,
                     TranslationTable,
                     Level + 1,
                     FALSE
                     );
          if (EFI_ERROR (Status)) {
            //
            // The range we passed to UpdateRegionMappingRecursive () is block
            // aligned, so it is guaranteed that no further pages were allocated
            // by it, and so we only have to free the page we allocated here.
            //
            FreePages (TranslationTable, 1);
            return Status;
          }
        }

        NextTableIsLive = FALSE;
      } else {
        TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
        NextTableIsLive  = TableIsLive;
      }

      //
      // Recurse to the next level
      //
      Status = UpdateRegionMappingRecursive (
                 RegionStart,
                 BlockEnd,
                 AttributeSetMask,
                 AttributeClearMask,
                 TranslationTable,
                 Level + 1,
                 NextTableIsLive
                 );
      if (EFI_ERROR (Status)) {
        if (!IsTableEntry (*Entry, Level)) {
          //
          // We are creating a new table entry, so on failure, we can free all
          // allocations we made recursively, given that the whole subhierarchy
          // has not been wired into the live page tables yet. (This is not
          // possible for existing table entries, since we cannot revert the
          // modifications we made to the subhierarchy it represents.)
          //
          FreePageTablesRecursive (TranslationTable, Level + 1);
        }

        return Status;
      }

      if (!IsTableEntry (*Entry, Level)) {
        EntryValue = (UINTN)TranslationTable | TT_TYPE_TABLE_ENTRY;
        ReplaceTableEntry (
          Entry,
          EntryValue,
          RegionStart,
          BlockMask,
          TableIsLive && IsBlockEntry (*Entry, Level)
          );
      }
    } else {
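      // Despite its name, AttributeClearMask acts as a preserve mask: only
      // the bits set in it survive from the old entry. The address bits are
      // then reinstated from RegionStart and the new attributes ORed in.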
      EntryValue  = (*Entry & AttributeClearMask) | AttributeSetMask;
      EntryValue |= RegionStart;
      EntryValue |= (Level == 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
                                 : TT_TYPE_BLOCK_ENTRY;

      ReplaceTableEntry (Entry, EntryValue, RegionStart, BlockMask, FALSE);
    }
  }

  return EFI_SUCCESS;
}

STATIC
EFI_STATUS
UpdateRegionMapping (
  IN UINT64   RegionStart,
  IN UINT64   RegionLength,
  IN UINT64   AttributeSetMask,
  IN UINT64   AttributeClearMask,
  IN UINT64   *RootTable,
  IN BOOLEAN  TableIsLive
  )
{
  UINTN  T0SZ;

  if (((RegionStart | RegionLength) & EFI_PAGE_MASK) != 0) {
    return EFI_INVALID_PARAMETER;
  }

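  // Derive the root table geometry from the live TCR value, so that updates
  // agree with whatever configuration ArmConfigureMmu () programmed.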
  T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;

  return UpdateRegionMappingRecursive (
           RegionStart,
           RegionStart + RegionLength,
           AttributeSetMask,
           AttributeClearMask,
           RootTable,
           GetRootTableLevel (T0SZ),
           TableIsLive
           );
}

STATIC
EFI_STATUS
FillTranslationTable (
  IN UINT64                        *RootTable,
  IN ARM_MEMORY_REGION_DESCRIPTOR  *MemoryRegion
  )
{
  return UpdateRegionMapping (
           MemoryRegion->VirtualBase,
           MemoryRegion->Length,
           ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
           0,
           RootTable,
           FALSE
           );
}

STATIC
UINT64
GcdAttributeToPageAttribute (
  IN UINT64  GcdAttributes
  )
{
  UINT64  PageAttributes;

  switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) {
    case EFI_MEMORY_UC:
      PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
      break;
    case EFI_MEMORY_WC:
      PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
      break;
    case EFI_MEMORY_WT:
      PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
      break;
    case EFI_MEMORY_WB:
      PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
      break;
    default:
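      // No memory type was requested; flag this with an all-ones attribute
      // index, which ArmSetMemoryAttributes () masks off again when it
      // performs a permissions-only update.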
      PageAttributes = TT_ATTR_INDX_MASK;
      break;
  }

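  // Mark the region non-executable when requested (EFI_MEMORY_XP), and always
  // for device memory (EFI_MEMORY_UC), where speculative instruction fetches
  // could otherwise have side effects.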
  if (((GcdAttributes & EFI_MEMORY_XP) != 0) ||
      ((GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC))
  {
    if (ArmReadCurrentEL () == AARCH64_EL2) {
      PageAttributes |= TT_XN_MASK;
    } else {
      PageAttributes |= TT_UXN_MASK | TT_PXN_MASK;
    }
  }

  if ((GcdAttributes & EFI_MEMORY_RO) != 0) {
    PageAttributes |= TT_AP_NO_RO;
  }

  return PageAttributes | TT_AF;
}

EFI_STATUS
ArmSetMemoryAttributes (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length,
  IN UINT64                Attributes
  )
{
  UINT64  PageAttributes;
  UINT64  PageAttributeMask;

  PageAttributes    = GcdAttributeToPageAttribute (Attributes);
  PageAttributeMask = 0;

  if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {
    //
    // No memory type was set in Attributes, so we are going to update the
    // permissions only.
    //
    PageAttributes   &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;
    PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |
                          TT_PXN_MASK | TT_XN_MASK);
  }

  return UpdateRegionMapping (
           BaseAddress,
           Length,
           PageAttributes,
           PageAttributeMask,
           ArmGetTTBR0BaseAddress (),
           TRUE
           );
}

STATIC
EFI_STATUS
SetMemoryRegionAttribute (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length,
  IN UINT64                Attributes,
  IN UINT64                BlockEntryMask
  )
{
  return UpdateRegionMapping (
           BaseAddress,
           Length,
           Attributes,
           BlockEntryMask,
           ArmGetTTBR0BaseAddress (),
           TRUE
           );
}

EFI_STATUS
ArmSetMemoryRegionNoExec (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  UINT64  Val;

  if (ArmReadCurrentEL () == AARCH64_EL1) {
    Val = TT_PXN_MASK | TT_UXN_MASK;
  } else {
    Val = TT_XN_MASK;
  }

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           Val,
           ~TT_ADDRESS_MASK_BLOCK_ENTRY
           );
}

EFI_STATUS
ArmClearMemoryRegionNoExec (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  UINT64  Mask;

  // XN maps to UXN in the EL1&0 translation regime
  Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           0,
           Mask
           );
}

EFI_STATUS
ArmSetMemoryRegionReadOnly (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           TT_AP_NO_RO,
           ~TT_ADDRESS_MASK_BLOCK_ENTRY
           );
}

EFI_STATUS
ArmClearMemoryRegionReadOnly (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           TT_AP_NO_RW,
           ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK)
           );
}

EFI_STATUS
EFIAPI
ArmConfigureMmu (
  IN  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryTable,
  OUT VOID                          **TranslationTableBase OPTIONAL,
  OUT UINTN                         *TranslationTableSize OPTIONAL
  )
{
  VOID        *TranslationTable;
  UINTN       MaxAddressBits;
  UINT64      MaxAddress;
  UINTN       T0SZ;
  UINTN       RootTableEntryCount;
  UINT64      TCR;
  EFI_STATUS  Status;

  if (MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return EFI_INVALID_PARAMETER;
  }

  //
  // Limit the virtual address space to what we can actually use: UEFI
  // mandates a 1:1 mapping, so no point in making the virtual address
  // space larger than the physical address space. We also have to take
  // into account the architectural limitations that result from UEFI's
  // use of 4 KB pages.
  //
  MaxAddressBits = MIN (ArmGetPhysicalAddressBits (), MAX_VA_BITS);
  MaxAddress     = LShiftU64 (1ULL, MaxAddressBits) - 1;

  T0SZ                = 64 - MaxAddressBits;
  RootTableEntryCount = GetRootTableEntryCount (T0SZ);
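  // For example, on a system with 40 physical address bits, T0SZ is 24 and
  // the root is a 2-entry level 0 table: each level 0 entry covers 512 GB,
  // for a 1 TB (2^40 byte) address space.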

  //
  // Set a TCR value that lets the subsequent functions retrieve T0SZ.
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  //
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    // Note: Bits 23 and 31 are reserved (RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((
        DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress
        ));
      ASSERT (0); // Address spaces wider than 48 bits are not supported
      return EFI_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((
        DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress
        ));
      ASSERT (0); // Address spaces wider than 48 bits are not supported
      return EFI_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return EFI_UNSUPPORTED;
  }

  //
  // Translation table walks are always cache coherent on ARMv8-A, so cache
  // maintenance on page tables is never needed. Since there is a risk of
  // loss of coherency when using mismatched attributes, and given that memory
  // is mapped cacheable except for extraordinary cases (such as non-coherent
  // DMA), have the page table walker perform cached accesses as well, and
  // assert below that this matches the attributes we use for CPU accesses to
  // the region.
  //
  TCR |= TCR_SH_INNER_SHAREABLE |
         TCR_RGN_OUTER_WRITE_BACK_ALLOC |
         TCR_RGN_INNER_WRITE_BACK_ALLOC;

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for translation table
  TranslationTable = AllocatePages (1);
  if (TranslationTable == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof (UINT64);
  }

  if (!ArmMmuEnabled ()) {
    //
    // Make sure we are not inadvertently hitting in the caches
    // when populating the page tables.
    //
    InvalidateDataCacheRange (
      TranslationTable,
      RootTableEntryCount * sizeof (UINT64)
      );
  }

  ZeroMem (TranslationTable, RootTableEntryCount * sizeof (UINT64));

  while (MemoryTable->Length != 0) {
    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (EFI_ERROR (Status)) {
      goto FreeTranslationTable;
    }

    MemoryTable++;
  }

  //
  // EFI_MEMORY_UC ==> MAIR_ATTR_DEVICE_MEMORY
  // EFI_MEMORY_WC ==> MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE
  // EFI_MEMORY_WT ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH
  // EFI_MEMORY_WB ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
  //
  ArmSetMAIR (
    MAIR_ATTR (TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)
    );

  ArmSetTTBR0 (TranslationTable);

  if (!ArmMmuEnabled ()) {
    ArmDisableAlignmentCheck ();
    ArmEnableStackAlignmentCheck ();
    ArmEnableInstructionCache ();
    ArmEnableDataCache ();

    ArmEnableMmu ();
  }

  return EFI_SUCCESS;

FreeTranslationTable:
  FreePages (TranslationTable, 1);
  return Status;
}

RETURN_STATUS
EFIAPI
ArmMmuBaseLibConstructor (
  VOID
  )
{
  extern UINT32  ArmReplaceLiveTranslationEntrySize;

  //
  // The ArmReplaceLiveTranslationEntry () helper function may be invoked
  // with the MMU off so we have to ensure that it gets cleaned to the PoC
  //
  WriteBackDataCacheRange (
    (VOID *)(UINTN)ArmReplaceLiveTranslationEntry,
    ArmReplaceLiveTranslationEntrySize
    );

  return RETURN_SUCCESS;
}