]> git.proxmox.com Git - mirror_edk2.git/blob - ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
0680ba36d9079f47024b519101c6ce6acf2b2289
[mirror_edk2.git] / ArmPkg / Library / ArmMmuLib / AArch64 / ArmMmuLibCore.c
1 /** @file
2 * File managing the MMU for ARMv8 architecture
3 *
4 * Copyright (c) 2011-2020, ARM Limited. All rights reserved.
5 * Copyright (c) 2016, Linaro Limited. All rights reserved.
6 * Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
7 *
8 * SPDX-License-Identifier: BSD-2-Clause-Patent
9 *
10 **/
11
12 #include <Uefi.h>
13 #include <Chipset/AArch64.h>
14 #include <Library/BaseMemoryLib.h>
15 #include <Library/CacheMaintenanceLib.h>
16 #include <Library/MemoryAllocationLib.h>
17 #include <Library/ArmLib.h>
18 #include <Library/ArmMmuLib.h>
19 #include <Library/BaseLib.h>
20 #include <Library/DebugLib.h>
21
22 // We use this index definition to define an invalid block entry
23 #define TT_ATTR_INDX_INVALID ((UINT32)~0)
24
25 STATIC
26 UINT64
27 ArmMemoryAttributeToPageAttribute (
28 IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
29 )
30 {
31 switch (Attributes) {
32 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
33 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
34 return TT_ATTR_INDX_MEMORY_WRITE_BACK;
35
36 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
37 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
38 return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
39
40 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
41 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
42 return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
43
44 // Uncached and device mappings are treated as outer shareable by default,
45 case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
46 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
47 return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
48
49 default:
50 ASSERT (0);
51 case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
52 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
53 if (ArmReadCurrentEL () == AARCH64_EL2)
54 return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
55 else
56 return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
57 }
58 }
59
/**
  Convert the attribute bits of a translation table descriptor into the
  corresponding EFI_MEMORY_* GCD attribute bit mask.

  @param[in]  PageAttributes  The translation table descriptor attributes.

  @return The GCD attributes (0 if the MAIR index is not one we program).
**/
UINT64
PageAttributeToGcdAttribute (
  IN UINT64 PageAttributes
  )
{
  UINT64 GcdAttributes;

  // Translate the MAIR attribute index into the matching GCD cacheability.
  switch (PageAttributes & TT_ATTR_INDX_MASK) {
  case TT_ATTR_INDX_DEVICE_MEMORY:
    GcdAttributes = EFI_MEMORY_UC;
    break;
  case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:
    GcdAttributes = EFI_MEMORY_WC;
    break;
  case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:
    GcdAttributes = EFI_MEMORY_WT;
    break;
  case TT_ATTR_INDX_MEMORY_WRITE_BACK:
    GcdAttributes = EFI_MEMORY_WB;
    break;
  default:
    DEBUG ((DEBUG_ERROR,
      "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n",
      PageAttributes));
    ASSERT (0);
    // The Global Coherency Domain (GCD) value is defined as a bit set.
    // Returning 0 means no attribute has been set.
    GcdAttributes = 0;
  }

  // Determine protection attributes
  if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) ||
      ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {
    // Read only cases map to write-protect
    GcdAttributes |= EFI_MEMORY_RO;
  }

  // Process eXecute Never attribute: either UXN or PXN set maps to XP.
  if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0) {
    GcdAttributes |= EFI_MEMORY_XP;
  }

  return GcdAttributes;
}
104
105 #define MIN_T0SZ 16
106 #define BITS_PER_LEVEL 9
107
108 VOID
109 GetRootTranslationTableInfo (
110 IN UINTN T0SZ,
111 OUT UINTN *TableLevel,
112 OUT UINTN *TableEntryCount
113 )
114 {
115 // Get the level of the root table
116 if (TableLevel) {
117 *TableLevel = (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;
118 }
119
120 if (TableEntryCount) {
121 *TableEntryCount = 1UL << (BITS_PER_LEVEL - (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL);
122 }
123 }
124
/**
  Write a new value into a translation table entry.

  If the MMU is disabled, or the entry being replaced is not a live block
  mapping, the entry is written directly and TLB maintenance is performed
  via ArmUpdateTranslationTableEntry (). Otherwise, the break-before-make
  sequence implemented by ArmReplaceLiveTranslationEntry () is used, as
  the mapping being replaced may be in active use.

  @param[in]  Entry               Pointer to the table entry to update.
  @param[in]  Value               The new descriptor value.
  @param[in]  RegionStart         The virtual address mapped by the entry.
  @param[in]  IsLiveBlockMapping  TRUE if the entry is an existing block
                                  mapping that may be live.
**/
STATIC
VOID
ReplaceTableEntry (
  IN UINT64 *Entry,
  IN UINT64 Value,
  IN UINT64 RegionStart,
  IN BOOLEAN IsLiveBlockMapping
  )
{
  if (!ArmMmuEnabled () || !IsLiveBlockMapping) {
    *Entry = Value;
    ArmUpdateTranslationTableEntry (Entry, (VOID *)(UINTN)RegionStart);
  } else {
    ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart);
  }
}
141
/**
  Free the page table at TranslationTable, recursively freeing any
  lower-level tables it refers to first.

  @param[in]  TranslationTable  The table to free (a single EFI page).
  @param[in]  Level             The lookup level of TranslationTable (0..3).
**/
STATIC
VOID
FreePageTablesRecursive (
  IN UINT64 *TranslationTable,
  IN UINTN Level
  )
{
  UINTN Index;

  ASSERT (Level <= 3);

  if (Level < 3) {
    // Only levels 0..2 can contain table entries pointing to lower-level
    // tables; level 3 entries are always pages.
    for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
      if ((TranslationTable[Index] & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
        FreePageTablesRecursive ((VOID *)(UINTN)(TranslationTable[Index] &
                                 TT_ADDRESS_MASK_BLOCK_ENTRY),
                                 Level + 1);
      }
    }
  }
  FreePages (TranslationTable, 1);
}
164
165 STATIC
166 BOOLEAN
167 IsBlockEntry (
168 IN UINT64 Entry,
169 IN UINTN Level
170 )
171 {
172 if (Level == 3) {
173 return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY_LEVEL3;
174 }
175 return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY;
176 }
177
178 STATIC
179 BOOLEAN
180 IsTableEntry (
181 IN UINT64 Entry,
182 IN UINTN Level
183 )
184 {
185 if (Level == 3) {
186 //
187 // TT_TYPE_TABLE_ENTRY aliases TT_TYPE_BLOCK_ENTRY_LEVEL3
188 // so we need to take the level into account as well.
189 //
190 return FALSE;
191 }
192 return (Entry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY;
193 }
194
/**
  Recursively update the page table entries covering the region
  [RegionStart, RegionEnd), splitting existing block entries and
  allocating next-level tables as required.

  @param[in]  RegionStart         Start of the region (page aligned).
  @param[in]  RegionEnd           Exclusive end of the region (page aligned).
  @param[in]  AttributeSetMask    Attribute bits to OR into each entry.
  @param[in]  AttributeClearMask  Mask ANDed with the existing entry first,
                                  i.e. it selects the bits to PRESERVE;
                                  passing 0 replaces the entry entirely.
  @param[in]  PageTable           The page table at the current level.
  @param[in]  Level               The current lookup level (0..3).

  @retval EFI_SUCCESS           The mapping was updated.
  @retval EFI_OUT_OF_RESOURCES  A page table allocation failed.
**/
STATIC
EFI_STATUS
UpdateRegionMappingRecursive (
  IN UINT64 RegionStart,
  IN UINT64 RegionEnd,
  IN UINT64 AttributeSetMask,
  IN UINT64 AttributeClearMask,
  IN UINT64 *PageTable,
  IN UINTN Level
  )
{
  UINTN BlockShift;
  UINT64 BlockMask;
  UINT64 BlockEnd;
  UINT64 *Entry;
  UINT64 EntryValue;
  VOID *TranslationTable;
  EFI_STATUS Status;

  ASSERT (((RegionStart | RegionEnd) & EFI_PAGE_MASK) == 0);

  // BlockShift is the number of leading address bits translated by this
  // level and above; BlockMask covers the bits mapped by one entry here.
  BlockShift = (Level + 1) * BITS_PER_LEVEL + MIN_T0SZ;
  BlockMask = MAX_UINT64 >> BlockShift;

  DEBUG ((DEBUG_VERBOSE, "%a(%d): %llx - %llx set %lx clr %lx\n", __FUNCTION__,
    Level, RegionStart, RegionEnd, AttributeSetMask, AttributeClearMask));

  for (; RegionStart < RegionEnd; RegionStart = BlockEnd) {
    // Advance one block at a time, clamped to the end of the region.
    BlockEnd = MIN (RegionEnd, (RegionStart | BlockMask) + 1);
    Entry = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)];

    //
    // If RegionStart or BlockEnd is not aligned to the block size at this
    // level, we will have to create a table mapping in order to map less
    // than a block, and recurse to create the block or page entries at
    // the next level. No block mappings are allowed at all at level 0,
    // so in that case, we have to recurse unconditionally.
    //
    if (Level == 0 || ((RegionStart | BlockEnd) & BlockMask) != 0) {
      ASSERT (Level < 3);

      if (!IsTableEntry (*Entry, Level)) {
        //
        // No table entry exists yet, so we need to allocate a page table
        // for the next level.
        //
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return EFI_OUT_OF_RESOURCES;
        }

        if (!ArmMmuEnabled ()) {
          //
          // Make sure we are not inadvertently hitting in the caches
          // when populating the page tables.
          //
          InvalidateDataCacheRange (TranslationTable, EFI_PAGE_SIZE);
        }

        if (IsBlockEntry (*Entry, Level)) {
          //
          // We are splitting an existing block entry, so we have to populate
          // the new table with the attributes of the block entry it replaces.
          //
          Status = UpdateRegionMappingRecursive (RegionStart & ~BlockMask,
                     (RegionStart | BlockMask) + 1, *Entry & TT_ATTRIBUTES_MASK,
                     0, TranslationTable, Level + 1);
          if (EFI_ERROR (Status)) {
            //
            // The range we passed to UpdateRegionMappingRecursive () is block
            // aligned, so it is guaranteed that no further pages were allocated
            // by it, and so we only have to free the page we allocated here.
            //
            FreePages (TranslationTable, 1);
            return Status;
          }
        } else {
          // Fresh table for a previously unmapped block: start empty.
          ZeroMem (TranslationTable, EFI_PAGE_SIZE);
        }
      } else {
        // Descend into the existing next-level table.
        TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
      }

      //
      // Recurse to the next level
      //
      Status = UpdateRegionMappingRecursive (RegionStart, BlockEnd,
                 AttributeSetMask, AttributeClearMask, TranslationTable,
                 Level + 1);
      if (EFI_ERROR (Status)) {
        if (!IsTableEntry (*Entry, Level)) {
          //
          // We are creating a new table entry, so on failure, we can free all
          // allocations we made recursively, given that the whole subhierarchy
          // has not been wired into the live page tables yet. (This is not
          // possible for existing table entries, since we cannot revert the
          // modifications we made to the subhierarchy it represents.)
          //
          FreePageTablesRecursive (TranslationTable, Level + 1);
        }
        return Status;
      }

      if (!IsTableEntry (*Entry, Level)) {
        // Wire the new table in. *Entry still holds the old descriptor at
        // this point, so IsBlockEntry () tells us whether we are replacing
        // a (potentially live) block mapping.
        EntryValue = (UINTN)TranslationTable | TT_TYPE_TABLE_ENTRY;
        ReplaceTableEntry (Entry, EntryValue, RegionStart,
          IsBlockEntry (*Entry, Level));
      }
    } else {
      // The whole block is covered: write a block (or level-3 page) entry,
      // preserving only the existing bits selected by AttributeClearMask.
      EntryValue = (*Entry & AttributeClearMask) | AttributeSetMask;
      EntryValue |= RegionStart;
      EntryValue |= (Level == 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
                                 : TT_TYPE_BLOCK_ENTRY;

      ReplaceTableEntry (Entry, EntryValue, RegionStart, FALSE);
    }
  }
  return EFI_SUCCESS;
}
314
315 STATIC
316 VOID
317 LookupAddresstoRootTable (
318 IN UINT64 MaxAddress,
319 OUT UINTN *T0SZ,
320 OUT UINTN *TableEntryCount
321 )
322 {
323 UINTN TopBit;
324
325 // Check the parameters are not NULL
326 ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));
327
328 // Look for the highest bit set in MaxAddress
329 for (TopBit = 63; TopBit != 0; TopBit--) {
330 if ((1ULL << TopBit) & MaxAddress) {
331 // MaxAddress top bit is found
332 TopBit = TopBit + 1;
333 break;
334 }
335 }
336 ASSERT (TopBit != 0);
337
338 // Calculate T0SZ from the top bit of the MaxAddress
339 *T0SZ = 64 - TopBit;
340
341 // Get the Table info from T0SZ
342 GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);
343 }
344
345 STATIC
346 EFI_STATUS
347 UpdateRegionMapping (
348 IN UINT64 RegionStart,
349 IN UINT64 RegionLength,
350 IN UINT64 AttributeSetMask,
351 IN UINT64 AttributeClearMask
352 )
353 {
354 UINTN RootTableLevel;
355 UINTN T0SZ;
356
357 if (((RegionStart | RegionLength) & EFI_PAGE_MASK)) {
358 return EFI_INVALID_PARAMETER;
359 }
360
361 T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
362 GetRootTranslationTableInfo (T0SZ, &RootTableLevel, NULL);
363
364 return UpdateRegionMappingRecursive (RegionStart, RegionStart + RegionLength,
365 AttributeSetMask, AttributeClearMask, ArmGetTTBR0BaseAddress (),
366 RootTableLevel);
367 }
368
/**
  Map a single ARM_MEMORY_REGION_DESCRIPTOR into the page tables with the
  access flag set.

  Note that RootTable is not referenced by this function:
  UpdateRegionMapping () locates the root table itself via TTBR0, which
  the caller programs before invoking this function.

  @param[in]  RootTable     Unused; retained for the existing signature.
  @param[in]  MemoryRegion  The region to map with its attributes.

  @return The status returned by UpdateRegionMapping ().
**/
STATIC
EFI_STATUS
FillTranslationTable (
  IN UINT64 *RootTable,
  IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion
  )
{
  return UpdateRegionMapping (
           MemoryRegion->VirtualBase,
           MemoryRegion->Length,
           ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
           0
           );
}
383
/**
  Convert EFI_MEMORY_* GCD attributes into translation table descriptor
  attribute bits, with the access flag set.

  @param[in]  GcdAttributes  The GCD attributes to convert.

  @return The page table attribute bits.
**/
STATIC
UINT64
GcdAttributeToPageAttribute (
  IN UINT64 GcdAttributes
  )
{
  UINT64 PageAttributes;

  switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) {
  case EFI_MEMORY_UC:
    PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
    break;
  case EFI_MEMORY_WC:
    PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
    break;
  case EFI_MEMORY_WT:
    PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
    break;
  case EFI_MEMORY_WB:
    PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
    break;
  default:
    // No recognized cache type: all attribute index bits set acts as an
    // out-of-band marker value here.
    PageAttributes = TT_ATTR_INDX_MASK;
    break;
  }

  // Execute-never is applied both when explicitly requested (EFI_MEMORY_XP)
  // and for all device (EFI_MEMORY_UC) mappings.
  if ((GcdAttributes & EFI_MEMORY_XP) != 0 ||
      (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC) {
    // EL2 has a single XN bit; EL1&0 has separate UXN/PXN bits.
    if (ArmReadCurrentEL () == AARCH64_EL2) {
      PageAttributes |= TT_XN_MASK;
    } else {
      PageAttributes |= TT_UXN_MASK | TT_PXN_MASK;
    }
  }

  if ((GcdAttributes & EFI_MEMORY_RO) != 0) {
    PageAttributes |= TT_AP_RO_RO;
  }

  // Always set the access flag so no access faults are taken.
  return PageAttributes | TT_AF;
}
425
/**
  Apply EFI_MEMORY_* attributes to a region of memory.

  If Attributes carries a cache type, the existing page table attributes
  are replaced entirely (the preserve mask is 0). If it carries only
  permission bits, the memory type of the existing mappings is left
  untouched and only the permission bits are updated.

  @param[in]  BaseAddress  Page-aligned start of the region.
  @param[in]  Length       Page-aligned length of the region in bytes.
  @param[in]  Attributes   The EFI_MEMORY_* attributes to apply.

  @return The status returned by UpdateRegionMapping ().
**/
EFI_STATUS
ArmSetMemoryAttributes (
  IN EFI_PHYSICAL_ADDRESS BaseAddress,
  IN UINT64 Length,
  IN UINT64 Attributes
  )
{
  UINT64 PageAttributes;
  UINT64 PageAttributeMask;

  PageAttributes = GcdAttributeToPageAttribute (Attributes);
  // PageAttributeMask is ANDed with the existing entries; 0 means the
  // old attributes are discarded completely.
  PageAttributeMask = 0;

  if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {
    //
    // No memory type was set in Attributes, so we are going to update the
    // permissions only.
    //
    PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;
    PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |
                          TT_PXN_MASK | TT_XN_MASK);
  }

  return UpdateRegionMapping (BaseAddress, Length, PageAttributes,
           PageAttributeMask);
}
452
/**
  Thin wrapper around UpdateRegionMapping (): set Attributes on the
  region's mappings while preserving only the existing entry bits
  selected by BlockEntryMask.

  @param[in]  BaseAddress     Page-aligned start of the region.
  @param[in]  Length          Page-aligned length of the region in bytes.
  @param[in]  Attributes      Attribute bits to set.
  @param[in]  BlockEntryMask  Mask of existing entry bits to preserve.

  @return The status returned by UpdateRegionMapping ().
**/
STATIC
EFI_STATUS
SetMemoryRegionAttribute (
  IN EFI_PHYSICAL_ADDRESS BaseAddress,
  IN UINT64 Length,
  IN UINT64 Attributes,
  IN UINT64 BlockEntryMask
  )
{
  return UpdateRegionMapping (BaseAddress, Length, Attributes, BlockEntryMask);
}
464
465 EFI_STATUS
466 ArmSetMemoryRegionNoExec (
467 IN EFI_PHYSICAL_ADDRESS BaseAddress,
468 IN UINT64 Length
469 )
470 {
471 UINT64 Val;
472
473 if (ArmReadCurrentEL () == AARCH64_EL1) {
474 Val = TT_PXN_MASK | TT_UXN_MASK;
475 } else {
476 Val = TT_XN_MASK;
477 }
478
479 return SetMemoryRegionAttribute (
480 BaseAddress,
481 Length,
482 Val,
483 ~TT_ADDRESS_MASK_BLOCK_ENTRY);
484 }
485
486 EFI_STATUS
487 ArmClearMemoryRegionNoExec (
488 IN EFI_PHYSICAL_ADDRESS BaseAddress,
489 IN UINT64 Length
490 )
491 {
492 UINT64 Mask;
493
494 // XN maps to UXN in the EL1&0 translation regime
495 Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);
496
497 return SetMemoryRegionAttribute (
498 BaseAddress,
499 Length,
500 0,
501 Mask);
502 }
503
504 EFI_STATUS
505 ArmSetMemoryRegionReadOnly (
506 IN EFI_PHYSICAL_ADDRESS BaseAddress,
507 IN UINT64 Length
508 )
509 {
510 return SetMemoryRegionAttribute (
511 BaseAddress,
512 Length,
513 TT_AP_RO_RO,
514 ~TT_ADDRESS_MASK_BLOCK_ENTRY);
515 }
516
517 EFI_STATUS
518 ArmClearMemoryRegionReadOnly (
519 IN EFI_PHYSICAL_ADDRESS BaseAddress,
520 IN UINT64 Length
521 )
522 {
523 return SetMemoryRegionAttribute (
524 BaseAddress,
525 Length,
526 TT_AP_RW_RW,
527 ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));
528 }
529
/**
  Configure the MMU: size the address space, program TCR, build the
  translation tables for every region in MemoryTable, program MAIR and
  TTBR0, and finally enable caches and the MMU.

  @param[in]   MemoryTable           Array of memory region descriptors,
                                     terminated by an entry with Length 0.
  @param[out]  TranslationTableBase  If not NULL, receives the root table
                                     address.
  @param[out]  TranslationTableSize  If not NULL, receives the root table
                                     size in bytes.

  @retval EFI_SUCCESS            The MMU was configured and enabled.
  @retval EFI_INVALID_PARAMETER  MemoryTable is NULL.
  @retval EFI_OUT_OF_RESOURCES   A page table allocation failed.
  @retval EFI_UNSUPPORTED        The address space exceeds what this
                                 configuration supports, or the current
                                 exception level is not EL1 or EL2.
**/
EFI_STATUS
EFIAPI
ArmConfigureMmu (
  IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable,
  OUT VOID **TranslationTableBase OPTIONAL,
  OUT UINTN *TranslationTableSize OPTIONAL
  )
{
  VOID* TranslationTable;
  UINT64 MaxAddress;
  UINTN T0SZ;
  UINTN RootTableEntryCount;
  UINT64 TCR;
  EFI_STATUS Status;

  if (MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return EFI_INVALID_PARAMETER;
  }

  //
  // Limit the virtual address space to what we can actually use: UEFI
  // mandates a 1:1 mapping, so no point in making the virtual address
  // space larger than the physical address space. We also have to take
  // into account the architectural limitations that result from UEFI's
  // use of 4 KB pages.
  //
  MaxAddress = MIN (LShiftU64 (1ULL, ArmGetPhysicalAddressBits ()) - 1,
                 MAX_ALLOC_ADDRESS);

  // Lookup the Table Level to get the information
  LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);

  //
  // Set TCR that allows us to retrieve T0SZ in the subsequent functions
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    //Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return EFI_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return EFI_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return EFI_UNSUPPORTED;
  }

  //
  // Translation table walks are always cache coherent on ARMv8-A, so cache
  // maintenance on page tables is never needed. Since there is a risk of
  // loss of coherency when using mismatched attributes, and given that memory
  // is mapped cacheable except for extraordinary cases (such as non-coherent
  // DMA), have the page table walker perform cached accesses as well, and
  // assert below that that matches the attributes we use for CPU accesses to
  // the region.
  //
  TCR |= TCR_SH_INNER_SHAREABLE |
         TCR_RGN_OUTER_WRITE_BACK_ALLOC |
         TCR_RGN_INNER_WRITE_BACK_ALLOC;

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for translation table
  TranslationTable = AllocatePages (1);
  if (TranslationTable == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }
  //
  // We set TTBR0 just after allocating the table to retrieve its location from
  // the subsequent functions without needing to pass this value across the
  // functions. The MMU is only enabled after the translation tables are
  // populated.
  //
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof (UINT64);
  }

  //
  // Make sure we are not inadvertently hitting in the caches
  // when populating the page tables.
  //
  InvalidateDataCacheRange (TranslationTable,
    RootTableEntryCount * sizeof (UINT64));
  ZeroMem (TranslationTable, RootTableEntryCount * sizeof (UINT64));

  // Map every region in the table; bail out (and free) on the first failure.
  while (MemoryTable->Length != 0) {
    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (EFI_ERROR (Status)) {
      goto FreeTranslationTable;
    }
    MemoryTable++;
  }

  //
  // EFI_MEMORY_UC ==> MAIR_ATTR_DEVICE_MEMORY
  // EFI_MEMORY_WC ==> MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE
  // EFI_MEMORY_WT ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH
  // EFI_MEMORY_WB ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
  //
  ArmSetMAIR (
    MAIR_ATTR (TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)
    );

  ArmDisableAlignmentCheck ();
  ArmEnableStackAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return EFI_SUCCESS;

FreeTranslationTable:
  FreePages (TranslationTable, 1);
  return Status;
}
699
/**
  Library constructor.

  Cleans the code of ArmReplaceLiveTranslationEntry () to the point of
  coherency, since that helper may later run with the MMU (and therefore
  the data caches) disabled.

  @retval RETURN_SUCCESS  Always.
**/
RETURN_STATUS
EFIAPI
ArmMmuBaseLibConstructor (
  VOID
  )
{
  // Size symbol exported by the assembly implementation of the helper.
  extern UINT32 ArmReplaceLiveTranslationEntrySize;

  //
  // The ArmReplaceLiveTranslationEntry () helper function may be invoked
  // with the MMU off so we have to ensure that it gets cleaned to the PoC
  //
  WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry,
    ArmReplaceLiveTranslationEntrySize);

  return RETURN_SUCCESS;
}