1 /** @file
2 * File managing the MMU for ARMv8 architecture
3 *
4 * Copyright (c) 2011-2014, ARM Limited. All rights reserved.
5 * Copyright (c) 2016, Linaro Limited. All rights reserved.
6 * Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
7 *
8 * This program and the accompanying materials
9 * are licensed and made available under the terms and conditions of the BSD License
10 * which accompanies this distribution. The full text of the license may be found at
11 * http://opensource.org/licenses/bsd-license.php
12 *
13 * THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
15 *
16 **/
17
18 #include <Uefi.h>
19 #include <Chipset/AArch64.h>
20 #include <Library/BaseMemoryLib.h>
21 #include <Library/CacheMaintenanceLib.h>
22 #include <Library/MemoryAllocationLib.h>
23 #include <Library/ArmLib.h>
24 #include <Library/ArmMmuLib.h>
25 #include <Library/BaseLib.h>
26 #include <Library/DebugLib.h>
27
28 // We use this attribute index value to mark an invalid block entry
29 #define TT_ATTR_INDX_INVALID ((UINT32)~0)
30
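// Map an ARM_MEMORY_REGION_ATTRIBUTES value to the attribute bits (MAIR index,
// shareability and execute-never) to be set in an AArch64 translation table
// block or page descriptor.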
31 STATIC
32 UINT64
33 ArmMemoryAttributeToPageAttribute (
34 IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
35 )
36 {
37 switch (Attributes) {
38 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
39 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
40 return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
41
42 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
43 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
44 return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
45
46     // Uncached and device mappings are treated as outer shareable by default.
47 case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
48 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
49 return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
50
51 default:
52 ASSERT(0);
53 case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
54 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
55 if (ArmReadCurrentEL () == AARCH64_EL2)
56 return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
57 else
58 return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
59 }
60 }
61
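/**
  Convert the memory type and permission bits of a translation table descriptor
  into the equivalent EFI_MEMORY_* GCD attributes.
**/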
62 UINT64
63 PageAttributeToGcdAttribute (
64 IN UINT64 PageAttributes
65 )
66 {
67 UINT64 GcdAttributes;
68
69 switch (PageAttributes & TT_ATTR_INDX_MASK) {
70 case TT_ATTR_INDX_DEVICE_MEMORY:
71 GcdAttributes = EFI_MEMORY_UC;
72 break;
73 case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:
74 GcdAttributes = EFI_MEMORY_WC;
75 break;
76 case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:
77 GcdAttributes = EFI_MEMORY_WT;
78 break;
79 case TT_ATTR_INDX_MEMORY_WRITE_BACK:
80 GcdAttributes = EFI_MEMORY_WB;
81 break;
82 default:
83 DEBUG ((EFI_D_ERROR, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes));
84 ASSERT (0);
85 // The Global Coherency Domain (GCD) value is defined as a bit set.
86 // Returning 0 means no attribute has been set.
87 GcdAttributes = 0;
88 }
89
90 // Determine protection attributes
91 if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) || ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {
92 // Read only cases map to write-protect
93 GcdAttributes |= EFI_MEMORY_RO;
94 }
95
96 // Process eXecute Never attribute
97 if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0 ) {
98 GcdAttributes |= EFI_MEMORY_XP;
99 }
100
101 return GcdAttributes;
102 }
103
104 #define MIN_T0SZ 16
105 #define BITS_PER_LEVEL 9
106
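/**
  Return, for a given T0SZ, the initial lookup level of the translation table
  walk and the number of entries in the root table (4 KB granule assumed).
**/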
107 VOID
108 GetRootTranslationTableInfo (
109 IN UINTN T0SZ,
110 OUT UINTN *TableLevel,
111 OUT UINTN *TableEntryCount
112 )
113 {
114 // Get the level of the root table
115 if (TableLevel) {
116 *TableLevel = (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;
117 }
118
119 if (TableEntryCount) {
120 *TableEntryCount = 1UL << (BITS_PER_LEVEL - (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL);
121 }
122 }
123
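// Update a translation table entry in place. If the MMU is enabled the entry
// may be live, so the update is delegated to the ArmReplaceLiveTranslationEntry ()
// assembly helper; otherwise a plain store is sufficient.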
124 STATIC
125 VOID
126 ReplaceLiveEntry (
127 IN UINT64 *Entry,
128 IN UINT64 Value
129 )
130 {
131 if (!ArmMmuEnabled ()) {
132 *Entry = Value;
133 } else {
134 ArmReplaceLiveTranslationEntry (Entry, Value);
135 }
136 }
137
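// Derive the T0SZ value covering MaxAddress, and the number of entries of the
// resulting root translation table.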
138 STATIC
139 VOID
140 LookupAddresstoRootTable (
141 IN UINT64 MaxAddress,
142 OUT UINTN *T0SZ,
143 OUT UINTN *TableEntryCount
144 )
145 {
146 UINTN TopBit;
147
148 // Check the parameters are not NULL
149 ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));
150
151 // Look for the highest bit set in MaxAddress
152 for (TopBit = 63; TopBit != 0; TopBit--) {
153 if ((1ULL << TopBit) & MaxAddress) {
154 // MaxAddress top bit is found
155 TopBit = TopBit + 1;
156 break;
157 }
158 }
159 ASSERT (TopBit != 0);
160
161 // Calculate T0SZ from the top bit of the MaxAddress
162 *T0SZ = 64 - TopBit;
163
164 // Get the Table info from T0SZ
165 GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);
166 }
167
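// Walk the translation tables from RootTable and return a pointer to the entry
// that maps RegionStart using the largest block size compatible with the
// alignment of RegionStart (or of *BlockEntrySize when RegionStart is zero) and
// with the requested *BlockEntrySize. Intermediate tables are allocated and
// existing block entries are split as needed. On return, *TableLevel holds the
// level of the returned entry, *BlockEntrySize the block size at that level,
// and *LastBlockEntry the last entry of the containing table. Returns NULL on
// invalid parameters or if a page allocation fails.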
168 STATIC
169 UINT64*
170 GetBlockEntryListFromAddress (
171 IN UINT64 *RootTable,
172 IN UINT64 RegionStart,
173 OUT UINTN *TableLevel,
174 IN OUT UINT64 *BlockEntrySize,
175 OUT UINT64 **LastBlockEntry
176 )
177 {
178 UINTN RootTableLevel;
179 UINTN RootTableEntryCount;
180 UINT64 *TranslationTable;
181 UINT64 *BlockEntry;
182 UINT64 *SubTableBlockEntry;
183 UINT64 BlockEntryAddress;
184 UINTN BaseAddressAlignment;
185 UINTN PageLevel;
186 UINTN Index;
187 UINTN IndexLevel;
188 UINTN T0SZ;
189 UINT64 Attributes;
190 UINT64 TableAttributes;
191
192 // Initialize variable
193 BlockEntry = NULL;
194
195 // Ensure the parameters are valid
196 if (!(TableLevel && BlockEntrySize && LastBlockEntry)) {
197 ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
198 return NULL;
199 }
200
201 // Ensure the Region is aligned on 4KB boundary
202 if ((RegionStart & (SIZE_4KB - 1)) != 0) {
203 ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
204 return NULL;
205 }
206
207 // Ensure the required size is aligned on 4KB boundary and not 0
208 if ((*BlockEntrySize & (SIZE_4KB - 1)) != 0 || *BlockEntrySize == 0) {
209 ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
210 return NULL;
211 }
212
213 T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
214 // Get the Table info from T0SZ
215 GetRootTranslationTableInfo (T0SZ, &RootTableLevel, &RootTableEntryCount);
216
217 // If the start address is 0x0 then we use the size of the region to identify the alignment
218 if (RegionStart == 0) {
219 // Identify the highest possible alignment for the Region Size
220 BaseAddressAlignment = LowBitSet64 (*BlockEntrySize);
221 } else {
222 // Identify the highest possible alignment for the Base Address
223 BaseAddressAlignment = LowBitSet64 (RegionStart);
224 }
225
226 // Identify the Page Level the RegionStart must belong to. Note that PageLevel
227 // should be at least 1 since block translations are not supported at level 0
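  // With the 4 KB granule, a block at level N covers (12 + 9 * (3 - N)) address
  // bits, which the expression below inverts to derive the level from the alignment.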
228 PageLevel = MAX (3 - ((BaseAddressAlignment - 12) / 9), 1);
229
230   // If the required size is smaller than the current block size then we need to move to a deeper page level (smaller block size).
231   // The PageLevel was calculated from the Base Address alignment but did not take into account the alignment
232   // of the allocation size.
233 while (*BlockEntrySize < TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel)) {
234     // It does not fit, so move one level deeper (smaller block size)
235 PageLevel++;
236 }
237
238 //
239 // Get the Table Descriptor for the corresponding PageLevel. We need to decompose RegionStart to get appropriate entries
240 //
241
242 TranslationTable = RootTable;
243 for (IndexLevel = RootTableLevel; IndexLevel <= PageLevel; IndexLevel++) {
244 BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel, RegionStart);
245
246 if ((IndexLevel != 3) && ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY)) {
247 // Go to the next table
248 TranslationTable = (UINT64*)(*BlockEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE);
249
250       // If this was the target level, make the next level the new target
251 if (IndexLevel == PageLevel) {
252 // Enter the next level
253 PageLevel++;
254 }
255 } else if ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
256 // If we are not at the last level then we need to split this BlockEntry
257 if (IndexLevel != PageLevel) {
258 // Retrieve the attributes from the block entry
259 Attributes = *BlockEntry & TT_ATTRIBUTES_MASK;
260
261 // Convert the block entry attributes into Table descriptor attributes
262 TableAttributes = TT_TABLE_AP_NO_PERMISSION;
263 if (Attributes & TT_NS) {
264 TableAttributes = TT_TABLE_NS;
265 }
266
267         // Get the address corresponding to this entry
268 BlockEntryAddress = RegionStart;
269 BlockEntryAddress = BlockEntryAddress >> TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
270         // Shift back to the left so that the bits below this level's block offset are zeroed
271 BlockEntryAddress = BlockEntryAddress << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
272
273 // Set the correct entry type for the next page level
274 if ((IndexLevel + 1) == 3) {
275 Attributes |= TT_TYPE_BLOCK_ENTRY_LEVEL3;
276 } else {
277 Attributes |= TT_TYPE_BLOCK_ENTRY;
278 }
279
280 // Create a new translation table
281 TranslationTable = AllocatePages (1);
282 if (TranslationTable == NULL) {
283 return NULL;
284 }
285
286 // Populate the newly created lower level table
287 SubTableBlockEntry = TranslationTable;
288 for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
289 *SubTableBlockEntry = Attributes | (BlockEntryAddress + (Index << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel + 1)));
290 SubTableBlockEntry++;
291 }
292
293 // Fill the BlockEntry with the new TranslationTable
294 ReplaceLiveEntry (BlockEntry,
295 ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TableAttributes | TT_TYPE_TABLE_ENTRY);
296 }
297 } else {
298 if (IndexLevel != PageLevel) {
299 //
300         // Case where the entry is invalid and we are at a page level above the one targeted.
301 //
302
303 // Create a new translation table
304 TranslationTable = AllocatePages (1);
305 if (TranslationTable == NULL) {
306 return NULL;
307 }
308
309 ZeroMem (TranslationTable, TT_ENTRY_COUNT * sizeof(UINT64));
310
311 // Fill the new BlockEntry with the TranslationTable
312 *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TT_TYPE_TABLE_ENTRY;
313 }
314 }
315 }
316
317 // Expose the found PageLevel to the caller
318 *TableLevel = PageLevel;
319
320   // Now that we have the Table Level, we can get the Block Size associated with this table
321 *BlockEntrySize = TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel);
322
323   // The last block of the root table depends on the number of entries in this table,
324 // otherwise it is always the (TT_ENTRY_COUNT - 1)th entry in the table.
325 *LastBlockEntry = TT_LAST_BLOCK_ADDRESS(TranslationTable,
326 (PageLevel == RootTableLevel) ? RootTableEntryCount : TT_ENTRY_COUNT);
327
328 return BlockEntry;
329 }
330
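// Map or update the attributes of the region [RegionStart, RegionStart + RegionLength)
// in the translation tables rooted at RootTable. BlockEntryMask selects which bits of
// an existing entry are preserved before Attributes and the output address are ORed
// into the entry.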
331 STATIC
332 EFI_STATUS
333 UpdateRegionMapping (
334 IN UINT64 *RootTable,
335 IN UINT64 RegionStart,
336 IN UINT64 RegionLength,
337 IN UINT64 Attributes,
338 IN UINT64 BlockEntryMask
339 )
340 {
341 UINT32 Type;
342 UINT64 *BlockEntry;
343 UINT64 *LastBlockEntry;
344 UINT64 BlockEntrySize;
345 UINTN TableLevel;
346
347 // Ensure the Length is aligned on 4KB boundary
348 if ((RegionLength == 0) || ((RegionLength & (SIZE_4KB - 1)) != 0)) {
349 ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
350 return EFI_INVALID_PARAMETER;
351 }
352
353 do {
354 // Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor
355     // such as the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor
356 BlockEntrySize = RegionLength;
357 BlockEntry = GetBlockEntryListFromAddress (RootTable, RegionStart, &TableLevel, &BlockEntrySize, &LastBlockEntry);
358 if (BlockEntry == NULL) {
359       // GetBlockEntryListFromAddress() returns NULL when it fails to allocate new pages for the Translation Tables
360 return EFI_OUT_OF_RESOURCES;
361 }
362
363 if (TableLevel != 3) {
364 Type = TT_TYPE_BLOCK_ENTRY;
365 } else {
366 Type = TT_TYPE_BLOCK_ENTRY_LEVEL3;
367 }
368
369 do {
370 // Fill the Block Entry with attribute and output block address
371 *BlockEntry &= BlockEntryMask;
372 *BlockEntry |= (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;
373
374 // Go to the next BlockEntry
375 RegionStart += BlockEntrySize;
376 RegionLength -= BlockEntrySize;
377 BlockEntry++;
378
379       // Break the inner loop when the next entry is a table entry, and rerun
380       // GetBlockEntryListFromAddress to avoid leaking the existing lower-level page table
381 if (TableLevel != 3 &&
382 (*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
383 break;
384 }
385 } while ((RegionLength >= BlockEntrySize) && (BlockEntry <= LastBlockEntry));
386 } while (RegionLength != 0);
387
388 return EFI_SUCCESS;
389 }
390
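// Map a single ARM_MEMORY_REGION_DESCRIPTOR, converting its attributes and
// setting the Access Flag, without preserving any existing attribute bits.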
391 STATIC
392 EFI_STATUS
393 FillTranslationTable (
394 IN UINT64 *RootTable,
395 IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion
396 )
397 {
398 return UpdateRegionMapping (
399 RootTable,
400 MemoryRegion->VirtualBase,
401 MemoryRegion->Length,
402 ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
403 0
404 );
405 }
406
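// Convert EFI_MEMORY_* GCD attributes into translation table descriptor
// attributes (MAIR index, shareability, AP and XN bits), with the Access Flag set.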
407 STATIC
408 UINT64
409 GcdAttributeToPageAttribute (
410 IN UINT64 GcdAttributes
411 )
412 {
413 UINT64 PageAttributes;
414
415 switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) {
416 case EFI_MEMORY_UC:
417 PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
418 break;
419 case EFI_MEMORY_WC:
420 PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
421 break;
422 case EFI_MEMORY_WT:
423 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
424 break;
425 case EFI_MEMORY_WB:
426 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
427 break;
428 default:
429 PageAttributes = TT_ATTR_INDX_MASK;
430 break;
431 }
432
433 if ((GcdAttributes & EFI_MEMORY_XP) != 0 ||
434 (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC) {
435 if (ArmReadCurrentEL () == AARCH64_EL2) {
436 PageAttributes |= TT_XN_MASK;
437 } else {
438 PageAttributes |= TT_UXN_MASK | TT_PXN_MASK;
439 }
440 }
441
442 if ((GcdAttributes & EFI_MEMORY_RO) != 0) {
443 PageAttributes |= TT_AP_RO_RO;
444 }
445
446 return PageAttributes | TT_AF;
447 }
448
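/**
  Update the attributes of the region [BaseAddress, BaseAddress + Length) in the
  currently active TTBR0 translation tables. If Attributes carries no cacheability
  setting, only the permission bits are updated and the memory type of the existing
  mapping is preserved. The TLB is invalidated afterwards. Note that VirtualMask is
  not used by this implementation.
**/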
449 EFI_STATUS
450 SetMemoryAttributes (
451 IN EFI_PHYSICAL_ADDRESS BaseAddress,
452 IN UINT64 Length,
453 IN UINT64 Attributes,
454 IN EFI_PHYSICAL_ADDRESS VirtualMask
455 )
456 {
457 EFI_STATUS Status;
458 UINT64 *TranslationTable;
459 UINT64 PageAttributes;
460 UINT64 PageAttributeMask;
461
462 PageAttributes = GcdAttributeToPageAttribute (Attributes);
463 PageAttributeMask = 0;
464
465 if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {
466 //
467 // No memory type was set in Attributes, so we are going to update the
468 // permissions only.
469 //
470 PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;
471 PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |
472 TT_PXN_MASK | TT_XN_MASK);
473 }
474
475 TranslationTable = ArmGetTTBR0BaseAddress ();
476
477 Status = UpdateRegionMapping (
478 TranslationTable,
479 BaseAddress,
480 Length,
481 PageAttributes,
482 PageAttributeMask);
483 if (EFI_ERROR (Status)) {
484 return Status;
485 }
486
487 // Invalidate all TLB entries so changes are synced
488 ArmInvalidateTlb ();
489
490 return EFI_SUCCESS;
491 }
492
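// Apply raw translation table attributes to a region of the active TTBR0
// tables, preserving the entry bits selected by BlockEntryMask, and invalidate
// the TLB afterwards.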
493 STATIC
494 EFI_STATUS
495 SetMemoryRegionAttribute (
496 IN EFI_PHYSICAL_ADDRESS BaseAddress,
497 IN UINT64 Length,
498 IN UINT64 Attributes,
499 IN UINT64 BlockEntryMask
500 )
501 {
502 EFI_STATUS Status;
503 UINT64 *RootTable;
504
505 RootTable = ArmGetTTBR0BaseAddress ();
506
507 Status = UpdateRegionMapping (RootTable, BaseAddress, Length, Attributes, BlockEntryMask);
508 if (EFI_ERROR (Status)) {
509 return Status;
510 }
511
512 // Invalidate all TLB entries so changes are synced
513 ArmInvalidateTlb ();
514
515 return EFI_SUCCESS;
516 }
517
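/**
  Mark the region [BaseAddress, BaseAddress + Length) as non-executable by
  setting the execute-never bits appropriate for the current exception level.
**/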
518 EFI_STATUS
519 ArmSetMemoryRegionNoExec (
520 IN EFI_PHYSICAL_ADDRESS BaseAddress,
521 IN UINT64 Length
522 )
523 {
524 UINT64 Val;
525
526 if (ArmReadCurrentEL () == AARCH64_EL1) {
527 Val = TT_PXN_MASK | TT_UXN_MASK;
528 } else {
529 Val = TT_XN_MASK;
530 }
531
532 return SetMemoryRegionAttribute (
533 BaseAddress,
534 Length,
535 Val,
536 ~TT_ADDRESS_MASK_BLOCK_ENTRY);
537 }
538
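/**
  Clear the execute-never bits for the region [BaseAddress, BaseAddress + Length).
**/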
539 EFI_STATUS
540 ArmClearMemoryRegionNoExec (
541 IN EFI_PHYSICAL_ADDRESS BaseAddress,
542 IN UINT64 Length
543 )
544 {
545 UINT64 Mask;
546
547 // XN maps to UXN in the EL1&0 translation regime
548 Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);
549
550 return SetMemoryRegionAttribute (
551 BaseAddress,
552 Length,
553 0,
554 Mask);
555 }
556
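/**
  Mark the region [BaseAddress, BaseAddress + Length) as read-only.
**/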
557 EFI_STATUS
558 ArmSetMemoryRegionReadOnly (
559 IN EFI_PHYSICAL_ADDRESS BaseAddress,
560 IN UINT64 Length
561 )
562 {
563 return SetMemoryRegionAttribute (
564 BaseAddress,
565 Length,
566 TT_AP_RO_RO,
567 ~TT_ADDRESS_MASK_BLOCK_ENTRY);
568 }
569
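/**
  Restore read/write permissions for the region [BaseAddress, BaseAddress + Length).
**/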
570 EFI_STATUS
571 ArmClearMemoryRegionReadOnly (
572 IN EFI_PHYSICAL_ADDRESS BaseAddress,
573 IN UINT64 Length
574 )
575 {
576 return SetMemoryRegionAttribute (
577 BaseAddress,
578 Length,
579 TT_AP_RW_RW,
580 ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));
581 }
582
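/**
  Configure the AArch64 MMU: program TCR for the address space derived from
  PcdPrePiCpuMemorySize, allocate and populate the translation tables described
  by MemoryTable, program MAIR and TTBR0, and finally enable the caches and the
  MMU. The root table address and size are optionally returned through
  TranslationTableBase and TranslationTableSize.
**/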
583 EFI_STATUS
584 EFIAPI
585 ArmConfigureMmu (
586 IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable,
587 OUT VOID **TranslationTableBase OPTIONAL,
588 OUT UINTN *TranslationTableSize OPTIONAL
589 )
590 {
591 VOID* TranslationTable;
592 UINT32 TranslationTableAttribute;
593 UINT64 MaxAddress;
594 UINTN T0SZ;
595 UINTN RootTableEntryCount;
596 UINT64 TCR;
597 EFI_STATUS Status;
598
599   if (MemoryTable == NULL) {
600 ASSERT (MemoryTable != NULL);
601 return EFI_INVALID_PARAMETER;
602 }
603
604 // Cover the entire GCD memory space
605 MaxAddress = (1UL << PcdGet8 (PcdPrePiCpuMemorySize)) - 1;
606
607   // Look up the T0SZ value and the root table entry count needed to cover MaxAddress
608 LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);
609
610 //
611   // Set up TCR so that T0SZ can be retrieved by the subsequent functions
612 //
613 // Ideally we will be running at EL2, but should support EL1 as well.
614 // UEFI should not run at EL3.
615 if (ArmReadCurrentEL () == AARCH64_EL2) {
616     // Note: Bits 23 and 31 are reserved (RES1) bits in TCR_EL2
617 TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;
618
619 // Set the Physical Address Size using MaxAddress
620 if (MaxAddress < SIZE_4GB) {
621 TCR |= TCR_PS_4GB;
622 } else if (MaxAddress < SIZE_64GB) {
623 TCR |= TCR_PS_64GB;
624 } else if (MaxAddress < SIZE_1TB) {
625 TCR |= TCR_PS_1TB;
626 } else if (MaxAddress < SIZE_4TB) {
627 TCR |= TCR_PS_4TB;
628 } else if (MaxAddress < SIZE_16TB) {
629 TCR |= TCR_PS_16TB;
630 } else if (MaxAddress < SIZE_256TB) {
631 TCR |= TCR_PS_256TB;
632 } else {
633 DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
634       ASSERT (0); // Address spaces larger than 48 bits are not supported
635 return EFI_UNSUPPORTED;
636 }
637 } else if (ArmReadCurrentEL () == AARCH64_EL1) {
638 // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
639 TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;
640
641 // Set the Physical Address Size using MaxAddress
642 if (MaxAddress < SIZE_4GB) {
643 TCR |= TCR_IPS_4GB;
644 } else if (MaxAddress < SIZE_64GB) {
645 TCR |= TCR_IPS_64GB;
646 } else if (MaxAddress < SIZE_1TB) {
647 TCR |= TCR_IPS_1TB;
648 } else if (MaxAddress < SIZE_4TB) {
649 TCR |= TCR_IPS_4TB;
650 } else if (MaxAddress < SIZE_16TB) {
651 TCR |= TCR_IPS_16TB;
652 } else if (MaxAddress < SIZE_256TB) {
653 TCR |= TCR_IPS_256TB;
654 } else {
655 DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
656       ASSERT (0); // Address spaces larger than 48 bits are not supported
657 return EFI_UNSUPPORTED;
658 }
659 } else {
660 ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
661 return EFI_UNSUPPORTED;
662 }
663
664 //
665 // Translation table walks are always cache coherent on ARMv8-A, so cache
666 // maintenance on page tables is never needed. Since there is a risk of
667 // loss of coherency when using mismatched attributes, and given that memory
668 // is mapped cacheable except for extraordinary cases (such as non-coherent
669 // DMA), have the page table walker perform cached accesses as well, and
670   // assert below that this matches the attributes we use for CPU accesses to
671 // the region.
672 //
673 TCR |= TCR_SH_INNER_SHAREABLE |
674 TCR_RGN_OUTER_WRITE_BACK_ALLOC |
675 TCR_RGN_INNER_WRITE_BACK_ALLOC;
676
677 // Set TCR
678 ArmSetTCR (TCR);
679
680 // Allocate pages for translation table
681 TranslationTable = AllocatePages (1);
682 if (TranslationTable == NULL) {
683 return EFI_OUT_OF_RESOURCES;
684 }
685   // We set TTBR0 just after allocating the table so that its location can be retrieved by the
686   // subsequent functions without having to pass the value around. The MMU is only enabled
687 // after the translation tables are populated.
688 ArmSetTTBR0 (TranslationTable);
689
690 if (TranslationTableBase != NULL) {
691 *TranslationTableBase = TranslationTable;
692 }
693
694 if (TranslationTableSize != NULL) {
695 *TranslationTableSize = RootTableEntryCount * sizeof(UINT64);
696 }
697
698 ZeroMem (TranslationTable, RootTableEntryCount * sizeof(UINT64));
699
700 // Disable MMU and caches. ArmDisableMmu() also invalidates the TLBs
701 ArmDisableMmu ();
702 ArmDisableDataCache ();
703 ArmDisableInstructionCache ();
704
705 // Make sure nothing sneaked into the cache
706 ArmCleanInvalidateDataCache ();
707 ArmInvalidateInstructionCache ();
708
709 TranslationTableAttribute = TT_ATTR_INDX_INVALID;
710 while (MemoryTable->Length != 0) {
711
712 DEBUG_CODE_BEGIN ();
713 // Find the memory attribute for the Translation Table
714 if ((UINTN)TranslationTable >= MemoryTable->PhysicalBase &&
715 (UINTN)TranslationTable + EFI_PAGE_SIZE <= MemoryTable->PhysicalBase +
716 MemoryTable->Length) {
717 TranslationTableAttribute = MemoryTable->Attributes;
718 }
719 DEBUG_CODE_END ();
720
721 Status = FillTranslationTable (TranslationTable, MemoryTable);
722 if (EFI_ERROR (Status)) {
723 goto FREE_TRANSLATION_TABLE;
724 }
725 MemoryTable++;
726 }
727
728 ASSERT (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK ||
729 TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK);
730
731 ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) | // mapped to EFI_MEMORY_UC
732 MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) | // mapped to EFI_MEMORY_WC
733 MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) | // mapped to EFI_MEMORY_WT
734 MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)); // mapped to EFI_MEMORY_WB
735
736 ArmDisableAlignmentCheck ();
737 ArmEnableStackAlignmentCheck ();
738 ArmEnableInstructionCache ();
739 ArmEnableDataCache ();
740
741 ArmEnableMmu ();
742 return EFI_SUCCESS;
743
744 FREE_TRANSLATION_TABLE:
745 FreePages (TranslationTable, 1);
746 return Status;
747 }
748
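/**
  Library constructor: clean the code of the ArmReplaceLiveTranslationEntry ()
  helper to the Point of Coherency, since it may be executed with the MMU and
  caches off.
**/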
749 RETURN_STATUS
750 EFIAPI
751 ArmMmuBaseLibConstructor (
752 VOID
753 )
754 {
755 extern UINT32 ArmReplaceLiveTranslationEntrySize;
756
757 //
758 // The ArmReplaceLiveTranslationEntry () helper function may be invoked
759 // with the MMU off so we have to ensure that it gets cleaned to the PoC
760 //
761 WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry,
762 ArmReplaceLiveTranslationEntrySize);
763
764 return RETURN_SUCCESS;
765 }