ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
ArmPkg/ArmMmuLib: remove VirtualMask arg from ArmSetMemoryAttributes
1 /** @file
2 * File managing the MMU for the ARMv8 architecture
3 *
4 * Copyright (c) 2011-2014, ARM Limited. All rights reserved.
5 * Copyright (c) 2016, Linaro Limited. All rights reserved.
6 * Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
7 *
8 * This program and the accompanying materials
9 * are licensed and made available under the terms and conditions of the BSD License
10 * which accompanies this distribution. The full text of the license may be found at
11 * http://opensource.org/licenses/bsd-license.php
12 *
13 * THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
15 *
16 **/
17
18 #include <Uefi.h>
19 #include <Chipset/AArch64.h>
20 #include <Library/BaseMemoryLib.h>
21 #include <Library/CacheMaintenanceLib.h>
22 #include <Library/MemoryAllocationLib.h>
23 #include <Library/ArmLib.h>
24 #include <Library/ArmMmuLib.h>
25 #include <Library/BaseLib.h>
26 #include <Library/DebugLib.h>
27
28 // We use this index definition to define an invalid block entry
29 #define TT_ATTR_INDX_INVALID ((UINT32)~0)
30
31 STATIC
32 UINT64
33 ArmMemoryAttributeToPageAttribute (
34 IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
35 )
36 {
37 switch (Attributes) {
38 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
39 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
40 return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
41
42 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
43 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
44 return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
45
46 // Uncached and device mappings are treated as outer shareable by default.
47 case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
48 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
49 return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
50
51 default:
52 ASSERT(0);
53 case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
54 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
55 if (ArmReadCurrentEL () == AARCH64_EL2)
56 return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
57 else
58 return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
59 }
60 }
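
// Examples of the mapping above (illustration only, EL1 shown):
//   ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK maps to
//     TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE
//   ARM_MEMORY_REGION_ATTRIBUTE_DEVICE maps to
//     TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK
//     (device memory is always mapped execute-never)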
61
62 UINT64
63 PageAttributeToGcdAttribute (
64 IN UINT64 PageAttributes
65 )
66 {
67 UINT64 GcdAttributes;
68
69 switch (PageAttributes & TT_ATTR_INDX_MASK) {
70 case TT_ATTR_INDX_DEVICE_MEMORY:
71 GcdAttributes = EFI_MEMORY_UC;
72 break;
73 case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:
74 GcdAttributes = EFI_MEMORY_WC;
75 break;
76 case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:
77 GcdAttributes = EFI_MEMORY_WT;
78 break;
79 case TT_ATTR_INDX_MEMORY_WRITE_BACK:
80 GcdAttributes = EFI_MEMORY_WB;
81 break;
82 default:
83 DEBUG ((EFI_D_ERROR, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes));
84 ASSERT (0);
85 // The Global Coherency Domain (GCD) value is defined as a bit set.
86 // Returning 0 means no attribute has been set.
87 GcdAttributes = 0;
88 }
89
90 // Determine protection attributes
91 if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) || ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {
92 // Read only cases map to write-protect
93 GcdAttributes |= EFI_MEMORY_RO;
94 }
95
96 // Process eXecute Never attribute
97 if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0 ) {
98 GcdAttributes |= EFI_MEMORY_XP;
99 }
100
101 return GcdAttributes;
102 }
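
// Example of the reverse mapping above (illustration only): a descriptor with
// TT_ATTR_INDX_MEMORY_WRITE_BACK, a read-only AP field and UXN/PXN set is
// reported back to the GCD as EFI_MEMORY_WB | EFI_MEMORY_RO | EFI_MEMORY_XP.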
103
104 #define MIN_T0SZ 16
105 #define BITS_PER_LEVEL 9
106
107 VOID
108 GetRootTranslationTableInfo (
109 IN UINTN T0SZ,
110 OUT UINTN *TableLevel,
111 OUT UINTN *TableEntryCount
112 )
113 {
114 // Get the level of the root table
115 if (TableLevel) {
116 *TableLevel = (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;
117 }
118
119 if (TableEntryCount) {
120 *TableEntryCount = 1UL << (BITS_PER_LEVEL - (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL);
121 }
122 }
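
// Worked example for the function above (illustration only): for a 36-bit
// address space T0SZ = 64 - 36 = 28, so the root table level is
// (28 - 16) / 9 = 1 and the root table holds 1 << (9 - (28 - 16) % 9) =
// 1 << 6 = 64 entries. For a full 48-bit space (T0SZ = 16) the root table
// sits at level 0 and holds 512 entries.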
123
124 STATIC
125 VOID
126 ReplaceLiveEntry (
127 IN UINT64 *Entry,
128 IN UINT64 Value
129 )
130 {
131 if (!ArmMmuEnabled ()) {
132 *Entry = Value;
133 } else {
134 ArmReplaceLiveTranslationEntry (Entry, Value);
135 }
136 }
137
138 STATIC
139 VOID
140 LookupAddresstoRootTable (
141 IN UINT64 MaxAddress,
142 OUT UINTN *T0SZ,
143 OUT UINTN *TableEntryCount
144 )
145 {
146 UINTN TopBit;
147
148 // Check the parameters are not NULL
149 ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));
150
151 // Look for the highest bit set in MaxAddress
152 for (TopBit = 63; TopBit != 0; TopBit--) {
153 if ((1ULL << TopBit) & MaxAddress) {
154 // MaxAddress top bit is found
155 TopBit = TopBit + 1;
156 break;
157 }
158 }
159 ASSERT (TopBit != 0);
160
161 // Calculate T0SZ from the top bit of the MaxAddress
162 *T0SZ = 64 - TopBit;
163
164 // Get the Table info from T0SZ
165 GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);
166 }
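
// Worked example for the function above (illustration only): with
// MaxAddress = 0xFFFFFFFFF (a 64 GB physical space), the highest set bit is
// bit 35, so TopBit becomes 36 and *T0SZ = 64 - 36 = 28;
// GetRootTranslationTableInfo () then reports a 64-entry root table.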
167
168 STATIC
169 UINT64*
170 GetBlockEntryListFromAddress (
171 IN UINT64 *RootTable,
172 IN UINT64 RegionStart,
173 OUT UINTN *TableLevel,
174 IN OUT UINT64 *BlockEntrySize,
175 OUT UINT64 **LastBlockEntry
176 )
177 {
178 UINTN RootTableLevel;
179 UINTN RootTableEntryCount;
180 UINT64 *TranslationTable;
181 UINT64 *BlockEntry;
182 UINT64 *SubTableBlockEntry;
183 UINT64 BlockEntryAddress;
184 UINTN BaseAddressAlignment;
185 UINTN PageLevel;
186 UINTN Index;
187 UINTN IndexLevel;
188 UINTN T0SZ;
189 UINT64 Attributes;
190 UINT64 TableAttributes;
191
192 // Initialize variable
193 BlockEntry = NULL;
194
195 // Ensure the parameters are valid
196 if (!(TableLevel && BlockEntrySize && LastBlockEntry)) {
197 ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
198 return NULL;
199 }
200
201 // Ensure the Region is aligned on 4KB boundary
202 if ((RegionStart & (SIZE_4KB - 1)) != 0) {
203 ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
204 return NULL;
205 }
206
207 // Ensure the required size is aligned on 4KB boundary and not 0
208 if ((*BlockEntrySize & (SIZE_4KB - 1)) != 0 || *BlockEntrySize == 0) {
209 ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
210 return NULL;
211 }
212
213 T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
214 // Get the Table info from T0SZ
215 GetRootTranslationTableInfo (T0SZ, &RootTableLevel, &RootTableEntryCount);
216
217 // If the start address is 0x0 then we use the size of the region to identify the alignment
218 if (RegionStart == 0) {
219 // Identify the highest possible alignment for the Region Size
220 BaseAddressAlignment = LowBitSet64 (*BlockEntrySize);
221 } else {
222 // Identify the highest possible alignment for the Base Address
223 BaseAddressAlignment = LowBitSet64 (RegionStart);
224 }
225
226 // Identify the Page Level the RegionStart must belong to. Note that PageLevel
227 // should be at least 1 since block translations are not supported at level 0
228 PageLevel = MAX (3 - ((BaseAddressAlignment - 12) / 9), 1);
229
230 // If the required size is smaller than the current block size then we need to move to a deeper level.
231 // The PageLevel was calculated from the Base Address alignment but did not take into account the
232 // alignment of the allocation size
233 while (*BlockEntrySize < TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel)) {
234 // It does not fit, so move to the next level, which uses a smaller block size
235 PageLevel++;
236 }
237
238 //
239 // Get the Table Descriptor for the corresponding PageLevel. We need to decompose RegionStart to get appropriate entries
240 //
241
242 TranslationTable = RootTable;
243 for (IndexLevel = RootTableLevel; IndexLevel <= PageLevel; IndexLevel++) {
244 BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel, RegionStart);
245
246 if ((IndexLevel != 3) && ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY)) {
247 // Go to the next table
248 TranslationTable = (UINT64*)(*BlockEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE);
249
250 // If we have reached the target level then move the target one level deeper
251 if (IndexLevel == PageLevel) {
252 // Enter the next level
253 PageLevel++;
254 }
255 } else if ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
256 // If we are not at the last level then we need to split this BlockEntry
257 if (IndexLevel != PageLevel) {
258 // Retrieve the attributes from the block entry
259 Attributes = *BlockEntry & TT_ATTRIBUTES_MASK;
260
261 // Convert the block entry attributes into Table descriptor attributes
262 TableAttributes = TT_TABLE_AP_NO_PERMISSION;
263 if (Attributes & TT_NS) {
264 TableAttributes = TT_TABLE_NS;
265 }
266
267 // Get the address corresponding to this entry
268 BlockEntryAddress = RegionStart;
269 BlockEntryAddress = BlockEntryAddress >> TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
270 // Shift back to the left to zero the bits below the block offset
271 BlockEntryAddress = BlockEntryAddress << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
272
273 // Set the correct entry type for the next page level
274 if ((IndexLevel + 1) == 3) {
275 Attributes |= TT_TYPE_BLOCK_ENTRY_LEVEL3;
276 } else {
277 Attributes |= TT_TYPE_BLOCK_ENTRY;
278 }
279
280 // Create a new translation table
281 TranslationTable = AllocatePages (1);
282 if (TranslationTable == NULL) {
283 return NULL;
284 }
285
286 // Populate the newly created lower level table
287 SubTableBlockEntry = TranslationTable;
288 for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
289 *SubTableBlockEntry = Attributes | (BlockEntryAddress + (Index << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel + 1)));
290 SubTableBlockEntry++;
291 }
292
293 // Fill the BlockEntry with the new TranslationTable
294 ReplaceLiveEntry (BlockEntry,
295 ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TableAttributes | TT_TYPE_TABLE_ENTRY);
296 }
297 } else {
298 if (IndexLevel != PageLevel) {
299 //
300 // Case where we have an invalid entry and we are at a level above the one targeted.
301 //
302
303 // Create a new translation table
304 TranslationTable = AllocatePages (1);
305 if (TranslationTable == NULL) {
306 return NULL;
307 }
308
309 ZeroMem (TranslationTable, TT_ENTRY_COUNT * sizeof(UINT64));
310
311 // Fill the new BlockEntry with the TranslationTable
312 *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TT_TYPE_TABLE_ENTRY;
313 }
314 }
315 }
316
317 // Expose the found PageLevel to the caller
318 *TableLevel = PageLevel;
319
320 // Now that we have the Table Level, we can get the Block Size associated with this table
321 *BlockEntrySize = TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel);
322
323 // The last block entry of the root table depends on the number of entries in that table;
324 // for any other table it is always the (TT_ENTRY_COUNT - 1)th entry.
325 *LastBlockEntry = TT_LAST_BLOCK_ADDRESS(TranslationTable,
326 (PageLevel == RootTableLevel) ? RootTableEntryCount : TT_ENTRY_COUNT);
327
328 return BlockEntry;
329 }
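
// Worked example for the function above (illustration only): with
// RegionStart = 0x40200000 the lowest set bit is bit 21, so
// BaseAddressAlignment = 21 and PageLevel = MAX (3 - ((21 - 12) / 9), 1) = 2,
// i.e. the region can be described with 2 MB level 2 block entries. If the
// remaining *BlockEntrySize is smaller than 2 MB, the size check pushes
// PageLevel to 3 and 4 KB level 3 page entries are used instead.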
330
331 STATIC
332 EFI_STATUS
333 UpdateRegionMapping (
334 IN UINT64 *RootTable,
335 IN UINT64 RegionStart,
336 IN UINT64 RegionLength,
337 IN UINT64 Attributes,
338 IN UINT64 BlockEntryMask
339 )
340 {
341 UINT32 Type;
342 UINT64 *BlockEntry;
343 UINT64 *LastBlockEntry;
344 UINT64 BlockEntrySize;
345 UINTN TableLevel;
346
347 // Ensure the Length is aligned on 4KB boundary
348 if ((RegionLength == 0) || ((RegionLength & (SIZE_4KB - 1)) != 0)) {
349 ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
350 return EFI_INVALID_PARAMETER;
351 }
352
353 do {
354 // Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor
355 // such as the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor
356 BlockEntrySize = RegionLength;
357 BlockEntry = GetBlockEntryListFromAddress (RootTable, RegionStart, &TableLevel, &BlockEntrySize, &LastBlockEntry);
358 if (BlockEntry == NULL) {
359 // GetBlockEntryListFromAddress() returns NULL when it fails to allocate new pages for the Translation Tables
360 return EFI_OUT_OF_RESOURCES;
361 }
362
363 if (TableLevel != 3) {
364 Type = TT_TYPE_BLOCK_ENTRY;
365 } else {
366 Type = TT_TYPE_BLOCK_ENTRY_LEVEL3;
367 }
368
369 do {
370 // Fill the Block Entry with attribute and output block address
371 *BlockEntry &= BlockEntryMask;
372 *BlockEntry |= (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;
373
374 // Go to the next BlockEntry
375 RegionStart += BlockEntrySize;
376 RegionLength -= BlockEntrySize;
377 BlockEntry++;
378
379 // Break the inner loop when the next block is a table entry and
380 // rerun GetBlockEntryListFromAddress() to avoid a page table memory leak
381 if (TableLevel != 3 &&
382 (*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
383 break;
384 }
385 } while ((RegionLength >= BlockEntrySize) && (BlockEntry <= LastBlockEntry));
386 } while (RegionLength != 0);
387
388 return EFI_SUCCESS;
389 }
390
391 STATIC
392 EFI_STATUS
393 FillTranslationTable (
394 IN UINT64 *RootTable,
395 IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion
396 )
397 {
398 return UpdateRegionMapping (
399 RootTable,
400 MemoryRegion->VirtualBase,
401 MemoryRegion->Length,
402 ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
403 0
404 );
405 }
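
// Illustrative sketch (hypothetical values): the descriptor below would be
// mapped by the function above as write-back, inner-shareable normal memory
// with the access flag set, starting at the region's VirtualBase:
//
//   ARM_MEMORY_REGION_DESCRIPTOR Region = {
//     0x80000000,                             // PhysicalBase (hypothetical)
//     0x80000000,                             // VirtualBase
//     SIZE_1GB,                               // Length
//     ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK  // Attributes
//   };
//
//   Status = FillTranslationTable (RootTable, &Region);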
406
407 STATIC
408 UINT64
409 GcdAttributeToPageAttribute (
410 IN UINT64 GcdAttributes
411 )
412 {
413 UINT64 PageAttributes;
414
415 switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) {
416 case EFI_MEMORY_UC:
417 PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
418 break;
419 case EFI_MEMORY_WC:
420 PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
421 break;
422 case EFI_MEMORY_WT:
423 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
424 break;
425 case EFI_MEMORY_WB:
426 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
427 break;
428 default:
429 PageAttributes = TT_ATTR_INDX_MASK;
430 break;
431 }
432
433 if ((GcdAttributes & EFI_MEMORY_XP) != 0 ||
434 (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC) {
435 if (ArmReadCurrentEL () == AARCH64_EL2) {
436 PageAttributes |= TT_XN_MASK;
437 } else {
438 PageAttributes |= TT_UXN_MASK | TT_PXN_MASK;
439 }
440 }
441
442 if ((GcdAttributes & EFI_MEMORY_RO) != 0) {
443 PageAttributes |= TT_AP_RO_RO;
444 }
445
446 return PageAttributes | TT_AF;
447 }
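
// Examples of the mapping above (illustration only, EL1 shown):
//   EFI_MEMORY_WB | EFI_MEMORY_XP maps to TT_ATTR_INDX_MEMORY_WRITE_BACK |
//     TT_SH_INNER_SHAREABLE | TT_UXN_MASK | TT_PXN_MASK | TT_AF
//   EFI_MEMORY_UC maps to TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK |
//     TT_PXN_MASK | TT_AF (device memory is always mapped execute-never)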
448
449 EFI_STATUS
450 ArmSetMemoryAttributes (
451 IN EFI_PHYSICAL_ADDRESS BaseAddress,
452 IN UINT64 Length,
453 IN UINT64 Attributes
454 )
455 {
456 EFI_STATUS Status;
457 UINT64 *TranslationTable;
458 UINT64 PageAttributes;
459 UINT64 PageAttributeMask;
460
461 PageAttributes = GcdAttributeToPageAttribute (Attributes);
462 PageAttributeMask = 0;
463
464 if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {
465 //
466 // No memory type was set in Attributes, so we are going to update the
467 // permissions only.
468 //
469 PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;
470 PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |
471 TT_PXN_MASK | TT_XN_MASK);
472 }
473
474 TranslationTable = ArmGetTTBR0BaseAddress ();
475
476 Status = UpdateRegionMapping (
477 TranslationTable,
478 BaseAddress,
479 Length,
480 PageAttributes,
481 PageAttributeMask);
482 if (EFI_ERROR (Status)) {
483 return Status;
484 }
485
486 // Invalidate all TLB entries so changes are synced
487 ArmInvalidateTlb ();
488
489 return EFI_SUCCESS;
490 }
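
//
// Illustrative caller (not part of the original file): a hypothetical driver
// remapping a frame buffer as write-combining. Only the EFI_MEMORY_* cache
// and permission bits handled above are meaningful here.
//
STATIC
EFI_STATUS
ExampleRemapFrameBuffer (   // hypothetical name, for illustration only
  IN EFI_PHYSICAL_ADDRESS FrameBufferBase,
  IN UINT64               FrameBufferSize
  )
{
  return ArmSetMemoryAttributes (FrameBufferBase, FrameBufferSize, EFI_MEMORY_WC);
}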
491
492 STATIC
493 EFI_STATUS
494 SetMemoryRegionAttribute (
495 IN EFI_PHYSICAL_ADDRESS BaseAddress,
496 IN UINT64 Length,
497 IN UINT64 Attributes,
498 IN UINT64 BlockEntryMask
499 )
500 {
501 EFI_STATUS Status;
502 UINT64 *RootTable;
503
504 RootTable = ArmGetTTBR0BaseAddress ();
505
506 Status = UpdateRegionMapping (RootTable, BaseAddress, Length, Attributes, BlockEntryMask);
507 if (EFI_ERROR (Status)) {
508 return Status;
509 }
510
511 // Invalidate all TLB entries so changes are synced
512 ArmInvalidateTlb ();
513
514 return EFI_SUCCESS;
515 }
516
517 EFI_STATUS
518 ArmSetMemoryRegionNoExec (
519 IN EFI_PHYSICAL_ADDRESS BaseAddress,
520 IN UINT64 Length
521 )
522 {
523 UINT64 Val;
524
525 if (ArmReadCurrentEL () == AARCH64_EL1) {
526 Val = TT_PXN_MASK | TT_UXN_MASK;
527 } else {
528 Val = TT_XN_MASK;
529 }
530
531 return SetMemoryRegionAttribute (
532 BaseAddress,
533 Length,
534 Val,
535 ~TT_ADDRESS_MASK_BLOCK_ENTRY);
536 }
537
538 EFI_STATUS
539 ArmClearMemoryRegionNoExec (
540 IN EFI_PHYSICAL_ADDRESS BaseAddress,
541 IN UINT64 Length
542 )
543 {
544 UINT64 Mask;
545
546 // XN maps to UXN in the EL1&0 translation regime
547 Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);
548
549 return SetMemoryRegionAttribute (
550 BaseAddress,
551 Length,
552 0,
553 Mask);
554 }
555
556 EFI_STATUS
557 ArmSetMemoryRegionReadOnly (
558 IN EFI_PHYSICAL_ADDRESS BaseAddress,
559 IN UINT64 Length
560 )
561 {
562 return SetMemoryRegionAttribute (
563 BaseAddress,
564 Length,
565 TT_AP_RO_RO,
566 ~TT_ADDRESS_MASK_BLOCK_ENTRY);
567 }
568
569 EFI_STATUS
570 ArmClearMemoryRegionReadOnly (
571 IN EFI_PHYSICAL_ADDRESS BaseAddress,
572 IN UINT64 Length
573 )
574 {
575 return SetMemoryRegionAttribute (
576 BaseAddress,
577 Length,
578 TT_AP_RW_RW,
579 ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));
580 }
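
//
// Illustrative sketch (not part of the original file): enforcing W^X on a
// loaded image with the helpers above, by making its code read-only and
// executable and its data writable but non-executable.
//
STATIC
EFI_STATUS
ExampleProtectLoadedImage (   // hypothetical name, for illustration only
  IN EFI_PHYSICAL_ADDRESS CodeBase,
  IN UINT64               CodeSize,
  IN EFI_PHYSICAL_ADDRESS DataBase,
  IN UINT64               DataSize
  )
{
  EFI_STATUS Status;

  Status = ArmSetMemoryRegionReadOnly (CodeBase, CodeSize);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  Status = ArmClearMemoryRegionNoExec (CodeBase, CodeSize);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  return ArmSetMemoryRegionNoExec (DataBase, DataSize);
}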
581
582 EFI_STATUS
583 EFIAPI
584 ArmConfigureMmu (
585 IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable,
586 OUT VOID **TranslationTableBase OPTIONAL,
587 OUT UINTN *TranslationTableSize OPTIONAL
588 )
589 {
590 VOID* TranslationTable;
591 UINT32 TranslationTableAttribute;
592 UINT64 MaxAddress;
593 UINTN T0SZ;
594 UINTN RootTableEntryCount;
595 UINT64 TCR;
596 EFI_STATUS Status;
597
598 if (MemoryTable == NULL) {
599 ASSERT (MemoryTable != NULL);
600 return EFI_INVALID_PARAMETER;
601 }
602
603 // Cover the entire GCD memory space
604 MaxAddress = (1UL << PcdGet8 (PcdPrePiCpuMemorySize)) - 1;
605
606 // Lookup the Table Level to get the information
607 LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);
608
609 //
610 // Set TCR that allows us to retrieve T0SZ in the subsequent functions
611 //
612 // Ideally we will be running at EL2, but should support EL1 as well.
613 // UEFI should not run at EL3.
614 if (ArmReadCurrentEL () == AARCH64_EL2) {
615 // Note: Bits 23 and 31 are reserved (RES1) bits in TCR_EL2
616 TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;
617
618 // Set the Physical Address Size using MaxAddress
619 if (MaxAddress < SIZE_4GB) {
620 TCR |= TCR_PS_4GB;
621 } else if (MaxAddress < SIZE_64GB) {
622 TCR |= TCR_PS_64GB;
623 } else if (MaxAddress < SIZE_1TB) {
624 TCR |= TCR_PS_1TB;
625 } else if (MaxAddress < SIZE_4TB) {
626 TCR |= TCR_PS_4TB;
627 } else if (MaxAddress < SIZE_16TB) {
628 TCR |= TCR_PS_16TB;
629 } else if (MaxAddress < SIZE_256TB) {
630 TCR |= TCR_PS_256TB;
631 } else {
632 DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
633 ASSERT (0); // Address spaces wider than 48 bits are not supported
634 return EFI_UNSUPPORTED;
635 }
636 } else if (ArmReadCurrentEL () == AARCH64_EL1) {
637 // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
638 TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;
639
640 // Set the Physical Address Size using MaxAddress
641 if (MaxAddress < SIZE_4GB) {
642 TCR |= TCR_IPS_4GB;
643 } else if (MaxAddress < SIZE_64GB) {
644 TCR |= TCR_IPS_64GB;
645 } else if (MaxAddress < SIZE_1TB) {
646 TCR |= TCR_IPS_1TB;
647 } else if (MaxAddress < SIZE_4TB) {
648 TCR |= TCR_IPS_4TB;
649 } else if (MaxAddress < SIZE_16TB) {
650 TCR |= TCR_IPS_16TB;
651 } else if (MaxAddress < SIZE_256TB) {
652 TCR |= TCR_IPS_256TB;
653 } else {
654 DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
655 ASSERT (0); // Address spaces wider than 48 bits are not supported
656 return EFI_UNSUPPORTED;
657 }
658 } else {
659 ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
660 return EFI_UNSUPPORTED;
661 }
662
663 //
664 // Translation table walks are always cache coherent on ARMv8-A, so cache
665 // maintenance on page tables is never needed. Since there is a risk of
666 // loss of coherency when using mismatched attributes, and given that memory
667 // is mapped cacheable except for extraordinary cases (such as non-coherent
668 // DMA), have the page table walker perform cached accesses as well, and
669 // assert below that that matches the attributes we use for CPU accesses to
670 // the region.
671 //
672 TCR |= TCR_SH_INNER_SHAREABLE |
673 TCR_RGN_OUTER_WRITE_BACK_ALLOC |
674 TCR_RGN_INNER_WRITE_BACK_ALLOC;
675
676 // Set TCR
677 ArmSetTCR (TCR);
678
679 // Allocate pages for translation table
680 TranslationTable = AllocatePages (1);
681 if (TranslationTable == NULL) {
682 return EFI_OUT_OF_RESOURCES;
683 }
684 // We set TTBR0 just after allocating the table so that its location can be retrieved by the
685 // subsequent functions without having to pass this value around. The MMU is only enabled
686 // after the translation tables are populated.
687 ArmSetTTBR0 (TranslationTable);
688
689 if (TranslationTableBase != NULL) {
690 *TranslationTableBase = TranslationTable;
691 }
692
693 if (TranslationTableSize != NULL) {
694 *TranslationTableSize = RootTableEntryCount * sizeof(UINT64);
695 }
696
697 ZeroMem (TranslationTable, RootTableEntryCount * sizeof(UINT64));
698
699 // Disable MMU and caches. ArmDisableMmu() also invalidates the TLBs
700 ArmDisableMmu ();
701 ArmDisableDataCache ();
702 ArmDisableInstructionCache ();
703
704 // Make sure nothing sneaked into the cache
705 ArmCleanInvalidateDataCache ();
706 ArmInvalidateInstructionCache ();
707
708 TranslationTableAttribute = TT_ATTR_INDX_INVALID;
709 while (MemoryTable->Length != 0) {
710
711 DEBUG_CODE_BEGIN ();
712 // Find the memory attribute for the Translation Table
713 if ((UINTN)TranslationTable >= MemoryTable->PhysicalBase &&
714 (UINTN)TranslationTable + EFI_PAGE_SIZE <= MemoryTable->PhysicalBase +
715 MemoryTable->Length) {
716 TranslationTableAttribute = MemoryTable->Attributes;
717 }
718 DEBUG_CODE_END ();
719
720 Status = FillTranslationTable (TranslationTable, MemoryTable);
721 if (EFI_ERROR (Status)) {
722 goto FREE_TRANSLATION_TABLE;
723 }
724 MemoryTable++;
725 }
726
727 ASSERT (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK ||
728 TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK);
729
730 ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) | // mapped to EFI_MEMORY_UC
731 MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) | // mapped to EFI_MEMORY_WC
732 MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) | // mapped to EFI_MEMORY_WT
733 MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)); // mapped to EFI_MEMORY_WB
734
735 ArmDisableAlignmentCheck ();
736 ArmEnableStackAlignmentCheck ();
737 ArmEnableInstructionCache ();
738 ArmEnableDataCache ();
739
740 ArmEnableMmu ();
741 return EFI_SUCCESS;
742
743 FREE_TRANSLATION_TABLE:
744 FreePages (TranslationTable, 1);
745 return Status;
746 }
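
//
// Illustrative sketch (not part of the original file, hypothetical platform
// values): a minimal memory table covering 1 GB of DRAM as write-back memory
// and one MMIO page as device memory, terminated by a zero-length entry, and
// a hypothetical helper that hands it to ArmConfigureMmu ().
//
STATIC ARM_MEMORY_REGION_DESCRIPTOR mExampleMemoryTable[] = {
  // PhysicalBase, VirtualBase, Length,   Attributes
  { 0x40000000,    0x40000000,  SIZE_1GB, ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK },
  { 0x09000000,    0x09000000,  SIZE_4KB, ARM_MEMORY_REGION_ATTRIBUTE_DEVICE },
  { 0,             0,           0,        0 }
};

STATIC
EFI_STATUS
ExampleEnableMmu (   // hypothetical name, for illustration only
  VOID
  )
{
  VOID   *TranslationTableBase;
  UINTN  TranslationTableSize;

  return ArmConfigureMmu (mExampleMemoryTable,
                          &TranslationTableBase,
                          &TranslationTableSize);
}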
747
748 RETURN_STATUS
749 EFIAPI
750 ArmMmuBaseLibConstructor (
751 VOID
752 )
753 {
754 extern UINT32 ArmReplaceLiveTranslationEntrySize;
755
756 //
757 // The ArmReplaceLiveTranslationEntry () helper function may be invoked
758 // with the MMU off so we have to ensure that it gets cleaned to the PoC
759 //
760 WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry,
761 ArmReplaceLiveTranslationEntrySize);
762
763 return RETURN_SUCCESS;
764 }