/** @file
*  File managing the MMU for the ARMv8 architecture
*
*  Copyright (c) 2011-2014, ARM Limited. All rights reserved.
*  Copyright (c) 2016, Linaro Limited. All rights reserved.
*
*  This program and the accompanying materials
*  are licensed and made available under the terms and conditions of the BSD License
*  which accompanies this distribution.  The full text of the license may be found at
*  http://opensource.org/licenses/bsd-license.php
*
*  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
*  WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
*
**/

#include <Uefi.h>
#include <Chipset/AArch64.h>
#include <Library/BaseMemoryLib.h>
#include <Library/CacheMaintenanceLib.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/ArmLib.h>
#include <Library/ArmMmuLib.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>

// We use this index definition to define an invalid block entry
#define TT_ATTR_INDX_INVALID    ((UINT32)~0)

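/**
  Convert an ARM memory region attribute into the corresponding AArch64
  translation table descriptor bits (MAIR index, shareability and, for
  device memory, execute-never).

  @param[in]  Attributes  The ARM_MEMORY_REGION_ATTRIBUTES value to convert.

  @return The translation table attribute bits encoding the given memory type.
**/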
STATIC
UINT64
ArmMemoryAttributeToPageAttribute (
  IN ARM_MEMORY_REGION_ATTRIBUTES  Attributes
  )
{
  switch (Attributes) {
  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
    return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;

  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
    return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;

  // Uncached and device mappings are treated as outer shareable by default.
  case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
    return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;

  default:
    ASSERT (0);
  case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
    if (ArmReadCurrentEL () == AARCH64_EL2)
      return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
    else
      return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
  }
}

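/**
  Convert the memory type, access permission and execute-never bits of a
  translation table descriptor into the equivalent GCD attribute mask
  (EFI_MEMORY_UC/WC/WT/WB plus EFI_MEMORY_WP and EFI_MEMORY_XP).

  @param[in]  PageAttributes  Translation table descriptor attribute bits.

  @return The corresponding GCD memory space attributes.
**/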
UINT64
PageAttributeToGcdAttribute (
  IN UINT64 PageAttributes
  )
{
  UINT64  GcdAttributes;

  switch (PageAttributes & TT_ATTR_INDX_MASK) {
  case TT_ATTR_INDX_DEVICE_MEMORY:
    GcdAttributes = EFI_MEMORY_UC;
    break;
  case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:
    GcdAttributes = EFI_MEMORY_WC;
    break;
  case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:
    GcdAttributes = EFI_MEMORY_WT;
    break;
  case TT_ATTR_INDX_MEMORY_WRITE_BACK:
    GcdAttributes = EFI_MEMORY_WB;
    break;
  default:
    DEBUG ((EFI_D_ERROR, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes));
    ASSERT (0);
    // The Global Coherency Domain (GCD) value is defined as a bit set.
    // Returning 0 means no attribute has been set.
    GcdAttributes = 0;
  }

  // Determine protection attributes
  if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) || ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {
    // Read only cases map to write-protect
    GcdAttributes |= EFI_MEMORY_WP;
  }

  // Process eXecute Never attribute
  if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0 ) {
    GcdAttributes |= EFI_MEMORY_XP;
  }

  return GcdAttributes;
}

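/**
  Convert a GCD cacheability attribute (EFI_MEMORY_UC/WC/WT/WB) into the
  corresponding ARM memory region attribute. Unsupported attributes fall
  back to device memory.

  @param[in]  GcdAttributes  The GCD memory space attributes.

  @return The matching ARM_MEMORY_REGION_ATTRIBUTES value.
**/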
ARM_MEMORY_REGION_ATTRIBUTES
GcdAttributeToArmAttribute (
  IN UINT64 GcdAttributes
  )
{
  switch (GcdAttributes & 0xFF) {
  case EFI_MEMORY_UC:
    return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;
  case EFI_MEMORY_WC:
    return ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED;
  case EFI_MEMORY_WT:
    return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH;
  case EFI_MEMORY_WB:
    return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK;
  default:
    DEBUG ((EFI_D_ERROR, "GcdAttributeToArmAttribute: 0x%lX attributes is not supported.\n", GcdAttributes));
    ASSERT (0);
    return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;
  }
}

// Describe the T0SZ values for each translation table level
typedef struct {
  UINTN   MinT0SZ;
  UINTN   MaxT0SZ;
  UINTN   LargestT0SZ;  // Generally (MaxT0SZ == LargestT0SZ) but at the Level 3 Table
                        // the MaxT0SZ is not at the boundary of the table
} T0SZ_DESCRIPTION_PER_LEVEL;

// Map table for the corresponding Level of Table
STATIC CONST T0SZ_DESCRIPTION_PER_LEVEL T0SZPerTableLevel[] = {
  { 16, 24, 24 }, // Table Level 0
  { 25, 33, 33 }, // Table Level 1
  { 34, 39, 42 }  // Table Level 2
};

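/**
  Derive the initial (root) translation table level and the number of entries
  in the root table from a T0SZ value, using the T0SZPerTableLevel map above.

  For example, with the 4 KB granule used by this library, T0SZ = 25 (a 39-bit
  virtual address space) selects a level 1 root table with
  2^(33 - 25 + 1) = 512 entries.

  @param[in]   T0SZ             The T0SZ value programmed in TCR.
  @param[out]  TableLevel       If not NULL, the level of the root table.
  @param[out]  TableEntryCount  If not NULL, the number of entries in the root table.
**/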
VOID
GetRootTranslationTableInfo (
  IN UINTN     T0SZ,
  OUT UINTN   *TableLevel,
  OUT UINTN   *TableEntryCount
  )
{
  UINTN Index;

  // Identify the level of the root table from the given T0SZ
  for (Index = 0; Index < sizeof (T0SZPerTableLevel) / sizeof (T0SZ_DESCRIPTION_PER_LEVEL); Index++) {
    if (T0SZ <= T0SZPerTableLevel[Index].MaxT0SZ) {
      break;
    }
  }

  // If we have not found the corresponding maximum T0SZ then we use the last one
  if (Index == sizeof (T0SZPerTableLevel) / sizeof (T0SZ_DESCRIPTION_PER_LEVEL)) {
    Index--;
  }

  // Get the level of the root table
  if (TableLevel) {
    *TableLevel = Index;
  }

  // The number of entries in the root table is 2^(LargestT0SZ - T0SZ + 1)
  if (TableEntryCount) {
    *TableEntryCount = 1 << (T0SZPerTableLevel[Index].LargestT0SZ - T0SZ + 1);
  }
}

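/**
  Update a single translation table entry. If the MMU is enabled the entry may
  be in active use, so the update is delegated to the
  ArmReplaceLiveTranslationEntry () helper, which can safely replace a live entry.

  @param[in]  Entry  Pointer to the translation table entry to update.
  @param[in]  Value  The new descriptor value.
**/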
STATIC
VOID
ReplaceLiveEntry (
  IN  UINT64  *Entry,
  IN  UINT64  Value
  )
{
  if (!ArmMmuEnabled ()) {
    *Entry = Value;
  } else {
    ArmReplaceLiveTranslationEntry (Entry, Value);
  }
}

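/**
  Compute the T0SZ value needed to cover MaxAddress and look up the resulting
  root table entry count. For example, MaxAddress = 0xFFFFFFFF (a 4 GB space)
  has its top set bit at position 31, giving T0SZ = 64 - 32 = 32.

  @param[in]   MaxAddress       Highest address that must be mapped.
  @param[out]  T0SZ             The computed T0SZ value.
  @param[out]  TableEntryCount  Number of entries in the root translation table.
**/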
STATIC
VOID
LookupAddresstoRootTable (
  IN  UINT64  MaxAddress,
  OUT UINTN  *T0SZ,
  OUT UINTN  *TableEntryCount
  )
{
  UINTN TopBit;

  // Check the parameters are not NULL
  ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));

  // Look for the highest bit set in MaxAddress
  for (TopBit = 63; TopBit != 0; TopBit--) {
    if ((1ULL << TopBit) & MaxAddress) {
      // MaxAddress top bit is found
      TopBit = TopBit + 1;
      break;
    }
  }
  ASSERT (TopBit != 0);

  // Calculate T0SZ from the top bit of the MaxAddress
  *T0SZ = 64 - TopBit;

  // Get the Table info from T0SZ
  GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);
}

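/**
  Walk (and, where necessary, extend) the translation tables to find the block
  or page entry that maps RegionStart at the largest block size compatible with
  the region's alignment and size. Existing block entries that are too coarse
  are split into a lower-level table, and missing intermediate tables are
  allocated on the way down.

  @param[in]       RootTable       Pointer to the root translation table.
  @param[in]       RegionStart     Start address of the region, 4 KB aligned.
  @param[out]      TableLevel      Level of the table containing the returned entry.
  @param[in, out]  BlockEntrySize  On input, the region size; on output, the size
                                   mapped by one entry at the returned level.
  @param[out]      LastBlockEntry  Last entry of the table containing the returned entry.

  @return Pointer to the first entry to program, or NULL on failure.
**/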
STATIC
UINT64*
GetBlockEntryListFromAddress (
  IN  UINT64       *RootTable,
  IN  UINT64        RegionStart,
  OUT UINTN        *TableLevel,
  IN OUT UINT64    *BlockEntrySize,
  OUT UINT64      **LastBlockEntry
  )
{
  UINTN   RootTableLevel;
  UINTN   RootTableEntryCount;
  UINT64 *TranslationTable;
  UINT64 *BlockEntry;
  UINT64 *SubTableBlockEntry;
  UINT64  BlockEntryAddress;
  UINTN   BaseAddressAlignment;
  UINTN   PageLevel;
  UINTN   Index;
  UINTN   IndexLevel;
  UINTN   T0SZ;
  UINT64  Attributes;
  UINT64  TableAttributes;

  // Initialize variable
  BlockEntry = NULL;

  // Ensure the parameters are valid
  if (!(TableLevel && BlockEntrySize && LastBlockEntry)) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // Ensure the Region is aligned on 4KB boundary
  if ((RegionStart & (SIZE_4KB - 1)) != 0) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // Ensure the required size is aligned on 4KB boundary and not 0
  if ((*BlockEntrySize & (SIZE_4KB - 1)) != 0 || *BlockEntrySize == 0) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
  // Get the Table info from T0SZ
  GetRootTranslationTableInfo (T0SZ, &RootTableLevel, &RootTableEntryCount);

  // If the start address is 0x0 then we use the size of the region to identify the alignment
  if (RegionStart == 0) {
    // Identify the highest possible alignment for the Region Size
    BaseAddressAlignment = LowBitSet64 (*BlockEntrySize);
  } else {
    // Identify the highest possible alignment for the Base Address
    BaseAddressAlignment = LowBitSet64 (RegionStart);
  }

  // Identify the Page Level the RegionStart must belong to. Note that PageLevel
  // should be at least 1 since block translations are not supported at level 0
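  // For example, a 2 MB aligned RegionStart (lowest set bit 21) gives
  // PageLevel = MAX (3 - ((21 - 12) / 9), 1) = 2, i.e. 2 MB blocks at level 2.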
  PageLevel = MAX (3 - ((BaseAddressAlignment - 12) / 9), 1);

  // If the required size is smaller than the current block size then we need to go to the page below.
  // The PageLevel was calculated on the Base Address alignment but did not take into account the alignment
  // of the allocation size
  while (*BlockEntrySize < TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel)) {
    // It does not fit, so move to the next level down (smaller block size)
    PageLevel++;
  }

  //
  // Get the Table Descriptor for the corresponding PageLevel. We need to decompose RegionStart to get appropriate entries
  //

  TranslationTable = RootTable;
  for (IndexLevel = RootTableLevel; IndexLevel <= PageLevel; IndexLevel++) {
    BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel, RegionStart);

    if ((IndexLevel != 3) && ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY)) {
      // Go to the next table
      TranslationTable = (UINT64*)(*BlockEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE);

      // If we are at the targeted level then move the target one level deeper
      if (IndexLevel == PageLevel) {
        // Enter the next level
        PageLevel++;
      }
    } else if ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
      // If we are not at the last level then we need to split this BlockEntry
      if (IndexLevel != PageLevel) {
        // Retrieve the attributes from the block entry
        Attributes = *BlockEntry & TT_ATTRIBUTES_MASK;

        // Convert the block entry attributes into Table descriptor attributes
        TableAttributes = TT_TABLE_AP_NO_PERMISSION;
        if (Attributes & TT_NS) {
          TableAttributes = TT_TABLE_NS;
        }

        // Get the address corresponding to this entry
        BlockEntryAddress = RegionStart;
        BlockEntryAddress = BlockEntryAddress >> TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
        // Shift back left to zero the bits below the block offset
        BlockEntryAddress = BlockEntryAddress << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);

        // Set the correct entry type for the next page level
        if ((IndexLevel + 1) == 3) {
          Attributes |= TT_TYPE_BLOCK_ENTRY_LEVEL3;
        } else {
          Attributes |= TT_TYPE_BLOCK_ENTRY;
        }

        // Create a new translation table
        TranslationTable = (UINT64*)AllocateAlignedPages (EFI_SIZE_TO_PAGES(TT_ENTRY_COUNT * sizeof(UINT64)), TT_ALIGNMENT_DESCRIPTION_TABLE);
        if (TranslationTable == NULL) {
          return NULL;
        }

        // Populate the newly created lower level table
        SubTableBlockEntry = TranslationTable;
        for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
          *SubTableBlockEntry = Attributes | (BlockEntryAddress + (Index << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel + 1)));
          SubTableBlockEntry++;
        }

        // Fill the BlockEntry with the new TranslationTable
        ReplaceLiveEntry (BlockEntry,
          ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TableAttributes | TT_TYPE_TABLE_ENTRY);
      }
    } else {
      if (IndexLevel != PageLevel) {
        //
        // Case where the entry is invalid and we are at a level above the one targeted.
        //

        // Create a new translation table
        TranslationTable = (UINT64*)AllocateAlignedPages (EFI_SIZE_TO_PAGES(TT_ENTRY_COUNT * sizeof(UINT64)), TT_ALIGNMENT_DESCRIPTION_TABLE);
        if (TranslationTable == NULL) {
          return NULL;
        }

        ZeroMem (TranslationTable, TT_ENTRY_COUNT * sizeof(UINT64));

        // Fill the new BlockEntry with the TranslationTable
        *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TT_TYPE_TABLE_ENTRY;
      }
    }
  }

  // Expose the found PageLevel to the caller
  *TableLevel = PageLevel;

  // Now, we have the Table Level we can get the Block Size associated to this table
  *BlockEntrySize = TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel);

  // The last block of the root table depends on the number of entries in this table,
  // otherwise it is always the (TT_ENTRY_COUNT - 1)th entry in the table.
  *LastBlockEntry = TT_LAST_BLOCK_ADDRESS(TranslationTable,
      (PageLevel == RootTableLevel) ? RootTableEntryCount : TT_ENTRY_COUNT);

  return BlockEntry;
}

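/**
  Map or update the attributes of a memory region in the given translation
  tables. The region is covered with the largest block entries its alignment
  allows; for each entry, the descriptor bits selected by BlockEntryMask are
  preserved and the new Attributes are OR-ed in.

  @param[in]  RootTable       Pointer to the root translation table.
  @param[in]  RegionStart     Start of the region, 4 KB aligned.
  @param[in]  RegionLength    Length of the region in bytes, 4 KB aligned.
  @param[in]  Attributes      Descriptor attribute bits to set.
  @param[in]  BlockEntryMask  Mask of descriptor bits to preserve from the existing entry.

  @retval RETURN_SUCCESS            The mapping was updated.
  @retval RETURN_INVALID_PARAMETER  RegionLength is zero or not 4 KB aligned.
  @retval RETURN_OUT_OF_RESOURCES   A new translation table could not be allocated.
**/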
STATIC
RETURN_STATUS
UpdateRegionMapping (
  IN  UINT64  *RootTable,
  IN  UINT64  RegionStart,
  IN  UINT64  RegionLength,
  IN  UINT64  Attributes,
  IN  UINT64  BlockEntryMask
  )
{
  UINT32  Type;
  UINT64  *BlockEntry;
  UINT64  *LastBlockEntry;
  UINT64  BlockEntrySize;
  UINTN   TableLevel;

  // Ensure the Length is aligned on 4KB boundary
  if ((RegionLength == 0) || ((RegionLength & (SIZE_4KB - 1)) != 0)) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return RETURN_INVALID_PARAMETER;
  }

  do {
    // Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor
    // such as the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor
    BlockEntrySize = RegionLength;
    BlockEntry = GetBlockEntryListFromAddress (RootTable, RegionStart, &TableLevel, &BlockEntrySize, &LastBlockEntry);
    if (BlockEntry == NULL) {
      // GetBlockEntryListFromAddress() returns NULL when it fails to allocate new pages for the Translation Tables
      return RETURN_OUT_OF_RESOURCES;
    }

    if (TableLevel != 3) {
      Type = TT_TYPE_BLOCK_ENTRY;
    } else {
      Type = TT_TYPE_BLOCK_ENTRY_LEVEL3;
    }

    do {
      // Fill the Block Entry with attribute and output block address
      *BlockEntry &= BlockEntryMask;
      *BlockEntry |= (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;

      // Go to the next BlockEntry
      RegionStart += BlockEntrySize;
      RegionLength -= BlockEntrySize;
      BlockEntry++;

      // Break the inner loop when next block is a table
      // Rerun GetBlockEntryListFromAddress to avoid page table memory leak
      if (TableLevel != 3 &&
          (*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
        break;
      }
    } while ((RegionLength >= BlockEntrySize) && (BlockEntry <= LastBlockEntry));
  } while (RegionLength != 0);

  return RETURN_SUCCESS;
}

STATIC
RETURN_STATUS
FillTranslationTable (
  IN  UINT64                        *RootTable,
  IN  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryRegion
  )
{
  return UpdateRegionMapping (
           RootTable,
           MemoryRegion->VirtualBase,
           MemoryRegion->Length,
           ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
           0
           );
}

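/**
  Update the memory type of a region in the currently active translation
  tables (TTBR0) from a GCD attribute value, then invalidate the TLBs.

  @param[in]  BaseAddress  Start of the region.
  @param[in]  Length       Length of the region in bytes.
  @param[in]  Attributes   GCD attributes to apply (EFI_MEMORY_UC/WC/WT/WB).
  @param[in]  VirtualMask  Not used by this implementation.

  @return RETURN_SUCCESS on success, or the error returned while updating the tables.
**/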
RETURN_STATUS
SetMemoryAttributes (
  IN EFI_PHYSICAL_ADDRESS   BaseAddress,
  IN UINT64                 Length,
  IN UINT64                 Attributes,
  IN EFI_PHYSICAL_ADDRESS   VirtualMask
  )
{
  RETURN_STATUS                 Status;
  ARM_MEMORY_REGION_DESCRIPTOR  MemoryRegion;
  UINT64                       *TranslationTable;

  MemoryRegion.PhysicalBase = BaseAddress;
  MemoryRegion.VirtualBase = BaseAddress;
  MemoryRegion.Length = Length;
  MemoryRegion.Attributes = GcdAttributeToArmAttribute (Attributes);

  TranslationTable = ArmGetTTBR0BaseAddress ();

  Status = FillTranslationTable (TranslationTable, &MemoryRegion);
  if (RETURN_ERROR (Status)) {
    return Status;
  }

  // Invalidate all TLB entries so changes are synced
  ArmInvalidateTlb ();

  return RETURN_SUCCESS;
}

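/**
  Helper shared by the ArmSet/ClearMemoryRegion* functions: apply Attributes
  to the region in the active TTBR0 translation tables, preserving the
  descriptor bits selected by BlockEntryMask, then invalidate the TLBs.

  @param[in]  BaseAddress     Start of the region.
  @param[in]  Length          Length of the region in bytes.
  @param[in]  Attributes      Descriptor attribute bits to set.
  @param[in]  BlockEntryMask  Mask of descriptor bits to preserve.

  @return RETURN_SUCCESS on success, or the error from UpdateRegionMapping ().
**/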
STATIC
RETURN_STATUS
SetMemoryRegionAttribute (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress,
  IN  UINT64                    Length,
  IN  UINT64                    Attributes,
  IN  UINT64                    BlockEntryMask
  )
{
  RETURN_STATUS   Status;
  UINT64         *RootTable;

  RootTable = ArmGetTTBR0BaseAddress ();

  Status = UpdateRegionMapping (RootTable, BaseAddress, Length, Attributes, BlockEntryMask);
  if (RETURN_ERROR (Status)) {
    return Status;
  }

  // Invalidate all TLB entries so changes are synced
  ArmInvalidateTlb ();

  return RETURN_SUCCESS;
}

RETURN_STATUS
ArmSetMemoryRegionNoExec (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress,
  IN  UINT64                    Length
  )
{
  UINT64    Val;

  if (ArmReadCurrentEL () == AARCH64_EL1) {
    Val = TT_PXN_MASK | TT_UXN_MASK;
  } else {
    Val = TT_XN_MASK;
  }

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           Val,
           ~TT_ADDRESS_MASK_BLOCK_ENTRY);
}

RETURN_STATUS
ArmClearMemoryRegionNoExec (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress,
  IN  UINT64                    Length
  )
{
  UINT64 Mask;

  // XN maps to UXN in the EL1&0 translation regime
  Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           0,
           Mask);
}

RETURN_STATUS
ArmSetMemoryRegionReadOnly (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress,
  IN  UINT64                    Length
  )
{
  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           TT_AP_RO_RO,
           ~TT_ADDRESS_MASK_BLOCK_ENTRY);
}

RETURN_STATUS
ArmClearMemoryRegionReadOnly (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress,
  IN  UINT64                    Length
  )
{
  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           TT_AP_RW_RW,
           ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));
}

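/**
  Build the initial translation tables for the memory regions described by
  MemoryTable, program TCR, MAIR and TTBR0 accordingly, and enable the MMU
  and caches. Must be called at EL1 or EL2.

  @param[in]   MemoryTable           Array of memory region descriptors,
                                     terminated by an entry with Length == 0.
  @param[out]  TranslationTableBase  If not NULL, returns the allocated root table.
  @param[out]  TranslationTableSize  If not NULL, returns the root table size in bytes.

  @retval RETURN_SUCCESS            The MMU was configured and enabled.
  @retval RETURN_INVALID_PARAMETER  MemoryTable is NULL.
  @retval RETURN_UNSUPPORTED        The current EL or the address range is not supported.
  @retval RETURN_OUT_OF_RESOURCES   Translation table memory could not be allocated.
**/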
RETURN_STATUS
EFIAPI
ArmConfigureMmu (
  IN  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryTable,
  OUT VOID                         **TranslationTableBase OPTIONAL,
  OUT UINTN                         *TranslationTableSize OPTIONAL
  )
{
  VOID*                          TranslationTable;
  UINTN                          TranslationTablePageCount;
  UINT32                         TranslationTableAttribute;
  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryTableEntry;
  UINT64                         MaxAddress;
  UINT64                         TopAddress;
  UINTN                          T0SZ;
  UINTN                          RootTableEntryCount;
  UINT64                         TCR;
  RETURN_STATUS                  Status;

  if (MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return RETURN_INVALID_PARAMETER;
  }

  // Identify the highest address of the memory table
  MaxAddress = MemoryTable->PhysicalBase + MemoryTable->Length - 1;
  MemoryTableEntry = MemoryTable;
  while (MemoryTableEntry->Length != 0) {
    TopAddress = MemoryTableEntry->PhysicalBase + MemoryTableEntry->Length - 1;
    if (TopAddress > MaxAddress) {
      MaxAddress = TopAddress;
    }
    MemoryTableEntry++;
  }

  // Lookup the Table Level to get the information
  LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);

  //
  // Set TCR that allows us to retrieve T0SZ in the subsequent functions
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    // Note: Bits 23 and 31 are reserved (RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Address spaces larger than 48 bits are not supported
      return RETURN_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Address spaces larger than 48 bits are not supported
      return RETURN_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return RETURN_UNSUPPORTED;
  }

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for translation table
  TranslationTablePageCount = EFI_SIZE_TO_PAGES(RootTableEntryCount * sizeof(UINT64));
  TranslationTable = (UINT64*)AllocateAlignedPages (TranslationTablePageCount, TT_ALIGNMENT_DESCRIPTION_TABLE);
  if (TranslationTable == NULL) {
    return RETURN_OUT_OF_RESOURCES;
  }
  // We set TTBR0 just after allocating the table to retrieve its location from the subsequent
  // functions without needing to pass this value across the functions. The MMU is only enabled
  // after the translation tables are populated.
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof(UINT64);
  }

  ZeroMem (TranslationTable, RootTableEntryCount * sizeof(UINT64));

  // Disable MMU and caches. ArmDisableMmu() also invalidates the TLBs
  ArmDisableMmu ();
  ArmDisableDataCache ();
  ArmDisableInstructionCache ();

  // Make sure nothing sneaked into the cache
  ArmCleanInvalidateDataCache ();
  ArmInvalidateInstructionCache ();

  TranslationTableAttribute = TT_ATTR_INDX_INVALID;
  while (MemoryTable->Length != 0) {
    // Find the memory attribute for the Translation Table
    if (((UINTN)TranslationTable >= MemoryTable->PhysicalBase) &&
        ((UINTN)TranslationTable <= MemoryTable->PhysicalBase - 1 + MemoryTable->Length)) {
      TranslationTableAttribute = MemoryTable->Attributes;
    }

    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (RETURN_ERROR (Status)) {
      goto FREE_TRANSLATION_TABLE;
    }
    MemoryTable++;
  }

  // Translate the Memory Attributes into Translation Table Register Attributes
  if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED) ||
      (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED)) {
    TCR |= TCR_SH_NON_SHAREABLE | TCR_RGN_OUTER_NON_CACHEABLE | TCR_RGN_INNER_NON_CACHEABLE;
  } else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK) ||
      (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK)) {
    TCR |= TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WRITE_BACK_ALLOC | TCR_RGN_INNER_WRITE_BACK_ALLOC;
  } else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH) ||
      (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH)) {
    TCR |= TCR_SH_NON_SHAREABLE | TCR_RGN_OUTER_WRITE_THROUGH | TCR_RGN_INNER_WRITE_THROUGH;
  } else {
    // If we failed to find a mapping that contains the root translation table then it probably means
    // the translation table is not mapped in the given memory map.
    ASSERT (0);
    Status = RETURN_UNSUPPORTED;
    goto FREE_TRANSLATION_TABLE;
  }

  // Set TCR again after getting the Translation Table attributes
  ArmSetTCR (TCR);

  ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |                      // mapped to EFI_MEMORY_UC
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) | // mapped to EFI_MEMORY_WC
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) | // mapped to EFI_MEMORY_WT
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK));       // mapped to EFI_MEMORY_WB

  ArmDisableAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return RETURN_SUCCESS;

FREE_TRANSLATION_TABLE:
  FreePages (TranslationTable, TranslationTablePageCount);
  return Status;
}

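/**
  Library constructor. Cleans the ArmReplaceLiveTranslationEntry () code to the
  point of coherency so that it can run safely while the MMU and caches are off.

  @retval RETURN_SUCCESS  Always.
**/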
RETURN_STATUS
EFIAPI
ArmMmuBaseLibConstructor (
  VOID
  )
{
  extern UINT32 ArmReplaceLiveTranslationEntrySize;

  //
  // The ArmReplaceLiveTranslationEntry () helper function may be invoked
  // with the MMU off so we have to ensure that it gets cleaned to the PoC
  //
  WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry,
    ArmReplaceLiveTranslationEntrySize);

  return RETURN_SUCCESS;
}