/** @file
*  File managing the MMU for ARMv8 architecture
*
*  Copyright (c) 2011-2014, ARM Limited. All rights reserved.
*
*  This program and the accompanying materials
*  are licensed and made available under the terms and conditions of the BSD License
*  which accompanies this distribution.  The full text of the license may be found at
*  http://opensource.org/licenses/bsd-license.php
*
*  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
*  WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
*
**/

#include <Uefi.h>
#include <Chipset/AArch64.h>
#include <Library/BaseMemoryLib.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/ArmLib.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>
#include "AArch64Lib.h"
#include "ArmLibPrivate.h"

// We use this index definition to define an invalid block entry
#define TT_ATTR_INDX_INVALID    ((UINT32)~0)

INT32 HaveMmuRoutines = 1;

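//
// Map an ARM_MEMORY_REGION_ATTRIBUTES value onto the corresponding translation
// table descriptor bits: the MAIR attribute index, the shareability field and,
// for device memory, the execute-never (XN or UXN/PXN) bits.
//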
STATIC
UINT64
ArmMemoryAttributeToPageAttribute (
  IN ARM_MEMORY_REGION_ATTRIBUTES  Attributes
  )
{
  switch (Attributes) {
  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
    return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;

  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
    return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;

  // Uncached and device mappings are treated as outer shareable by default.
  case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
    return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;

  default:
    ASSERT(0);
  case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
    if (ArmReadCurrentEL () == AARCH64_EL2)
      return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
    else
      return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
  }
}

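//
// Translate the attribute bits of a translation table descriptor back into the
// EFI_MEMORY_xx GCD attribute mask used by the DXE core (cacheability plus the
// WP/XP protection bits).
//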
UINT64
PageAttributeToGcdAttribute (
  IN UINT64 PageAttributes
  )
{
  UINT64  GcdAttributes;

  switch (PageAttributes & TT_ATTR_INDX_MASK) {
  case TT_ATTR_INDX_DEVICE_MEMORY:
    GcdAttributes = EFI_MEMORY_UC;
    break;
  case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:
    GcdAttributes = EFI_MEMORY_WC;
    break;
  case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:
    GcdAttributes = EFI_MEMORY_WT;
    break;
  case TT_ATTR_INDX_MEMORY_WRITE_BACK:
    GcdAttributes = EFI_MEMORY_WB;
    break;
  default:
    DEBUG ((EFI_D_ERROR, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes));
    ASSERT (0);
    // The Global Coherency Domain (GCD) value is defined as a bit set.
    // Returning 0 means no attribute has been set.
    GcdAttributes = 0;
  }

  // Determine protection attributes
  if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) || ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {
    // Read only cases map to write-protect
    GcdAttributes |= EFI_MEMORY_WP;
  }

  // Process eXecute Never attribute
  if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0) {
    GcdAttributes |= EFI_MEMORY_XP;
  }

  return GcdAttributes;
}

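//
// Translate a GCD attribute mask into the ARM_MEMORY_REGION_ATTRIBUTES value
// describing the same cacheability. Only the low byte (the UC/WC/WT/WB
// cacheability bits) is considered here.
//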
ARM_MEMORY_REGION_ATTRIBUTES
GcdAttributeToArmAttribute (
  IN UINT64 GcdAttributes
  )
{
  switch (GcdAttributes & 0xFF) {
  case EFI_MEMORY_UC:
    return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;
  case EFI_MEMORY_WC:
    return ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED;
  case EFI_MEMORY_WT:
    return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH;
  case EFI_MEMORY_WB:
    return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK;
  default:
    DEBUG ((EFI_D_ERROR, "GcdAttributeToArmAttribute: 0x%lX attributes are not supported.\n", GcdAttributes));
    ASSERT (0);
    return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;
  }
}

// Describe the T0SZ values for each translation table level
typedef struct {
  UINTN  MinT0SZ;
  UINTN  MaxT0SZ;
  UINTN  LargestT0SZ;  // Generally (MaxT0SZ == LargestT0SZ) but at the Level 3 Table
                       // the MaxT0SZ is not at the boundary of the table
} T0SZ_DESCRIPTION_PER_LEVEL;

// Map table for the corresponding Level of Table
STATIC CONST T0SZ_DESCRIPTION_PER_LEVEL T0SZPerTableLevel[] = {
  { 16, 24, 24 }, // Table Level 0
  { 25, 33, 33 }, // Table Level 1
  { 34, 39, 42 }  // Table Level 2
};

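//
// Derive the level and the entry count of the root translation table from a
// T0SZ value, assuming a 4KB translation granule. For example, a T0SZ of 32
// (a 32-bit, 4GB input address space) falls in the level 1 range 25..33, and
// the root table holds 1 << (33 - 32 + 1) = 4 entries of 1GB each.
//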
VOID
GetRootTranslationTableInfo (
  IN  UINTN   T0SZ,
  OUT UINTN  *TableLevel,
  OUT UINTN  *TableEntryCount
  )
{
  UINTN Index;

  // Identify the level of the root table from the given T0SZ
  for (Index = 0; Index < sizeof (T0SZPerTableLevel) / sizeof (T0SZ_DESCRIPTION_PER_LEVEL); Index++) {
    if (T0SZ <= T0SZPerTableLevel[Index].MaxT0SZ) {
      break;
    }
  }

  // If we have not found the corresponding maximum T0SZ then we use the last one
  if (Index == sizeof (T0SZPerTableLevel) / sizeof (T0SZ_DESCRIPTION_PER_LEVEL)) {
    Index--;
  }

  // Get the level of the root table
  if (TableLevel) {
    *TableLevel = Index;
  }

  // The root table holds 2^(LargestT0SZ - T0SZ + 1) entries
  if (TableEntryCount) {
    *TableEntryCount = 1 << (T0SZPerTableLevel[Index].LargestT0SZ - T0SZ + 1);
  }
}

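//
// Update a single translation table entry. When the MMU is already enabled the
// entry may be live, so it is replaced via ArmReplaceLiveTranslationEntry (),
// which performs the update safely; otherwise a plain store is sufficient.
//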
STATIC
VOID
ReplaceLiveEntry (
  IN  UINT64  *Entry,
  IN  UINT64  Value
  )
{
  if (!ArmMmuEnabled ()) {
    *Entry = Value;
  } else {
    ArmReplaceLiveTranslationEntry (Entry, Value);
  }
}

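//
// Compute the T0SZ value (and the resulting root table entry count) needed to
// cover addresses up to MaxAddress. T0SZ is 64 minus the number of significant
// address bits: e.g. MaxAddress == 0xFFFFFFFF has its top bit at position 31,
// so T0SZ becomes 64 - 32 = 32.
//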
STATIC
VOID
LookupAddresstoRootTable (
  IN  UINT64  MaxAddress,
  OUT UINTN  *T0SZ,
  OUT UINTN  *TableEntryCount
  )
{
  UINTN TopBit;

  // Check the parameters are not NULL
  ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));

  // Look for the highest bit set in MaxAddress
  for (TopBit = 63; TopBit != 0; TopBit--) {
    if ((1ULL << TopBit) & MaxAddress) {
      // MaxAddress top bit is found
      TopBit = TopBit + 1;
      break;
    }
  }
  ASSERT (TopBit != 0);

  // Calculate T0SZ from the top bit of the MaxAddress
  *T0SZ = 64 - TopBit;

  // Get the Table info from T0SZ
  GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);
}

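//
// Return a pointer to the translation table entry that covers RegionStart at
// the deepest level compatible with the region's alignment and size, creating
// lower-level tables and splitting existing block entries on the way down when
// required. On return, TableLevel, BlockEntrySize and LastBlockEntry describe
// the table that contains the returned entry.
//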
STATIC
UINT64*
GetBlockEntryListFromAddress (
  IN     UINT64   *RootTable,
  IN     UINT64    RegionStart,
  OUT    UINTN    *TableLevel,
  IN OUT UINT64   *BlockEntrySize,
  OUT    UINT64  **LastBlockEntry
  )
{
  UINTN   RootTableLevel;
  UINTN   RootTableEntryCount;
  UINT64 *TranslationTable;
  UINT64 *BlockEntry;
  UINT64 *SubTableBlockEntry;
  UINT64  BlockEntryAddress;
  UINTN   BaseAddressAlignment;
  UINTN   PageLevel;
  UINTN   Index;
  UINTN   IndexLevel;
  UINTN   T0SZ;
  UINT64  Attributes;
  UINT64  TableAttributes;

  // Initialize variable
  BlockEntry = NULL;

  // Ensure the parameters are valid
  if (!(TableLevel && BlockEntrySize && LastBlockEntry)) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // Ensure the Region is aligned on a 4KB boundary
  if ((RegionStart & (SIZE_4KB - 1)) != 0) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // Ensure the required size is aligned on a 4KB boundary and not 0
  if ((*BlockEntrySize & (SIZE_4KB - 1)) != 0 || *BlockEntrySize == 0) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
  // Get the Table info from T0SZ
  GetRootTranslationTableInfo (T0SZ, &RootTableLevel, &RootTableEntryCount);

  // If the start address is 0x0 then we use the size of the region to identify the alignment
  if (RegionStart == 0) {
    // Identify the highest possible alignment for the Region Size
    BaseAddressAlignment = LowBitSet64 (*BlockEntrySize);
  } else {
    // Identify the highest possible alignment for the Base Address
    BaseAddressAlignment = LowBitSet64 (RegionStart);
  }

  // Identify the Page Level the RegionStart must belong to. Note that PageLevel
  // should be at least 1 since block translations are not supported at level 0
  PageLevel = MAX (3 - ((BaseAddressAlignment - 12) / 9), 1);

  // If the required size is smaller than the current block size then we need to move to a finer
  // page level. The PageLevel was calculated from the Base Address alignment but did not take
  // into account the alignment of the allocation size
  while (*BlockEntrySize < TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel)) {
    // It does not fit, so use the next (smaller block) page level
    PageLevel++;
  }
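  //
  // For example, RegionStart == 0x40200000 is 2MB-aligned (lowest set bit 21),
  // so 3 - ((21 - 12) / 9) selects PageLevel 2 (2MB blocks); if the remaining
  // size were smaller than 2MB the loop above would push it to level 3 (4KB).
  //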

  //
  // Get the Table Descriptor for the corresponding PageLevel. We need to decompose RegionStart to get appropriate entries
  //

  TranslationTable = RootTable;
  for (IndexLevel = RootTableLevel; IndexLevel <= PageLevel; IndexLevel++) {
    BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel, RegionStart);

    if ((IndexLevel != 3) && ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY)) {
      // Go to the next table
      TranslationTable = (UINT64*)(*BlockEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE);

      // If we are at the last level then update the last level to the next level
      if (IndexLevel == PageLevel) {
        // Enter the next level
        PageLevel++;
      }
    } else if ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
      // If we are not at the last level then we need to split this BlockEntry
      if (IndexLevel != PageLevel) {
        // Retrieve the attributes from the block entry
        Attributes = *BlockEntry & TT_ATTRIBUTES_MASK;

        // Convert the block entry attributes into Table descriptor attributes
        TableAttributes = TT_TABLE_AP_NO_PERMISSION;
        if (Attributes & TT_NS) {
          TableAttributes = TT_TABLE_NS;
        }

        // Get the address corresponding to this entry
        BlockEntryAddress = RegionStart;
        BlockEntryAddress = BlockEntryAddress >> TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
        // Shift back left to clear the bits below the block boundary
        BlockEntryAddress = BlockEntryAddress << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);

        // Set the correct entry type for the next page level
        if ((IndexLevel + 1) == 3) {
          Attributes |= TT_TYPE_BLOCK_ENTRY_LEVEL3;
        } else {
          Attributes |= TT_TYPE_BLOCK_ENTRY;
        }

        // Create a new translation table
        TranslationTable = (UINT64*)AllocateAlignedPages (EFI_SIZE_TO_PAGES(TT_ENTRY_COUNT * sizeof(UINT64)), TT_ALIGNMENT_DESCRIPTION_TABLE);
        if (TranslationTable == NULL) {
          return NULL;
        }

        // Populate the newly created lower level table
        SubTableBlockEntry = TranslationTable;
        for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
          *SubTableBlockEntry = Attributes | (BlockEntryAddress + (Index << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel + 1)));
          SubTableBlockEntry++;
        }

        // Fill the BlockEntry with the new TranslationTable
        ReplaceLiveEntry (BlockEntry,
          ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TableAttributes | TT_TYPE_TABLE_ENTRY);
      }
    } else {
      if (IndexLevel != PageLevel) {
        //
        // Case when we have an Invalid Entry and we are at a page level above the one targeted.
        //

        // Create a new translation table
        TranslationTable = (UINT64*)AllocateAlignedPages (EFI_SIZE_TO_PAGES(TT_ENTRY_COUNT * sizeof(UINT64)), TT_ALIGNMENT_DESCRIPTION_TABLE);
        if (TranslationTable == NULL) {
          return NULL;
        }

        ZeroMem (TranslationTable, TT_ENTRY_COUNT * sizeof(UINT64));

        // Fill the new BlockEntry with the TranslationTable
        *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TT_TYPE_TABLE_ENTRY;
      }
    }
  }

  // Expose the found PageLevel to the caller
  *TableLevel = PageLevel;

  // Now that we have the Table Level we can get the Block Size associated to this table
  *BlockEntrySize = TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel);

  // The last block of the root table depends on the number of entries in this table,
  // otherwise it is always the (TT_ENTRY_COUNT - 1)th entry in the table.
  *LastBlockEntry = TT_LAST_BLOCK_ADDRESS(TranslationTable,
      (PageLevel == RootTableLevel) ? RootTableEntryCount : TT_ENTRY_COUNT);

  return BlockEntry;
}

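//
// Apply Attributes to the range [RegionStart, RegionStart + RegionLength) by
// walking it in the largest block-sized chunks the translation tables allow.
// BlockEntryMask selects which bits of an existing descriptor survive the
// update: 0 rewrites the whole descriptor, while e.g. ~TT_ADDRESS_MASK_BLOCK_ENTRY
// preserves the current attribute bits and only adds the requested ones.
//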
STATIC
RETURN_STATUS
UpdateRegionMapping (
  IN  UINT64  *RootTable,
  IN  UINT64  RegionStart,
  IN  UINT64  RegionLength,
  IN  UINT64  Attributes,
  IN  UINT64  BlockEntryMask
  )
{
  UINT32  Type;
  UINT64 *BlockEntry;
  UINT64 *LastBlockEntry;
  UINT64  BlockEntrySize;
  UINTN   TableLevel;

  // Ensure the Length is aligned on a 4KB boundary
  if ((RegionLength == 0) || ((RegionLength & (SIZE_4KB - 1)) != 0)) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return RETURN_INVALID_PARAMETER;
  }

  do {
    // Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor
    // such as the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor
    BlockEntrySize = RegionLength;
    BlockEntry = GetBlockEntryListFromAddress (RootTable, RegionStart, &TableLevel, &BlockEntrySize, &LastBlockEntry);
    if (BlockEntry == NULL) {
      // GetBlockEntryListFromAddress() returns NULL when it fails to allocate new pages for the Translation Tables
      return RETURN_OUT_OF_RESOURCES;
    }

    if (TableLevel != 3) {
      Type = TT_TYPE_BLOCK_ENTRY;
    } else {
      Type = TT_TYPE_BLOCK_ENTRY_LEVEL3;
    }

    do {
      // Fill the Block Entry with attribute and output block address
      *BlockEntry &= BlockEntryMask;
      *BlockEntry |= (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;

      // Go to the next BlockEntry
      RegionStart += BlockEntrySize;
      RegionLength -= BlockEntrySize;
      BlockEntry++;

      // Break the inner loop when the next block is a table
      // Rerun GetBlockEntryListFromAddress to avoid a page table memory leak
      if (TableLevel != 3 &&
          (*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
        break;
      }
    } while ((RegionLength >= BlockEntrySize) && (BlockEntry <= LastBlockEntry));
  } while (RegionLength != 0);

  return RETURN_SUCCESS;
}

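//
// Map a single memory region descriptor with the page attributes derived from
// its ARM attributes. TT_AF (the Access Flag) is set up front so that the
// first access to the region does not raise an Access Flag fault.
//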
STATIC
RETURN_STATUS
FillTranslationTable (
  IN  UINT64                        *RootTable,
  IN  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryRegion
  )
{
  return UpdateRegionMapping (
           RootTable,
           MemoryRegion->VirtualBase,
           MemoryRegion->Length,
           ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
           0
           );
}

RETURN_STATUS
SetMemoryAttributes (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length,
  IN UINT64                Attributes,
  IN EFI_PHYSICAL_ADDRESS  VirtualMask
  )
{
  RETURN_STATUS                 Status;
  ARM_MEMORY_REGION_DESCRIPTOR  MemoryRegion;
  UINT64                       *TranslationTable;

  MemoryRegion.PhysicalBase = BaseAddress;
  MemoryRegion.VirtualBase = BaseAddress;
  MemoryRegion.Length = Length;
  MemoryRegion.Attributes = GcdAttributeToArmAttribute (Attributes);

  TranslationTable = ArmGetTTBR0BaseAddress ();

  Status = FillTranslationTable (TranslationTable, &MemoryRegion);
  if (RETURN_ERROR (Status)) {
    return Status;
  }

  // Invalidate all TLB entries so changes are synced
  ArmInvalidateTlb ();

  return RETURN_SUCCESS;
}

STATIC
RETURN_STATUS
SetMemoryRegionAttribute (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length,
  IN UINT64                Attributes,
  IN UINT64                BlockEntryMask
  )
{
  RETURN_STATUS  Status;
  UINT64        *RootTable;

  RootTable = ArmGetTTBR0BaseAddress ();

  Status = UpdateRegionMapping (RootTable, BaseAddress, Length, Attributes, BlockEntryMask);
  if (RETURN_ERROR (Status)) {
    return Status;
  }

  // Invalidate all TLB entries so changes are synced
  ArmInvalidateTlb ();

  return RETURN_SUCCESS;
}

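//
// Mark a region execute-never. In the EL1&0 translation regime the descriptor
// carries separate UXN and PXN bits, whereas the EL2 regime has a single XN
// bit, so the value to OR in depends on the current exception level.
//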
RETURN_STATUS
ArmSetMemoryRegionNoExec (
  IN  EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN  UINT64                Length
  )
{
  UINT64  Val;

  if (ArmReadCurrentEL () == AARCH64_EL1) {
    Val = TT_PXN_MASK | TT_UXN_MASK;
  } else {
    Val = TT_XN_MASK;
  }

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           Val,
           ~TT_ADDRESS_MASK_BLOCK_ENTRY);
}

RETURN_STATUS
ArmClearMemoryRegionNoExec (
  IN  EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN  UINT64                Length
  )
{
  UINT64  Mask;

  // XN maps to UXN in the EL1&0 translation regime
  Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           0,
           Mask);
}

RETURN_STATUS
ArmSetMemoryRegionReadOnly (
  IN  EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN  UINT64                Length
  )
{
  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           TT_AP_RO_RO,
           ~TT_ADDRESS_MASK_BLOCK_ENTRY);
}

RETURN_STATUS
ArmClearMemoryRegionReadOnly (
  IN  EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN  UINT64                Length
  )
{
  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           TT_AP_RW_RW,
           ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));
}

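//
// Build the initial translation tables from the platform memory map and enable
// the MMU: compute T0SZ/TCR from the highest address in the map, allocate and
// populate the root table, program MAIR, and finally turn on the caches and
// the MMU.
//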
RETURN_STATUS
EFIAPI
ArmConfigureMmu (
  IN  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryTable,
  OUT VOID                         **TranslationTableBase OPTIONAL,
  OUT UINTN                         *TranslationTableSize OPTIONAL
  )
{
  VOID*                          TranslationTable;
  UINTN                          TranslationTablePageCount;
  UINT32                         TranslationTableAttribute;
  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryTableEntry;
  UINT64                         MaxAddress;
  UINT64                         TopAddress;
  UINTN                          T0SZ;
  UINTN                          RootTableEntryCount;
  UINT64                         TCR;
  RETURN_STATUS                  Status;

  if (MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return RETURN_INVALID_PARAMETER;
  }

  // Identify the highest address of the memory table
  MaxAddress = MemoryTable->PhysicalBase + MemoryTable->Length - 1;
  MemoryTableEntry = MemoryTable;
  while (MemoryTableEntry->Length != 0) {
    TopAddress = MemoryTableEntry->PhysicalBase + MemoryTableEntry->Length - 1;
    if (TopAddress > MaxAddress) {
      MaxAddress = TopAddress;
    }
    MemoryTableEntry++;
  }

  // Lookup the Table Level to get the information
  LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);

  //
  // Set TCR that allows us to retrieve T0SZ in the subsequent functions
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    // Note: Bits 23 and 31 are reserved (RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Address spaces wider than 48 bits are not supported
      return RETURN_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Address spaces wider than 48 bits are not supported
      return RETURN_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return RETURN_UNSUPPORTED;
  }

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for the translation table
  TranslationTablePageCount = EFI_SIZE_TO_PAGES(RootTableEntryCount * sizeof(UINT64));
  TranslationTable = (UINT64*)AllocateAlignedPages (TranslationTablePageCount, TT_ALIGNMENT_DESCRIPTION_TABLE);
  if (TranslationTable == NULL) {
    return RETURN_OUT_OF_RESOURCES;
  }
  // We set TTBR0 just after allocating the table to retrieve its location from the subsequent
  // functions without needing to pass this value across the functions. The MMU is only enabled
  // after the translation tables are populated.
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof(UINT64);
  }

  ZeroMem (TranslationTable, RootTableEntryCount * sizeof(UINT64));

  // Disable MMU and caches. ArmDisableMmu() also invalidates the TLBs
  ArmDisableMmu ();
  ArmDisableDataCache ();
  ArmDisableInstructionCache ();

  // Make sure nothing sneaked into the cache
  ArmCleanInvalidateDataCache ();
  ArmInvalidateInstructionCache ();

  TranslationTableAttribute = TT_ATTR_INDX_INVALID;
  while (MemoryTable->Length != 0) {
    // Find the memory attribute for the Translation Table
    if (((UINTN)TranslationTable >= MemoryTable->PhysicalBase) &&
        ((UINTN)TranslationTable <= MemoryTable->PhysicalBase - 1 + MemoryTable->Length)) {
      TranslationTableAttribute = MemoryTable->Attributes;
    }

    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (RETURN_ERROR (Status)) {
      goto FREE_TRANSLATION_TABLE;
    }
    MemoryTable++;
  }

  // Translate the Memory Attributes into Translation Table Register Attributes
  if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED) ||
      (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED)) {
    TCR |= TCR_SH_NON_SHAREABLE | TCR_RGN_OUTER_NON_CACHEABLE | TCR_RGN_INNER_NON_CACHEABLE;
  } else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK) ||
      (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK)) {
    TCR |= TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WRITE_BACK_ALLOC | TCR_RGN_INNER_WRITE_BACK_ALLOC;
  } else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH) ||
      (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH)) {
    TCR |= TCR_SH_NON_SHAREABLE | TCR_RGN_OUTER_WRITE_THROUGH | TCR_RGN_INNER_WRITE_THROUGH;
  } else {
    // If we failed to find a mapping that contains the root translation table then it probably means
    // the translation table is not mapped in the given memory map.
    ASSERT (0);
    Status = RETURN_UNSUPPORTED;
    goto FREE_TRANSLATION_TABLE;
  }

  // Set TCR again after getting the Translation Table attributes
  ArmSetTCR (TCR);

  ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |                      // mapped to EFI_MEMORY_UC
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) | // mapped to EFI_MEMORY_WC
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) | // mapped to EFI_MEMORY_WT
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK));       // mapped to EFI_MEMORY_WB
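  //
  // Each MAIR_ATTR(n, value) above places an 8-bit memory attribute encoding in
  // byte n of MAIR_ELx, so the AttrIndx field of a descriptor selects that byte;
  // the indices must therefore match the TT_ATTR_INDX_* values used when the
  // descriptors were built.
  //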

  ArmDisableAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return RETURN_SUCCESS;

FREE_TRANSLATION_TABLE:
  FreePages (TranslationTable, TranslationTablePageCount);
  return Status;
}