ArmPkg/Library/ArmLib/AArch64/AArch64Mmu.c
/** @file
*  File managing the MMU for ARMv8 architecture
*
*  Copyright (c) 2011-2014, ARM Limited. All rights reserved.
*
*  This program and the accompanying materials
*  are licensed and made available under the terms and conditions of the BSD License
*  which accompanies this distribution. The full text of the license may be found at
*  http://opensource.org/licenses/bsd-license.php
*
*  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
*  WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
*
**/

#include <Uefi.h>
#include <Chipset/AArch64.h>
#include <Library/BaseMemoryLib.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/ArmLib.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>
#include "AArch64Lib.h"
#include "ArmLibPrivate.h"

// We use this index definition to define an invalid block entry
#define TT_ATTR_INDX_INVALID    ((UINT32)~0)

STATIC
UINT64
ArmMemoryAttributeToPageAttribute (
  IN ARM_MEMORY_REGION_ATTRIBUTES  Attributes
  )
{
  switch (Attributes) {
  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
    return TT_ATTR_INDX_MEMORY_WRITE_BACK;
  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
    return TT_ATTR_INDX_MEMORY_WRITE_THROUGH;
  case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
    return TT_ATTR_INDX_DEVICE_MEMORY;
  case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
    return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
    return TT_ATTR_INDX_MEMORY_WRITE_BACK;
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
    return TT_ATTR_INDX_MEMORY_WRITE_THROUGH;
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
    return TT_ATTR_INDX_DEVICE_MEMORY;
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
    return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
  default:
    ASSERT (0);
    return TT_ATTR_INDX_DEVICE_MEMORY;
  }
}
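
// Note: the attribute index returned above selects one of the 8-bit memory
// attribute fields programmed into MAIR_ELx by the ArmSetMAIR() call in
// ArmConfigureMmu() below; the index carries no meaning until MAIR is
// configured accordingly.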

UINT64
PageAttributeToGcdAttribute (
  IN UINT64  PageAttributes
  )
{
  UINT64  GcdAttributes;

  switch (PageAttributes & TT_ATTR_INDX_MASK) {
  case TT_ATTR_INDX_DEVICE_MEMORY:
    GcdAttributes = EFI_MEMORY_UC;
    break;
  case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:
    GcdAttributes = EFI_MEMORY_WC;
    break;
  case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:
    GcdAttributes = EFI_MEMORY_WT;
    break;
  case TT_ATTR_INDX_MEMORY_WRITE_BACK:
    GcdAttributes = EFI_MEMORY_WB;
    break;
  default:
    DEBUG ((EFI_D_ERROR, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes));
    ASSERT (0);
    // The Global Coherency Domain (GCD) value is defined as a bit set.
    // Returning 0 means no attribute has been set.
    GcdAttributes = 0;
  }

  // Determine protection attributes
  if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) || ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {
    // Read only cases map to write-protect
    GcdAttributes |= EFI_MEMORY_WP;
  }

  // Process eXecute Never attribute
  if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0) {
    GcdAttributes |= EFI_MEMORY_XP;
  }

  return GcdAttributes;
}
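
// Illustrative example (not from the original source): a write-back, read-only,
// non-executable descriptor, i.e. one carrying
// TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_AP_RO_RO | TT_PXN_MASK | TT_UXN_MASK,
// is reported back to the GCD as EFI_MEMORY_WB | EFI_MEMORY_WP | EFI_MEMORY_XP.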

UINT64
GcdAttributeToPageAttribute (
  IN UINT64  GcdAttributes
  )
{
  UINT64  PageAttributes;

  switch (GcdAttributes & 0xFF) {
  case EFI_MEMORY_UC:
    PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
    break;
  case EFI_MEMORY_WC:
    PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
    break;
  case EFI_MEMORY_WT:
    PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH;
    break;
  case EFI_MEMORY_WB:
    PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK;
    break;
  default:
    DEBUG ((EFI_D_ERROR, "GcdAttributeToPageAttribute: 0x%lX attributes are not supported.\n", GcdAttributes));
    ASSERT (0);
    // If no match has been found then we mark the memory as device memory.
    // The only side effect of using device memory should be a slow down in performance.
    PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
  }

  // Determine protection attributes
  if (GcdAttributes & EFI_MEMORY_WP) {
    // Read only cases map to write-protect
    PageAttributes |= TT_AP_RO_RO;
  }

  // Process eXecute Never attribute
  if (GcdAttributes & EFI_MEMORY_XP) {
    PageAttributes |= (TT_PXN_MASK | TT_UXN_MASK);
  }

  return PageAttributes;
}

ARM_MEMORY_REGION_ATTRIBUTES
GcdAttributeToArmAttribute (
  IN UINT64  GcdAttributes
  )
{
  switch (GcdAttributes & 0xFF) {
  case EFI_MEMORY_UC:
    return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;
  case EFI_MEMORY_WC:
    return ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED;
  case EFI_MEMORY_WT:
    return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH;
  case EFI_MEMORY_WB:
    return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK;
  default:
    DEBUG ((EFI_D_ERROR, "GcdAttributeToArmAttribute: 0x%lX attributes are not supported.\n", GcdAttributes));
    ASSERT (0);
    return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;
  }
}

// Describe the T0SZ values for each translation table level
typedef struct {
  UINTN  MinT0SZ;
  UINTN  MaxT0SZ;
  UINTN  LargestT0SZ;  // Generally (MaxT0SZ == LargestT0SZ) but for the last table level
                       // the MaxT0SZ is not at the boundary of the table
} T0SZ_DESCRIPTION_PER_LEVEL;

// Map table for the corresponding Level of Table
STATIC CONST T0SZ_DESCRIPTION_PER_LEVEL T0SZPerTableLevel[] = {
  { 16, 24, 24 }, // Table Level 0
  { 25, 33, 33 }, // Table Level 1
  { 34, 39, 42 }  // Table Level 2
};

VOID
GetRootTranslationTableInfo (
  IN  UINTN   T0SZ,
  OUT UINTN  *TableLevel,
  OUT UINTN  *TableEntryCount
  )
{
  UINTN  Index;

  // Identify the level of the root table from the given T0SZ
  for (Index = 0; Index < sizeof (T0SZPerTableLevel) / sizeof (T0SZ_DESCRIPTION_PER_LEVEL); Index++) {
    if (T0SZ <= T0SZPerTableLevel[Index].MaxT0SZ) {
      break;
    }
  }

  // If we have not found the corresponding maximum T0SZ then we use the last one
  if (Index == sizeof (T0SZPerTableLevel) / sizeof (T0SZ_DESCRIPTION_PER_LEVEL)) {
    Index--;
  }

  // Get the level of the root table
  if (TableLevel) {
    *TableLevel = Index;
  }

  // The entry count of the root table is 2^(LargestT0SZ - T0SZ + 1)
  if (TableEntryCount) {
    *TableEntryCount = 1 << (T0SZPerTableLevel[Index].LargestT0SZ - T0SZ + 1);
  }
}
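
// Worked example (illustrative): with a 4KB granule and T0SZ == 32 the virtual
// address space is 2^(64 - 32) = 4GB. That falls in the Level 1 row above
// (25 <= 32 <= 33), so the root is a Level 1 table holding
// 2^(33 - 32 + 1) = 4 entries, each covering a 1GB block.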

STATIC
VOID
LookupAddresstoRootTable (
  IN  UINT64  MaxAddress,
  OUT UINTN  *T0SZ,
  OUT UINTN  *TableEntryCount
  )
{
  UINTN  TopBit;

  // Check the parameters are not NULL
  ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));

  // Look for the highest bit set in MaxAddress
  for (TopBit = 63; TopBit != 0; TopBit--) {
    if ((1ULL << TopBit) & MaxAddress) {
      // MaxAddress top bit is found
      TopBit = TopBit + 1;
      break;
    }
  }
  ASSERT (TopBit != 0);

  // Calculate T0SZ from the top bit of the MaxAddress
  *T0SZ = 64 - TopBit;

  // Get the Table info from T0SZ
  GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);
}
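
// Worked example (illustrative): for MaxAddress == 0xFFFFFFFF (a 4GB address
// space) the highest set bit is bit 31, so TopBit becomes 32 and
// T0SZ = 64 - 32 = 32, matching the Level 1 example above.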

STATIC
UINT64*
GetBlockEntryListFromAddress (
  IN     UINT64   *RootTable,
  IN     UINT64    RegionStart,
  OUT    UINTN    *TableLevel,
  IN OUT UINT64   *BlockEntrySize,
  IN OUT UINT64  **LastBlockEntry
  )
{
  UINTN    RootTableLevel;
  UINTN    RootTableEntryCount;
  UINT64  *TranslationTable;
  UINT64  *BlockEntry;
  UINT64   BlockEntryAddress;
  UINTN    BaseAddressAlignment;
  UINTN    PageLevel;
  UINTN    Index;
  UINTN    IndexLevel;
  UINTN    T0SZ;
  UINT64   Attributes;
  UINT64   TableAttributes;

  // Initialize variable
  BlockEntry = NULL;

  // Ensure the parameters are valid
  if (!(TableLevel && BlockEntrySize && LastBlockEntry)) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // Ensure the Region is aligned on 4KB boundary
  if ((RegionStart & (SIZE_4KB - 1)) != 0) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // Ensure the required size is aligned on 4KB boundary
  if ((*BlockEntrySize & (SIZE_4KB - 1)) != 0) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  //
  // Calculate LastBlockEntry from T0SZ - this is the last block entry of the root Translation table
  //
  T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
  // Get the Table info from T0SZ
  GetRootTranslationTableInfo (T0SZ, &RootTableLevel, &RootTableEntryCount);
  // The last block of the root table depends on the number of entries in this table
  *LastBlockEntry = TT_LAST_BLOCK_ADDRESS(RootTable, RootTableEntryCount);

  // If the start address is 0x0 then we use the size of the region to identify the alignment
  if (RegionStart == 0) {
    // Identify the highest possible alignment for the Region Size
    for (BaseAddressAlignment = 0; BaseAddressAlignment < 64; BaseAddressAlignment++) {
      if ((1ULL << BaseAddressAlignment) & *BlockEntrySize) {
        break;
      }
    }
  } else {
    // Identify the highest possible alignment for the Base Address
    for (BaseAddressAlignment = 0; BaseAddressAlignment < 64; BaseAddressAlignment++) {
      if ((1ULL << BaseAddressAlignment) & RegionStart) {
        break;
      }
    }
  }

  // Identify the Page Level the RegionStart must belong to
  PageLevel = 3 - ((BaseAddressAlignment - 12) / 9);

  // If the required size is smaller than the block size at this level then we need to descend one
  // level and use smaller blocks. The PageLevel was calculated from the Base Address alignment but
  // did not take into account the alignment of the allocation size.
  if (*BlockEntrySize < TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel)) {
    // It does not fit, so use the next (deeper) page level
    PageLevel++;
  }
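
  // With a 4KB granule the mapping size at each level is:
  //   Level 0: 512GB (2^39), Level 1: 1GB (2^30), Level 2: 2MB (2^21), Level 3: 4KB (2^12).
  // The formula above therefore selects Level 3 for alignments of 12..20 bits,
  // Level 2 for 21..29 bits, Level 1 for 30..38 bits, and Level 0 for 39 bits and above.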

  // Expose the found PageLevel to the caller
  *TableLevel = PageLevel;

  // Now that we have the Table Level we can get the Block Size associated to this table
  *BlockEntrySize = TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel);

  //
  // Get the Table Descriptor for the corresponding PageLevel. We need to decompose RegionStart to get appropriate entries
  //

  TranslationTable = RootTable;
  for (IndexLevel = RootTableLevel; IndexLevel <= PageLevel; IndexLevel++) {
    BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel, RegionStart);

    if ((IndexLevel != 3) && ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY)) {
      // Go to the next table
      TranslationTable = (UINT64*)(*BlockEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE);

      // If we are at the last level then update the output
      if (IndexLevel == PageLevel) {
        // And get the appropriate BlockEntry at the next level
        BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel + 1, RegionStart);

        // Set the last block for this new table
        *LastBlockEntry = TT_LAST_BLOCK_ADDRESS(TranslationTable, TT_ENTRY_COUNT);
      }
    } else if ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
      // If we are not at the last level then we need to split this BlockEntry
      if (IndexLevel != PageLevel) {
        // Retrieve the attributes from the block entry
        Attributes = *BlockEntry & TT_ATTRIBUTES_MASK;

        // Convert the block entry attributes into Table descriptor attributes
        TableAttributes = TT_TABLE_AP_NO_PERMISSION;
        if (Attributes & TT_PXN_MASK) {
          TableAttributes |= TT_TABLE_PXN;
        }
        if (Attributes & TT_UXN_MASK) {
          TableAttributes |= TT_TABLE_XN;
        }
        if (Attributes & TT_NS) {
          TableAttributes |= TT_TABLE_NS;
        }
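
        // Table descriptors carry hierarchical controls (PXNTable, XNTable, NSTable,
        // APTable) that apply to every entry reachable through the table, which is why
        // the block's PXN/UXN/NS attributes are propagated to the table descriptor
        // before the block is split into a finer-grained table.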

        // Get the address corresponding to this entry
        BlockEntryAddress = RegionStart;
        BlockEntryAddress = BlockEntryAddress >> TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
        // Shift back to the left to zero the bits below the block offset
        BlockEntryAddress = BlockEntryAddress << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);

        // Set the correct entry type for the next page level
        if ((IndexLevel + 1) == 3) {
          Attributes |= TT_TYPE_BLOCK_ENTRY_LEVEL3;
        } else {
          Attributes |= TT_TYPE_BLOCK_ENTRY;
        }

        // Create a new translation table
        TranslationTable = (UINT64*)AllocatePages (EFI_SIZE_TO_PAGES((TT_ENTRY_COUNT * sizeof(UINT64)) + TT_ALIGNMENT_DESCRIPTION_TABLE));
        if (TranslationTable == NULL) {
          return NULL;
        }
        TranslationTable = (UINT64*)((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE);

        // Fill the BlockEntry with the new TranslationTable
        *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TableAttributes | TT_TYPE_TABLE_ENTRY;
        // Update the last block entry with the newly created translation table
        *LastBlockEntry = TT_LAST_BLOCK_ADDRESS(TranslationTable, TT_ENTRY_COUNT);

        // Populate the newly created lower level table
        BlockEntry = TranslationTable;
        for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
          *BlockEntry = Attributes | (BlockEntryAddress + (Index << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel + 1)));
          BlockEntry++;
        }
        // Block Entry points at the beginning of the Translation Table
        BlockEntry = TranslationTable;
      }
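
      // The split replaces a single large block mapping with a table of
      // TT_ENTRY_COUNT smaller entries covering the same address range with the
      // same attributes, so the mapping seen by the MMU is unchanged.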
    } else {
      // Case of an invalid entry while we are at a page level above the one targeted.
      if (IndexLevel != PageLevel) {
        // Create a new translation table
        TranslationTable = (UINT64*)AllocatePages (EFI_SIZE_TO_PAGES((TT_ENTRY_COUNT * sizeof(UINT64)) + TT_ALIGNMENT_DESCRIPTION_TABLE));
        if (TranslationTable == NULL) {
          return NULL;
        }
        TranslationTable = (UINT64*)((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE);

        ZeroMem (TranslationTable, TT_ENTRY_COUNT * sizeof(UINT64));

        // Fill the new BlockEntry with the TranslationTable
        *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TT_TYPE_TABLE_ENTRY;
        // Update the last block entry with the newly created translation table
        *LastBlockEntry = TT_LAST_BLOCK_ADDRESS(TranslationTable, TT_ENTRY_COUNT);
      }
    }
  }

  return BlockEntry;
}

STATIC
RETURN_STATUS
FillTranslationTable (
  IN UINT64                        *RootTable,
  IN ARM_MEMORY_REGION_DESCRIPTOR  *MemoryRegion
  )
{
  UINT64   Attributes;
  UINT32   Type;
  UINT64   RegionStart;
  UINT64   RemainingRegionLength;
  UINT64  *BlockEntry;
  UINT64  *LastBlockEntry;
  UINT64   BlockEntrySize;
  UINTN    TableLevel;

  // Ensure the Length is aligned on 4KB boundary
  if ((MemoryRegion->Length == 0) || ((MemoryRegion->Length & (SIZE_4KB - 1)) != 0)) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return RETURN_INVALID_PARAMETER;
  }

  // Variable initialization
  Attributes = ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF;
  RemainingRegionLength = MemoryRegion->Length;
  RegionStart = MemoryRegion->VirtualBase;

  do {
    // Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor
    // such as the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor
    BlockEntrySize = RemainingRegionLength;
    BlockEntry = GetBlockEntryListFromAddress (RootTable, RegionStart, &TableLevel, &BlockEntrySize, &LastBlockEntry);
    if (BlockEntry == NULL) {
      // GetBlockEntryListFromAddress() returns NULL when it fails to allocate new pages for the Translation Tables
      return RETURN_OUT_OF_RESOURCES;
    }

    if (TableLevel != 3) {
      Type = TT_TYPE_BLOCK_ENTRY;
    } else {
      Type = TT_TYPE_BLOCK_ENTRY_LEVEL3;
    }
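
    // Level 3 uses a different descriptor type encoding: a level 3 page descriptor
    // sets bits[1:0] to 0b11, whereas block descriptors at the higher levels use
    // 0b01, which is why the entry type depends on the level returned by
    // GetBlockEntryListFromAddress().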

    do {
      // Fill the Block Entry with attribute and output block address
      *BlockEntry = (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;

      // Go to the next BlockEntry
      RegionStart += BlockEntrySize;
      RemainingRegionLength -= BlockEntrySize;
      BlockEntry++;
    } while ((RemainingRegionLength >= BlockEntrySize) && (BlockEntry <= LastBlockEntry));
  } while (RemainingRegionLength != 0);

  return RETURN_SUCCESS;
}

RETURN_STATUS
SetMemoryAttributes (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length,
  IN UINT64                Attributes,
  IN EFI_PHYSICAL_ADDRESS  VirtualMask
  )
{
  RETURN_STATUS                 Status;
  ARM_MEMORY_REGION_DESCRIPTOR  MemoryRegion;
  UINT64                       *TranslationTable;
  MemoryRegion.PhysicalBase = BaseAddress;
  MemoryRegion.VirtualBase = BaseAddress;
  MemoryRegion.Length = Length;
  MemoryRegion.Attributes = GcdAttributeToArmAttribute (Attributes);

  TranslationTable = ArmGetTTBR0BaseAddress ();

  Status = FillTranslationTable (TranslationTable, &MemoryRegion);
  if (RETURN_ERROR (Status)) {
    return Status;
  }

  // Flush d-cache so descriptors make it back to uncached memory for subsequent table walks
  // flush and invalidate pages
  ArmCleanInvalidateDataCache ();

  ArmInvalidateInstructionCache ();

  // Invalidate all TLB entries so changes are synced
  ArmInvalidateTlb ();

  return RETURN_SUCCESS;
}

RETURN_STATUS
EFIAPI
ArmConfigureMmu (
  IN  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryTable,
  OUT VOID                         **TranslationTableBase OPTIONAL,
  OUT UINTN                         *TranslationTableSize OPTIONAL
  )
{
  VOID                          *TranslationTable;
  UINTN                          TranslationTablePageCount;
  UINT32                         TranslationTableAttribute;
  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryTableEntry;
  UINT64                         MaxAddress;
  UINT64                         TopAddress;
  UINTN                          T0SZ;
  UINTN                          RootTableEntryCount;
  UINT64                         TCR;
  RETURN_STATUS                  Status;

  if (MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return RETURN_INVALID_PARAMETER;
  }

  // Identify the highest address of the memory table
  MaxAddress = MemoryTable->PhysicalBase + MemoryTable->Length - 1;
  MemoryTableEntry = MemoryTable;
  while (MemoryTableEntry->Length != 0) {
    TopAddress = MemoryTableEntry->PhysicalBase + MemoryTableEntry->Length - 1;
    if (TopAddress > MaxAddress) {
      MaxAddress = TopAddress;
    }
    MemoryTableEntry++;
  }

  // Lookup the Table Level to get the information
  LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);

  //
  // Set TCR that allows us to retrieve T0SZ in the subsequent functions
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
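
  // TCR_EL2 and TCR_EL1 have different layouts: TCR_EL2 encodes the physical
  // address size in the PS field (bits [18:16]) and has several RES1 bits, while
  // TCR_EL1 encodes it in the IPS field (bits [34:32]); hence the two branches below.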
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    // Note: Bits 23 and 31 are reserved (RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Address spaces wider than 48 bits are not supported
      return RETURN_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    TCR = T0SZ | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Address spaces wider than 48 bits are not supported
      return RETURN_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return RETURN_UNSUPPORTED;
  }

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for translation table
  TranslationTablePageCount = EFI_SIZE_TO_PAGES((RootTableEntryCount * sizeof(UINT64)) + TT_ALIGNMENT_DESCRIPTION_TABLE);
  TranslationTable = AllocatePages (TranslationTablePageCount);
  if (TranslationTable == NULL) {
    return RETURN_OUT_OF_RESOURCES;
  }
  TranslationTable = (VOID*)((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE);
  // We set TTBR0 just after allocating the table to retrieve its location from the subsequent
  // functions without needing to pass this value across the functions. The MMU is only enabled
  // after the translation tables are populated.
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof(UINT64);
  }

  ZeroMem (TranslationTable, RootTableEntryCount * sizeof(UINT64));

  // Disable MMU and caches. ArmDisableMmu() also invalidates the TLBs
  ArmDisableMmu ();
  ArmDisableDataCache ();
  ArmDisableInstructionCache ();

  // Make sure nothing sneaked into the cache
  ArmCleanInvalidateDataCache ();
  ArmInvalidateInstructionCache ();

  TranslationTableAttribute = TT_ATTR_INDX_INVALID;
  while (MemoryTable->Length != 0) {
    // Find the memory attribute for the Translation Table
    if (((UINTN)TranslationTable >= MemoryTable->PhysicalBase) &&
        ((UINTN)TranslationTable <= MemoryTable->PhysicalBase - 1 + MemoryTable->Length)) {
      TranslationTableAttribute = MemoryTable->Attributes;
    }

    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (RETURN_ERROR (Status)) {
      goto FREE_TRANSLATION_TABLE;
    }
    MemoryTable++;
  }

  // Translate the Memory Attributes into Translation Table Register Attributes
  if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED) ||
      (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED)) {
    TCR |= TCR_SH_NON_SHAREABLE | TCR_RGN_OUTER_NON_CACHEABLE | TCR_RGN_INNER_NON_CACHEABLE;
  } else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK) ||
             (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK)) {
    TCR |= TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WRITE_BACK_ALLOC | TCR_RGN_INNER_WRITE_BACK_ALLOC;
  } else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH) ||
             (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH)) {
    TCR |= TCR_SH_NON_SHAREABLE | TCR_RGN_OUTER_WRITE_THROUGH | TCR_RGN_INNER_WRITE_THROUGH;
  } else {
    // If we failed to find a mapping that contains the root translation table then it probably
    // means the translation table is not mapped in the given memory map.
    ASSERT (0);
    Status = RETURN_UNSUPPORTED;
    goto FREE_TRANSLATION_TABLE;
  }
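
  // The shareability (SH) and cacheability (RGN) fields selected above describe the
  // memory that holds the translation tables themselves; they must match the
  // attributes the tables are mapped with so that the MMU's table walks observe the
  // entries the CPU has written.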

  // Program TCR with the final value so the shareability and cacheability fields
  // chosen above take effect before the MMU is enabled (the earlier ArmSetTCR()
  // call was made before these fields were known).
  ArmSetTCR (TCR);

  ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |                      // mapped to EFI_MEMORY_UC
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) | // mapped to EFI_MEMORY_WC
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) | // mapped to EFI_MEMORY_WT
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK));       // mapped to EFI_MEMORY_WB
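
  // MAIR_ELx holds eight 8-bit attribute fields; the AttrIndx value placed in each
  // descriptor by ArmMemoryAttributeToPageAttribute() selects one of the four
  // fields programmed here.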

  ArmDisableAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return RETURN_SUCCESS;

FREE_TRANSLATION_TABLE:
  FreePages (TranslationTable, TranslationTablePageCount);
  return Status;
}