ArmPkg/ArmLib/AArch64: Initialize the new N+1-level page table before registering it
/** @file
*  File managing the MMU for ARMv8 architecture
*
*  Copyright (c) 2011-2014, ARM Limited. All rights reserved.
*
*  This program and the accompanying materials
*  are licensed and made available under the terms and conditions of the BSD License
*  which accompanies this distribution. The full text of the license may be found at
*  http://opensource.org/licenses/bsd-license.php
*
*  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
*  WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
*
**/

#include <Uefi.h>
#include <Chipset/AArch64.h>
#include <Library/BaseMemoryLib.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/ArmLib.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>
#include "AArch64Lib.h"
#include "ArmLibPrivate.h"

// We use this index definition to define an invalid block entry
#define TT_ATTR_INDX_INVALID    ((UINT32)~0)

STATIC
UINT64
ArmMemoryAttributeToPageAttribute (
  IN ARM_MEMORY_REGION_ATTRIBUTES  Attributes
  )
{
  switch (Attributes) {
  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
    return TT_ATTR_INDX_MEMORY_WRITE_BACK;
  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
    return TT_ATTR_INDX_MEMORY_WRITE_THROUGH;
  case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
    return TT_ATTR_INDX_DEVICE_MEMORY;
  case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
    return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
    return TT_ATTR_INDX_MEMORY_WRITE_BACK;
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
    return TT_ATTR_INDX_MEMORY_WRITE_THROUGH;
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
    return TT_ATTR_INDX_DEVICE_MEMORY;
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
    return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
  default:
    ASSERT (0);
    return TT_ATTR_INDX_DEVICE_MEMORY;
  }
}

UINT64
PageAttributeToGcdAttribute (
  IN UINT64 PageAttributes
  )
{
  UINT64  GcdAttributes;

  switch (PageAttributes & TT_ATTR_INDX_MASK) {
  case TT_ATTR_INDX_DEVICE_MEMORY:
    GcdAttributes = EFI_MEMORY_UC;
    break;
  case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:
    GcdAttributes = EFI_MEMORY_WC;
    break;
  case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:
    GcdAttributes = EFI_MEMORY_WT;
    break;
  case TT_ATTR_INDX_MEMORY_WRITE_BACK:
    GcdAttributes = EFI_MEMORY_WB;
    break;
  default:
    DEBUG ((EFI_D_ERROR, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes));
    ASSERT (0);
    // The Global Coherency Domain (GCD) value is defined as a bit set.
    // Returning 0 means no attribute has been set.
    GcdAttributes = 0;
  }

  // Determine protection attributes
  if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) || ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {
    // Read only cases map to write-protect
    GcdAttributes |= EFI_MEMORY_WP;
  }

  // Process eXecute Never attribute
  if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0) {
    GcdAttributes |= EFI_MEMORY_XP;
  }

  return GcdAttributes;
}

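//
// Worked example (illustrative, not part of the original file): a descriptor
// for normal write-back memory that is mapped read-only and non-executable
// carries TT_ATTR_INDX_MEMORY_WRITE_BACK in its attribute index field, an
// access permission field equal to TT_AP_RO_RO, and both TT_PXN_MASK and
// TT_UXN_MASK set. PageAttributeToGcdAttribute() folds such a descriptor
// into EFI_MEMORY_WB | EFI_MEMORY_WP | EFI_MEMORY_XP for the GCD layer.
//
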
UINT64
GcdAttributeToPageAttribute (
  IN UINT64 GcdAttributes
  )
{
  UINT64  PageAttributes;

  switch (GcdAttributes & 0xFF) {
  case EFI_MEMORY_UC:
    PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
    break;
  case EFI_MEMORY_WC:
    PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
    break;
  case EFI_MEMORY_WT:
    PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH;
    break;
  case EFI_MEMORY_WB:
    PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK;
    break;
  default:
    DEBUG ((EFI_D_ERROR, "GcdAttributeToPageAttribute: 0x%X attributes is not supported.\n", GcdAttributes));
    ASSERT (0);
    // If no match has been found then we mark the memory as device memory.
    // The only side effect of using device memory should be a slowdown in performance.
    PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
  }

  // Determine protection attributes
  if (GcdAttributes & EFI_MEMORY_WP) {
    // Read only cases map to write-protect
    PageAttributes |= TT_AP_RO_RO;
  }

  // Process eXecute Never attribute
  if (GcdAttributes & EFI_MEMORY_XP) {
    PageAttributes |= (TT_PXN_MASK | TT_UXN_MASK);
  }

  return PageAttributes;
}

ARM_MEMORY_REGION_ATTRIBUTES
GcdAttributeToArmAttribute (
  IN UINT64 GcdAttributes
  )
{
  switch (GcdAttributes & 0xFF) {
  case EFI_MEMORY_UC:
    return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;
  case EFI_MEMORY_WC:
    return ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED;
  case EFI_MEMORY_WT:
    return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH;
  case EFI_MEMORY_WB:
    return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK;
  default:
    DEBUG ((EFI_D_ERROR, "GcdAttributeToArmAttribute: 0x%lX attributes is not supported.\n", GcdAttributes));
    ASSERT (0);
    return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;
  }
}

// Describe the T0SZ values for each translation table level
typedef struct {
  UINTN  MinT0SZ;
  UINTN  MaxT0SZ;
  UINTN  LargestT0SZ;  // Generally (MaxT0SZ == LargestT0SZ), but for the last table level
                       // the MaxT0SZ is not at the boundary of the table
} T0SZ_DESCRIPTION_PER_LEVEL;

// Map table for the corresponding Level of Table
STATIC CONST T0SZ_DESCRIPTION_PER_LEVEL T0SZPerTableLevel[] = {
  { 16, 24, 24 }, // Table Level 0
  { 25, 33, 33 }, // Table Level 1
  { 34, 39, 42 }  // Table Level 2
};

VOID
GetRootTranslationTableInfo (
  IN  UINTN  T0SZ,
  OUT UINTN *TableLevel,
  OUT UINTN *TableEntryCount
  )
{
  UINTN Index;

  // Identify the level of the root table from the given T0SZ
  for (Index = 0; Index < sizeof (T0SZPerTableLevel) / sizeof (T0SZ_DESCRIPTION_PER_LEVEL); Index++) {
    if (T0SZ <= T0SZPerTableLevel[Index].MaxT0SZ) {
      break;
    }
  }

  // If we have not found the corresponding maximum T0SZ then we use the last one
  if (Index == sizeof (T0SZPerTableLevel) / sizeof (T0SZ_DESCRIPTION_PER_LEVEL)) {
    Index--;
  }

  // Get the level of the root table
  if (TableLevel) {
    *TableLevel = Index;
  }

  // The number of entries in the root table is 2^(LargestT0SZ - T0SZ + 1)
  if (TableEntryCount) {
    *TableEntryCount = 1 << (T0SZPerTableLevel[Index].LargestT0SZ - T0SZ + 1);
  }
}

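//
// Worked example (illustrative, not part of the original file): with a 4KB
// granule and T0SZ = 25, the loop above selects table level 1
// (25 <= MaxT0SZ of 33) and the root table holds
// 1 << (33 - 25 + 1) = 512 entries, i.e. one full 4KB table covering a
// 39-bit (512GB) input address space. With T0SZ = 32 the root is still a
// level-1 table, but it shrinks to 1 << (33 - 32 + 1) = 4 entries covering
// a 32-bit (4GB) address space.
//
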
STATIC
VOID
LookupAddresstoRootTable (
  IN  UINT64  MaxAddress,
  OUT UINTN  *T0SZ,
  OUT UINTN  *TableEntryCount
  )
{
  UINTN TopBit;

  // Check the parameters are not NULL
  ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));

  // Look for the highest bit set in MaxAddress
  for (TopBit = 63; TopBit != 0; TopBit--) {
    if ((1ULL << TopBit) & MaxAddress) {
      // MaxAddress top bit is found
      TopBit = TopBit + 1;
      break;
    }
  }
  ASSERT (TopBit != 0);

  // Calculate T0SZ from the top bit of the MaxAddress
  *T0SZ = 64 - TopBit;

  // Get the Table info from T0SZ
  GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);
}

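//
// Worked example (illustrative, not part of the original file): for
// MaxAddress = 0xFFFFFFFF (the top of a 4GB space) the highest set bit is
// bit 31, so TopBit becomes 32 and *T0SZ = 64 - 32 = 32.
// GetRootTranslationTableInfo() then reports a 4-entry level-1 root table,
// matching the 4GB example above.
//
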
STATIC
UINT64*
GetBlockEntryListFromAddress (
  IN     UINT64  *RootTable,
  IN     UINT64   RegionStart,
  OUT    UINTN   *TableLevel,
  IN OUT UINT64  *BlockEntrySize,
  IN OUT UINT64 **LastBlockEntry
  )
{
  UINTN   RootTableLevel;
  UINTN   RootTableEntryCount;
  UINT64 *TranslationTable;
  UINT64 *BlockEntry;
  UINT64 *SubTableBlockEntry;
  UINT64  BlockEntryAddress;
  UINTN   BaseAddressAlignment;
  UINTN   PageLevel;
  UINTN   Index;
  UINTN   IndexLevel;
  UINTN   T0SZ;
  UINT64  Attributes;
  UINT64  TableAttributes;

  // Initialize variable
  BlockEntry = NULL;

  // Ensure the parameters are valid
  if (!(TableLevel && BlockEntrySize && LastBlockEntry)) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // Ensure the Region is aligned on 4KB boundary
  if ((RegionStart & (SIZE_4KB - 1)) != 0) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // Ensure the required size is aligned on 4KB boundary
  if ((*BlockEntrySize & (SIZE_4KB - 1)) != 0) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  //
  // Calculate LastBlockEntry from T0SZ - this is the last block entry of the root Translation table
  //
  T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
  // Get the Table info from T0SZ
  GetRootTranslationTableInfo (T0SZ, &RootTableLevel, &RootTableEntryCount);
  // The last block of the root table depends on the number of entries in this table
  *LastBlockEntry = TT_LAST_BLOCK_ADDRESS (RootTable, RootTableEntryCount);

  // If the start address is 0x0 then we use the size of the region to identify the alignment
  if (RegionStart == 0) {
    // Identify the highest possible alignment for the Region Size
    for (BaseAddressAlignment = 0; BaseAddressAlignment < 64; BaseAddressAlignment++) {
      if ((1ULL << BaseAddressAlignment) & *BlockEntrySize) {
        break;
      }
    }
  } else {
    // Identify the highest possible alignment for the Base Address
    for (BaseAddressAlignment = 0; BaseAddressAlignment < 64; BaseAddressAlignment++) {
      if ((1ULL << BaseAddressAlignment) & RegionStart) {
        break;
      }
    }
  }

  // Identify the Page Level the RegionStart must belong to
  PageLevel = 3 - ((BaseAddressAlignment - 12) / 9);

  // If the required size is smaller than the current block size then we need to move to a finer page level.
  // The PageLevel was calculated from the Base Address alignment but did not take into account the alignment
  // of the allocation size
  if (*BlockEntrySize < TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel)) {
    // It does not fit, so move down one page level (smaller blocks)
    PageLevel++;
  }

  // Expose the found PageLevel to the caller
  *TableLevel = PageLevel;

  // Now that we have the Table Level we can get the Block Size associated with this table
  *BlockEntrySize = TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel);

  //
  // Get the Table Descriptor for the corresponding PageLevel. We need to decompose RegionStart to get appropriate entries
  //

  TranslationTable = RootTable;
  for (IndexLevel = RootTableLevel; IndexLevel <= PageLevel; IndexLevel++) {
    BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel, RegionStart);

    if ((IndexLevel != 3) && ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY)) {
      // Go to the next table
      TranslationTable = (UINT64*)(*BlockEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE);

      // If we are at the last level then update the output
      if (IndexLevel == PageLevel) {
        // And get the appropriate BlockEntry at the next level
        BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel + 1, RegionStart);

        // Set the last block for this new table
        *LastBlockEntry = TT_LAST_BLOCK_ADDRESS (TranslationTable, TT_ENTRY_COUNT);
      }
    } else if ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
      // If we are not at the last level then we need to split this BlockEntry
      if (IndexLevel != PageLevel) {
        // Retrieve the attributes from the block entry
        Attributes = *BlockEntry & TT_ATTRIBUTES_MASK;

        // Convert the block entry attributes into Table descriptor attributes
        TableAttributes = TT_TABLE_AP_NO_PERMISSION;
        if (Attributes & TT_PXN_MASK) {
          TableAttributes = TT_TABLE_PXN;
        }
        if (Attributes & TT_UXN_MASK) {
          TableAttributes = TT_TABLE_XN;
        }
        if (Attributes & TT_NS) {
          TableAttributes = TT_TABLE_NS;
        }

        // Get the address corresponding at this entry
        BlockEntryAddress = RegionStart;
        BlockEntryAddress = BlockEntryAddress >> TT_ADDRESS_OFFSET_AT_LEVEL (IndexLevel);
        // Shift back left to clear the address bits below this level's offset
        BlockEntryAddress = BlockEntryAddress << TT_ADDRESS_OFFSET_AT_LEVEL (IndexLevel);

        // Set the correct entry type for the next page level
        if ((IndexLevel + 1) == 3) {
          Attributes |= TT_TYPE_BLOCK_ENTRY_LEVEL3;
        } else {
          Attributes |= TT_TYPE_BLOCK_ENTRY;
        }

        // Create a new translation table
        TranslationTable = (UINT64*)AllocatePages (EFI_SIZE_TO_PAGES ((TT_ENTRY_COUNT * sizeof (UINT64)) + TT_ALIGNMENT_DESCRIPTION_TABLE));
        if (TranslationTable == NULL) {
          return NULL;
        }
        TranslationTable = (UINT64*)((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE);

        // Populate the newly created lower level table
        SubTableBlockEntry = TranslationTable;
        for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
          *SubTableBlockEntry = Attributes | (BlockEntryAddress + (Index << TT_ADDRESS_OFFSET_AT_LEVEL (IndexLevel + 1)));
          SubTableBlockEntry++;
        }

        // Fill the BlockEntry with the new TranslationTable
        *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TableAttributes | TT_TYPE_TABLE_ENTRY;
        // Update the last block entry with the newly created translation table
        *LastBlockEntry = TT_LAST_BLOCK_ADDRESS (TranslationTable, TT_ENTRY_COUNT);

        // BlockEntry now points at the beginning of the new Translation Table
        BlockEntry = TranslationTable;
      }
    } else {
      if (IndexLevel != PageLevel) {
        //
        // Case when we have an Invalid Entry and we are at a page level above the one targeted.
        //

        // Create a new translation table
        TranslationTable = (UINT64*)AllocatePages (EFI_SIZE_TO_PAGES ((TT_ENTRY_COUNT * sizeof (UINT64)) + TT_ALIGNMENT_DESCRIPTION_TABLE));
        if (TranslationTable == NULL) {
          return NULL;
        }
        TranslationTable = (UINT64*)((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE);

        ZeroMem (TranslationTable, TT_ENTRY_COUNT * sizeof (UINT64));

        // Fill the new BlockEntry with the TranslationTable
        *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TT_TYPE_TABLE_ENTRY;
        // Update the last block entry with the newly created translation table
        *LastBlockEntry = TT_LAST_BLOCK_ADDRESS (TranslationTable, TT_ENTRY_COUNT);
      } else {
        //
        // Case when the new region is part of an existing page table
        //
        *LastBlockEntry = TT_LAST_BLOCK_ADDRESS (TranslationTable, TT_ENTRY_COUNT);
      }
    }
  }

  return BlockEntry;
}

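//
// Worked example (illustrative, not part of the original file): suppose the
// tables currently map 0x40000000 with a single 1GB level-1 block entry and
// a caller asks for the entry covering RegionStart = 0x40200000 with
// *BlockEntrySize = SIZE_2MB. The 2MB alignment selects PageLevel 2, so the
// walk above splits the 1GB block: it allocates a level-2 table, fills all
// 512 entries with 2MB blocks that inherit the old attributes, installs the
// table descriptor in the level-1 slot, and finally returns a pointer to the
// level-2 entry for 0x40200000 together with *LastBlockEntry for that table.
//
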
STATIC
RETURN_STATUS
FillTranslationTable (
  IN  UINT64                        *RootTable,
  IN  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryRegion
  )
{
  UINT64  Attributes;
  UINT32  Type;
  UINT64  RegionStart;
  UINT64  RemainingRegionLength;
  UINT64 *BlockEntry;
  UINT64 *LastBlockEntry;
  UINT64  BlockEntrySize;
  UINTN   TableLevel;

  // Ensure the Length is aligned on 4KB boundary
  if ((MemoryRegion->Length == 0) || ((MemoryRegion->Length & (SIZE_4KB - 1)) != 0)) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return RETURN_INVALID_PARAMETER;
  }

  // Variable initialization
  Attributes = ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF;
  RemainingRegionLength = MemoryRegion->Length;
  RegionStart = MemoryRegion->VirtualBase;

  do {
    // Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor
    // such as the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor
    BlockEntrySize = RemainingRegionLength;
    BlockEntry = GetBlockEntryListFromAddress (RootTable, RegionStart, &TableLevel, &BlockEntrySize, &LastBlockEntry);
    if (BlockEntry == NULL) {
      // GetBlockEntryListFromAddress() returns NULL when it fails to allocate new pages for the Translation Tables
      return RETURN_OUT_OF_RESOURCES;
    }

    if (TableLevel != 3) {
      Type = TT_TYPE_BLOCK_ENTRY;
    } else {
      Type = TT_TYPE_BLOCK_ENTRY_LEVEL3;
    }

    do {
      // Fill the Block Entry with attribute and output block address
      *BlockEntry = (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;

      // Go to the next BlockEntry
      RegionStart += BlockEntrySize;
      RemainingRegionLength -= BlockEntrySize;
      BlockEntry++;
    } while ((RemainingRegionLength >= BlockEntrySize) && (BlockEntry <= LastBlockEntry));
  } while (RemainingRegionLength != 0);

  return RETURN_SUCCESS;
}

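//
// Worked example (illustrative, not part of the original file): a region with
// VirtualBase = 0x40000000 and Length = 0x40200000 (1GB + 2MB) is written in
// two passes of the outer loop above. The first call to
// GetBlockEntryListFromAddress() returns a level-1 slot with a 1GB block
// size, so one 1GB block entry is written; the second call, for the
// remaining 2MB at 0x80000000, lands in a level-2 table and a single 2MB
// block entry is written there.
//
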
RETURN_STATUS
SetMemoryAttributes (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length,
  IN UINT64                Attributes,
  IN EFI_PHYSICAL_ADDRESS  VirtualMask
  )
{
  RETURN_STATUS                 Status;
  ARM_MEMORY_REGION_DESCRIPTOR  MemoryRegion;
  UINT64                       *TranslationTable;

  MemoryRegion.PhysicalBase = BaseAddress;
  MemoryRegion.VirtualBase = BaseAddress;
  MemoryRegion.Length = Length;
  MemoryRegion.Attributes = GcdAttributeToArmAttribute (Attributes);

  TranslationTable = ArmGetTTBR0BaseAddress ();

  Status = FillTranslationTable (TranslationTable, &MemoryRegion);
  if (RETURN_ERROR (Status)) {
    return Status;
  }

  // Flush d-cache so descriptors make it back to uncached memory for subsequent table walks
  // flush and invalidate pages
  ArmCleanInvalidateDataCache ();

  ArmInvalidateInstructionCache ();

  // Invalidate all TLB entries so changes are synced
  ArmInvalidateTlb ();

  return RETURN_SUCCESS;
}

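//
// Usage sketch (illustrative only, not part of the original file; the base
// address and length below are hypothetical): map a block of device
// registers as Device memory through the GCD-style attribute EFI_MEMORY_UC.
//
#if 0
STATIC
RETURN_STATUS
ExampleMapDeviceRegisters (
  VOID
  )
{
  // 64KB of MMIO at a hypothetical base address; VirtualMask is not used by
  // the current implementation, so pass 0.
  return SetMemoryAttributes (0x09000000ULL, SIZE_64KB, EFI_MEMORY_UC, 0);
}
#endif
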
RETURN_STATUS
EFIAPI
ArmConfigureMmu (
  IN  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryTable,
  OUT VOID                         **TranslationTableBase OPTIONAL,
  OUT UINTN                         *TranslationTableSize OPTIONAL
  )
{
  VOID                         *TranslationTable;
  UINTN                         TranslationTablePageCount;
  UINT32                        TranslationTableAttribute;
  ARM_MEMORY_REGION_DESCRIPTOR *MemoryTableEntry;
  UINT64                        MaxAddress;
  UINT64                        TopAddress;
  UINTN                         T0SZ;
  UINTN                         RootTableEntryCount;
  UINT64                        TCR;
  RETURN_STATUS                 Status;

  if (MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return RETURN_INVALID_PARAMETER;
  }

  // Identify the highest address of the memory table
  MaxAddress = MemoryTable->PhysicalBase + MemoryTable->Length - 1;
  MemoryTableEntry = MemoryTable;
  while (MemoryTableEntry->Length != 0) {
    TopAddress = MemoryTableEntry->PhysicalBase + MemoryTableEntry->Length - 1;
    if (TopAddress > MaxAddress) {
      MaxAddress = TopAddress;
    }
    MemoryTableEntry++;
  }

  // Lookup the Table Level to get the information
  LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);

  //
  // Set TCR that allows us to retrieve T0SZ in the subsequent functions
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    // Note: Bits 23 and 31 are reserved (RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Address spaces wider than 48 bits are not supported
      return RETURN_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    TCR = T0SZ | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Address spaces wider than 48 bits are not supported
      return RETURN_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return RETURN_UNSUPPORTED;
  }

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for translation table
  TranslationTablePageCount = EFI_SIZE_TO_PAGES ((RootTableEntryCount * sizeof (UINT64)) + TT_ALIGNMENT_DESCRIPTION_TABLE);
  TranslationTable = AllocatePages (TranslationTablePageCount);
  if (TranslationTable == NULL) {
    return RETURN_OUT_OF_RESOURCES;
  }
  TranslationTable = (VOID*)((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE);
  // We set TTBR0 just after allocating the table to retrieve its location from the subsequent
  // functions without needing to pass this value across the functions. The MMU is only enabled
  // after the translation tables are populated.
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof (UINT64);
  }

  ZeroMem (TranslationTable, RootTableEntryCount * sizeof (UINT64));

  // Disable MMU and caches. ArmDisableMmu() also invalidates the TLBs
  ArmDisableMmu ();
  ArmDisableDataCache ();
  ArmDisableInstructionCache ();

  // Make sure nothing sneaked into the cache
  ArmCleanInvalidateDataCache ();
  ArmInvalidateInstructionCache ();

  TranslationTableAttribute = TT_ATTR_INDX_INVALID;
  while (MemoryTable->Length != 0) {
    // Find the memory attribute for the Translation Table
    if (((UINTN)TranslationTable >= MemoryTable->PhysicalBase) &&
        ((UINTN)TranslationTable <= MemoryTable->PhysicalBase - 1 + MemoryTable->Length)) {
      TranslationTableAttribute = MemoryTable->Attributes;
    }

    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (RETURN_ERROR (Status)) {
      goto FREE_TRANSLATION_TABLE;
    }
    MemoryTable++;
  }

  // Translate the Memory Attributes into Translation Table Register Attributes
  if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED) ||
      (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED)) {
    TCR |= TCR_SH_NON_SHAREABLE | TCR_RGN_OUTER_NON_CACHEABLE | TCR_RGN_INNER_NON_CACHEABLE;
  } else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK) ||
      (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK)) {
    TCR |= TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WRITE_BACK_ALLOC | TCR_RGN_INNER_WRITE_BACK_ALLOC;
  } else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH) ||
      (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH)) {
    TCR |= TCR_SH_NON_SHAREABLE | TCR_RGN_OUTER_WRITE_THROUGH | TCR_RGN_INNER_WRITE_THROUGH;
  } else {
    // If we failed to find a mapping that contains the root translation table then it probably means
    // the translation table is not mapped in the given memory map.
    ASSERT (0);
    Status = RETURN_UNSUPPORTED;
    goto FREE_TRANSLATION_TABLE;
  }

  // Set TCR again now that the Translation Table attributes are known
  ArmSetTCR (TCR);

  ArmSetMAIR (MAIR_ATTR (TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |                      // mapped to EFI_MEMORY_UC
              MAIR_ATTR (TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) | // mapped to EFI_MEMORY_WC
              MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) | // mapped to EFI_MEMORY_WT
              MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK));       // mapped to EFI_MEMORY_WB

  ArmDisableAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return RETURN_SUCCESS;

FREE_TRANSLATION_TABLE:
  FreePages (TranslationTable, TranslationTablePageCount);
  return Status;
}
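
//
// Usage sketch (illustrative only, not part of the original file; the
// addresses, sizes and attributes below are hypothetical): a platform
// library would typically build a zero-terminated virtual memory map and
// hand it to ArmConfigureMmu() from its memory initialization code.
//
#if 0
STATIC
VOID
ExampleEnableMmu (
  VOID
  )
{
  RETURN_STATUS                 Status;
  VOID                         *TranslationTableBase;
  UINTN                         TranslationTableSize;
  ARM_MEMORY_REGION_DESCRIPTOR  VirtualMemoryTable[3];

  // 2GB of system DRAM, mapped write-back cacheable
  VirtualMemoryTable[0].PhysicalBase = 0x80000000ULL;
  VirtualMemoryTable[0].VirtualBase  = 0x80000000ULL;
  VirtualMemoryTable[0].Length       = SIZE_2GB;
  VirtualMemoryTable[0].Attributes   = ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK;

  // Peripheral space, mapped as Device memory
  VirtualMemoryTable[1].PhysicalBase = 0x00000000ULL;
  VirtualMemoryTable[1].VirtualBase  = 0x00000000ULL;
  VirtualMemoryTable[1].Length       = SIZE_256MB;
  VirtualMemoryTable[1].Attributes   = ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;

  // Zero-length entry terminates the table
  ZeroMem (&VirtualMemoryTable[2], sizeof (VirtualMemoryTable[2]));

  Status = ArmConfigureMmu (VirtualMemoryTable, &TranslationTableBase, &TranslationTableSize);
  ASSERT_RETURN_ERROR (Status);
}
#endif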