1 /** @file
2 * File managing the MMU for ARMv8 architecture
3 *
4 * Copyright (c) 2011-2013, ARM Limited. All rights reserved.
5 *
6 * This program and the accompanying materials
7 * are licensed and made available under the terms and conditions of the BSD License
8 * which accompanies this distribution. The full text of the license may be found at
9 * http://opensource.org/licenses/bsd-license.php
10 *
11 * THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
13 *
14 **/
15
16 #include <Uefi.h>
17 #include <Chipset/AArch64.h>
18 #include <Library/BaseMemoryLib.h>
19 #include <Library/MemoryAllocationLib.h>
20 #include <Library/ArmLib.h>
21 #include <Library/BaseLib.h>
22 #include <Library/DebugLib.h>
23 #include "AArch64Lib.h"
24 #include "ArmLibPrivate.h"
25
26 // We use this attribute index value to mark an invalid block entry
27 #define TT_ATTR_INDX_INVALID ((UINT32)~0)
28
29 STATIC
30 UINT64
31 ArmMemoryAttributeToPageAttribute (
32 IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
33 )
34 {
35 switch (Attributes) {
36 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
37 return TT_ATTR_INDX_MEMORY_WRITE_BACK;
38 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
39 return TT_ATTR_INDX_MEMORY_WRITE_THROUGH;
40 case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
41 return TT_ATTR_INDX_DEVICE_MEMORY;
42 case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
43 return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
44 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
45 return TT_ATTR_INDX_MEMORY_WRITE_BACK;
46 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
47 return TT_ATTR_INDX_MEMORY_WRITE_THROUGH;
48 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
49 return TT_ATTR_INDX_DEVICE_MEMORY;
50 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
51 return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
52 default:
53 ASSERT(0);
54 return TT_ATTR_INDX_DEVICE_MEMORY;
55 }
56 }
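57
// Worked example (illustrative, not part of the original code): both the secure
// and the non-secure variant of a cacheability attribute select the same MAIR
// index here, so only the memory type is recorded in the returned value:
//
//   UINT64 Index;
//
//   Index = ArmMemoryAttributeToPageAttribute (ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK);
//   ASSERT (Index == TT_ATTR_INDX_MEMORY_WRITE_BACK);
//
//   Index = ArmMemoryAttributeToPageAttribute (ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK);
//   ASSERT (Index == TT_ATTR_INDX_MEMORY_WRITE_BACK);
//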
57
58 UINT64
59 PageAttributeToGcdAttribute (
60 IN UINT64 PageAttributes
61 )
62 {
63 UINT64 GcdAttributes;
64
65 switch (PageAttributes & TT_ATTR_INDX_MASK) {
66 case TT_ATTR_INDX_DEVICE_MEMORY:
67 GcdAttributes = EFI_MEMORY_UC;
68 break;
69 case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:
70 GcdAttributes = EFI_MEMORY_WC;
71 break;
72 case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:
73 GcdAttributes = EFI_MEMORY_WT;
74 break;
75 case TT_ATTR_INDX_MEMORY_WRITE_BACK:
76 GcdAttributes = EFI_MEMORY_WB;
77 break;
78 default:
79 DEBUG ((EFI_D_ERROR, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes));
80 ASSERT (0);
81 // The Global Coherency Domain (GCD) value is defined as a bit set.
82 // Returning 0 means no attribute has been set.
83 GcdAttributes = 0;
84 }
85
86 // Determine protection attributes
87 if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) || ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {
88 // Read only cases map to write-protect
89 GcdAttributes |= EFI_MEMORY_WP;
90 }
91
92 // Process eXecute Never attribute
93 if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0 ) {
94 GcdAttributes |= EFI_MEMORY_XP;
95 }
96
97 return GcdAttributes;
98 }
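
// Worked example (illustrative): a write-back, read-only, execute-never page
// descriptor decodes into the matching GCD bit set:
//
//   UINT64 GcdAttributes;
//
//   GcdAttributes = PageAttributeToGcdAttribute (TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_AP_RO_RO | TT_UXN_MASK);
//   // GcdAttributes == (EFI_MEMORY_WB | EFI_MEMORY_WP | EFI_MEMORY_XP)
//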
99
100 UINT64
101 GcdAttributeToPageAttribute (
102 IN UINT64 GcdAttributes
103 )
104 {
105 UINT64 PageAttributes;
106
107 switch (GcdAttributes & 0xFF) {
108 case EFI_MEMORY_UC:
109 PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
110 break;
111 case EFI_MEMORY_WC:
112 PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
113 break;
114 case EFI_MEMORY_WT:
115 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH;
116 break;
117 case EFI_MEMORY_WB:
118 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK;
119 break;
120 default:
121 DEBUG ((EFI_D_ERROR, "GcdAttributeToPageAttribute: 0x%lX attributes are not supported.\n", GcdAttributes));
122 ASSERT (0);
123 // If no match has been found then we mark the memory as device memory.
124 // The only side effect of using device memory should be a slow down in the performance.
125 PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
126 }
127
128 // Determine protection attributes
129 if (GcdAttributes & EFI_MEMORY_WP) {
130 // Read only cases map to write-protect
131 PageAttributes |= TT_AP_RO_RO;
132 }
133
134 // Process eXecute Never attribute
135 if (GcdAttributes & EFI_MEMORY_XP) {
136 PageAttributes |= (TT_PXN_MASK | TT_UXN_MASK);
137 }
138
139 return PageAttributes;
140 }
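
// Worked example (illustrative): the reverse conversion, turning a GCD request
// for write-combining, execute-never memory into translation table attribute bits:
//
//   UINT64 PageAttributes;
//
//   PageAttributes = GcdAttributeToPageAttribute (EFI_MEMORY_WC | EFI_MEMORY_XP);
//   // PageAttributes == (TT_ATTR_INDX_MEMORY_NON_CACHEABLE | TT_PXN_MASK | TT_UXN_MASK)
//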
141
142 ARM_MEMORY_REGION_ATTRIBUTES
143 GcdAttributeToArmAttribute (
144 IN UINT64 GcdAttributes
145 )
146 {
147 switch (GcdAttributes & 0xFF) {
148 case EFI_MEMORY_UC:
149 return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;
150 case EFI_MEMORY_WC:
151 return ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED;
152 case EFI_MEMORY_WT:
153 return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH;
154 case EFI_MEMORY_WB:
155 return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK;
156 default:
157 DEBUG ((EFI_D_ERROR, "GcdAttributeToArmAttribute: 0x%lX attributes are not supported.\n", GcdAttributes));
158 ASSERT (0);
159 return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;
160 }
161 }
162
163 // Describe the T0SZ values for each translation table level
164 typedef struct {
165 UINTN MinT0SZ;
166 UINTN MaxT0SZ;
167 UINTN LargestT0SZ; // Generally (MaxT0SZ == LargestT0SZ), but for the last level in this map
168 // the architectural MaxT0SZ stops short of the table-size boundary
169 } T0SZ_DESCRIPTION_PER_LEVEL;
170
171 // Map table for the corresponding Level of Table
172 STATIC CONST T0SZ_DESCRIPTION_PER_LEVEL T0SZPerTableLevel[] = {
173 { 16, 24, 24 }, // Table Level 0
174 { 25, 33, 33 }, // Table Level 1
175 { 34, 39, 42 } // Table Level 2
176 };
177
178 VOID
179 GetRootTranslationTableInfo (
180 IN UINTN T0SZ,
181 OUT UINTN *TableLevel,
182 OUT UINTN *TableEntryCount
183 )
184 {
185 UINTN Index;
186
187 // Identify the level of the root table from the given T0SZ
188 for (Index = 0; Index < sizeof (T0SZPerTableLevel) / sizeof (T0SZ_DESCRIPTION_PER_LEVEL); Index++) {
189 if (T0SZ <= T0SZPerTableLevel[Index].MaxT0SZ) {
190 break;
191 }
192 }
193
194 // If we have not found the corresponding maximum T0SZ then we use the last one
195 if (Index == sizeof (T0SZPerTableLevel) / sizeof (T0SZ_DESCRIPTION_PER_LEVEL)) {
196 Index--;
197 }
198
199 // Get the level of the root table
200 if (TableLevel) {
201 *TableLevel = Index;
202 }
203
204 // The number of entries in the root table is 2^(LargestT0SZ - T0SZ + 1)
205 if (TableEntryCount) {
206 *TableEntryCount = 1 << (T0SZPerTableLevel[Index].LargestT0SZ - T0SZ + 1);
207 }
208 }
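
// Worked example (illustrative): with the 4KB granule used by this library,
// T0SZ = 24 describes a 2^40 byte input address space, so the root is a
// Level 0 table with 2^(24 - 24 + 1) = 2 entries (each Level 0 entry spans
// 512GB); T0SZ = 32 describes a 2^32 byte space rooted at Level 1 with
// 2^(33 - 32 + 1) = 4 entries (each Level 1 entry spans 1GB):
//
//   UINTN Level;
//   UINTN EntryCount;
//
//   GetRootTranslationTableInfo (24, &Level, &EntryCount);  // Level == 0, EntryCount == 2
//   GetRootTranslationTableInfo (32, &Level, &EntryCount);  // Level == 1, EntryCount == 4
//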
209
210 STATIC
211 VOID
212 LookupAddresstoRootTable (
213 IN UINT64 MaxAddress,
214 OUT UINTN *T0SZ,
215 OUT UINTN *TableEntryCount
216 )
217 {
218 UINTN TopBit;
219
220 // Check the parameters are not NULL
221 ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));
222
223 // Look for the highest bit set in MaxAddress
224 for (TopBit = 63; TopBit != 0; TopBit--) {
225 if ((1ULL << TopBit) & MaxAddress) {
226 // MaxAddress top bit is found
227 TopBit = TopBit + 1;
228 break;
229 }
230 }
231 ASSERT (TopBit != 0);
232
233 // Calculate T0SZ from the top bit of the MaxAddress
234 *T0SZ = 64 - TopBit;
235
236 // Get the Table info from T0SZ
237 GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);
238 }
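
// Worked example (illustrative): for MaxAddress = 0xFFFFFFFF (a 4GB address
// space) the highest set bit is bit 31, so TopBit becomes 32 and
// T0SZ = 64 - 32 = 32; GetRootTranslationTableInfo() then reports a Level 1
// root table with 4 entries, as in the example above:
//
//   UINTN T0SZ;
//   UINTN EntryCount;
//
//   LookupAddresstoRootTable (0xFFFFFFFFULL, &T0SZ, &EntryCount);
//   // T0SZ == 32, EntryCount == 4
//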
239
240 STATIC
241 UINT64*
242 GetBlockEntryListFromAddress (
243 IN UINT64 *RootTable,
244 IN UINT64 RegionStart,
245 OUT UINTN *TableLevel,
246 IN OUT UINT64 *BlockEntrySize,
247 IN OUT UINT64 **LastBlockEntry
248 )
249 {
250 UINTN RootTableLevel;
251 UINTN RootTableEntryCount;
252 UINT64 *TranslationTable;
253 UINT64 *BlockEntry;
254 UINT64 BlockEntryAddress;
255 UINTN BaseAddressAlignment;
256 UINTN PageLevel;
257 UINTN Index;
258 UINTN IndexLevel;
259 UINTN T0SZ;
260 UINT64 Attributes;
261 UINT64 TableAttributes;
262
263 // Initialize variable
264 BlockEntry = NULL;
265
266 // Ensure the parameters are valid
267 ASSERT (TableLevel && BlockEntrySize && LastBlockEntry);
268
269 // Ensure the Region is aligned on a 4KB boundary
270 ASSERT ((RegionStart & (SIZE_4KB - 1)) == 0);
271
272 // Ensure the required size is aligned on a 4KB boundary
273 ASSERT ((*BlockEntrySize & (SIZE_4KB - 1)) == 0);
274
275 //
276 // Calculate LastBlockEntry from T0SZ
277 //
278 T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
279 // Get the Table info from T0SZ
280 GetRootTranslationTableInfo (T0SZ, &RootTableLevel, &RootTableEntryCount);
281 // The last block entry of the root table depends on the number of entries in this table
282 *LastBlockEntry = (UINT64*)((UINTN)RootTable + (RootTableEntryCount * sizeof(UINT64)));
283
284 // If the start address is 0x0 then we use the size of the region to identify the alignment
285 if (RegionStart == 0) {
286 // Identify the highest possible alignment for the Region Size
287 for (BaseAddressAlignment = 0; BaseAddressAlignment < 64; BaseAddressAlignment++) {
288 if ((1ULL << BaseAddressAlignment) & *BlockEntrySize) {
289 break;
290 }
291 }
292 } else {
293 // Identify the highest possible alignment for the Base Address
294 for (BaseAddressAlignment = 0; BaseAddressAlignment < 64; BaseAddressAlignment++) {
295 if ((1ULL << BaseAddressAlignment) & RegionStart) {
296 break;
297 }
298 }
299 }
300
301 // Identify the Page Level the RegionStart must belong to
302 PageLevel = 3 - ((BaseAddressAlignment - 12) / 9);
303
304 // If the required size is smaller than the current block size then we need to use a deeper level with smaller blocks.
305 if (*BlockEntrySize < TT_ADDRESS_AT_LEVEL(PageLevel)) {
306 // It does not fit, so move down one level
307 PageLevel++;
308 }
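// Example of the arithmetic above (illustrative): with a 4KB granule each level
// resolves 9 bits of the address, so a 1GB alignment (BaseAddressAlignment = 30)
// gives PageLevel = 3 - ((30 - 12) / 9) = 1, a 2MB alignment (21) gives level 2,
// and a 4KB alignment (12) gives level 3; the check above then moves one level
// deeper whenever the remaining size is smaller than a block at that level.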
309
310 // Expose the found PageLevel to the caller
311 *TableLevel = PageLevel;
312
313 // Now that we have the Table Level, we can get the Block Size associated with this table
314 *BlockEntrySize = TT_ADDRESS_AT_LEVEL(PageLevel);
315
316 //
317 // Get the Table Descriptor for the corresponding PageLevel. We need to decompose RegionStart to get appropriate entries
318 //
319
320 TranslationTable = RootTable;
321 for (IndexLevel = RootTableLevel; IndexLevel <= PageLevel; IndexLevel++) {
322 BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel, RegionStart);
323
324 if ((IndexLevel != 3) && ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY)) {
325 // Go to the next table
326 TranslationTable = (UINT64*)(*BlockEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE);
327
328 // If we are at the last level then update the output
329 if (IndexLevel == PageLevel) {
330 // And get the appropriate BlockEntry at the next level
331 BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel + 1, RegionStart);
332
333 // Set the last block for this new table
334 *LastBlockEntry = (UINT64*)((UINTN)TranslationTable + (TT_ENTRY_COUNT * sizeof(UINT64)));
335 }
336 } else if ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
337 // If we are not at the last level then we need to split this BlockEntry
338 if (IndexLevel != PageLevel) {
339 // Retrieve the attributes from the block entry
340 Attributes = *BlockEntry & TT_ATTRIBUTES_MASK;
341
342 // Convert the block entry attributes into Table descriptor attributes
343 TableAttributes = TT_TABLE_AP_NO_PERMISSION;
344 if (Attributes & TT_PXN_MASK) {
345 TableAttributes |= TT_TABLE_PXN;
346 }
347 if (Attributes & TT_UXN_MASK) {
348 TableAttributes |= TT_TABLE_XN;
349 }
350 if (Attributes & TT_NS) {
351 TableAttributes |= TT_TABLE_NS;
352 }
353
354 // Get the block-aligned address corresponding to this entry
355 BlockEntryAddress = RegionStart;
356 BlockEntryAddress = BlockEntryAddress >> TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
357 // Shift back to the left to clear the low-order bits below this level's block size
358 BlockEntryAddress = BlockEntryAddress << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
359
360 // Set the correct entry type
361 if (IndexLevel + 1 == 3) {
362 Attributes |= TT_TYPE_BLOCK_ENTRY_LEVEL3;
363 } else {
364 Attributes |= TT_TYPE_BLOCK_ENTRY;
365 }
366
367 // Create a new translation table
368 TranslationTable = (UINT64*)AllocatePages (EFI_SIZE_TO_PAGES((TT_ENTRY_COUNT * sizeof(UINT64)) + TT_ALIGNMENT_DESCRIPTION_TABLE));
369 if (TranslationTable == NULL) {
370 return NULL;
371 }
372 TranslationTable = (UINT64*)((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE);
373
374 // Fill the new BlockEntry with the TranslationTable
375 *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TableAttributes | TT_TYPE_TABLE_ENTRY;
376
377 // Populate the newly created lower level table
378 BlockEntry = TranslationTable;
379 for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
380 *BlockEntry = Attributes | (BlockEntryAddress + (Index << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel + 1)));
381 BlockEntry++;
382 }
383 // Block Entry points at the beginning of the Translation Table
384 BlockEntry = TranslationTable;
385 }
386 } else {
387 // Case of an invalid entry while we are still at a level above the one targeted.
388 if (IndexLevel != PageLevel) {
389 // Create a new translation table
390 TranslationTable = (UINT64*)AllocatePages (EFI_SIZE_TO_PAGES((TT_ENTRY_COUNT * sizeof(UINT64)) + TT_ALIGNMENT_DESCRIPTION_TABLE));
391 if (TranslationTable == NULL) {
392 return NULL;
393 }
394 TranslationTable = (UINT64*)((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE);
395
396 ZeroMem (TranslationTable, TT_ENTRY_COUNT * sizeof(UINT64));
397
398 // Fill the new BlockEntry with the TranslationTable
399 *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TT_TYPE_TABLE_ENTRY;
400 }
401 }
402 }
403
404 return BlockEntry;
405 }
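
// Walk-through (illustrative): remapping a single 4KB page that currently lives
// inside a 1GB Level 1 block causes the function above to split that block into
// a Level 2 table whose TT_ENTRY_COUNT entries inherit the original block
// attributes, then to split the covering 2MB Level 2 block the same way, until
// the walk reaches the Level 3 entry for the requested page. No mapping is lost
// during the split; only the granularity changes.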
406
407 STATIC
408 RETURN_STATUS
409 FillTranslationTable (
410 IN UINT64 *RootTable,
411 IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion
412 )
413 {
414 UINT64 Attributes;
415 UINT32 Type;
416 UINT64 RegionStart;
417 UINT64 RemainingRegionLength;
418 UINT64 *BlockEntry;
419 UINT64 *LastBlockEntry;
420 UINT64 BlockEntrySize;
421 UINTN TableLevel;
422
423 // Ensure the Length is non-zero and aligned on a 4KB boundary
424 ASSERT ((MemoryRegion->Length > 0) && ((MemoryRegion->Length & (SIZE_4KB - 1)) == 0));
425
426 // Variable initialization
427 Attributes = ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF;
428 RemainingRegionLength = MemoryRegion->Length;
429 RegionStart = MemoryRegion->VirtualBase;
430
431 do {
432 // Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor
433 // such as the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor
434 BlockEntrySize = RemainingRegionLength;
435 BlockEntry = GetBlockEntryListFromAddress (RootTable, RegionStart, &TableLevel, &BlockEntrySize, &LastBlockEntry);
436 if (BlockEntry == NULL) {
437 // GetBlockEntryListFromAddress() returns NULL when it fails to allocate new pages for the Translation Tables
438 return RETURN_OUT_OF_RESOURCES;
439 }
440
441 if (TableLevel != 3) {
442 Type = TT_TYPE_BLOCK_ENTRY;
443 } else {
444 Type = TT_TYPE_BLOCK_ENTRY_LEVEL3;
445 }
446
447 do {
448 // Fill the Block Entry with attribute and output block address
449 *BlockEntry = (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;
450
451 // Go to the next BlockEntry
452 RegionStart += BlockEntrySize;
453 RemainingRegionLength -= BlockEntrySize;
454 BlockEntry++;
455 } while ((RemainingRegionLength >= BlockEntrySize) && (BlockEntry <= LastBlockEntry));
456 } while (RemainingRegionLength != 0);
457
458 return RETURN_SUCCESS;
459 }
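
// Worked example (illustrative; the addresses are assumptions, and RootTable
// stands for the caller's root translation table): a 2GB write-back region
// starting at 0x40000000 is 1GB aligned, so FillTranslationTable() describes it
// with two 1GB Level 1 block entries instead of Level 2 or Level 3 entries:
//
//   ARM_MEMORY_REGION_DESCRIPTOR Region;
//   RETURN_STATUS                Status;
//
//   Region.PhysicalBase = 0x40000000;
//   Region.VirtualBase  = 0x40000000;
//   Region.Length       = SIZE_2GB;
//   Region.Attributes   = ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK;
//
//   Status = FillTranslationTable (RootTable, &Region);
//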
460
461 RETURN_STATUS
462 SetMemoryAttributes (
463 IN EFI_PHYSICAL_ADDRESS BaseAddress,
464 IN UINT64 Length,
465 IN UINT64 Attributes,
466 IN EFI_PHYSICAL_ADDRESS VirtualMask
467 )
468 {
469 RETURN_STATUS Status;
470 ARM_MEMORY_REGION_DESCRIPTOR MemoryRegion;
471 UINT64 *TranslationTable;
472
473 MemoryRegion.PhysicalBase = BaseAddress;
474 MemoryRegion.VirtualBase = BaseAddress;
475 MemoryRegion.Length = Length;
476 MemoryRegion.Attributes = GcdAttributeToArmAttribute (Attributes);
477
478 TranslationTable = ArmGetTTBR0BaseAddress ();
479
480 Status = FillTranslationTable (TranslationTable, &MemoryRegion);
481 if (RETURN_ERROR (Status)) {
482 return Status;
483 }
484
485 // Flush the data cache so the updated descriptors make it back to memory for subsequent table walks
486 // (clean and invalidate)
487 ArmCleanInvalidateDataCache ();
488
489 ArmInvalidateInstructionCache ();
490
491 // Invalidate all TLB entries so changes are synced
492 ArmInvalidateTlb ();
493
494 return RETURN_SUCCESS;
495 }
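
// Usage sketch (illustrative; BufferBase is a hypothetical, 4KB-aligned address,
// not a value defined by this library): once the MMU is running, a caller could
// remap a buffer as uncached device memory. Note that the VirtualMask parameter
// is not used by this implementation, and only the cacheability part of the GCD
// attributes is honored here:
//
//   EFI_PHYSICAL_ADDRESS  BufferBase;
//   RETURN_STATUS         Status;
//
//   Status = SetMemoryAttributes (BufferBase, SIZE_1MB, EFI_MEMORY_UC, 0);
//   ASSERT_RETURN_ERROR (Status);
//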
496
497 RETURN_STATUS
498 EFIAPI
499 ArmConfigureMmu (
500 IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable,
501 OUT VOID **TranslationTableBase OPTIONAL,
502 OUT UINTN *TranslationTableSize OPTIONAL
503 )
504 {
505 VOID* TranslationTable;
506 UINTN TranslationTablePageCount;
507 UINT32 TranslationTableAttribute;
508 ARM_MEMORY_REGION_DESCRIPTOR *MemoryTableEntry;
509 UINT64 MaxAddress;
510 UINT64 TopAddress;
511 UINTN T0SZ;
512 UINTN RootTableEntryCount;
513 UINT64 TCR;
514 RETURN_STATUS Status;
515
516 ASSERT (MemoryTable != NULL);
517
518 // Identify the highest address of the memory table
519 MaxAddress = MemoryTable->PhysicalBase + MemoryTable->Length - 1;
520 MemoryTableEntry = MemoryTable;
521 while (MemoryTableEntry->Length != 0) {
522 TopAddress = MemoryTableEntry->PhysicalBase + MemoryTableEntry->Length - 1;
523 if (TopAddress > MaxAddress) {
524 MaxAddress = TopAddress;
525 }
526 MemoryTableEntry++;
527 }
528
529 // Look up the root table level and entry count for the highest address
530 LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);
531
532 //
533 // Set TCR that allows us to retrieve T0SZ in the subsequent functions
534 //
535 if ((ArmReadCurrentEL () == AARCH64_EL2) || (ArmReadCurrentEL () == AARCH64_EL3)) {
536 // Note: Bits 23 and 31 are RES1 (reserved, set to 1) in TCR_EL2 and TCR_EL3
537 TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;
538
539 // Set the Physical Address Size using MaxAddress
540 if (MaxAddress < SIZE_4GB) {
541 TCR |= TCR_PS_4GB;
542 } else if (MaxAddress < SIZE_64GB) {
543 TCR |= TCR_PS_64GB;
544 } else if (MaxAddress < SIZE_1TB) {
545 TCR |= TCR_PS_1TB;
546 } else if (MaxAddress < SIZE_4TB) {
547 TCR |= TCR_PS_4TB;
548 } else if (MaxAddress < SIZE_16TB) {
549 TCR |= TCR_PS_16TB;
550 } else if (MaxAddress < SIZE_256TB) {
551 TCR |= TCR_PS_256TB;
552 } else {
553 DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
554 ASSERT (0); // Address spaces larger than 48 bits are not supported
555 return RETURN_UNSUPPORTED;
556 }
557 } else {
558 ASSERT (0); // Only EL2 and EL3 are handled by this implementation
559 return RETURN_UNSUPPORTED;
560 }
561
562 // Set TCR
563 ArmSetTCR (TCR);
564
565 // Allocate pages for translation table
566 TranslationTablePageCount = EFI_SIZE_TO_PAGES((RootTableEntryCount * sizeof(UINT64)) + TT_ALIGNMENT_DESCRIPTION_TABLE);
567 TranslationTable = AllocatePages (TranslationTablePageCount);
568 if (TranslationTable == NULL) {
569 return RETURN_OUT_OF_RESOURCES;
570 }
571 TranslationTable = (VOID*)((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE);
572 // We set TTBR0 just after allocating the table so that subsequent functions can retrieve
573 // its location without this value being passed between them. The MMU is only enabled
574 // after the translation tables have been populated.
575 ArmSetTTBR0 (TranslationTable);
576
577 if (TranslationTableBase != NULL) {
578 *TranslationTableBase = TranslationTable;
579 }
580
581 if (TranslationTableSize != NULL) {
582 *TranslationTableSize = RootTableEntryCount * sizeof(UINT64);
583 }
584
585 ZeroMem (TranslationTable, RootTableEntryCount * sizeof(UINT64));
586
587 // Disable MMU and caches. ArmDisableMmu() also invalidates the TLBs
588 ArmDisableMmu ();
589 ArmDisableDataCache ();
590 ArmDisableInstructionCache ();
591
592 // Make sure nothing sneaked into the cache
593 ArmCleanInvalidateDataCache ();
594 ArmInvalidateInstructionCache ();
595
596 TranslationTableAttribute = TT_ATTR_INDX_INVALID;
597 while (MemoryTable->Length != 0) {
598 // Find the memory attribute for the Translation Table
599 if (((UINTN)TranslationTable >= MemoryTable->PhysicalBase) &&
600 ((UINTN)TranslationTable <= MemoryTable->PhysicalBase - 1 + MemoryTable->Length)) {
601 TranslationTableAttribute = MemoryTable->Attributes;
602 }
603
604 Status = FillTranslationTable (TranslationTable, MemoryTable);
605 if (RETURN_ERROR (Status)) {
606 goto FREE_TRANSLATION_TABLE;
607 }
608 MemoryTable++;
609 }
610
611 // Translate the Memory Attributes into Translation Table Register Attributes
612 if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED) ||
613 (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED)) {
614 TCR |= TCR_SH_NON_SHAREABLE | TCR_RGN_OUTER_NON_CACHEABLE | TCR_RGN_INNER_NON_CACHEABLE;
615 } else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK) ||
616 (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK)) {
617 TCR |= TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WRITE_BACK_ALLOC | TCR_RGN_INNER_WRITE_BACK_ALLOC;
618 } else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH) ||
619 (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH)) {
620 TCR |= TCR_SH_NON_SHAREABLE | TCR_RGN_OUTER_WRITE_THROUGH | TCR_RGN_INNER_WRITE_THROUGH;
621 } else {
622 // If we failed to find a mapping that contains the root translation table then it probably means the translation table
623 // is not mapped in the given memory map.
624 ASSERT (0);
625 Status = RETURN_UNSUPPORTED;
626 goto FREE_TRANSLATION_TABLE;
627 }
628
629 ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) | // mapped to EFI_MEMORY_UC
630 MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) | // mapped to EFI_MEMORY_WC
631 MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) | // mapped to EFI_MEMORY_WT
632 MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)); // mapped to EFI_MEMORY_WB
633
634 ArmDisableAlignmentCheck ();
635 ArmEnableInstructionCache ();
636 ArmEnableDataCache ();
637
638 ArmEnableMmu ();
639 return RETURN_SUCCESS;
640
641 FREE_TRANSLATION_TABLE:
642 FreePages (TranslationTable, TranslationTablePageCount);
643 return Status;
644 }
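
// Usage sketch (illustrative; the memory map below is an assumption, not a real
// platform layout): platform memory-initialization code typically builds a
// zero-length-terminated array of ARM_MEMORY_REGION_DESCRIPTOR entries covering
// everything it needs mapped (including the DRAM the translation tables are
// allocated from, otherwise the ASSERT above fires) and passes it to
// ArmConfigureMmu():
//
//   ARM_MEMORY_REGION_DESCRIPTOR  MemoryTable[3];
//   VOID                          *TranslationTableBase;
//   UINTN                         TranslationTableSize;
//   RETURN_STATUS                 Status;
//
//   // Hypothetical DRAM region, mapped write-back
//   MemoryTable[0].PhysicalBase = 0x80000000;
//   MemoryTable[0].VirtualBase  = 0x80000000;
//   MemoryTable[0].Length       = SIZE_2GB;
//   MemoryTable[0].Attributes   = ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK;
//
//   // Hypothetical peripheral window, mapped as device memory
//   MemoryTable[1].PhysicalBase = 0x10000000;
//   MemoryTable[1].VirtualBase  = 0x10000000;
//   MemoryTable[1].Length       = SIZE_256MB;
//   MemoryTable[1].Attributes   = ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;
//
//   // Zero-length terminator
//   ZeroMem (&MemoryTable[2], sizeof (MemoryTable[2]));
//
//   Status = ArmConfigureMmu (MemoryTable, &TranslationTableBase, &TranslationTableSize);
//   ASSERT_RETURN_ERROR (Status);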