]> git.proxmox.com Git - mirror_edk2.git/blob - ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
ArmPkg/ArmMmuLib: take MAX_ALLOC_ADDRESS into account
[mirror_edk2.git] / ArmPkg / Library / ArmMmuLib / AArch64 / ArmMmuLibCore.c
1 /** @file
2 * File managing the MMU for ARMv8 architecture
3 *
4 * Copyright (c) 2011-2014, ARM Limited. All rights reserved.
5 * Copyright (c) 2016, Linaro Limited. All rights reserved.
6 * Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
7 *
8 * This program and the accompanying materials
9 * are licensed and made available under the terms and conditions of the BSD License
10 * which accompanies this distribution. The full text of the license may be found at
11 * http://opensource.org/licenses/bsd-license.php
12 *
13 * THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
15 *
16 **/
17
18 #include <Uefi.h>
19 #include <Chipset/AArch64.h>
20 #include <Library/BaseMemoryLib.h>
21 #include <Library/CacheMaintenanceLib.h>
22 #include <Library/MemoryAllocationLib.h>
23 #include <Library/ArmLib.h>
24 #include <Library/ArmMmuLib.h>
25 #include <Library/BaseLib.h>
26 #include <Library/DebugLib.h>
27
28 // We use this index definition to define an invalid block entry
29 #define TT_ATTR_INDX_INVALID ((UINT32)~0)
30
31 STATIC
32 UINT64
33 ArmMemoryAttributeToPageAttribute (
34 IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
35 )
36 {
37 switch (Attributes) {
38 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
39 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
40 return TT_ATTR_INDX_MEMORY_WRITE_BACK;
41
42 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
43 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
44 return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
45
46 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
47 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
48 return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
49
50 // Uncached and device mappings are treated as outer shareable by default,
51 case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
52 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
53 return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
54
55 default:
56 ASSERT(0);
57 case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
58 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
59 if (ArmReadCurrentEL () == AARCH64_EL2)
60 return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
61 else
62 return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
63 }
64 }
65
66 UINT64
67 PageAttributeToGcdAttribute (
68 IN UINT64 PageAttributes
69 )
70 {
71 UINT64 GcdAttributes;
72
73 switch (PageAttributes & TT_ATTR_INDX_MASK) {
74 case TT_ATTR_INDX_DEVICE_MEMORY:
75 GcdAttributes = EFI_MEMORY_UC;
76 break;
77 case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:
78 GcdAttributes = EFI_MEMORY_WC;
79 break;
80 case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:
81 GcdAttributes = EFI_MEMORY_WT;
82 break;
83 case TT_ATTR_INDX_MEMORY_WRITE_BACK:
84 GcdAttributes = EFI_MEMORY_WB;
85 break;
86 default:
87 DEBUG ((EFI_D_ERROR, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes));
88 ASSERT (0);
89 // The Global Coherency Domain (GCD) value is defined as a bit set.
90 // Returning 0 means no attribute has been set.
91 GcdAttributes = 0;
92 }
93
94 // Determine protection attributes
95 if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) || ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {
96 // Read only cases map to write-protect
97 GcdAttributes |= EFI_MEMORY_RO;
98 }
99
100 // Process eXecute Never attribute
101 if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0 ) {
102 GcdAttributes |= EFI_MEMORY_XP;
103 }
104
105 return GcdAttributes;
106 }
107
108 #define MIN_T0SZ 16
109 #define BITS_PER_LEVEL 9
110
111 VOID
112 GetRootTranslationTableInfo (
113 IN UINTN T0SZ,
114 OUT UINTN *TableLevel,
115 OUT UINTN *TableEntryCount
116 )
117 {
118 // Get the level of the root table
119 if (TableLevel) {
120 *TableLevel = (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;
121 }
122
123 if (TableEntryCount) {
124 *TableEntryCount = 1UL << (BITS_PER_LEVEL - (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL);
125 }
126 }
127
128 STATIC
129 VOID
130 ReplaceLiveEntry (
131 IN UINT64 *Entry,
132 IN UINT64 Value
133 )
134 {
135 if (!ArmMmuEnabled ()) {
136 *Entry = Value;
137 } else {
138 ArmReplaceLiveTranslationEntry (Entry, Value);
139 }
140 }
141
142 STATIC
143 VOID
144 LookupAddresstoRootTable (
145 IN UINT64 MaxAddress,
146 OUT UINTN *T0SZ,
147 OUT UINTN *TableEntryCount
148 )
149 {
150 UINTN TopBit;
151
152 // Check the parameters are not NULL
153 ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));
154
155 // Look for the highest bit set in MaxAddress
156 for (TopBit = 63; TopBit != 0; TopBit--) {
157 if ((1ULL << TopBit) & MaxAddress) {
158 // MaxAddress top bit is found
159 TopBit = TopBit + 1;
160 break;
161 }
162 }
163 ASSERT (TopBit != 0);
164
165 // Calculate T0SZ from the top bit of the MaxAddress
166 *T0SZ = 64 - TopBit;
167
168 // Get the Table info from T0SZ
169 GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);
170 }
171
/**
  Walk (and, where needed, extend) the translation tables to find the
  descriptor that covers RegionStart at the deepest level compatible with
  the region's alignment and size.

  Existing block entries that are too coarse are split into a lower-level
  table whose entries replicate the original attributes; missing entries
  get a fresh zeroed table.  New tables come from AllocatePages() and are
  permanently donated to the page tables (never freed here).

  @param  RootTable       Root translation table (TTBR0 value).
  @param  RegionStart     Start of the region; must be 4 KB aligned.
  @param  TableLevel      Receives the level of the returned entry.
  @param  BlockEntrySize  On input, the region size; on output, the block
                          size covered by one entry at *TableLevel.
  @param  LastBlockEntry  Receives the address of the last entry of the
                          table containing the returned entry.

  @return Pointer to the descriptor for RegionStart, or NULL on bad
          parameters or allocation failure.
**/
STATIC
UINT64*
GetBlockEntryListFromAddress (
  IN  UINT64  *RootTable,
  IN  UINT64  RegionStart,
  OUT UINTN   *TableLevel,
  IN OUT UINT64  *BlockEntrySize,
  OUT UINT64  **LastBlockEntry
  )
{
  UINTN   RootTableLevel;
  UINTN   RootTableEntryCount;
  UINT64  *TranslationTable;
  UINT64  *BlockEntry;
  UINT64  *SubTableBlockEntry;
  UINT64  BlockEntryAddress;
  UINTN   BaseAddressAlignment;
  UINTN   PageLevel;
  UINTN   Index;
  UINTN   IndexLevel;
  UINTN   T0SZ;
  UINT64  Attributes;
  UINT64  TableAttributes;

  // Initialize variable
  BlockEntry = NULL;

  // Ensure the parameters are valid
  if (!(TableLevel && BlockEntrySize && LastBlockEntry)) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // Ensure the Region is aligned on 4KB boundary
  if ((RegionStart & (SIZE_4KB - 1)) != 0) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // Ensure the required size is aligned on 4KB boundary and not 0
  if ((*BlockEntrySize & (SIZE_4KB - 1)) != 0 || *BlockEntrySize == 0) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // T0SZ was programmed by ArmConfigureMmu(); recover root table geometry from it
  T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
  // Get the Table info from T0SZ
  GetRootTranslationTableInfo (T0SZ, &RootTableLevel, &RootTableEntryCount);

  // If the start address is 0x0 then we use the size of the region to identify the alignment
  if (RegionStart == 0) {
    // Identify the highest possible alignment for the Region Size
    BaseAddressAlignment = LowBitSet64 (*BlockEntrySize);
  } else {
    // Identify the highest possible alignment for the Base Address
    BaseAddressAlignment = LowBitSet64 (RegionStart);
  }

  // Identify the Page Level the RegionStart must belong to. Note that PageLevel
  // should be at least 1 since block translations are not supported at level 0
  // (each level resolves 9 address bits on top of the 12-bit page offset)
  PageLevel = MAX (3 - ((BaseAddressAlignment - 12) / 9), 1);

  // If the required size is smaller than the current block size then we need to go to the page below.
  // The PageLevel was calculated on the Base Address alignment but did not take in account the alignment
  // of the allocation size
  while (*BlockEntrySize < TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel)) {
    // It does not fit so we need to go a page level above
    PageLevel++;
  }

  //
  // Get the Table Descriptor for the corresponding PageLevel. We need to decompose RegionStart to get appropriate entries
  //

  TranslationTable = RootTable;
  for (IndexLevel = RootTableLevel; IndexLevel <= PageLevel; IndexLevel++) {
    BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel, RegionStart);

    if ((IndexLevel != 3) && ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY)) {
      // Go to the next table
      TranslationTable = (UINT64*)(*BlockEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE);

      // If we are at the last level then update the last level to next level
      if (IndexLevel == PageLevel) {
        // Enter the next level
        PageLevel++;
      }
    } else if ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
      // If we are not at the last level then we need to split this BlockEntry
      if (IndexLevel != PageLevel) {
        // Retrieve the attributes from the block entry
        Attributes = *BlockEntry & TT_ATTRIBUTES_MASK;

        // Convert the block entry attributes into Table descriptor attributes
        TableAttributes = TT_TABLE_AP_NO_PERMISSION;
        if (Attributes & TT_NS) {
          TableAttributes = TT_TABLE_NS;
        }

        // Get the address corresponding at this entry
        BlockEntryAddress = RegionStart;
        BlockEntryAddress = BlockEntryAddress >> TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
        // Shift back to right to set zero before the effective address
        BlockEntryAddress = BlockEntryAddress << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);

        // Set the correct entry type for the next page level
        // (level 3 uses a distinct "page" descriptor encoding)
        if ((IndexLevel + 1) == 3) {
          Attributes |= TT_TYPE_BLOCK_ENTRY_LEVEL3;
        } else {
          Attributes |= TT_TYPE_BLOCK_ENTRY;
        }

        // Create a new translation table
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return NULL;
        }

        // Populate the newly created lower level table so it replicates
        // the mapping of the block entry it is about to replace
        SubTableBlockEntry = TranslationTable;
        for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
          *SubTableBlockEntry = Attributes | (BlockEntryAddress + (Index << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel + 1)));
          SubTableBlockEntry++;
        }

        // Fill the BlockEntry with the new TranslationTable
        // (break-before-make safe if the MMU is already enabled)
        ReplaceLiveEntry (BlockEntry,
          ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TableAttributes | TT_TYPE_TABLE_ENTRY);
      }
    } else {
      if (IndexLevel != PageLevel) {
        //
        // Case when we have an Invalid Entry and we are at a page level above of the one targetted.
        //

        // Create a new translation table
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return NULL;
        }

        // All entries start out invalid
        ZeroMem (TranslationTable, TT_ENTRY_COUNT * sizeof(UINT64));

        // Fill the new BlockEntry with the TranslationTable
        *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TT_TYPE_TABLE_ENTRY;
      }
    }
  }

  // Expose the found PageLevel to the caller
  *TableLevel = PageLevel;

  // Now, we have the Table Level we can get the Block Size associated to this table
  *BlockEntrySize = TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel);

  // The last block of the root table depends on the number of entry in this table,
  // otherwise it is always the (TT_ENTRY_COUNT - 1)th entry in the table.
  *LastBlockEntry = TT_LAST_BLOCK_ADDRESS(TranslationTable,
      (PageLevel == RootTableLevel) ? RootTableEntryCount : TT_ENTRY_COUNT);

  return BlockEntry;
}
334
/**
  Map or re-map the region [RegionStart, RegionStart + RegionLength) in
  the given translation tables.

  Each affected descriptor is updated as:
    *Entry = (*Entry & BlockEntryMask) | address | Attributes | type
  so BlockEntryMask selects which existing attribute bits survive
  (pass 0 to fully overwrite) and Attributes supplies the new bits.

  @param  RootTable       Root translation table to operate on.
  @param  RegionStart     Start of the region; must be 4 KB aligned.
  @param  RegionLength    Length of the region; must be 4 KB aligned, non-zero.
  @param  Attributes      Descriptor attribute bits to OR in.
  @param  BlockEntryMask  Mask of existing descriptor bits to preserve.

  @retval EFI_SUCCESS            Region mapped.
  @retval EFI_INVALID_PARAMETER  Bad length.
  @retval EFI_OUT_OF_RESOURCES   Page table allocation failed.
**/
STATIC
EFI_STATUS
UpdateRegionMapping (
  IN  UINT64  *RootTable,
  IN  UINT64  RegionStart,
  IN  UINT64  RegionLength,
  IN  UINT64  Attributes,
  IN  UINT64  BlockEntryMask
  )
{
  UINT32  Type;
  UINT64  *BlockEntry;
  UINT64  *LastBlockEntry;
  UINT64  BlockEntrySize;
  UINTN   TableLevel;

  // Ensure the Length is aligned on 4KB boundary
  if ((RegionLength == 0) || ((RegionLength & (SIZE_4KB - 1)) != 0)) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return EFI_INVALID_PARAMETER;
  }

  // Outer loop: restart the table walk each time the block size, table
  // level, or table changes for the remaining part of the region.
  do {
    // Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor
    // such as the the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor
    BlockEntrySize = RegionLength;
    BlockEntry = GetBlockEntryListFromAddress (RootTable, RegionStart, &TableLevel, &BlockEntrySize, &LastBlockEntry);
    if (BlockEntry == NULL) {
      // GetBlockEntryListFromAddress() return NULL when it fails to allocate new pages from the Translation Tables
      return EFI_OUT_OF_RESOURCES;
    }

    // Level 3 uses the distinct "page" descriptor encoding
    if (TableLevel != 3) {
      Type = TT_TYPE_BLOCK_ENTRY;
    } else {
      Type = TT_TYPE_BLOCK_ENTRY_LEVEL3;
    }

    // Inner loop: fill consecutive entries of the current table while the
    // remaining length still covers whole blocks of the current size.
    do {
      // Fill the Block Entry with attribute and output block address
      *BlockEntry &= BlockEntryMask;
      *BlockEntry |= (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;

      // Go to the next BlockEntry
      RegionStart += BlockEntrySize;
      RegionLength -= BlockEntrySize;
      BlockEntry++;

      // Break the inner loop when next block is a table
      // Rerun GetBlockEntryListFromAddress to avoid page table memory leak
      if (TableLevel != 3 &&
          (*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
        break;
      }
    } while ((RegionLength >= BlockEntrySize) && (BlockEntry <= LastBlockEntry));
  } while (RegionLength != 0);

  return EFI_SUCCESS;
}
394
395 STATIC
396 EFI_STATUS
397 FillTranslationTable (
398 IN UINT64 *RootTable,
399 IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion
400 )
401 {
402 return UpdateRegionMapping (
403 RootTable,
404 MemoryRegion->VirtualBase,
405 MemoryRegion->Length,
406 ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
407 0
408 );
409 }
410
411 STATIC
412 UINT64
413 GcdAttributeToPageAttribute (
414 IN UINT64 GcdAttributes
415 )
416 {
417 UINT64 PageAttributes;
418
419 switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) {
420 case EFI_MEMORY_UC:
421 PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
422 break;
423 case EFI_MEMORY_WC:
424 PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
425 break;
426 case EFI_MEMORY_WT:
427 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
428 break;
429 case EFI_MEMORY_WB:
430 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
431 break;
432 default:
433 PageAttributes = TT_ATTR_INDX_MASK;
434 break;
435 }
436
437 if ((GcdAttributes & EFI_MEMORY_XP) != 0 ||
438 (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC) {
439 if (ArmReadCurrentEL () == AARCH64_EL2) {
440 PageAttributes |= TT_XN_MASK;
441 } else {
442 PageAttributes |= TT_UXN_MASK | TT_PXN_MASK;
443 }
444 }
445
446 if ((GcdAttributes & EFI_MEMORY_RO) != 0) {
447 PageAttributes |= TT_AP_RO_RO;
448 }
449
450 return PageAttributes | TT_AF;
451 }
452
453 EFI_STATUS
454 ArmSetMemoryAttributes (
455 IN EFI_PHYSICAL_ADDRESS BaseAddress,
456 IN UINT64 Length,
457 IN UINT64 Attributes
458 )
459 {
460 EFI_STATUS Status;
461 UINT64 *TranslationTable;
462 UINT64 PageAttributes;
463 UINT64 PageAttributeMask;
464
465 PageAttributes = GcdAttributeToPageAttribute (Attributes);
466 PageAttributeMask = 0;
467
468 if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {
469 //
470 // No memory type was set in Attributes, so we are going to update the
471 // permissions only.
472 //
473 PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;
474 PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |
475 TT_PXN_MASK | TT_XN_MASK);
476 }
477
478 TranslationTable = ArmGetTTBR0BaseAddress ();
479
480 Status = UpdateRegionMapping (
481 TranslationTable,
482 BaseAddress,
483 Length,
484 PageAttributes,
485 PageAttributeMask);
486 if (EFI_ERROR (Status)) {
487 return Status;
488 }
489
490 // Invalidate all TLB entries so changes are synced
491 ArmInvalidateTlb ();
492
493 return EFI_SUCCESS;
494 }
495
496 STATIC
497 EFI_STATUS
498 SetMemoryRegionAttribute (
499 IN EFI_PHYSICAL_ADDRESS BaseAddress,
500 IN UINT64 Length,
501 IN UINT64 Attributes,
502 IN UINT64 BlockEntryMask
503 )
504 {
505 EFI_STATUS Status;
506 UINT64 *RootTable;
507
508 RootTable = ArmGetTTBR0BaseAddress ();
509
510 Status = UpdateRegionMapping (RootTable, BaseAddress, Length, Attributes, BlockEntryMask);
511 if (EFI_ERROR (Status)) {
512 return Status;
513 }
514
515 // Invalidate all TLB entries so changes are synced
516 ArmInvalidateTlb ();
517
518 return EFI_SUCCESS;
519 }
520
521 EFI_STATUS
522 ArmSetMemoryRegionNoExec (
523 IN EFI_PHYSICAL_ADDRESS BaseAddress,
524 IN UINT64 Length
525 )
526 {
527 UINT64 Val;
528
529 if (ArmReadCurrentEL () == AARCH64_EL1) {
530 Val = TT_PXN_MASK | TT_UXN_MASK;
531 } else {
532 Val = TT_XN_MASK;
533 }
534
535 return SetMemoryRegionAttribute (
536 BaseAddress,
537 Length,
538 Val,
539 ~TT_ADDRESS_MASK_BLOCK_ENTRY);
540 }
541
542 EFI_STATUS
543 ArmClearMemoryRegionNoExec (
544 IN EFI_PHYSICAL_ADDRESS BaseAddress,
545 IN UINT64 Length
546 )
547 {
548 UINT64 Mask;
549
550 // XN maps to UXN in the EL1&0 translation regime
551 Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);
552
553 return SetMemoryRegionAttribute (
554 BaseAddress,
555 Length,
556 0,
557 Mask);
558 }
559
560 EFI_STATUS
561 ArmSetMemoryRegionReadOnly (
562 IN EFI_PHYSICAL_ADDRESS BaseAddress,
563 IN UINT64 Length
564 )
565 {
566 return SetMemoryRegionAttribute (
567 BaseAddress,
568 Length,
569 TT_AP_RO_RO,
570 ~TT_ADDRESS_MASK_BLOCK_ENTRY);
571 }
572
573 EFI_STATUS
574 ArmClearMemoryRegionReadOnly (
575 IN EFI_PHYSICAL_ADDRESS BaseAddress,
576 IN UINT64 Length
577 )
578 {
579 return SetMemoryRegionAttribute (
580 BaseAddress,
581 Length,
582 TT_AP_RW_RW,
583 ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));
584 }
585
/**
  Build the translation tables for the given memory map, program TCR/MAIR/
  TTBR0, and enable the MMU and caches.

  The virtual address space is limited to MIN(physical address bits,
  MAX_ALLOC_ADDRESS) since UEFI mandates a 1:1 mapping.

  @param  MemoryTable           Array of region descriptors, terminated by
                                an entry with Length == 0.  Must not be NULL.
  @param  TranslationTableBase  If not NULL, receives the root table address.
  @param  TranslationTableSize  If not NULL, receives the root table size
                                in bytes.

  @retval EFI_SUCCESS            MMU configured and enabled.
  @retval EFI_INVALID_PARAMETER  MemoryTable is NULL.
  @retval EFI_UNSUPPORTED        Address space too large, or running at EL3.
  @retval EFI_OUT_OF_RESOURCES   Table allocation failed.
**/
EFI_STATUS
EFIAPI
ArmConfigureMmu (
  IN  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryTable,
  OUT VOID                          **TranslationTableBase OPTIONAL,
  OUT UINTN                         *TranslationTableSize OPTIONAL
  )
{
  VOID*       TranslationTable;
  UINT32      TranslationTableAttribute;
  UINT64      MaxAddress;
  UINTN       T0SZ;
  UINTN       RootTableEntryCount;
  UINT64      TCR;
  EFI_STATUS  Status;

  if(MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return EFI_INVALID_PARAMETER;
  }

  //
  // Limit the virtual address space to what we can actually use: UEFI
  // mandates a 1:1 mapping, so no point in making the virtual address
  // space larger than the physical address space. We also have to take
  // into account the architectural limitations that result from UEFI's
  // use of 4 KB pages.
  //
  MaxAddress = MIN (LShiftU64 (1ULL, ArmGetPhysicalAddressBits ()) - 1,
                    MAX_ALLOC_ADDRESS);

  // Lookup the Table Level to get the information
  LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);

  //
  // Set TCR that allows us to retrieve T0SZ in the subsequent functions
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    //Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return EFI_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    // (EL1 uses the IPS field instead of EL2's PS field)
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return EFI_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return EFI_UNSUPPORTED;
  }

  //
  // Translation table walks are always cache coherent on ARMv8-A, so cache
  // maintenance on page tables is never needed. Since there is a risk of
  // loss of coherency when using mismatched attributes, and given that memory
  // is mapped cacheable except for extraordinary cases (such as non-coherent
  // DMA), have the page table walker perform cached accesses as well, and
  // assert below that that matches the attributes we use for CPU accesses to
  // the region.
  //
  TCR |= TCR_SH_INNER_SHAREABLE |
         TCR_RGN_OUTER_WRITE_BACK_ALLOC |
         TCR_RGN_INNER_WRITE_BACK_ALLOC;

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for translation table
  // (one 4 KB page is sufficient: the root table has at most 512 entries)
  TranslationTable = AllocatePages (1);
  if (TranslationTable == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }
  // We set TTBR0 just after allocating the table to retrieve its location from the subsequent
  // functions without needing to pass this value across the functions. The MMU is only enabled
  // after the translation tables are populated.
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof(UINT64);
  }

  ZeroMem (TranslationTable, RootTableEntryCount * sizeof(UINT64));

  // Disable MMU and caches. ArmDisableMmu() also invalidates the TLBs
  ArmDisableMmu ();
  ArmDisableDataCache ();
  ArmDisableInstructionCache ();

  // Make sure nothing sneaked into the cache
  ArmCleanInvalidateDataCache ();
  ArmInvalidateInstructionCache ();

  TranslationTableAttribute = TT_ATTR_INDX_INVALID;
  while (MemoryTable->Length != 0) {

    DEBUG_CODE_BEGIN ();
      // Find the memory attribute for the Translation Table
      // (used only by the write-back sanity ASSERT below)
      if ((UINTN)TranslationTable >= MemoryTable->PhysicalBase &&
          (UINTN)TranslationTable + EFI_PAGE_SIZE <= MemoryTable->PhysicalBase +
                                                     MemoryTable->Length) {
        TranslationTableAttribute = MemoryTable->Attributes;
      }
    DEBUG_CODE_END ();

    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (EFI_ERROR (Status)) {
      goto FREE_TRANSLATION_TABLE;
    }
    MemoryTable++;
  }

  // The table walker was configured for write-back accesses above, so the
  // tables themselves must live in write-back mapped memory.
  ASSERT (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK ||
          TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK);

  ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |                      // mapped to EFI_MEMORY_UC
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) | // mapped to EFI_MEMORY_WC
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) | // mapped to EFI_MEMORY_WT
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK));       // mapped to EFI_MEMORY_WB

  ArmDisableAlignmentCheck ();
  ArmEnableStackAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return EFI_SUCCESS;

FREE_TRANSLATION_TABLE:
  FreePages (TranslationTable, 1);
  return Status;
}
758
759 RETURN_STATUS
760 EFIAPI
761 ArmMmuBaseLibConstructor (
762 VOID
763 )
764 {
765 extern UINT32 ArmReplaceLiveTranslationEntrySize;
766
767 //
768 // The ArmReplaceLiveTranslationEntry () helper function may be invoked
769 // with the MMU off so we have to ensure that it gets cleaned to the PoC
770 //
771 WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry,
772 ArmReplaceLiveTranslationEntrySize);
773
774 return RETURN_SUCCESS;
775 }