// Mirror of edk2: ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
// (gitweb navigation header removed; commit subject was "ArmPkg: Fix various typos")
1 /** @file
2 * File managing the MMU for ARMv8 architecture
3 *
4 * Copyright (c) 2011-2014, ARM Limited. All rights reserved.
5 * Copyright (c) 2016, Linaro Limited. All rights reserved.
6 * Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
7 *
8 * SPDX-License-Identifier: BSD-2-Clause-Patent
9 *
10 **/
11
12 #include <Uefi.h>
13 #include <Chipset/AArch64.h>
14 #include <Library/BaseMemoryLib.h>
15 #include <Library/CacheMaintenanceLib.h>
16 #include <Library/MemoryAllocationLib.h>
17 #include <Library/ArmLib.h>
18 #include <Library/ArmMmuLib.h>
19 #include <Library/BaseLib.h>
20 #include <Library/DebugLib.h>
21
22 // We use this index definition to define an invalid block entry
23 #define TT_ATTR_INDX_INVALID ((UINT32)~0)
24
STATIC
UINT64
ArmMemoryAttributeToPageAttribute (
  IN ARM_MEMORY_REGION_ATTRIBUTES  Attributes
  )
{
  //
  // Convert an ARM_MEMORY_REGION_ATTRIBUTES value into the corresponding
  // translation table descriptor bits: the MAIR attribute index plus, where
  // applicable, shareability and execute-never bits.
  //
  switch (Attributes) {
  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
    // Write-back cached but explicitly non-shareable: no TT_SH_* bits set.
    return TT_ATTR_INDX_MEMORY_WRITE_BACK;

  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
    // Normal cacheable memory is mapped inner shareable so it stays coherent
    // across cores.
    return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;

  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
    return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;

  // Uncached and device mappings are treated as outer shareable by default,
  case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
    return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;

  default:
    // Unknown attribute: assert in debug builds, then deliberately fall
    // through and map it as (non-executable) device memory.
    ASSERT(0);
  case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
    // Device memory must never be executable. EL2 has a single XN bit;
    // the EL1&0 translation regime uses separate UXN/PXN bits.
    if (ArmReadCurrentEL () == AARCH64_EL2)
      return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
    else
      return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
  }
}
59
60 UINT64
61 PageAttributeToGcdAttribute (
62 IN UINT64 PageAttributes
63 )
64 {
65 UINT64 GcdAttributes;
66
67 switch (PageAttributes & TT_ATTR_INDX_MASK) {
68 case TT_ATTR_INDX_DEVICE_MEMORY:
69 GcdAttributes = EFI_MEMORY_UC;
70 break;
71 case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:
72 GcdAttributes = EFI_MEMORY_WC;
73 break;
74 case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:
75 GcdAttributes = EFI_MEMORY_WT;
76 break;
77 case TT_ATTR_INDX_MEMORY_WRITE_BACK:
78 GcdAttributes = EFI_MEMORY_WB;
79 break;
80 default:
81 DEBUG ((EFI_D_ERROR, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes));
82 ASSERT (0);
83 // The Global Coherency Domain (GCD) value is defined as a bit set.
84 // Returning 0 means no attribute has been set.
85 GcdAttributes = 0;
86 }
87
88 // Determine protection attributes
89 if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) || ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {
90 // Read only cases map to write-protect
91 GcdAttributes |= EFI_MEMORY_RO;
92 }
93
94 // Process eXecute Never attribute
95 if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0 ) {
96 GcdAttributes |= EFI_MEMORY_XP;
97 }
98
99 return GcdAttributes;
100 }
101
102 #define MIN_T0SZ 16
103 #define BITS_PER_LEVEL 9
104
105 VOID
106 GetRootTranslationTableInfo (
107 IN UINTN T0SZ,
108 OUT UINTN *TableLevel,
109 OUT UINTN *TableEntryCount
110 )
111 {
112 // Get the level of the root table
113 if (TableLevel) {
114 *TableLevel = (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;
115 }
116
117 if (TableEntryCount) {
118 *TableEntryCount = 1UL << (BITS_PER_LEVEL - (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL);
119 }
120 }
121
122 STATIC
123 VOID
124 ReplaceLiveEntry (
125 IN UINT64 *Entry,
126 IN UINT64 Value,
127 IN UINT64 RegionStart
128 )
129 {
130 if (!ArmMmuEnabled ()) {
131 *Entry = Value;
132 } else {
133 ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart);
134 }
135 }
136
137 STATIC
138 VOID
139 LookupAddresstoRootTable (
140 IN UINT64 MaxAddress,
141 OUT UINTN *T0SZ,
142 OUT UINTN *TableEntryCount
143 )
144 {
145 UINTN TopBit;
146
147 // Check the parameters are not NULL
148 ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));
149
150 // Look for the highest bit set in MaxAddress
151 for (TopBit = 63; TopBit != 0; TopBit--) {
152 if ((1ULL << TopBit) & MaxAddress) {
153 // MaxAddress top bit is found
154 TopBit = TopBit + 1;
155 break;
156 }
157 }
158 ASSERT (TopBit != 0);
159
160 // Calculate T0SZ from the top bit of the MaxAddress
161 *T0SZ = 64 - TopBit;
162
163 // Get the Table info from T0SZ
164 GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);
165 }
166
STATIC
UINT64*
GetBlockEntryListFromAddress (
  IN UINT64 *RootTable,
  IN UINT64 RegionStart,
  OUT UINTN *TableLevel,
  IN OUT UINT64 *BlockEntrySize,
  OUT UINT64 **LastBlockEntry
  )
{
  //
  // Walk the translation tables from RootTable and return a pointer to the
  // block/page entry that maps RegionStart at the most suitable level,
  // creating or splitting intermediate tables as needed.
  //
  // On input, *BlockEntrySize is the size of the region to be mapped; on
  // output it is the block size of the returned level. *TableLevel receives
  // the level of the returned entry and *LastBlockEntry the address of the
  // last entry of that table (so the caller can iterate without overrunning).
  //
  // Returns NULL on invalid parameters or if a table allocation fails.
  //
  UINTN   RootTableLevel;
  UINTN   RootTableEntryCount;
  UINT64 *TranslationTable;
  UINT64 *BlockEntry;
  UINT64 *SubTableBlockEntry;
  UINT64  BlockEntryAddress;
  UINTN   BaseAddressAlignment;
  UINTN   PageLevel;
  UINTN   Index;
  UINTN   IndexLevel;
  UINTN   T0SZ;
  UINT64  Attributes;
  UINT64  TableAttributes;

  // Initialize variable
  BlockEntry = NULL;

  // Ensure the parameters are valid
  if (!(TableLevel && BlockEntrySize && LastBlockEntry)) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // Ensure the Region is aligned on 4KB boundary
  if ((RegionStart & (SIZE_4KB - 1)) != 0) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // Ensure the required size is aligned on 4KB boundary and not 0
  if ((*BlockEntrySize & (SIZE_4KB - 1)) != 0 || *BlockEntrySize == 0) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // T0SZ was programmed into TCR by ArmConfigureMmu(); recover the root
  // table geometry from it rather than passing it around.
  T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
  // Get the Table info from T0SZ
  GetRootTranslationTableInfo (T0SZ, &RootTableLevel, &RootTableEntryCount);

  // If the start address is 0x0 then we use the size of the region to identify the alignment
  if (RegionStart == 0) {
    // Identify the highest possible alignment for the Region Size
    BaseAddressAlignment = LowBitSet64 (*BlockEntrySize);
  } else {
    // Identify the highest possible alignment for the Base Address
    BaseAddressAlignment = LowBitSet64 (RegionStart);
  }

  // Identify the Page Level the RegionStart must belong to. Note that PageLevel
  // should be at least 1 since block translations are not supported at level 0
  PageLevel = MAX (3 - ((BaseAddressAlignment - 12) / 9), 1);

  // If the required size is smaller than the current block size then we need to go to the page below.
  // The PageLevel was calculated on the Base Address alignment but did not take in account the alignment
  // of the allocation size
  while (*BlockEntrySize < TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel)) {
    // It does not fit so we need to go a page level above
    PageLevel++;
  }

  //
  // Get the Table Descriptor for the corresponding PageLevel. We need to decompose RegionStart to get appropriate entries
  //

  TranslationTable = RootTable;
  for (IndexLevel = RootTableLevel; IndexLevel <= PageLevel; IndexLevel++) {
    BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel, RegionStart);

    if ((IndexLevel != 3) && ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY)) {
      // Go to the next table
      TranslationTable = (UINT64*)(*BlockEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE);

      // If we are at the last level then update the last level to next level
      if (IndexLevel == PageLevel) {
        // Enter the next level
        PageLevel++;
      }
    } else if ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
      // If we are not at the last level then we need to split this BlockEntry
      if (IndexLevel != PageLevel) {
        // Retrieve the attributes from the block entry
        Attributes = *BlockEntry & TT_ATTRIBUTES_MASK;

        // Convert the block entry attributes into Table descriptor attributes
        TableAttributes = TT_TABLE_AP_NO_PERMISSION;
        if (Attributes & TT_NS) {
          TableAttributes = TT_TABLE_NS;
        }

        // Get the address corresponding at this entry
        BlockEntryAddress = RegionStart;
        BlockEntryAddress = BlockEntryAddress >> TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
        // Shift back to right to set zero before the effective address
        BlockEntryAddress = BlockEntryAddress << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);

        // Set the correct entry type for the next page level
        // (level 3 uses a distinct descriptor encoding for pages)
        if ((IndexLevel + 1) == 3) {
          Attributes |= TT_TYPE_BLOCK_ENTRY_LEVEL3;
        } else {
          Attributes |= TT_TYPE_BLOCK_ENTRY;
        }

        // Create a new translation table
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return NULL;
        }

        // Populate the newly created lower level table so that it maps the
        // exact same region with the inherited attributes.
        SubTableBlockEntry = TranslationTable;
        for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
          *SubTableBlockEntry = Attributes | (BlockEntryAddress + (Index << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel + 1)));
          SubTableBlockEntry++;
        }

        // Fill the BlockEntry with the new TranslationTable, using the
        // break-before-make helper since the entry may be live.
        ReplaceLiveEntry (BlockEntry,
          (UINTN)TranslationTable | TableAttributes | TT_TYPE_TABLE_ENTRY,
          RegionStart);
      }
    } else {
      if (IndexLevel != PageLevel) {
        //
        // Case when we have an Invalid Entry and we are at a page level above of the one targetted.
        //

        // Create a new translation table
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return NULL;
        }

        ZeroMem (TranslationTable, TT_ENTRY_COUNT * sizeof(UINT64));

        // Fill the new BlockEntry with the TranslationTable
        *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TT_TYPE_TABLE_ENTRY;
      }
    }
  }

  // Expose the found PageLevel to the caller
  *TableLevel = PageLevel;

  // Now, we have the Table Level we can get the Block Size associated to this table
  *BlockEntrySize = TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel);

  // The last block of the root table depends on the number of entry in this table,
  // otherwise it is always the (TT_ENTRY_COUNT - 1)th entry in the table.
  *LastBlockEntry = TT_LAST_BLOCK_ADDRESS(TranslationTable,
      (PageLevel == RootTableLevel) ? RootTableEntryCount : TT_ENTRY_COUNT);

  return BlockEntry;
}
330
STATIC
EFI_STATUS
UpdateRegionMapping (
  IN UINT64 *RootTable,
  IN UINT64 RegionStart,
  IN UINT64 RegionLength,
  IN UINT64 Attributes,
  IN UINT64 BlockEntryMask
  )
{
  //
  // Map or update [RegionStart, RegionStart + RegionLength) in the table
  // hierarchy rooted at RootTable. Each affected entry is first ANDed with
  // BlockEntryMask (to preserve selected existing bits), then ORed with
  // Attributes and the entry type. Pass BlockEntryMask == 0 to fully
  // replace entries.
  //
  // Returns EFI_INVALID_PARAMETER for a zero or unaligned length, and
  // EFI_OUT_OF_RESOURCES if a page table allocation fails.
  //
  UINT32  Type;
  UINT64 *BlockEntry;
  UINT64 *LastBlockEntry;
  UINT64  BlockEntrySize;
  UINTN   TableLevel;

  // Ensure the Length is aligned on 4KB boundary
  if ((RegionLength == 0) || ((RegionLength & (SIZE_4KB - 1)) != 0)) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return EFI_INVALID_PARAMETER;
  }

  do {
    // Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor
    // such as the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor
    BlockEntrySize = RegionLength;
    BlockEntry = GetBlockEntryListFromAddress (RootTable, RegionStart, &TableLevel, &BlockEntrySize, &LastBlockEntry);
    if (BlockEntry == NULL) {
      // GetBlockEntryListFromAddress() return NULL when it fails to allocate new pages from the Translation Tables
      return EFI_OUT_OF_RESOURCES;
    }

    // Level 3 entries use the page descriptor encoding; levels 1-2 use the
    // block descriptor encoding.
    if (TableLevel != 3) {
      Type = TT_TYPE_BLOCK_ENTRY;
    } else {
      Type = TT_TYPE_BLOCK_ENTRY_LEVEL3;
    }

    do {
      // Fill the Block Entry with attribute and output block address
      *BlockEntry &= BlockEntryMask;
      *BlockEntry |= (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;

      // Invalidate the TLB for the updated (1:1 mapped) address
      ArmUpdateTranslationTableEntry (BlockEntry, (VOID *)RegionStart);

      // Go to the next BlockEntry
      RegionStart += BlockEntrySize;
      RegionLength -= BlockEntrySize;
      BlockEntry++;

      // Break the inner loop when next block is a table
      // Rerun GetBlockEntryListFromAddress to avoid page table memory leak
      if (TableLevel != 3 && BlockEntry <= LastBlockEntry &&
          (*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
        break;
      }
    } while ((RegionLength >= BlockEntrySize) && (BlockEntry <= LastBlockEntry));
  } while (RegionLength != 0);

  return EFI_SUCCESS;
}
392
393 STATIC
394 EFI_STATUS
395 FillTranslationTable (
396 IN UINT64 *RootTable,
397 IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion
398 )
399 {
400 return UpdateRegionMapping (
401 RootTable,
402 MemoryRegion->VirtualBase,
403 MemoryRegion->Length,
404 ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
405 0
406 );
407 }
408
409 STATIC
410 UINT64
411 GcdAttributeToPageAttribute (
412 IN UINT64 GcdAttributes
413 )
414 {
415 UINT64 PageAttributes;
416
417 switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) {
418 case EFI_MEMORY_UC:
419 PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
420 break;
421 case EFI_MEMORY_WC:
422 PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
423 break;
424 case EFI_MEMORY_WT:
425 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
426 break;
427 case EFI_MEMORY_WB:
428 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
429 break;
430 default:
431 PageAttributes = TT_ATTR_INDX_MASK;
432 break;
433 }
434
435 if ((GcdAttributes & EFI_MEMORY_XP) != 0 ||
436 (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC) {
437 if (ArmReadCurrentEL () == AARCH64_EL2) {
438 PageAttributes |= TT_XN_MASK;
439 } else {
440 PageAttributes |= TT_UXN_MASK | TT_PXN_MASK;
441 }
442 }
443
444 if ((GcdAttributes & EFI_MEMORY_RO) != 0) {
445 PageAttributes |= TT_AP_RO_RO;
446 }
447
448 return PageAttributes | TT_AF;
449 }
450
451 EFI_STATUS
452 ArmSetMemoryAttributes (
453 IN EFI_PHYSICAL_ADDRESS BaseAddress,
454 IN UINT64 Length,
455 IN UINT64 Attributes
456 )
457 {
458 EFI_STATUS Status;
459 UINT64 *TranslationTable;
460 UINT64 PageAttributes;
461 UINT64 PageAttributeMask;
462
463 PageAttributes = GcdAttributeToPageAttribute (Attributes);
464 PageAttributeMask = 0;
465
466 if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {
467 //
468 // No memory type was set in Attributes, so we are going to update the
469 // permissions only.
470 //
471 PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;
472 PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |
473 TT_PXN_MASK | TT_XN_MASK);
474 }
475
476 TranslationTable = ArmGetTTBR0BaseAddress ();
477
478 Status = UpdateRegionMapping (
479 TranslationTable,
480 BaseAddress,
481 Length,
482 PageAttributes,
483 PageAttributeMask);
484 if (EFI_ERROR (Status)) {
485 return Status;
486 }
487
488 return EFI_SUCCESS;
489 }
490
491 STATIC
492 EFI_STATUS
493 SetMemoryRegionAttribute (
494 IN EFI_PHYSICAL_ADDRESS BaseAddress,
495 IN UINT64 Length,
496 IN UINT64 Attributes,
497 IN UINT64 BlockEntryMask
498 )
499 {
500 EFI_STATUS Status;
501 UINT64 *RootTable;
502
503 RootTable = ArmGetTTBR0BaseAddress ();
504
505 Status = UpdateRegionMapping (RootTable, BaseAddress, Length, Attributes, BlockEntryMask);
506 if (EFI_ERROR (Status)) {
507 return Status;
508 }
509
510 return EFI_SUCCESS;
511 }
512
513 EFI_STATUS
514 ArmSetMemoryRegionNoExec (
515 IN EFI_PHYSICAL_ADDRESS BaseAddress,
516 IN UINT64 Length
517 )
518 {
519 UINT64 Val;
520
521 if (ArmReadCurrentEL () == AARCH64_EL1) {
522 Val = TT_PXN_MASK | TT_UXN_MASK;
523 } else {
524 Val = TT_XN_MASK;
525 }
526
527 return SetMemoryRegionAttribute (
528 BaseAddress,
529 Length,
530 Val,
531 ~TT_ADDRESS_MASK_BLOCK_ENTRY);
532 }
533
534 EFI_STATUS
535 ArmClearMemoryRegionNoExec (
536 IN EFI_PHYSICAL_ADDRESS BaseAddress,
537 IN UINT64 Length
538 )
539 {
540 UINT64 Mask;
541
542 // XN maps to UXN in the EL1&0 translation regime
543 Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);
544
545 return SetMemoryRegionAttribute (
546 BaseAddress,
547 Length,
548 0,
549 Mask);
550 }
551
552 EFI_STATUS
553 ArmSetMemoryRegionReadOnly (
554 IN EFI_PHYSICAL_ADDRESS BaseAddress,
555 IN UINT64 Length
556 )
557 {
558 return SetMemoryRegionAttribute (
559 BaseAddress,
560 Length,
561 TT_AP_RO_RO,
562 ~TT_ADDRESS_MASK_BLOCK_ENTRY);
563 }
564
565 EFI_STATUS
566 ArmClearMemoryRegionReadOnly (
567 IN EFI_PHYSICAL_ADDRESS BaseAddress,
568 IN UINT64 Length
569 )
570 {
571 return SetMemoryRegionAttribute (
572 BaseAddress,
573 Length,
574 TT_AP_RW_RW,
575 ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));
576 }
577
EFI_STATUS
EFIAPI
ArmConfigureMmu (
  IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable,
  OUT VOID                        **TranslationTableBase OPTIONAL,
  OUT UINTN                       *TranslationTableSize OPTIONAL
  )
{
  //
  // Build the translation tables for the regions described by MemoryTable
  // (terminated by an entry with Length == 0), program TCR/MAIR/TTBR0 and
  // enable the MMU and caches.
  //
  // On success, the optional outputs receive the root table address and its
  // size in bytes. Returns EFI_INVALID_PARAMETER, EFI_UNSUPPORTED (address
  // space too large or running at an unexpected EL), or
  // EFI_OUT_OF_RESOURCES on allocation failure.
  //
  VOID*       TranslationTable;
  UINT32      TranslationTableAttribute;
  UINT64      MaxAddress;
  UINTN       T0SZ;
  UINTN       RootTableEntryCount;
  UINT64      TCR;
  EFI_STATUS  Status;

  if(MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return EFI_INVALID_PARAMETER;
  }

  //
  // Limit the virtual address space to what we can actually use: UEFI
  // mandates a 1:1 mapping, so no point in making the virtual address
  // space larger than the physical address space. We also have to take
  // into account the architectural limitations that result from UEFI's
  // use of 4 KB pages.
  //
  MaxAddress = MIN (LShiftU64 (1ULL, ArmGetPhysicalAddressBits ()) - 1,
                    MAX_ALLOC_ADDRESS);

  // Lookup the Table Level to get the information
  LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);

  //
  // Set TCR that allows us to retrieve T0SZ in the subsequent functions
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    //Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return EFI_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    // (TCR_EL1 calls this field IPS rather than PS)
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return EFI_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return EFI_UNSUPPORTED;
  }

  //
  // Translation table walks are always cache coherent on ARMv8-A, so cache
  // maintenance on page tables is never needed. Since there is a risk of
  // loss of coherency when using mismatched attributes, and given that memory
  // is mapped cacheable except for extraordinary cases (such as non-coherent
  // DMA), have the page table walker perform cached accesses as well, and
  // assert below that that matches the attributes we use for CPU accesses to
  // the region.
  //
  TCR |= TCR_SH_INNER_SHAREABLE |
         TCR_RGN_OUTER_WRITE_BACK_ALLOC |
         TCR_RGN_INNER_WRITE_BACK_ALLOC;

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for translation table
  // (one 4 KB page is always enough for the root level:
  // RootTableEntryCount * sizeof (UINT64) <= 4 KB)
  TranslationTable = AllocatePages (1);
  if (TranslationTable == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }
  // We set TTBR0 just after allocating the table to retrieve its location from the subsequent
  // functions without needing to pass this value across the functions. The MMU is only enabled
  // after the translation tables are populated.
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof(UINT64);
  }

  ZeroMem (TranslationTable, RootTableEntryCount * sizeof(UINT64));

  // Disable MMU and caches. ArmDisableMmu() also invalidates the TLBs
  ArmDisableMmu ();
  ArmDisableDataCache ();
  ArmDisableInstructionCache ();

  // Make sure nothing sneaked into the cache
  ArmCleanInvalidateDataCache ();
  ArmInvalidateInstructionCache ();

  TranslationTableAttribute = TT_ATTR_INDX_INVALID;
  while (MemoryTable->Length != 0) {

    DEBUG_CODE_BEGIN ();
      // Find the memory attribute for the Translation Table, so we can
      // assert below that the table itself is mapped write-back
      if ((UINTN)TranslationTable >= MemoryTable->PhysicalBase &&
          (UINTN)TranslationTable + EFI_PAGE_SIZE <= MemoryTable->PhysicalBase +
                                                     MemoryTable->Length) {
        TranslationTableAttribute = MemoryTable->Attributes;
      }
    DEBUG_CODE_END ();

    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (EFI_ERROR (Status)) {
      goto FREE_TRANSLATION_TABLE;
    }
    MemoryTable++;
  }

  ASSERT (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK ||
          TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK);

  // Program MAIR so that the TT_ATTR_INDX_* indices used by the table
  // entries resolve to the intended memory types
  ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |                      // mapped to EFI_MEMORY_UC
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) | // mapped to EFI_MEMORY_WC
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) | // mapped to EFI_MEMORY_WT
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK));       // mapped to EFI_MEMORY_WB

  ArmDisableAlignmentCheck ();
  ArmEnableStackAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return EFI_SUCCESS;

FREE_TRANSLATION_TABLE:
  FreePages (TranslationTable, 1);
  return Status;
}
750
RETURN_STATUS
EFIAPI
ArmMmuBaseLibConstructor (
  VOID
  )
{
  //
  // Library constructor: make the break-before-make helper safe to call
  // with the MMU (and therefore the caches) disabled.
  //
  // ArmReplaceLiveTranslationEntrySize is a symbol exported by the
  // accompanying assembly file giving the helper's size in bytes.
  //
  extern UINT32 ArmReplaceLiveTranslationEntrySize;

  //
  // The ArmReplaceLiveTranslationEntry () helper function may be invoked
  // with the MMU off so we have to ensure that it gets cleaned to the PoC
  //
  WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry,
                           ArmReplaceLiveTranslationEntrySize);

  return RETURN_SUCCESS;
}