//
// Origin: ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c (mirror_edk2),
// at commit "ArmPkg/ArmMmuLib AARCH64: get rid of needless TLB invalidation".
//
1 /** @file
2 * File managing the MMU for ARMv8 architecture
3 *
4 * Copyright (c) 2011-2014, ARM Limited. All rights reserved.
5 * Copyright (c) 2016, Linaro Limited. All rights reserved.
6 * Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
7 *
8 * This program and the accompanying materials
9 * are licensed and made available under the terms and conditions of the BSD License
10 * which accompanies this distribution. The full text of the license may be found at
11 * http://opensource.org/licenses/bsd-license.php
12 *
13 * THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
15 *
16 **/
17
18 #include <Uefi.h>
19 #include <Chipset/AArch64.h>
20 #include <Library/BaseMemoryLib.h>
21 #include <Library/CacheMaintenanceLib.h>
22 #include <Library/MemoryAllocationLib.h>
23 #include <Library/ArmLib.h>
24 #include <Library/ArmMmuLib.h>
25 #include <Library/BaseLib.h>
26 #include <Library/DebugLib.h>
27
28 // We use this index definition to define an invalid block entry
29 #define TT_ATTR_INDX_INVALID ((UINT32)~0)
30
31 STATIC
32 UINT64
33 ArmMemoryAttributeToPageAttribute (
34 IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
35 )
36 {
37 switch (Attributes) {
38 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
39 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
40 return TT_ATTR_INDX_MEMORY_WRITE_BACK;
41
42 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
43 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
44 return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
45
46 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
47 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
48 return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
49
50 // Uncached and device mappings are treated as outer shareable by default,
51 case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
52 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
53 return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
54
55 default:
56 ASSERT(0);
57 case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
58 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
59 if (ArmReadCurrentEL () == AARCH64_EL2)
60 return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
61 else
62 return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
63 }
64 }
65
66 UINT64
67 PageAttributeToGcdAttribute (
68 IN UINT64 PageAttributes
69 )
70 {
71 UINT64 GcdAttributes;
72
73 switch (PageAttributes & TT_ATTR_INDX_MASK) {
74 case TT_ATTR_INDX_DEVICE_MEMORY:
75 GcdAttributes = EFI_MEMORY_UC;
76 break;
77 case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:
78 GcdAttributes = EFI_MEMORY_WC;
79 break;
80 case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:
81 GcdAttributes = EFI_MEMORY_WT;
82 break;
83 case TT_ATTR_INDX_MEMORY_WRITE_BACK:
84 GcdAttributes = EFI_MEMORY_WB;
85 break;
86 default:
87 DEBUG ((EFI_D_ERROR, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes));
88 ASSERT (0);
89 // The Global Coherency Domain (GCD) value is defined as a bit set.
90 // Returning 0 means no attribute has been set.
91 GcdAttributes = 0;
92 }
93
94 // Determine protection attributes
95 if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) || ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {
96 // Read only cases map to write-protect
97 GcdAttributes |= EFI_MEMORY_RO;
98 }
99
100 // Process eXecute Never attribute
101 if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0 ) {
102 GcdAttributes |= EFI_MEMORY_XP;
103 }
104
105 return GcdAttributes;
106 }
107
108 #define MIN_T0SZ 16
109 #define BITS_PER_LEVEL 9
110
111 VOID
112 GetRootTranslationTableInfo (
113 IN UINTN T0SZ,
114 OUT UINTN *TableLevel,
115 OUT UINTN *TableEntryCount
116 )
117 {
118 // Get the level of the root table
119 if (TableLevel) {
120 *TableLevel = (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;
121 }
122
123 if (TableEntryCount) {
124 *TableEntryCount = 1UL << (BITS_PER_LEVEL - (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL);
125 }
126 }
127
128 STATIC
129 VOID
130 ReplaceLiveEntry (
131 IN UINT64 *Entry,
132 IN UINT64 Value,
133 IN UINT64 RegionStart
134 )
135 {
136 if (!ArmMmuEnabled ()) {
137 *Entry = Value;
138 } else {
139 ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart);
140 }
141 }
142
143 STATIC
144 VOID
145 LookupAddresstoRootTable (
146 IN UINT64 MaxAddress,
147 OUT UINTN *T0SZ,
148 OUT UINTN *TableEntryCount
149 )
150 {
151 UINTN TopBit;
152
153 // Check the parameters are not NULL
154 ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));
155
156 // Look for the highest bit set in MaxAddress
157 for (TopBit = 63; TopBit != 0; TopBit--) {
158 if ((1ULL << TopBit) & MaxAddress) {
159 // MaxAddress top bit is found
160 TopBit = TopBit + 1;
161 break;
162 }
163 }
164 ASSERT (TopBit != 0);
165
166 // Calculate T0SZ from the top bit of the MaxAddress
167 *T0SZ = 64 - TopBit;
168
169 // Get the Table info from T0SZ
170 GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);
171 }
172
/**
  Locate the translation table entry that covers RegionStart at the deepest
  level compatible with both the region's alignment and the requested
  mapping size, creating or splitting intermediate tables on the way down.

  Missing levels are handled in two ways:
    - an invalid entry above the target level is replaced by a freshly
      allocated, zeroed table;
    - an existing block entry above the target level is split into a table
      of smaller blocks that reproduce its attributes, installed with
      break-before-make (via ReplaceLiveEntry) when the MMU is on.

  @param[in]      RootTable       Root of the translation table hierarchy.
  @param[in]      RegionStart     Start of the region; must be 4 KB aligned.
  @param[out]     TableLevel      Level of the table holding the returned entry.
  @param[in,out]  BlockEntrySize  On input: requested mapping size (4 KB
                                  aligned, non-zero). On output: the block
                                  size at the returned entry's level.
  @param[out]     LastBlockEntry  Last valid entry of the returned table.

  @return Pointer to the entry covering RegionStart, or NULL on invalid
          parameters or when a table page could not be allocated.
**/
STATIC
UINT64*
GetBlockEntryListFromAddress (
  IN UINT64 *RootTable,
  IN UINT64 RegionStart,
  OUT UINTN *TableLevel,
  IN OUT UINT64 *BlockEntrySize,
  OUT UINT64 **LastBlockEntry
  )
{
  UINTN RootTableLevel;
  UINTN RootTableEntryCount;
  UINT64 *TranslationTable;
  UINT64 *BlockEntry;
  UINT64 *SubTableBlockEntry;
  UINT64 BlockEntryAddress;
  UINTN BaseAddressAlignment;
  UINTN PageLevel;
  UINTN Index;
  UINTN IndexLevel;
  UINTN T0SZ;
  UINT64 Attributes;
  UINT64 TableAttributes;

  // Initialize variable
  BlockEntry = NULL;

  // Ensure the parameters are valid
  if (!(TableLevel && BlockEntrySize && LastBlockEntry)) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // Ensure the Region is aligned on 4KB boundary
  if ((RegionStart & (SIZE_4KB - 1)) != 0) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // Ensure the required size is aligned on 4KB boundary and not 0
  if ((*BlockEntrySize & (SIZE_4KB - 1)) != 0 || *BlockEntrySize == 0) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // Read T0SZ from the live TCR so the walk matches the MMU configuration
  T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
  // Get the Table info from T0SZ
  GetRootTranslationTableInfo (T0SZ, &RootTableLevel, &RootTableEntryCount);

  // If the start address is 0x0 then we use the size of the region to identify the alignment
  if (RegionStart == 0) {
    // Identify the highest possible alignment for the Region Size
    BaseAddressAlignment = LowBitSet64 (*BlockEntrySize);
  } else {
    // Identify the highest possible alignment for the Base Address
    BaseAddressAlignment = LowBitSet64 (RegionStart);
  }

  // Identify the Page Level the RegionStart must belong to. Note that PageLevel
  // should be at least 1 since block translations are not supported at level 0
  PageLevel = MAX (3 - ((BaseAddressAlignment - 12) / 9), 1);

  // If the required size is smaller than the current block size then we need to go to the page below.
  // The PageLevel was calculated on the Base Address alignment but did not take in account the alignment
  // of the allocation size
  while (*BlockEntrySize < TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel)) {
    // It does not fit so we need to go a page level above
    PageLevel++;
  }

  //
  // Get the Table Descriptor for the corresponding PageLevel. We need to decompose RegionStart to get appropriate entries
  //

  TranslationTable = RootTable;
  for (IndexLevel = RootTableLevel; IndexLevel <= PageLevel; IndexLevel++) {
    BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel, RegionStart);

    if ((IndexLevel != 3) && ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY)) {
      // Go to the next table
      TranslationTable = (UINT64*)(*BlockEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE);

      // If we are at the last level then update the last level to next level
      if (IndexLevel == PageLevel) {
        // Enter the next level
        PageLevel++;
      }
    } else if ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
      // If we are not at the last level then we need to split this BlockEntry
      if (IndexLevel != PageLevel) {
        // Retrieve the attributes from the block entry
        Attributes = *BlockEntry & TT_ATTRIBUTES_MASK;

        // Convert the block entry attributes into Table descriptor attributes
        TableAttributes = TT_TABLE_AP_NO_PERMISSION;
        if (Attributes & TT_NS) {
          TableAttributes = TT_TABLE_NS;
        }

        // Get the address corresponding at this entry
        BlockEntryAddress = RegionStart;
        BlockEntryAddress = BlockEntryAddress >> TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
        // Shift back to right to set zero before the effective address
        BlockEntryAddress = BlockEntryAddress << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);

        // Set the correct entry type for the next page level
        if ((IndexLevel + 1) == 3) {
          Attributes |= TT_TYPE_BLOCK_ENTRY_LEVEL3;
        } else {
          Attributes |= TT_TYPE_BLOCK_ENTRY;
        }

        // Create a new translation table
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return NULL;
        }

        // Populate the newly created lower level table: each entry maps
        // the corresponding slice of the block being split, with the
        // block's original attributes.
        SubTableBlockEntry = TranslationTable;
        for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
          *SubTableBlockEntry = Attributes | (BlockEntryAddress + (Index << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel + 1)));
          SubTableBlockEntry++;
        }

        // Fill the BlockEntry with the new TranslationTable
        // (break-before-make via ReplaceLiveEntry when the MMU is on)
        ReplaceLiveEntry (BlockEntry,
          (UINTN)TranslationTable | TableAttributes | TT_TYPE_TABLE_ENTRY,
          RegionStart);
      }
    } else {
      if (IndexLevel != PageLevel) {
        //
        // Case when we have an Invalid Entry and we are at a page level above of the one targetted.
        //

        // Create a new translation table
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return NULL;
        }

        ZeroMem (TranslationTable, TT_ENTRY_COUNT * sizeof(UINT64));

        // Fill the new BlockEntry with the TranslationTable
        *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TT_TYPE_TABLE_ENTRY;
      }
    }
  }

  // Expose the found PageLevel to the caller
  *TableLevel = PageLevel;

  // Now, we have the Table Level we can get the Block Size associated to this table
  *BlockEntrySize = TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel);

  // The last block of the root table depends on the number of entry in this table,
  // otherwise it is always the (TT_ENTRY_COUNT - 1)th entry in the table.
  *LastBlockEntry = TT_LAST_BLOCK_ADDRESS(TranslationTable,
      (PageLevel == RootTableLevel) ? RootTableEntryCount : TT_ENTRY_COUNT);

  return BlockEntry;
}
336
/**
  Map the region [RegionStart, RegionStart + RegionLength) with the given
  attributes, using the largest block sizes the region's alignment permits.

  Entries are rewritten in place: the pre-existing bits selected by
  BlockEntryMask are kept, and Attributes plus the output address are OR-ed
  in. Pass BlockEntryMask == 0 to replace entries wholesale, or a mask that
  preserves everything but the bits being changed to update permissions only.

  @param[in]  RootTable       Root translation table.
  @param[in]  RegionStart     Start of the region (4 KB aligned).
  @param[in]  RegionLength    Length in bytes; must be 4 KB aligned, non-zero.
  @param[in]  Attributes      Attribute bits to set in each entry.
  @param[in]  BlockEntryMask  Mask of pre-existing entry bits to preserve.

  @retval EFI_SUCCESS            Region mapped/updated.
  @retval EFI_INVALID_PARAMETER  RegionLength is zero or not 4 KB aligned.
  @retval EFI_OUT_OF_RESOURCES   A page table page could not be allocated.
**/
STATIC
EFI_STATUS
UpdateRegionMapping (
  IN UINT64 *RootTable,
  IN UINT64 RegionStart,
  IN UINT64 RegionLength,
  IN UINT64 Attributes,
  IN UINT64 BlockEntryMask
  )
{
  UINT32 Type;
  UINT64 *BlockEntry;
  UINT64 *LastBlockEntry;
  UINT64 BlockEntrySize;
  UINTN TableLevel;

  // Ensure the Length is aligned on 4KB boundary
  if ((RegionLength == 0) || ((RegionLength & (SIZE_4KB - 1)) != 0)) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return EFI_INVALID_PARAMETER;
  }

  do {
    // Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor
    // such as the the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor
    BlockEntrySize = RegionLength;
    BlockEntry = GetBlockEntryListFromAddress (RootTable, RegionStart, &TableLevel, &BlockEntrySize, &LastBlockEntry);
    if (BlockEntry == NULL) {
      // GetBlockEntryListFromAddress() return NULL when it fails to allocate new pages from the Translation Tables
      return EFI_OUT_OF_RESOURCES;
    }

    // Level 3 page descriptors use a different type encoding than blocks
    if (TableLevel != 3) {
      Type = TT_TYPE_BLOCK_ENTRY;
    } else {
      Type = TT_TYPE_BLOCK_ENTRY_LEVEL3;
    }

    do {
      // Fill the Block Entry with attribute and output block address
      *BlockEntry &= BlockEntryMask;
      *BlockEntry |= (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;

      // Invalidate any TLB entry that may still cache the old mapping
      ArmUpdateTranslationTableEntry (BlockEntry, (VOID *)RegionStart);

      // Go to the next BlockEntry
      RegionStart += BlockEntrySize;
      RegionLength -= BlockEntrySize;
      BlockEntry++;

      // Break the inner loop when next block is a table
      // Rerun GetBlockEntryListFromAddress to avoid page table memory leak
      if (TableLevel != 3 && BlockEntry <= LastBlockEntry &&
          (*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
        break;
      }
    } while ((RegionLength >= BlockEntrySize) && (BlockEntry <= LastBlockEntry));
  } while (RegionLength != 0);

  return EFI_SUCCESS;
}
398
399 STATIC
400 EFI_STATUS
401 FillTranslationTable (
402 IN UINT64 *RootTable,
403 IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion
404 )
405 {
406 return UpdateRegionMapping (
407 RootTable,
408 MemoryRegion->VirtualBase,
409 MemoryRegion->Length,
410 ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
411 0
412 );
413 }
414
415 STATIC
416 UINT64
417 GcdAttributeToPageAttribute (
418 IN UINT64 GcdAttributes
419 )
420 {
421 UINT64 PageAttributes;
422
423 switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) {
424 case EFI_MEMORY_UC:
425 PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
426 break;
427 case EFI_MEMORY_WC:
428 PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
429 break;
430 case EFI_MEMORY_WT:
431 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
432 break;
433 case EFI_MEMORY_WB:
434 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
435 break;
436 default:
437 PageAttributes = TT_ATTR_INDX_MASK;
438 break;
439 }
440
441 if ((GcdAttributes & EFI_MEMORY_XP) != 0 ||
442 (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC) {
443 if (ArmReadCurrentEL () == AARCH64_EL2) {
444 PageAttributes |= TT_XN_MASK;
445 } else {
446 PageAttributes |= TT_UXN_MASK | TT_PXN_MASK;
447 }
448 }
449
450 if ((GcdAttributes & EFI_MEMORY_RO) != 0) {
451 PageAttributes |= TT_AP_RO_RO;
452 }
453
454 return PageAttributes | TT_AF;
455 }
456
457 EFI_STATUS
458 ArmSetMemoryAttributes (
459 IN EFI_PHYSICAL_ADDRESS BaseAddress,
460 IN UINT64 Length,
461 IN UINT64 Attributes
462 )
463 {
464 EFI_STATUS Status;
465 UINT64 *TranslationTable;
466 UINT64 PageAttributes;
467 UINT64 PageAttributeMask;
468
469 PageAttributes = GcdAttributeToPageAttribute (Attributes);
470 PageAttributeMask = 0;
471
472 if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {
473 //
474 // No memory type was set in Attributes, so we are going to update the
475 // permissions only.
476 //
477 PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;
478 PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |
479 TT_PXN_MASK | TT_XN_MASK);
480 }
481
482 TranslationTable = ArmGetTTBR0BaseAddress ();
483
484 Status = UpdateRegionMapping (
485 TranslationTable,
486 BaseAddress,
487 Length,
488 PageAttributes,
489 PageAttributeMask);
490 if (EFI_ERROR (Status)) {
491 return Status;
492 }
493
494 return EFI_SUCCESS;
495 }
496
497 STATIC
498 EFI_STATUS
499 SetMemoryRegionAttribute (
500 IN EFI_PHYSICAL_ADDRESS BaseAddress,
501 IN UINT64 Length,
502 IN UINT64 Attributes,
503 IN UINT64 BlockEntryMask
504 )
505 {
506 EFI_STATUS Status;
507 UINT64 *RootTable;
508
509 RootTable = ArmGetTTBR0BaseAddress ();
510
511 Status = UpdateRegionMapping (RootTable, BaseAddress, Length, Attributes, BlockEntryMask);
512 if (EFI_ERROR (Status)) {
513 return Status;
514 }
515
516 return EFI_SUCCESS;
517 }
518
519 EFI_STATUS
520 ArmSetMemoryRegionNoExec (
521 IN EFI_PHYSICAL_ADDRESS BaseAddress,
522 IN UINT64 Length
523 )
524 {
525 UINT64 Val;
526
527 if (ArmReadCurrentEL () == AARCH64_EL1) {
528 Val = TT_PXN_MASK | TT_UXN_MASK;
529 } else {
530 Val = TT_XN_MASK;
531 }
532
533 return SetMemoryRegionAttribute (
534 BaseAddress,
535 Length,
536 Val,
537 ~TT_ADDRESS_MASK_BLOCK_ENTRY);
538 }
539
540 EFI_STATUS
541 ArmClearMemoryRegionNoExec (
542 IN EFI_PHYSICAL_ADDRESS BaseAddress,
543 IN UINT64 Length
544 )
545 {
546 UINT64 Mask;
547
548 // XN maps to UXN in the EL1&0 translation regime
549 Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);
550
551 return SetMemoryRegionAttribute (
552 BaseAddress,
553 Length,
554 0,
555 Mask);
556 }
557
558 EFI_STATUS
559 ArmSetMemoryRegionReadOnly (
560 IN EFI_PHYSICAL_ADDRESS BaseAddress,
561 IN UINT64 Length
562 )
563 {
564 return SetMemoryRegionAttribute (
565 BaseAddress,
566 Length,
567 TT_AP_RO_RO,
568 ~TT_ADDRESS_MASK_BLOCK_ENTRY);
569 }
570
571 EFI_STATUS
572 ArmClearMemoryRegionReadOnly (
573 IN EFI_PHYSICAL_ADDRESS BaseAddress,
574 IN UINT64 Length
575 )
576 {
577 return SetMemoryRegionAttribute (
578 BaseAddress,
579 Length,
580 TT_AP_RW_RW,
581 ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));
582 }
583
/**
  Configure the AArch64 MMU: program TCR/MAIR, build the translation tables
  from MemoryTable, and enable the MMU and caches.

  The sequence is order-sensitive: TCR is programmed before the tables are
  built (the helpers read T0SZ back from TCR), TTBR0 is set right after the
  root table is allocated (the helpers read it back), and the MMU/caches
  are disabled while the tables are being populated.

  @param[in]   MemoryTable           Array of region descriptors, terminated
                                     by an entry with Length == 0.
  @param[out]  TranslationTableBase  If not NULL, receives the root table
                                     address.
  @param[out]  TranslationTableSize  If not NULL, receives the root table
                                     size in bytes.

  @retval EFI_SUCCESS            MMU configured and enabled.
  @retval EFI_INVALID_PARAMETER  MemoryTable is NULL.
  @retval EFI_UNSUPPORTED        Address space too large, or running at an
                                 unsupported exception level (EL3).
  @retval EFI_OUT_OF_RESOURCES   Table allocation failed.
**/
EFI_STATUS
EFIAPI
ArmConfigureMmu (
  IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable,
  OUT VOID **TranslationTableBase OPTIONAL,
  OUT UINTN *TranslationTableSize OPTIONAL
  )
{
  VOID* TranslationTable;
  UINT32 TranslationTableAttribute;
  UINT64 MaxAddress;
  UINTN T0SZ;
  UINTN RootTableEntryCount;
  UINT64 TCR;
  EFI_STATUS Status;

  if(MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return EFI_INVALID_PARAMETER;
  }

  //
  // Limit the virtual address space to what we can actually use: UEFI
  // mandates a 1:1 mapping, so no point in making the virtual address
  // space larger than the physical address space. We also have to take
  // into account the architectural limitations that result from UEFI's
  // use of 4 KB pages.
  //
  MaxAddress = MIN (LShiftU64 (1ULL, ArmGetPhysicalAddressBits ()) - 1,
                    MAX_ALLOC_ADDRESS);

  // Lookup the Table Level to get the information
  LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);

  //
  // Set TCR that allows us to retrieve T0SZ in the subsequent functions
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    //Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return EFI_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return EFI_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return EFI_UNSUPPORTED;
  }

  //
  // Translation table walks are always cache coherent on ARMv8-A, so cache
  // maintenance on page tables is never needed. Since there is a risk of
  // loss of coherency when using mismatched attributes, and given that memory
  // is mapped cacheable except for extraordinary cases (such as non-coherent
  // DMA), have the page table walker perform cached accesses as well, and
  // assert below that that matches the attributes we use for CPU accesses to
  // the region.
  //
  TCR |= TCR_SH_INNER_SHAREABLE |
         TCR_RGN_OUTER_WRITE_BACK_ALLOC |
         TCR_RGN_INNER_WRITE_BACK_ALLOC;

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for translation table
  TranslationTable = AllocatePages (1);
  if (TranslationTable == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }
  // We set TTBR0 just after allocating the table to retrieve its location from the subsequent
  // functions without needing to pass this value across the functions. The MMU is only enabled
  // after the translation tables are populated.
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof(UINT64);
  }

  ZeroMem (TranslationTable, RootTableEntryCount * sizeof(UINT64));

  // Disable MMU and caches. ArmDisableMmu() also invalidates the TLBs
  ArmDisableMmu ();
  ArmDisableDataCache ();
  ArmDisableInstructionCache ();

  // Make sure nothing sneaked into the cache
  ArmCleanInvalidateDataCache ();
  ArmInvalidateInstructionCache ();

  // Walk the region list, mapping each region and (in DEBUG builds only)
  // remembering the attribute of whichever region covers the root table.
  TranslationTableAttribute = TT_ATTR_INDX_INVALID;
  while (MemoryTable->Length != 0) {

    DEBUG_CODE_BEGIN ();
    // Find the memory attribute for the Translation Table
    if ((UINTN)TranslationTable >= MemoryTable->PhysicalBase &&
        (UINTN)TranslationTable + EFI_PAGE_SIZE <= MemoryTable->PhysicalBase +
                                                   MemoryTable->Length) {
      TranslationTableAttribute = MemoryTable->Attributes;
    }
    DEBUG_CODE_END ();

    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (EFI_ERROR (Status)) {
      goto FREE_TRANSLATION_TABLE;
    }
    MemoryTable++;
  }

  // The table walker accesses the tables write-back cached (see TCR above),
  // so the CPU mapping of the table region must be write-back as well.
  ASSERT (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK ||
          TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK);

  ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |                      // mapped to EFI_MEMORY_UC
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) | // mapped to EFI_MEMORY_WC
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) | // mapped to EFI_MEMORY_WT
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK));       // mapped to EFI_MEMORY_WB

  ArmDisableAlignmentCheck ();
  ArmEnableStackAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return EFI_SUCCESS;

FREE_TRANSLATION_TABLE:
  FreePages (TranslationTable, 1);
  return Status;
}
756
/**
  Library constructor: clean the ArmReplaceLiveTranslationEntry () helper
  to the Point of Coherency.

  @return RETURN_SUCCESS always.
**/
RETURN_STATUS
EFIAPI
ArmMmuBaseLibConstructor (
  VOID
  )
{
  // Size symbol exported by the accompanying assembly file
  extern UINT32 ArmReplaceLiveTranslationEntrySize;

  //
  // The ArmReplaceLiveTranslationEntry () helper function may be invoked
  // with the MMU off so we have to ensure that it gets cleaned to the PoC
  //
  WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry,
                           ArmReplaceLiveTranslationEntrySize);

  return RETURN_SUCCESS;
}