/** @file
*  File managing the MMU for ARMv8 architecture
*
*  Copyright (c) 2011-2020, ARM Limited. All rights reserved.
*  Copyright (c) 2016, Linaro Limited. All rights reserved.
*  Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
*
*  SPDX-License-Identifier: BSD-2-Clause-Patent
*
**/

#include <Uefi.h>
#include <Chipset/AArch64.h>
#include <Library/BaseMemoryLib.h>
#include <Library/CacheMaintenanceLib.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/ArmLib.h>
#include <Library/ArmMmuLib.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>

STATIC
UINT64
ArmMemoryAttributeToPageAttribute (
  IN ARM_MEMORY_REGION_ATTRIBUTES  Attributes
  )
{
  switch (Attributes) {
    case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
      return TT_ATTR_INDX_MEMORY_WRITE_BACK;

    case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
      return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;

    case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
      return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;

    // Uncached and device mappings are treated as outer shareable by default.
    case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
      return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;

    default:
      ASSERT (0);
    case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
      if (ArmReadCurrentEL () == AARCH64_EL2) {
        return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
      } else {
        return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
      }
  }
}

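//
// With the 4 KB translation granule used by UEFI, each translation table is
// a single 4 KB page holding 512 64-bit entries, so every table level
// resolves 9 bits of the virtual address. MIN_T0SZ == 16 corresponds to the
// largest (48-bit) virtual address space that the 4 KB granule supports.
//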
#define MIN_T0SZ        16
#define BITS_PER_LEVEL  9
#define MAX_VA_BITS     48

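//
// When T0SZ does not land on a 9-bit level boundary, the root table is
// truncated: it only needs enough entries to cover the configured VA space.
// As an illustrative calculation: T0SZ == 28 gives a 36-bit VA space and a
// walk starting at level 1, where each entry covers 30 bits (1 GB), so the
// root table holds 512 >> ((28 - 16) % 9) == 64 entries.
//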
STATIC
UINTN
GetRootTableEntryCount (
  IN UINTN  T0SZ
  )
{
  return TT_ENTRY_COUNT >> (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL;
}

STATIC
UINTN
GetRootTableLevel (
  IN UINTN  T0SZ
  )
{
  return (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;
}

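//
// Modifying a live translation entry (one that may be in active use by the
// page table walker) is subject to the architecture's break-before-make
// rules, so such updates are routed through the
// ArmReplaceLiveTranslationEntry () assembly helper. A plain store followed
// by TLB maintenance suffices while the MMU is still off, or when the entry
// being replaced is not a live block mapping.
//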
STATIC
VOID
ReplaceTableEntry (
  IN UINT64   *Entry,
  IN UINT64   Value,
  IN UINT64   RegionStart,
  IN BOOLEAN  IsLiveBlockMapping
  )
{
  if (!ArmMmuEnabled () || !IsLiveBlockMapping) {
    *Entry = Value;
    ArmUpdateTranslationTableEntry (Entry, (VOID *)(UINTN)RegionStart);
  } else {
    ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart);
  }
}

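//
// Free a page table and, below level 3, any next-level tables it refers to.
// Level 3 entries can never reference further tables, so the recursion
// bottoms out there and only the table page itself is freed.
//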
STATIC
VOID
FreePageTablesRecursive (
  IN UINT64  *TranslationTable,
  IN UINTN   Level
  )
{
  UINTN  Index;

  ASSERT (Level <= 3);

  if (Level < 3) {
    for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
      if ((TranslationTable[Index] & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
        FreePageTablesRecursive (
          (VOID *)(UINTN)(TranslationTable[Index] &
                          TT_ADDRESS_MASK_BLOCK_ENTRY),
          Level + 1
          );
      }
    }
  }

  FreePages (TranslationTable, 1);
}

STATIC
BOOLEAN
IsBlockEntry (
  IN UINT64  Entry,
  IN UINTN   Level
  )
{
  if (Level == 3) {
    return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY_LEVEL3;
  }

  return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY;
}

STATIC
BOOLEAN
IsTableEntry (
  IN UINT64  Entry,
  IN UINTN   Level
  )
{
  if (Level == 3) {
    //
    // TT_TYPE_TABLE_ENTRY aliases TT_TYPE_BLOCK_ENTRY_LEVEL3
    // so we need to take the level into account as well.
    //
    return FALSE;
  }

  return (Entry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY;
}

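//
// Apply the attribute set/clear masks to [RegionStart, RegionEnd) by walking
// the translation tables recursively. At each level, the region is processed
// in chunks clamped to that level's block size: chunks that line up with a
// block boundary are mapped (or remapped) with a single block entry, while
// misaligned chunks descend into a next-level table, splitting an existing
// block entry into a table first if necessary.
//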
STATIC
EFI_STATUS
UpdateRegionMappingRecursive (
  IN UINT64  RegionStart,
  IN UINT64  RegionEnd,
  IN UINT64  AttributeSetMask,
  IN UINT64  AttributeClearMask,
  IN UINT64  *PageTable,
  IN UINTN   Level
  )
{
  UINTN       BlockShift;
  UINT64      BlockMask;
  UINT64      BlockEnd;
  UINT64      *Entry;
  UINT64      EntryValue;
  VOID        *TranslationTable;
  EFI_STATUS  Status;

  ASSERT (((RegionStart | RegionEnd) & EFI_PAGE_MASK) == 0);

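  //
  // A level-N entry covers a naturally aligned block of 2^(64 - BlockShift)
  // bytes, where BlockShift == (N + 1) * 9 + 16 for the 4 KB granule: 52 at
  // level 3 (4 KB pages), 43 at level 2 (2 MB blocks), 34 at level 1 (1 GB
  // blocks). BlockMask is the offset mask within such a block.
  //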
  BlockShift = (Level + 1) * BITS_PER_LEVEL + MIN_T0SZ;
  BlockMask  = MAX_UINT64 >> BlockShift;

  DEBUG ((
    DEBUG_VERBOSE,
    "%a(%d): %llx - %llx set %lx clr %lx\n",
    __FUNCTION__,
    Level,
    RegionStart,
    RegionEnd,
    AttributeSetMask,
    AttributeClearMask
    ));

  for ( ; RegionStart < RegionEnd; RegionStart = BlockEnd) {
    BlockEnd = MIN (RegionEnd, (RegionStart | BlockMask) + 1);
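    //
    // Shifting right by (64 - BlockShift) discards the offset within this
    // level's block, and masking with TT_ENTRY_COUNT - 1 keeps the 9 index
    // bits that select the entry within this 512-entry table.
    //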
    Entry    = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)];

    //
    // If RegionStart or BlockEnd is not aligned to the block size at this
    // level, we will have to create a table mapping in order to map less
    // than a block, and recurse to create the block or page entries at
    // the next level. No block mappings are allowed at all at level 0,
    // so in that case, we have to recurse unconditionally.
    // If we are changing a table entry and the AttributeClearMask is non-zero,
    // we cannot replace it with a block entry without potentially losing
    // attribute information, so keep the table entry in that case.
    //
    if ((Level == 0) || (((RegionStart | BlockEnd) & BlockMask) != 0) ||
        (IsTableEntry (*Entry, Level) && (AttributeClearMask != 0)))
    {
      ASSERT (Level < 3);

      if (!IsTableEntry (*Entry, Level)) {
        //
        // No table entry exists yet, so we need to allocate a page table
        // for the next level.
        //
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return EFI_OUT_OF_RESOURCES;
        }

        if (!ArmMmuEnabled ()) {
          //
          // Make sure we are not inadvertently hitting in the caches
          // when populating the page tables.
          //
          InvalidateDataCacheRange (TranslationTable, EFI_PAGE_SIZE);
        }

        ZeroMem (TranslationTable, EFI_PAGE_SIZE);

        if (IsBlockEntry (*Entry, Level)) {
          //
          // We are splitting an existing block entry, so we have to populate
          // the new table with the attributes of the block entry it replaces.
          //
          Status = UpdateRegionMappingRecursive (
                     RegionStart & ~BlockMask,
                     (RegionStart | BlockMask) + 1,
                     *Entry & TT_ATTRIBUTES_MASK,
                     0,
                     TranslationTable,
                     Level + 1
                     );
          if (EFI_ERROR (Status)) {
            //
            // The range we passed to UpdateRegionMappingRecursive () is block
            // aligned, so it is guaranteed that no further pages were allocated
            // by it, and so we only have to free the page we allocated here.
            //
            FreePages (TranslationTable, 1);
            return Status;
          }
        }
      } else {
        TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
      }

      //
      // Recurse to the next level
      //
      Status = UpdateRegionMappingRecursive (
                 RegionStart,
                 BlockEnd,
                 AttributeSetMask,
                 AttributeClearMask,
                 TranslationTable,
                 Level + 1
                 );
      if (EFI_ERROR (Status)) {
        if (!IsTableEntry (*Entry, Level)) {
          //
          // We are creating a new table entry, so on failure, we can free all
          // allocations we made recursively, given that the whole subhierarchy
          // has not been wired into the live page tables yet. (This is not
          // possible for existing table entries, since we cannot revert the
          // modifications we made to the subhierarchy it represents.)
          //
          FreePageTablesRecursive (TranslationTable, Level + 1);
        }

        return Status;
      }

      if (!IsTableEntry (*Entry, Level)) {
        EntryValue = (UINTN)TranslationTable | TT_TYPE_TABLE_ENTRY;
        ReplaceTableEntry (
          Entry,
          EntryValue,
          RegionStart,
          IsBlockEntry (*Entry, Level)
          );
      }
    } else {
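      //
      // Merge the new attributes into the existing entry. Note that
      // AttributeClearMask is applied as an AND mask: only the bits that are
      // set in it are preserved from the old entry (callers pass the
      // complement of the attributes they want cleared, or zero for a fresh
      // mapping).
      //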
      EntryValue  = (*Entry & AttributeClearMask) | AttributeSetMask;
      EntryValue |= RegionStart;
      EntryValue |= (Level == 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
                                 : TT_TYPE_BLOCK_ENTRY;

      if (IsTableEntry (*Entry, Level)) {
        //
        // We are replacing a table entry with a block entry. This is only
        // possible if we are keeping none of the original attributes.
        // We can free the table entry's page table, and all the ones below
        // it, since we are dropping the only possible reference to it.
        //
        ASSERT (AttributeClearMask == 0);
        TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
        ReplaceTableEntry (Entry, EntryValue, RegionStart, TRUE);
        FreePageTablesRecursive (TranslationTable, Level + 1);
      } else {
        ReplaceTableEntry (Entry, EntryValue, RegionStart, FALSE);
      }
    }
  }

  return EFI_SUCCESS;
}

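//
// Entry point for all mapping updates: it derives the starting level of the
// walk from the live TCR.T0SZ value, which is why ArmConfigureMmu ()
// programs TCR before the translation tables are populated.
//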
STATIC
EFI_STATUS
UpdateRegionMapping (
  IN UINT64  RegionStart,
  IN UINT64  RegionLength,
  IN UINT64  AttributeSetMask,
  IN UINT64  AttributeClearMask
  )
{
  UINTN  T0SZ;

  if (((RegionStart | RegionLength) & EFI_PAGE_MASK) != 0) {
    return EFI_INVALID_PARAMETER;
  }

  T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;

  return UpdateRegionMappingRecursive (
           RegionStart,
           RegionStart + RegionLength,
           AttributeSetMask,
           AttributeClearMask,
           ArmGetTTBR0BaseAddress (),
           GetRootTableLevel (T0SZ)
           );
}

STATIC
EFI_STATUS
FillTranslationTable (
  IN UINT64                        *RootTable,
  IN ARM_MEMORY_REGION_DESCRIPTOR  *MemoryRegion
  )
{
  return UpdateRegionMapping (
           MemoryRegion->VirtualBase,
           MemoryRegion->Length,
           ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
           0
           );
}

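//
// Note that EFI_MEMORY_UC regions are mapped as device memory, and device
// mappings are additionally made non-executable below, since device memory
// is unsuitable for instruction fetch.
//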
STATIC
UINT64
GcdAttributeToPageAttribute (
  IN UINT64  GcdAttributes
  )
{
  UINT64  PageAttributes;

  switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) {
    case EFI_MEMORY_UC:
      PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
      break;
    case EFI_MEMORY_WC:
      PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
      break;
    case EFI_MEMORY_WT:
      PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
      break;
    case EFI_MEMORY_WB:
      PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
      break;
    default:
      PageAttributes = TT_ATTR_INDX_MASK;
      break;
  }

  if (((GcdAttributes & EFI_MEMORY_XP) != 0) ||
      ((GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC))
  {
    if (ArmReadCurrentEL () == AARCH64_EL2) {
      PageAttributes |= TT_XN_MASK;
    } else {
      PageAttributes |= TT_UXN_MASK | TT_PXN_MASK;
    }
  }

  if ((GcdAttributes & EFI_MEMORY_RO) != 0) {
    PageAttributes |= TT_AP_NO_RO;
  }

  return PageAttributes | TT_AF;
}

EFI_STATUS
ArmSetMemoryAttributes (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length,
  IN UINT64                Attributes
  )
{
  UINT64  PageAttributes;
  UINT64  PageAttributeMask;

  PageAttributes    = GcdAttributeToPageAttribute (Attributes);
  PageAttributeMask = 0;

  if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {
    //
    // No memory type was set in Attributes, so we are going to update the
    // permissions only.
    //
    PageAttributes   &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;
    PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |
                          TT_PXN_MASK | TT_XN_MASK);
  }

  return UpdateRegionMapping (
           BaseAddress,
           Length,
           PageAttributes,
           PageAttributeMask
           );
}

STATIC
EFI_STATUS
SetMemoryRegionAttribute (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length,
  IN UINT64                Attributes,
  IN UINT64                BlockEntryMask
  )
{
  return UpdateRegionMapping (BaseAddress, Length, Attributes, BlockEntryMask);
}

EFI_STATUS
ArmSetMemoryRegionNoExec (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  UINT64  Val;

  if (ArmReadCurrentEL () == AARCH64_EL1) {
    Val = TT_PXN_MASK | TT_UXN_MASK;
  } else {
    Val = TT_XN_MASK;
  }

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           Val,
           ~TT_ADDRESS_MASK_BLOCK_ENTRY
           );
}

EFI_STATUS
ArmClearMemoryRegionNoExec (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  UINT64  Mask;

  // XN maps to UXN in the EL1&0 translation regime
  Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           0,
           Mask
           );
}

EFI_STATUS
ArmSetMemoryRegionReadOnly (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           TT_AP_NO_RO,
           ~TT_ADDRESS_MASK_BLOCK_ENTRY
           );
}

EFI_STATUS
ArmClearMemoryRegionReadOnly (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           TT_AP_NO_RW,
           ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK)
           );
}

EFI_STATUS
EFIAPI
ArmConfigureMmu (
  IN  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryTable,
  OUT VOID                          **TranslationTableBase OPTIONAL,
  OUT UINTN                         *TranslationTableSize OPTIONAL
  )
{
  VOID        *TranslationTable;
  UINTN       MaxAddressBits;
  UINT64      MaxAddress;
  UINTN       T0SZ;
  UINTN       RootTableEntryCount;
  UINT64      TCR;
  EFI_STATUS  Status;

  if (MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return EFI_INVALID_PARAMETER;
  }

  //
  // Limit the virtual address space to what we can actually use: UEFI
  // mandates a 1:1 mapping, so no point in making the virtual address
  // space larger than the physical address space. We also have to take
  // into account the architectural limitations that result from UEFI's
  // use of 4 KB pages.
  //
  MaxAddressBits = MIN (ArmGetPhysicalAddressBits (), MAX_VA_BITS);
  MaxAddress     = LShiftU64 (1ULL, MaxAddressBits) - 1;

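  //
  // As an illustrative example: with 40 physical address bits, T0SZ == 24,
  // the walk starts at level 0 where each entry covers 39 bits (512 GB),
  // so the root table only needs 2 entries.
  //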
  T0SZ                = 64 - MaxAddressBits;
  RootTableEntryCount = GetRootTableEntryCount (T0SZ);

  //
  // Set TCR that allows us to retrieve T0SZ in the subsequent functions
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    // Note: Bits 23 and 31 are reserved (RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((
        DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress
        ));
      ASSERT (0); // Address spaces wider than 48 bits are not supported
      return EFI_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((
        DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress
        ));
      ASSERT (0); // Address spaces wider than 48 bits are not supported
      return EFI_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return EFI_UNSUPPORTED;
  }

  //
  // Translation table walks are always cache coherent on ARMv8-A, so cache
  // maintenance on page tables is never needed. Since there is a risk of
  // loss of coherency when using mismatched attributes, and given that memory
  // is mapped cacheable except for extraordinary cases (such as non-coherent
  // DMA), have the page table walker perform cached accesses as well, and
  // assert below that this matches the attributes we use for CPU accesses to
  // the region.
  //
  TCR |= TCR_SH_INNER_SHAREABLE |
         TCR_RGN_OUTER_WRITE_BACK_ALLOC |
         TCR_RGN_INNER_WRITE_BACK_ALLOC;

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for translation table
  TranslationTable = AllocatePages (1);
  if (TranslationTable == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }

  //
  // We set TTBR0 just after allocating the table to retrieve its location from
  // the subsequent functions without needing to pass this value across the
  // functions. The MMU is only enabled after the translation tables are
  // populated.
  //
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof (UINT64);
  }

  //
  // Make sure we are not inadvertently hitting in the caches
  // when populating the page tables.
  //
  InvalidateDataCacheRange (
    TranslationTable,
    RootTableEntryCount * sizeof (UINT64)
    );
  ZeroMem (TranslationTable, RootTableEntryCount * sizeof (UINT64));

  while (MemoryTable->Length != 0) {
    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (EFI_ERROR (Status)) {
      goto FreeTranslationTable;
    }

    MemoryTable++;
  }

  //
  // EFI_MEMORY_UC ==> MAIR_ATTR_DEVICE_MEMORY
  // EFI_MEMORY_WC ==> MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE
  // EFI_MEMORY_WT ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH
  // EFI_MEMORY_WB ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
  //
  ArmSetMAIR (
    MAIR_ATTR (TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)
    );

  ArmDisableAlignmentCheck ();
  ArmEnableStackAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return EFI_SUCCESS;

FreeTranslationTable:
  FreePages (TranslationTable, 1);
  return Status;
}

RETURN_STATUS
EFIAPI
ArmMmuBaseLibConstructor (
  VOID
  )
{
  extern UINT32  ArmReplaceLiveTranslationEntrySize;

  //
  // The ArmReplaceLiveTranslationEntry () helper function may be invoked
  // with the MMU off, so we have to ensure that it gets cleaned to the PoC
  //
  WriteBackDataCacheRange (
    (VOID *)(UINTN)ArmReplaceLiveTranslationEntry,
    ArmReplaceLiveTranslationEntrySize
    );

  return RETURN_SUCCESS;
}