/** @file
*  File managing the MMU for ARMv8 architecture
*
*  Copyright (c) 2011-2020, ARM Limited. All rights reserved.
*  Copyright (c) 2016, Linaro Limited. All rights reserved.
*  Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
*
*  SPDX-License-Identifier: BSD-2-Clause-Patent
*
**/

#include <Uefi.h>
#include <Chipset/AArch64.h>
#include <Library/BaseMemoryLib.h>
#include <Library/CacheMaintenanceLib.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/ArmLib.h>
#include <Library/ArmMmuLib.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>

// We use this index definition to define an invalid block entry
#define TT_ATTR_INDX_INVALID    ((UINT32)~0)

STATIC
UINT64
ArmMemoryAttributeToPageAttribute (
  IN ARM_MEMORY_REGION_ATTRIBUTES  Attributes
  )
{
  switch (Attributes) {
  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
    return TT_ATTR_INDX_MEMORY_WRITE_BACK;

  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
    return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;

  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
    return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;

  // Uncached and device mappings are treated as outer shareable by default.
  case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
    return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;

  default:
    ASSERT (0);
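    //
    // Note: the missing 'break' above is intentional. In RELEASE builds,
    // where ASSERT () is a no-op, unknown attribute values fall through
    // and are mapped as device memory.
    //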
  case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
    if (ArmReadCurrentEL () == AARCH64_EL2)
      return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
    else
      return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
  }
}

#define MIN_T0SZ        16
#define BITS_PER_LEVEL  9
#define MAX_VA_BITS     48

STATIC
UINTN
GetRootTableEntryCount (
  IN  UINTN  T0SZ
  )
{
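  //
  // Worked example: with TT_ENTRY_COUNT == 512, a T0SZ of 16 (48 VA bits)
  // yields a full 512-entry root table, while a T0SZ of 28 (36 VA bits)
  // yields 512 >> 3 == 64 root entries of 1 GB each.
  //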
  return TT_ENTRY_COUNT >> (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL;
}

VOID
GetRootTranslationTableInfo (
  IN  UINTN   T0SZ,
  OUT UINTN   *TableLevel,
  OUT UINTN   *TableEntryCount
  )
{
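  //
  // For the 4 KB granule used by this library, T0SZ values of 16..24 place
  // the root table at level 0 and values of 25..33 place it at level 1,
  // which is what the arithmetic below computes.
  //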
  // Get the level of the root table
  if (TableLevel) {
    *TableLevel = (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;
  }

  if (TableEntryCount) {
    *TableEntryCount = 1UL << (BITS_PER_LEVEL - (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL);
  }
}

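//
// Update a translation table entry in place. When the MMU is enabled and a
// live block mapping is being replaced, the write is delegated to the
// ArmReplaceLiveTranslationEntry () assembly helper so that the entry can be
// swapped safely while it may be in active use by the CPU.
//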
STATIC
VOID
ReplaceTableEntry (
  IN  UINT64   *Entry,
  IN  UINT64   Value,
  IN  UINT64   RegionStart,
  IN  BOOLEAN  IsLiveBlockMapping
  )
{
  if (!ArmMmuEnabled () || !IsLiveBlockMapping) {
    *Entry = Value;
    ArmUpdateTranslationTableEntry (Entry, (VOID *)(UINTN)RegionStart);
  } else {
    ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart);
  }
}

STATIC
VOID
FreePageTablesRecursive (
  IN  UINT64  *TranslationTable,
  IN  UINTN   Level
  )
{
  UINTN   Index;

  ASSERT (Level <= 3);

  if (Level < 3) {
    for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
      if ((TranslationTable[Index] & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
        FreePageTablesRecursive ((VOID *)(UINTN)(TranslationTable[Index] &
                                                 TT_ADDRESS_MASK_BLOCK_ENTRY),
                                 Level + 1);
      }
    }
  }
  FreePages (TranslationTable, 1);
}

STATIC
BOOLEAN
IsBlockEntry (
  IN  UINT64  Entry,
  IN  UINTN   Level
  )
{
  if (Level == 3) {
    return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY_LEVEL3;
  }
  return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY;
}

STATIC
BOOLEAN
IsTableEntry (
  IN  UINT64  Entry,
  IN  UINTN   Level
  )
{
  if (Level == 3) {
    //
    // TT_TYPE_TABLE_ENTRY aliases TT_TYPE_BLOCK_ENTRY_LEVEL3
    // so we need to take the level into account as well.
    //
    return FALSE;
  }
  return (Entry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY;
}

STATIC
EFI_STATUS
UpdateRegionMappingRecursive (
  IN  UINT64  RegionStart,
  IN  UINT64  RegionEnd,
  IN  UINT64  AttributeSetMask,
  IN  UINT64  AttributeClearMask,
  IN  UINT64  *PageTable,
  IN  UINTN   Level
  )
{
  UINTN           BlockShift;
  UINT64          BlockMask;
  UINT64          BlockEnd;
  UINT64          *Entry;
  UINT64          EntryValue;
  VOID            *TranslationTable;
  EFI_STATUS      Status;

  ASSERT (((RegionStart | RegionEnd) & EFI_PAGE_MASK) == 0);

  BlockShift = (Level + 1) * BITS_PER_LEVEL + MIN_T0SZ;
  BlockMask = MAX_UINT64 >> BlockShift;
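  //
  // With the 4 KB granule used here, this gives a block size of 512 GB at
  // level 0 (BlockShift 25), 1 GB at level 1 (34), 2 MB at level 2 (43)
  // and 4 KB at level 3 (52).
  //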

  DEBUG ((DEBUG_VERBOSE, "%a(%d): %llx - %llx set %lx clr %lx\n", __FUNCTION__,
    Level, RegionStart, RegionEnd, AttributeSetMask, AttributeClearMask));

  for (; RegionStart < RegionEnd; RegionStart = BlockEnd) {
    BlockEnd = MIN (RegionEnd, (RegionStart | BlockMask) + 1);
    Entry = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)];

    //
    // If RegionStart or BlockEnd is not aligned to the block size at this
    // level, we will have to create a table mapping in order to map less
    // than a block, and recurse to create the block or page entries at
    // the next level. No block mappings are allowed at all at level 0,
    // so in that case, we have to recurse unconditionally.
    // If we are changing a table entry and the AttributeClearMask is non-zero,
    // we cannot replace it with a block entry without potentially losing
    // attribute information, so keep the table entry in that case.
    //
    if (Level == 0 || ((RegionStart | BlockEnd) & BlockMask) != 0 ||
        (IsTableEntry (*Entry, Level) && AttributeClearMask != 0)) {
      ASSERT (Level < 3);

      if (!IsTableEntry (*Entry, Level)) {
        //
        // No table entry exists yet, so we need to allocate a page table
        // for the next level.
        //
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return EFI_OUT_OF_RESOURCES;
        }

        if (!ArmMmuEnabled ()) {
          //
          // Make sure we are not inadvertently hitting in the caches
          // when populating the page tables.
          //
          InvalidateDataCacheRange (TranslationTable, EFI_PAGE_SIZE);
        }

        ZeroMem (TranslationTable, EFI_PAGE_SIZE);

        if (IsBlockEntry (*Entry, Level)) {
          //
          // We are splitting an existing block entry, so we have to populate
          // the new table with the attributes of the block entry it replaces.
          //
          Status = UpdateRegionMappingRecursive (RegionStart & ~BlockMask,
                     (RegionStart | BlockMask) + 1, *Entry & TT_ATTRIBUTES_MASK,
                     0, TranslationTable, Level + 1);
          if (EFI_ERROR (Status)) {
            //
            // The range we passed to UpdateRegionMappingRecursive () is block
            // aligned, so it is guaranteed that no further pages were allocated
            // by it, and so we only have to free the page we allocated here.
            //
            FreePages (TranslationTable, 1);
            return Status;
          }
        }
      } else {
        TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
      }

      //
      // Recurse to the next level
      //
      Status = UpdateRegionMappingRecursive (RegionStart, BlockEnd,
                 AttributeSetMask, AttributeClearMask, TranslationTable,
                 Level + 1);
      if (EFI_ERROR (Status)) {
        if (!IsTableEntry (*Entry, Level)) {
          //
          // We are creating a new table entry, so on failure, we can free all
          // allocations we made recursively, given that the whole subhierarchy
          // has not been wired into the live page tables yet. (This is not
          // possible for existing table entries, since we cannot revert the
          // modifications we made to the subhierarchy it represents.)
          //
          FreePageTablesRecursive (TranslationTable, Level + 1);
        }
        return Status;
      }

      if (!IsTableEntry (*Entry, Level)) {
        EntryValue = (UINTN)TranslationTable | TT_TYPE_TABLE_ENTRY;
        ReplaceTableEntry (Entry, EntryValue, RegionStart,
          IsBlockEntry (*Entry, Level));
      }
    } else {
      EntryValue = (*Entry & AttributeClearMask) | AttributeSetMask;
      EntryValue |= RegionStart;
      EntryValue |= (Level == 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
                                 : TT_TYPE_BLOCK_ENTRY;

      if (IsTableEntry (*Entry, Level)) {
        //
        // We are replacing a table entry with a block entry. This is only
        // possible if we are keeping none of the original attributes.
        // We can free the table entry's page table, and all the ones below
        // it, since we are dropping the only possible reference to it.
        //
        ASSERT (AttributeClearMask == 0);
        TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
        ReplaceTableEntry (Entry, EntryValue, RegionStart, TRUE);
        FreePageTablesRecursive (TranslationTable, Level + 1);
      } else {
        ReplaceTableEntry (Entry, EntryValue, RegionStart, FALSE);
      }
    }
  }
  return EFI_SUCCESS;
}

STATIC
EFI_STATUS
UpdateRegionMapping (
  IN  UINT64  RegionStart,
  IN  UINT64  RegionLength,
  IN  UINT64  AttributeSetMask,
  IN  UINT64  AttributeClearMask
  )
{
  UINTN     RootTableLevel;
  UINTN     T0SZ;

  if (((RegionStart | RegionLength) & EFI_PAGE_MASK)) {
    return EFI_INVALID_PARAMETER;
  }

  T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
  GetRootTranslationTableInfo (T0SZ, &RootTableLevel, NULL);

  return UpdateRegionMappingRecursive (RegionStart, RegionStart + RegionLength,
           AttributeSetMask, AttributeClearMask, ArmGetTTBR0BaseAddress (),
           RootTableLevel);
}

STATIC
EFI_STATUS
FillTranslationTable (
  IN  UINT64                        *RootTable,
  IN  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryRegion
  )
{
  return UpdateRegionMapping (
           MemoryRegion->VirtualBase,
           MemoryRegion->Length,
           ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
           0
           );
}

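//
// Convert EFI_MEMORY_xx GCD attributes into AArch64 page table attributes.
// For example (values derived from the mapping below), EFI_MEMORY_WB with
// EFI_MEMORY_XP at EL1 yields TT_ATTR_INDX_MEMORY_WRITE_BACK |
// TT_SH_INNER_SHAREABLE | TT_UXN_MASK | TT_PXN_MASK | TT_AF.
//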
STATIC
UINT64
GcdAttributeToPageAttribute (
  IN UINT64 GcdAttributes
  )
{
  UINT64 PageAttributes;

  switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) {
  case EFI_MEMORY_UC:
    PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
    break;
  case EFI_MEMORY_WC:
    PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
    break;
  case EFI_MEMORY_WT:
    PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
    break;
  case EFI_MEMORY_WB:
    PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
    break;
  default:
    PageAttributes = TT_ATTR_INDX_MASK;
    break;
  }

  if ((GcdAttributes & EFI_MEMORY_XP) != 0 ||
      (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC) {
    if (ArmReadCurrentEL () == AARCH64_EL2) {
      PageAttributes |= TT_XN_MASK;
    } else {
      PageAttributes |= TT_UXN_MASK | TT_PXN_MASK;
    }
  }

  if ((GcdAttributes & EFI_MEMORY_RO) != 0) {
    PageAttributes |= TT_AP_RO_RO;
  }

  return PageAttributes | TT_AF;
}

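//
// For example, a call such as ArmSetMemoryAttributes (Base, Length,
// EFI_MEMORY_RO) carries no cache type bits, so only the permission bits of
// the existing mapping are rewritten and its memory type is left untouched.
//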
EFI_STATUS
ArmSetMemoryAttributes (
  IN EFI_PHYSICAL_ADDRESS      BaseAddress,
  IN UINT64                    Length,
  IN UINT64                    Attributes
  )
{
  UINT64 PageAttributes;
  UINT64 PageAttributeMask;

  PageAttributes = GcdAttributeToPageAttribute (Attributes);
  PageAttributeMask = 0;

  if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {
    //
    // No memory type was set in Attributes, so we are going to update the
    // permissions only.
    //
    PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;
    PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |
                          TT_PXN_MASK | TT_XN_MASK);
  }

  return UpdateRegionMapping (BaseAddress, Length, PageAttributes,
           PageAttributeMask);
}

STATIC
EFI_STATUS
SetMemoryRegionAttribute (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress,
  IN  UINT64                    Length,
  IN  UINT64                    Attributes,
  IN  UINT64                    BlockEntryMask
  )
{
  return UpdateRegionMapping (BaseAddress, Length, Attributes, BlockEntryMask);
}

EFI_STATUS
ArmSetMemoryRegionNoExec (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress,
  IN  UINT64                    Length
  )
{
  UINT64    Val;

  if (ArmReadCurrentEL () == AARCH64_EL1) {
    Val = TT_PXN_MASK | TT_UXN_MASK;
  } else {
    Val = TT_XN_MASK;
  }

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           Val,
           ~TT_ADDRESS_MASK_BLOCK_ENTRY);
}

EFI_STATUS
ArmClearMemoryRegionNoExec (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress,
  IN  UINT64                    Length
  )
{
  UINT64 Mask;

  // XN maps to UXN in the EL1&0 translation regime
  Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           0,
           Mask);
}

EFI_STATUS
ArmSetMemoryRegionReadOnly (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress,
  IN  UINT64                    Length
  )
{
  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           TT_AP_RO_RO,
           ~TT_ADDRESS_MASK_BLOCK_ENTRY);
}

EFI_STATUS
ArmClearMemoryRegionReadOnly (
  IN  EFI_PHYSICAL_ADDRESS      BaseAddress,
  IN  UINT64                    Length
  )
{
  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           TT_AP_RW_RW,
           ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));
}

EFI_STATUS
EFIAPI
ArmConfigureMmu (
  IN  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryTable,
  OUT VOID                         **TranslationTableBase OPTIONAL,
  OUT UINTN                         *TranslationTableSize OPTIONAL
  )
{
  VOID*         TranslationTable;
  UINTN         MaxAddressBits;
  UINT64        MaxAddress;
  UINTN         T0SZ;
  UINTN         RootTableEntryCount;
  UINT64        TCR;
  EFI_STATUS    Status;

  if (MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return EFI_INVALID_PARAMETER;
  }

  //
  // Limit the virtual address space to what we can actually use: UEFI
  // mandates a 1:1 mapping, so no point in making the virtual address
  // space larger than the physical address space. We also have to take
  // into account the architectural limitations that result from UEFI's
  // use of 4 KB pages.
  //
  MaxAddressBits = MIN (ArmGetPhysicalAddressBits (), MAX_VA_BITS);
  MaxAddress = LShiftU64 (1ULL, MaxAddressBits) - 1;

  T0SZ = 64 - MaxAddressBits;
  RootTableEntryCount = GetRootTableEntryCount (T0SZ);
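  //
  // For example (illustrative values only): with 40 physical address bits,
  // T0SZ is 24, so the root table lives at level 0 and holds two 512 GB
  // entries covering the entire 1 TB address space.
  //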

  //
  // Set TCR that allows us to retrieve T0SZ in the subsequent functions
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    // Note: Bits 23 and 31 are reserved (RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress));
      ASSERT (0); // Address spaces wider than 48 bits are not supported
      return EFI_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress));
      ASSERT (0); // Address spaces wider than 48 bits are not supported
      return EFI_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return EFI_UNSUPPORTED;
  }

  //
  // Translation table walks are always cache coherent on ARMv8-A, so cache
  // maintenance on page tables is never needed. Since there is a risk of
  // loss of coherency when using mismatched attributes, and given that memory
  // is mapped cacheable except for extraordinary cases (such as non-coherent
  // DMA), have the page table walker perform cached accesses as well, and
  // assert below that that matches the attributes we use for CPU accesses to
  // the region.
  //
  TCR |= TCR_SH_INNER_SHAREABLE |
         TCR_RGN_OUTER_WRITE_BACK_ALLOC |
         TCR_RGN_INNER_WRITE_BACK_ALLOC;

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for translation table
  TranslationTable = AllocatePages (1);
  if (TranslationTable == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }
  //
  // We set TTBR0 just after allocating the table to retrieve its location from
  // the subsequent functions without needing to pass this value across the
  // functions. The MMU is only enabled after the translation tables are
  // populated.
  //
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof (UINT64);
  }

  //
  // Make sure we are not inadvertently hitting in the caches
  // when populating the page tables.
  //
  InvalidateDataCacheRange (TranslationTable,
    RootTableEntryCount * sizeof (UINT64));
  ZeroMem (TranslationTable, RootTableEntryCount * sizeof (UINT64));

  while (MemoryTable->Length != 0) {
    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (EFI_ERROR (Status)) {
      goto FreeTranslationTable;
    }
    MemoryTable++;
  }

  //
  // EFI_MEMORY_UC ==> MAIR_ATTR_DEVICE_MEMORY
  // EFI_MEMORY_WC ==> MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE
  // EFI_MEMORY_WT ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH
  // EFI_MEMORY_WB ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
  //
  ArmSetMAIR (
    MAIR_ATTR (TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)
    );

  ArmDisableAlignmentCheck ();
  ArmEnableStackAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return EFI_SUCCESS;

FreeTranslationTable:
  FreePages (TranslationTable, 1);
  return Status;
}

RETURN_STATUS
EFIAPI
ArmMmuBaseLibConstructor (
  VOID
  )
{
  extern UINT32 ArmReplaceLiveTranslationEntrySize;

  //
  // The ArmReplaceLiveTranslationEntry () helper function may be invoked
  // with the MMU off so we have to ensure that it gets cleaned to the PoC
  //
  WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry,
    ArmReplaceLiveTranslationEntrySize);

  return RETURN_SUCCESS;
}