]> git.proxmox.com Git - mirror_edk2.git/blame - ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
ArmPkg/ArmMmuLib AARCH64: use helpers to determine table entry types
[mirror_edk2.git] / ArmPkg / Library / ArmMmuLib / AArch64 / ArmMmuLibCore.c
CommitLineData
d7f03464
AB
1/** @file\r
2* File managing the MMU for ARMv8 architecture\r
3*\r
191fa79b 4* Copyright (c) 2011-2020, ARM Limited. All rights reserved.\r
d7f03464 5* Copyright (c) 2016, Linaro Limited. All rights reserved.\r
b7a09b71 6* Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>\r
d7f03464 7*\r
4059386c 8* SPDX-License-Identifier: BSD-2-Clause-Patent\r
d7f03464
AB
9*\r
10**/\r
11\r
12#include <Uefi.h>\r
13#include <Chipset/AArch64.h>\r
14#include <Library/BaseMemoryLib.h>\r
15#include <Library/CacheMaintenanceLib.h>\r
16#include <Library/MemoryAllocationLib.h>\r
17#include <Library/ArmLib.h>\r
18#include <Library/ArmMmuLib.h>\r
19#include <Library/BaseLib.h>\r
20#include <Library/DebugLib.h>\r
21\r
22// We use this index definition to define an invalid block entry\r
23#define TT_ATTR_INDX_INVALID ((UINT32)~0)\r
24\r
25STATIC\r
26UINT64\r
27ArmMemoryAttributeToPageAttribute (\r
28 IN ARM_MEMORY_REGION_ATTRIBUTES Attributes\r
29 )\r
30{\r
31 switch (Attributes) {\r
829633e3
PL
32 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:\r
33 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:\r
34 return TT_ATTR_INDX_MEMORY_WRITE_BACK;\r
35\r
d7f03464
AB
36 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:\r
37 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:\r
38 return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;\r
39\r
40 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:\r
41 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:\r
42 return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;\r
43\r
44 // Uncached and device mappings are treated as outer shareable by default,\r
45 case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:\r
46 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:\r
47 return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;\r
48\r
49 default:\r
4249278a 50 ASSERT (0);\r
d7f03464
AB
51 case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:\r
52 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:\r
53 if (ArmReadCurrentEL () == AARCH64_EL2)\r
54 return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;\r
55 else\r
56 return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;\r
57 }\r
58}\r
59\r
60UINT64\r
61PageAttributeToGcdAttribute (\r
62 IN UINT64 PageAttributes\r
63 )\r
64{\r
65 UINT64 GcdAttributes;\r
66\r
67 switch (PageAttributes & TT_ATTR_INDX_MASK) {\r
68 case TT_ATTR_INDX_DEVICE_MEMORY:\r
69 GcdAttributes = EFI_MEMORY_UC;\r
70 break;\r
71 case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:\r
72 GcdAttributes = EFI_MEMORY_WC;\r
73 break;\r
74 case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:\r
75 GcdAttributes = EFI_MEMORY_WT;\r
76 break;\r
77 case TT_ATTR_INDX_MEMORY_WRITE_BACK:\r
78 GcdAttributes = EFI_MEMORY_WB;\r
79 break;\r
80 default:\r
4249278a
AB
81 DEBUG ((DEBUG_ERROR,\r
82 "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n",\r
83 PageAttributes));\r
d7f03464
AB
84 ASSERT (0);\r
85 // The Global Coherency Domain (GCD) value is defined as a bit set.\r
86 // Returning 0 means no attribute has been set.\r
87 GcdAttributes = 0;\r
88 }\r
89\r
90 // Determine protection attributes\r
4249278a
AB
91 if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) ||\r
92 ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {\r
d7f03464 93 // Read only cases map to write-protect\r
b7a09b71 94 GcdAttributes |= EFI_MEMORY_RO;\r
d7f03464
AB
95 }\r
96\r
97 // Process eXecute Never attribute\r
4249278a 98 if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0) {\r
d7f03464
AB
99 GcdAttributes |= EFI_MEMORY_XP;\r
100 }\r
101\r
102 return GcdAttributes;\r
103}\r
104\r
e93cb72e
AB
105#define MIN_T0SZ 16\r
106#define BITS_PER_LEVEL 9\r
d7f03464
AB
107\r
108VOID\r
109GetRootTranslationTableInfo (\r
110 IN UINTN T0SZ,\r
111 OUT UINTN *TableLevel,\r
112 OUT UINTN *TableEntryCount\r
113 )\r
114{\r
d7f03464
AB
115 // Get the level of the root table\r
116 if (TableLevel) {\r
e93cb72e 117 *TableLevel = (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;\r
d7f03464
AB
118 }\r
119\r
d7f03464 120 if (TableEntryCount) {\r
e93cb72e 121 *TableEntryCount = 1UL << (BITS_PER_LEVEL - (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL);\r
d7f03464
AB
122 }\r
123}\r
124\r
125STATIC\r
126VOID\r
191fa79b 127ReplaceTableEntry (\r
d7f03464 128 IN UINT64 *Entry,\r
d5788777 129 IN UINT64 Value,\r
191fa79b
AB
130 IN UINT64 RegionStart,\r
131 IN BOOLEAN IsLiveBlockMapping\r
d7f03464
AB
132 )\r
133{\r
191fa79b 134 if (!ArmMmuEnabled () || !IsLiveBlockMapping) {\r
d7f03464 135 *Entry = Value;\r
191fa79b 136 ArmUpdateTranslationTableEntry (Entry, (VOID *)(UINTN)RegionStart);\r
d7f03464 137 } else {\r
d5788777 138 ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart);\r
d7f03464
AB
139 }\r
140}\r
141\r
142STATIC\r
143VOID\r
191fa79b 144FreePageTablesRecursive (\r
d390920e
AB
145 IN UINT64 *TranslationTable,\r
146 IN UINTN Level\r
d7f03464
AB
147 )\r
148{\r
191fa79b 149 UINTN Index;\r
d7f03464 150\r
d390920e
AB
151 ASSERT (Level <= 3);\r
152\r
153 if (Level < 3) {\r
154 for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {\r
155 if ((TranslationTable[Index] & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {\r
156 FreePageTablesRecursive ((VOID *)(UINTN)(TranslationTable[Index] &\r
157 TT_ADDRESS_MASK_BLOCK_ENTRY),\r
158 Level + 1);\r
159 }\r
d7f03464
AB
160 }\r
161 }\r
191fa79b 162 FreePages (TranslationTable, 1);\r
d7f03464
AB
163}\r
164\r
5fc89953
AB
165STATIC\r
166BOOLEAN\r
167IsBlockEntry (\r
168 IN UINT64 Entry,\r
169 IN UINTN Level\r
170 )\r
171{\r
172 if (Level == 3) {\r
173 return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY_LEVEL3;\r
174 }\r
175 return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY;\r
176}\r
177\r
178STATIC\r
179BOOLEAN\r
180IsTableEntry (\r
181 IN UINT64 Entry,\r
182 IN UINTN Level\r
183 )\r
184{\r
185 if (Level == 3) {\r
186 //\r
187 // TT_TYPE_TABLE_ENTRY aliases TT_TYPE_BLOCK_ENTRY_LEVEL3\r
188 // so we need to take the level into account as well.\r
189 //\r
190 return FALSE;\r
191 }\r
192 return (Entry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY;\r
193}\r
194\r
/**
  Recursively update the translation table descriptors covering a region.

  Each existing descriptor in the region is rewritten as
  (old & AttributeClearMask) | AttributeSetMask — i.e. AttributeClearMask is
  the mask of bits to PRESERVE, and AttributeSetMask the bits to add.

  @param[in]  RegionStart         Start of the region (page aligned).
  @param[in]  RegionEnd           Exclusive end of the region (page aligned).
  @param[in]  AttributeSetMask    Descriptor bits OR-ed into each entry.
  @param[in]  AttributeClearMask  Mask of descriptor bits that are kept.
  @param[in]  PageTable           Page table for the current level.
  @param[in]  Level               Current lookup level (0 .. 3).

  @retval EFI_SUCCESS           The region was mapped successfully.
  @retval EFI_OUT_OF_RESOURCES  A page table allocation failed.
**/
STATIC
EFI_STATUS
UpdateRegionMappingRecursive (
  IN UINT64 RegionStart,
  IN UINT64 RegionEnd,
  IN UINT64 AttributeSetMask,
  IN UINT64 AttributeClearMask,
  IN UINT64 *PageTable,
  IN UINTN Level
  )
{
  UINTN BlockShift;
  UINT64 BlockMask;
  UINT64 BlockEnd;
  UINT64 *Entry;
  UINT64 EntryValue;
  VOID *TranslationTable;
  EFI_STATUS Status;

  ASSERT (((RegionStart | RegionEnd) & EFI_PAGE_MASK) == 0);

  // Size of one entry's coverage at this level: with a 4 KB granule and
  // 9 bits resolved per level, BlockMask is (block size - 1).
  BlockShift = (Level + 1) * BITS_PER_LEVEL + MIN_T0SZ;
  BlockMask = MAX_UINT64 >> BlockShift;

  DEBUG ((DEBUG_VERBOSE, "%a(%d): %llx - %llx set %lx clr %lx\n", __FUNCTION__,
    Level, RegionStart, RegionEnd, AttributeSetMask, AttributeClearMask));

  // Walk the region one block-sized (or remainder) chunk at a time.
  for (; RegionStart < RegionEnd; RegionStart = BlockEnd) {
    BlockEnd = MIN (RegionEnd, (RegionStart | BlockMask) + 1);
    Entry = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)];

    //
    // If RegionStart or BlockEnd is not aligned to the block size at this
    // level, we will have to create a table mapping in order to map less
    // than a block, and recurse to create the block or page entries at
    // the next level. No block mappings are allowed at all at level 0,
    // so in that case, we have to recurse unconditionally.
    //
    if (Level == 0 || ((RegionStart | BlockEnd) & BlockMask) != 0) {
      ASSERT (Level < 3);

      if (!IsTableEntry (*Entry, Level)) {
        //
        // No table entry exists yet, so we need to allocate a page table
        // for the next level.
        //
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return EFI_OUT_OF_RESOURCES;
        }

        if (!ArmMmuEnabled ()) {
          //
          // Make sure we are not inadvertently hitting in the caches
          // when populating the page tables.
          //
          InvalidateDataCacheRange (TranslationTable, EFI_PAGE_SIZE);
        }

        if (IsBlockEntry (*Entry, Level)) {
          //
          // We are splitting an existing block entry, so we have to populate
          // the new table with the attributes of the block entry it replaces.
          //
          Status = UpdateRegionMappingRecursive (RegionStart & ~BlockMask,
                     (RegionStart | BlockMask) + 1, *Entry & TT_ATTRIBUTES_MASK,
                     0, TranslationTable, Level + 1);
          if (EFI_ERROR (Status)) {
            //
            // The range we passed to UpdateRegionMappingRecursive () is block
            // aligned, so it is guaranteed that no further pages were allocated
            // by it, and so we only have to free the page we allocated here.
            //
            FreePages (TranslationTable, 1);
            return Status;
          }
        } else {
          ZeroMem (TranslationTable, EFI_PAGE_SIZE);
        }
      } else {
        // An intermediate table already exists; descend into it.
        TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
      }

      //
      // Recurse to the next level
      //
      Status = UpdateRegionMappingRecursive (RegionStart, BlockEnd,
                 AttributeSetMask, AttributeClearMask, TranslationTable,
                 Level + 1);
      if (EFI_ERROR (Status)) {
        if (!IsTableEntry (*Entry, Level)) {
          //
          // We are creating a new table entry, so on failure, we can free all
          // allocations we made recursively, given that the whole subhierarchy
          // has not been wired into the live page tables yet. (This is not
          // possible for existing table entries, since we cannot revert the
          // modifications we made to the subhierarchy it represents.)
          //
          FreePageTablesRecursive (TranslationTable, Level + 1);
        }
        return Status;
      }

      if (!IsTableEntry (*Entry, Level)) {
        // Wire the newly populated subtable into the hierarchy only after it
        // is fully populated; replacing a live block mapping goes through the
        // break-before-make path in ReplaceTableEntry ().
        EntryValue = (UINTN)TranslationTable | TT_TYPE_TABLE_ENTRY;
        ReplaceTableEntry (Entry, EntryValue, RegionStart,
          IsBlockEntry (*Entry, Level));
      }
    } else {
      // The chunk covers a full block at this level: write a block (or, at
      // level 3, page) descriptor in place.
      EntryValue = (*Entry & AttributeClearMask) | AttributeSetMask;
      EntryValue |= RegionStart;
      EntryValue |= (Level == 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
                                 : TT_TYPE_BLOCK_ENTRY;

      ReplaceTableEntry (Entry, EntryValue, RegionStart, FALSE);
    }
  }
  return EFI_SUCCESS;
}
d7f03464 314\r
191fa79b
AB
315STATIC\r
316VOID\r
317LookupAddresstoRootTable (\r
318 IN UINT64 MaxAddress,\r
319 OUT UINTN *T0SZ,\r
320 OUT UINTN *TableEntryCount\r
321 )\r
322{\r
323 UINTN TopBit;\r
d7f03464 324\r
191fa79b
AB
325 // Check the parameters are not NULL\r
326 ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));\r
d7f03464 327\r
191fa79b
AB
328 // Look for the highest bit set in MaxAddress\r
329 for (TopBit = 63; TopBit != 0; TopBit--) {\r
330 if ((1ULL << TopBit) & MaxAddress) {\r
331 // MaxAddress top bit is found\r
332 TopBit = TopBit + 1;\r
333 break;\r
334 }\r
335 }\r
336 ASSERT (TopBit != 0);\r
d7f03464 337\r
191fa79b
AB
338 // Calculate T0SZ from the top bit of the MaxAddress\r
339 *T0SZ = 64 - TopBit;\r
340\r
341 // Get the Table info from T0SZ\r
342 GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);\r
d7f03464
AB
343}\r
344\r
345STATIC\r
f49ea03d 346EFI_STATUS\r
d7f03464 347UpdateRegionMapping (\r
d7f03464
AB
348 IN UINT64 RegionStart,\r
349 IN UINT64 RegionLength,\r
191fa79b
AB
350 IN UINT64 AttributeSetMask,\r
351 IN UINT64 AttributeClearMask\r
d7f03464
AB
352 )\r
353{\r
191fa79b
AB
354 UINTN RootTableLevel;\r
355 UINTN T0SZ;\r
356\r
357 if (((RegionStart | RegionLength) & EFI_PAGE_MASK)) {\r
f49ea03d 358 return EFI_INVALID_PARAMETER;\r
d7f03464
AB
359 }\r
360\r
191fa79b
AB
361 T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;\r
362 GetRootTranslationTableInfo (T0SZ, &RootTableLevel, NULL);\r
d7f03464 363\r
191fa79b
AB
364 return UpdateRegionMappingRecursive (RegionStart, RegionStart + RegionLength,\r
365 AttributeSetMask, AttributeClearMask, ArmGetTTBR0BaseAddress (),\r
366 RootTableLevel);\r
d7f03464
AB
367}\r
368\r
369STATIC\r
f49ea03d 370EFI_STATUS\r
d7f03464
AB
371FillTranslationTable (\r
372 IN UINT64 *RootTable,\r
373 IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion\r
374 )\r
375{\r
376 return UpdateRegionMapping (\r
d7f03464
AB
377 MemoryRegion->VirtualBase,\r
378 MemoryRegion->Length,\r
379 ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,\r
380 0\r
381 );\r
382}\r
383\r
e0307a7d
AB
384STATIC\r
385UINT64\r
386GcdAttributeToPageAttribute (\r
387 IN UINT64 GcdAttributes\r
388 )\r
389{\r
390 UINT64 PageAttributes;\r
391\r
392 switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) {\r
393 case EFI_MEMORY_UC:\r
394 PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;\r
395 break;\r
396 case EFI_MEMORY_WC:\r
397 PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;\r
398 break;\r
399 case EFI_MEMORY_WT:\r
400 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;\r
401 break;\r
402 case EFI_MEMORY_WB:\r
403 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;\r
404 break;\r
405 default:\r
406 PageAttributes = TT_ATTR_INDX_MASK;\r
407 break;\r
408 }\r
409\r
410 if ((GcdAttributes & EFI_MEMORY_XP) != 0 ||\r
411 (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC) {\r
412 if (ArmReadCurrentEL () == AARCH64_EL2) {\r
413 PageAttributes |= TT_XN_MASK;\r
414 } else {\r
415 PageAttributes |= TT_UXN_MASK | TT_PXN_MASK;\r
416 }\r
417 }\r
418\r
419 if ((GcdAttributes & EFI_MEMORY_RO) != 0) {\r
420 PageAttributes |= TT_AP_RO_RO;\r
421 }\r
422\r
423 return PageAttributes | TT_AF;\r
424}\r
425\r
/**
  Update the cacheability and/or permission attributes of a memory region in
  the live translation tables.

  If Attributes contains no cacheability bits, only the permission bits of
  the existing descriptors are updated; the memory type is left alone.

  @param[in]  BaseAddress  Page aligned start of the region.
  @param[in]  Length       Length of the region in bytes (page aligned).
  @param[in]  Attributes   EFI_MEMORY_xx attributes to apply.

  @retval EFI_SUCCESS            The attributes were updated.
  @retval EFI_INVALID_PARAMETER  BaseAddress or Length is not page aligned.
  @retval EFI_OUT_OF_RESOURCES   A page table allocation failed.
**/
EFI_STATUS
ArmSetMemoryAttributes (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length,
  IN UINT64                Attributes
  )
{
  UINT64 PageAttributes;
  UINT64 PageAttributeMask;

  PageAttributes = GcdAttributeToPageAttribute (Attributes);
  PageAttributeMask = 0;

  if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {
    //
    // No memory type was set in Attributes, so we are going to update the
    // permissions only.
    //
    // PageAttributeMask is the mask of descriptor bits to PRESERVE (it is
    // ANDed with the existing entry before PageAttributes is ORed in), so
    // everything except the output address and the permission bits is kept.
    //
    // NOTE(review): the keep-mask clears TT_XN_MASK while the set-mask keeps
    // TT_UXN_MASK — this presumes TT_XN_MASK aliases TT_UXN_MASK in
    // Chipset/AArch64.h; confirm against that header.
    //
    PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;
    PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |
                          TT_PXN_MASK | TT_XN_MASK);
  }

  return UpdateRegionMapping (BaseAddress, Length, PageAttributes,
           PageAttributeMask);
}
452\r
453STATIC\r
f49ea03d 454EFI_STATUS\r
d7f03464
AB
455SetMemoryRegionAttribute (\r
456 IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
457 IN UINT64 Length,\r
458 IN UINT64 Attributes,\r
459 IN UINT64 BlockEntryMask\r
460 )\r
461{\r
191fa79b 462 return UpdateRegionMapping (BaseAddress, Length, Attributes, BlockEntryMask);\r
d7f03464
AB
463}\r
464\r
f49ea03d 465EFI_STATUS\r
d7f03464
AB
466ArmSetMemoryRegionNoExec (\r
467 IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
468 IN UINT64 Length\r
469 )\r
470{\r
471 UINT64 Val;\r
472\r
473 if (ArmReadCurrentEL () == AARCH64_EL1) {\r
474 Val = TT_PXN_MASK | TT_UXN_MASK;\r
475 } else {\r
476 Val = TT_XN_MASK;\r
477 }\r
478\r
479 return SetMemoryRegionAttribute (\r
480 BaseAddress,\r
481 Length,\r
482 Val,\r
483 ~TT_ADDRESS_MASK_BLOCK_ENTRY);\r
484}\r
485\r
f49ea03d 486EFI_STATUS\r
d7f03464
AB
487ArmClearMemoryRegionNoExec (\r
488 IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
489 IN UINT64 Length\r
490 )\r
491{\r
492 UINT64 Mask;\r
493\r
494 // XN maps to UXN in the EL1&0 translation regime\r
495 Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);\r
496\r
497 return SetMemoryRegionAttribute (\r
498 BaseAddress,\r
499 Length,\r
500 0,\r
501 Mask);\r
502}\r
503\r
f49ea03d 504EFI_STATUS\r
d7f03464
AB
505ArmSetMemoryRegionReadOnly (\r
506 IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
507 IN UINT64 Length\r
508 )\r
509{\r
510 return SetMemoryRegionAttribute (\r
511 BaseAddress,\r
512 Length,\r
513 TT_AP_RO_RO,\r
514 ~TT_ADDRESS_MASK_BLOCK_ENTRY);\r
515}\r
516\r
f49ea03d 517EFI_STATUS\r
d7f03464
AB
518ArmClearMemoryRegionReadOnly (\r
519 IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
520 IN UINT64 Length\r
521 )\r
522{\r
523 return SetMemoryRegionAttribute (\r
524 BaseAddress,\r
525 Length,\r
526 TT_AP_RW_RW,\r
527 ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));\r
528}\r
529\r
/**
  Configure TCR, MAIR and TTBR0, build the translation tables for the
  regions described by MemoryTable, and enable the MMU, caches and
  alignment checking.

  @param[in]   MemoryTable           NULL-length-terminated array of regions
                                     to map.
  @param[out]  TranslationTableBase  If not NULL, receives the root table
                                     address.
  @param[out]  TranslationTableSize  If not NULL, receives the root table
                                     size in bytes.

  @retval EFI_SUCCESS            The MMU was configured and enabled.
  @retval EFI_INVALID_PARAMETER  MemoryTable is NULL.
  @retval EFI_UNSUPPORTED        Address space too large, or running at an
                                 unsupported exception level.
  @retval EFI_OUT_OF_RESOURCES   A page allocation failed.
**/
EFI_STATUS
EFIAPI
ArmConfigureMmu (
  IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable,
  OUT VOID **TranslationTableBase OPTIONAL,
  OUT UINTN *TranslationTableSize OPTIONAL
  )
{
  VOID* TranslationTable;
  UINT64 MaxAddress;
  UINTN T0SZ;
  UINTN RootTableEntryCount;
  UINT64 TCR;
  EFI_STATUS Status;

  if (MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return EFI_INVALID_PARAMETER;
  }

  //
  // Limit the virtual address space to what we can actually use: UEFI
  // mandates a 1:1 mapping, so no point in making the virtual address
  // space larger than the physical address space. We also have to take
  // into account the architectural limitations that result from UEFI's
  // use of 4 KB pages.
  //
  MaxAddress = MIN (LShiftU64 (1ULL, ArmGetPhysicalAddressBits ()) - 1,
                 MAX_ALLOC_ADDRESS);

  // Lookup the Table Level to get the information
  LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);

  //
  // Set TCR that allows us to retrieve T0SZ in the subsequent functions
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    //Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return EFI_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return EFI_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return EFI_UNSUPPORTED;
  }

  //
  // Translation table walks are always cache coherent on ARMv8-A, so cache
  // maintenance on page tables is never needed. Since there is a risk of
  // loss of coherency when using mismatched attributes, and given that memory
  // is mapped cacheable except for extraordinary cases (such as non-coherent
  // DMA), have the page table walker perform cached accesses as well, and
  // assert below that that matches the attributes we use for CPU accesses to
  // the region.
  //
  TCR |= TCR_SH_INNER_SHAREABLE |
         TCR_RGN_OUTER_WRITE_BACK_ALLOC |
         TCR_RGN_INNER_WRITE_BACK_ALLOC;

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for translation table
  TranslationTable = AllocatePages (1);
  if (TranslationTable == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }
  //
  // We set TTBR0 just after allocating the table to retrieve its location from
  // the subsequent functions without needing to pass this value across the
  // functions. The MMU is only enabled after the translation tables are
  // populated.
  //
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof (UINT64);
  }

  //
  // Make sure we are not inadvertently hitting in the caches
  // when populating the page tables.
  //
  InvalidateDataCacheRange (TranslationTable,
    RootTableEntryCount * sizeof (UINT64));
  ZeroMem (TranslationTable, RootTableEntryCount * sizeof (UINT64));

  // Map every region in the caller's table; the array is terminated by an
  // entry with Length == 0.
  while (MemoryTable->Length != 0) {
    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (EFI_ERROR (Status)) {
      goto FreeTranslationTable;
    }
    MemoryTable++;
  }

  //
  // EFI_MEMORY_UC ==> MAIR_ATTR_DEVICE_MEMORY
  // EFI_MEMORY_WC ==> MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE
  // EFI_MEMORY_WT ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH
  // EFI_MEMORY_WB ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
  //
  ArmSetMAIR (
    MAIR_ATTR (TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)
    );

  ArmDisableAlignmentCheck ();
  ArmEnableStackAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return EFI_SUCCESS;

FreeTranslationTable:
  // Note: only the root table page is freed here; any subtables allocated
  // before the failure are not reclaimed on this path.
  FreePages (TranslationTable, 1);
  return Status;
}
699\r
/**
  Library constructor: write the ArmReplaceLiveTranslationEntry () helper
  back to the point of coherency so it is safe to execute with the MMU off.

  @retval RETURN_SUCCESS  Always.
**/
RETURN_STATUS
EFIAPI
ArmMmuBaseLibConstructor (
  VOID
  )
{
  // Size of the helper routine, exported by the accompanying assembly file.
  extern UINT32 ArmReplaceLiveTranslationEntrySize;

  //
  // The ArmReplaceLiveTranslationEntry () helper function may be invoked
  // with the MMU off so we have to ensure that it gets cleaned to the PoC
  //
  WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry,
    ArmReplaceLiveTranslationEntrySize);

  return RETURN_SUCCESS;
}