/** @file
* File managing the MMU for ARMv8 architecture
*
* Copyright (c) 2011-2020, ARM Limited. All rights reserved.
* Copyright (c) 2016, Linaro Limited. All rights reserved.
* Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
*
* SPDX-License-Identifier: BSD-2-Clause-Patent
*
**/

#include <Uefi.h>
#include <Chipset/AArch64.h>
#include <Library/BaseMemoryLib.h>
#include <Library/CacheMaintenanceLib.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/ArmLib.h>
#include <Library/ArmMmuLib.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>

// We use this index definition to define an invalid block entry
#define TT_ATTR_INDX_INVALID    ((UINT32)~0)

STATIC
UINT64
ArmMemoryAttributeToPageAttribute (
  IN ARM_MEMORY_REGION_ATTRIBUTES  Attributes
  )
{
  switch (Attributes) {
  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
    return TT_ATTR_INDX_MEMORY_WRITE_BACK;

  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
    return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;

  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
    return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;

  // Uncached and device mappings are treated as outer shareable by default.
  case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
    return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;

  default:
    ASSERT (0);
  case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
    if (ArmReadCurrentEL () == AARCH64_EL2)
      return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
    else
      return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
  }
}
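
//
// The value returned above only covers the MAIR index, shareability and XN
// bits of a descriptor: the caller still has to OR in the access flag and the
// output address. FillTranslationTable () below, for instance, passes
// ArmMemoryAttributeToPageAttribute (Attributes) | TT_AF as the attribute set
// mask, and UpdateRegionMappingRecursive () adds the output address and the
// block/page descriptor type bits.
//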

UINT64
PageAttributeToGcdAttribute (
  IN UINT64  PageAttributes
  )
{
  UINT64  GcdAttributes;

  switch (PageAttributes & TT_ATTR_INDX_MASK) {
  case TT_ATTR_INDX_DEVICE_MEMORY:
    GcdAttributes = EFI_MEMORY_UC;
    break;
  case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:
    GcdAttributes = EFI_MEMORY_WC;
    break;
  case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:
    GcdAttributes = EFI_MEMORY_WT;
    break;
  case TT_ATTR_INDX_MEMORY_WRITE_BACK:
    GcdAttributes = EFI_MEMORY_WB;
    break;
  default:
    DEBUG ((DEBUG_ERROR,
      "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n",
      PageAttributes));
    ASSERT (0);
    // The Global Coherency Domain (GCD) value is defined as a bit set.
    // Returning 0 means no attribute has been set.
    GcdAttributes = 0;
  }

  // Determine protection attributes
  if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) ||
      ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {
    // Read only cases map to write-protect
    GcdAttributes |= EFI_MEMORY_RO;
  }

  // Process eXecute Never attribute
  if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0) {
    GcdAttributes |= EFI_MEMORY_XP;
  }

  return GcdAttributes;
}
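
//
// Worked example (illustrative, using the mappings above): a page descriptor
// with ATTR_INDX = TT_ATTR_INDX_MEMORY_WRITE_BACK, AP = TT_AP_RO_RO and the
// UXN/PXN bits set would be reported back to the GCD layer as
// EFI_MEMORY_WB | EFI_MEMORY_RO | EFI_MEMORY_XP.
//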

#define MIN_T0SZ        16
#define BITS_PER_LEVEL  9

VOID
GetRootTranslationTableInfo (
  IN UINTN     T0SZ,
  OUT UINTN    *TableLevel,
  OUT UINTN    *TableEntryCount
  )
{
  // Get the level of the root table
  if (TableLevel) {
    *TableLevel = (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;
  }

  if (TableEntryCount) {
    *TableEntryCount = 1UL << (BITS_PER_LEVEL - (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL);
  }
}
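
//
// Worked examples of the arithmetic above (4 KB granule): T0SZ = 16 (48-bit
// VA space) gives a level 0 root table with 512 entries; T0SZ = 24 (40-bit)
// gives a level 0 root with just 2 entries; T0SZ = 25 (39-bit) gives a
// level 1 root with 512 entries.
//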

STATIC
VOID
ReplaceTableEntry (
  IN UINT64   *Entry,
  IN UINT64   Value,
  IN UINT64   RegionStart,
  IN BOOLEAN  IsLiveBlockMapping
  )
{
  if (!ArmMmuEnabled () || !IsLiveBlockMapping) {
    *Entry = Value;
    ArmUpdateTranslationTableEntry (Entry, (VOID *)(UINTN)RegionStart);
  } else {
    ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart);
  }
}
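
//
// When a live block mapping (one the CPU may be using for the code or data it
// is currently accessing) has to be replaced, the plain "write the entry, then
// maintain the TLB" sequence above is not safe. The assembly helper
// ArmReplaceLiveTranslationEntry () is used instead; broadly, it performs the
// architecturally required break-before-make sequence (invalidate the entry,
// TLB maintenance, then install the new entry) from a safe context.
//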

STATIC
VOID
FreePageTablesRecursive (
  IN UINT64  *TranslationTable,
  IN UINTN   Level
  )
{
  UINTN  Index;

  ASSERT (Level <= 3);

  if (Level < 3) {
    for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
      if ((TranslationTable[Index] & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
        FreePageTablesRecursive ((VOID *)(UINTN)(TranslationTable[Index] &
                                                 TT_ADDRESS_MASK_BLOCK_ENTRY),
                                 Level + 1);
      }
    }
  }
  FreePages (TranslationTable, 1);
}

STATIC
BOOLEAN
IsBlockEntry (
  IN UINT64  Entry,
  IN UINTN   Level
  )
{
  if (Level == 3) {
    return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY_LEVEL3;
  }
  return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY;
}

STATIC
BOOLEAN
IsTableEntry (
  IN UINT64  Entry,
  IN UINTN   Level
  )
{
  if (Level == 3) {
    //
    // TT_TYPE_TABLE_ENTRY aliases TT_TYPE_BLOCK_ENTRY_LEVEL3
    // so we need to take the level into account as well.
    //
    return FALSE;
  }
  return (Entry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY;
}
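
//
// For reference: in the AArch64 VMSA descriptor format, bits [1:0] encode the
// descriptor type. 0b01 is a block descriptor (valid at levels 1 and 2 for a
// 4 KB granule), while 0b11 means "table" at levels 0-2 but "page" at level 3,
// which is why the two helpers above have to look at the level as well as the
// type bits.
//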

STATIC
EFI_STATUS
UpdateRegionMappingRecursive (
  IN UINT64  RegionStart,
  IN UINT64  RegionEnd,
  IN UINT64  AttributeSetMask,
  IN UINT64  AttributeClearMask,
  IN UINT64  *PageTable,
  IN UINTN   Level
  )
{
  UINTN       BlockShift;
  UINT64      BlockMask;
  UINT64      BlockEnd;
  UINT64      *Entry;
  UINT64      EntryValue;
  VOID        *TranslationTable;
  EFI_STATUS  Status;

  ASSERT (((RegionStart | RegionEnd) & EFI_PAGE_MASK) == 0);

  BlockShift = (Level + 1) * BITS_PER_LEVEL + MIN_T0SZ;
  BlockMask = MAX_UINT64 >> BlockShift;

  DEBUG ((DEBUG_VERBOSE, "%a(%d): %llx - %llx set %lx clr %lx\n", __FUNCTION__,
    Level, RegionStart, RegionEnd, AttributeSetMask, AttributeClearMask));

  for (; RegionStart < RegionEnd; RegionStart = BlockEnd) {
    BlockEnd = MIN (RegionEnd, (RegionStart | BlockMask) + 1);
    Entry = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)];
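    //
    // Worked example of the arithmetic above (4 KB granule): at Level 1,
    // BlockShift = 2 * 9 + 16 = 34, so BlockMask = MAX_UINT64 >> 34 =
    // 0x3FFFFFFF (one 1 GB block minus one), and the entry index is
    // RegionStart >> 30 masked to 9 bits, i.e. address bits [38:30].
    //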
d7f03464 225\r
191fa79b
AB
226 //\r
227 // If RegionStart or BlockEnd is not aligned to the block size at this\r
228 // level, we will have to create a table mapping in order to map less\r
229 // than a block, and recurse to create the block or page entries at\r
230 // the next level. No block mappings are allowed at all at level 0,\r
231 // so in that case, we have to recurse unconditionally.\r
f7079d1b
AB
232 // If we are changing a table entry and the AttributeClearMask is non-zero,\r
233 // we cannot replace it with a block entry without potentially losing\r
234 // attribute information, so keep the table entry in that case.\r
191fa79b 235 //\r
f7079d1b
AB
236 if (Level == 0 || ((RegionStart | BlockEnd) & BlockMask) != 0 ||\r
237 (IsTableEntry (*Entry, Level) && AttributeClearMask != 0)) {\r
191fa79b 238 ASSERT (Level < 3);\r
d7f03464 239\r
5fc89953 240 if (!IsTableEntry (*Entry, Level)) {\r
191fa79b
AB
241 //\r
242 // No table entry exists yet, so we need to allocate a page table\r
243 // for the next level.\r
244 //\r
674e127e 245 TranslationTable = AllocatePages (1);\r
d7f03464 246 if (TranslationTable == NULL) {\r
191fa79b 247 return EFI_OUT_OF_RESOURCES;\r
d7f03464
AB
248 }\r
249\r
748fea62
AB
250 if (!ArmMmuEnabled ()) {\r
251 //\r
252 // Make sure we are not inadvertently hitting in the caches\r
253 // when populating the page tables.\r
254 //\r
255 InvalidateDataCacheRange (TranslationTable, EFI_PAGE_SIZE);\r
256 }\r
257\r
f7079d1b
AB
258 ZeroMem (TranslationTable, EFI_PAGE_SIZE);\r
259\r
5fc89953 260 if (IsBlockEntry (*Entry, Level)) {\r
191fa79b
AB
261 //\r
262 // We are splitting an existing block entry, so we have to populate\r
263 // the new table with the attributes of the block entry it replaces.\r
264 //\r
265 Status = UpdateRegionMappingRecursive (RegionStart & ~BlockMask,\r
266 (RegionStart | BlockMask) + 1, *Entry & TT_ATTRIBUTES_MASK,\r
267 0, TranslationTable, Level + 1);\r
268 if (EFI_ERROR (Status)) {\r
269 //\r
270 // The range we passed to UpdateRegionMappingRecursive () is block\r
271 // aligned, so it is guaranteed that no further pages were allocated\r
272 // by it, and so we only have to free the page we allocated here.\r
273 //\r
274 FreePages (TranslationTable, 1);\r
275 return Status;\r
276 }\r
d7f03464 277 }\r
191fa79b
AB
278 } else {\r
279 TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);\r
d7f03464 280 }\r
d7f03464 281\r
191fa79b
AB
282 //\r
283 // Recurse to the next level\r
284 //\r
285 Status = UpdateRegionMappingRecursive (RegionStart, BlockEnd,\r
286 AttributeSetMask, AttributeClearMask, TranslationTable,\r
287 Level + 1);\r
288 if (EFI_ERROR (Status)) {\r
5fc89953 289 if (!IsTableEntry (*Entry, Level)) {\r
191fa79b
AB
290 //\r
291 // We are creating a new table entry, so on failure, we can free all\r
292 // allocations we made recursively, given that the whole subhierarchy\r
293 // has not been wired into the live page tables yet. (This is not\r
294 // possible for existing table entries, since we cannot revert the\r
295 // modifications we made to the subhierarchy it represents.)\r
296 //\r
d390920e 297 FreePageTablesRecursive (TranslationTable, Level + 1);\r
d7f03464 298 }\r
191fa79b
AB
299 return Status;\r
300 }\r
d7f03464 301\r
5fc89953 302 if (!IsTableEntry (*Entry, Level)) {\r
191fa79b
AB
303 EntryValue = (UINTN)TranslationTable | TT_TYPE_TABLE_ENTRY;\r
304 ReplaceTableEntry (Entry, EntryValue, RegionStart,\r
5fc89953 305 IsBlockEntry (*Entry, Level));\r
d7f03464 306 }\r
191fa79b
AB
307 } else {\r
308 EntryValue = (*Entry & AttributeClearMask) | AttributeSetMask;\r
309 EntryValue |= RegionStart;\r
310 EntryValue |= (Level == 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3\r
311 : TT_TYPE_BLOCK_ENTRY;\r
312\r
f7079d1b
AB
313 if (IsTableEntry (*Entry, Level)) {\r
314 //\r
315 // We are replacing a table entry with a block entry. This is only\r
316 // possible if we are keeping none of the original attributes.\r
317 // We can free the table entry's page table, and all the ones below\r
318 // it, since we are dropping the only possible reference to it.\r
319 //\r
320 ASSERT (AttributeClearMask == 0);\r
321 TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);\r
322 ReplaceTableEntry (Entry, EntryValue, RegionStart, TRUE);\r
323 FreePageTablesRecursive (TranslationTable, Level + 1);\r
324 } else {\r
325 ReplaceTableEntry (Entry, EntryValue, RegionStart, FALSE);\r
326 }\r
d7f03464
AB
327 }\r
328 }\r
191fa79b
AB
329 return EFI_SUCCESS;\r
330}\r
d7f03464 331\r
191fa79b
AB
332STATIC\r
333VOID\r
334LookupAddresstoRootTable (\r
335 IN UINT64 MaxAddress,\r
336 OUT UINTN *T0SZ,\r
337 OUT UINTN *TableEntryCount\r
338 )\r
339{\r
340 UINTN TopBit;\r
d7f03464 341\r
191fa79b
AB
342 // Check the parameters are not NULL\r
343 ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));\r
d7f03464 344\r
191fa79b
AB
345 // Look for the highest bit set in MaxAddress\r
346 for (TopBit = 63; TopBit != 0; TopBit--) {\r
347 if ((1ULL << TopBit) & MaxAddress) {\r
348 // MaxAddress top bit is found\r
349 TopBit = TopBit + 1;\r
350 break;\r
351 }\r
352 }\r
353 ASSERT (TopBit != 0);\r
d7f03464 354\r
191fa79b
AB
355 // Calculate T0SZ from the top bit of the MaxAddress\r
356 *T0SZ = 64 - TopBit;\r
357\r
358 // Get the Table info from T0SZ\r
359 GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);\r
d7f03464
AB
360}\r
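
//
// Worked example: for MaxAddress = 0xFFFFFFFFFF (a 40-bit address space), the
// highest set bit is bit 39, so TopBit becomes 40, T0SZ becomes 24, and
// GetRootTranslationTableInfo () reports a 2-entry level 0 root table.
//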

STATIC
EFI_STATUS
UpdateRegionMapping (
  IN UINT64  RegionStart,
  IN UINT64  RegionLength,
  IN UINT64  AttributeSetMask,
  IN UINT64  AttributeClearMask
  )
{
  UINTN  RootTableLevel;
  UINTN  T0SZ;

  if (((RegionStart | RegionLength) & EFI_PAGE_MASK)) {
    return EFI_INVALID_PARAMETER;
  }

  T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
  GetRootTranslationTableInfo (T0SZ, &RootTableLevel, NULL);

  return UpdateRegionMappingRecursive (RegionStart, RegionStart + RegionLength,
           AttributeSetMask, AttributeClearMask, ArmGetTTBR0BaseAddress (),
           RootTableLevel);
}

STATIC
EFI_STATUS
FillTranslationTable (
  IN UINT64                        *RootTable,
  IN ARM_MEMORY_REGION_DESCRIPTOR  *MemoryRegion
  )
{
  return UpdateRegionMapping (
           MemoryRegion->VirtualBase,
           MemoryRegion->Length,
           ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
           0
           );
}

STATIC
UINT64
GcdAttributeToPageAttribute (
  IN UINT64  GcdAttributes
  )
{
  UINT64  PageAttributes;

  switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) {
  case EFI_MEMORY_UC:
    PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
    break;
  case EFI_MEMORY_WC:
    PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
    break;
  case EFI_MEMORY_WT:
    PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
    break;
  case EFI_MEMORY_WB:
    PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
    break;
  default:
    PageAttributes = TT_ATTR_INDX_MASK;
    break;
  }

  if ((GcdAttributes & EFI_MEMORY_XP) != 0 ||
      (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC) {
    if (ArmReadCurrentEL () == AARCH64_EL2) {
      PageAttributes |= TT_XN_MASK;
    } else {
      PageAttributes |= TT_UXN_MASK | TT_PXN_MASK;
    }
  }

  if ((GcdAttributes & EFI_MEMORY_RO) != 0) {
    PageAttributes |= TT_AP_RO_RO;
  }

  return PageAttributes | TT_AF;
}
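
//
// Note that device (EFI_MEMORY_UC) mappings are made non-executable above even
// if the caller did not ask for EFI_MEMORY_XP; this mirrors the behaviour of
// ArmMemoryAttributeToPageAttribute () earlier in this file, and keeps
// instruction fetches out of device memory.
//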

EFI_STATUS
ArmSetMemoryAttributes (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length,
  IN UINT64                Attributes
  )
{
  UINT64  PageAttributes;
  UINT64  PageAttributeMask;

  PageAttributes = GcdAttributeToPageAttribute (Attributes);
  PageAttributeMask = 0;

  if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {
    //
    // No memory type was set in Attributes, so we are going to update the
    // permissions only.
    //
    PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;
    PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |
                          TT_PXN_MASK | TT_XN_MASK);
  }

  return UpdateRegionMapping (BaseAddress, Length, PageAttributes,
           PageAttributeMask);
}
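
//
// Illustrative usage (not part of this library; Base is a placeholder for a
// page-aligned address): a caller could make a 2 MB region non-executable
// without touching its cacheability with
//   Status = ArmSetMemoryAttributes (Base, SIZE_2MB, EFI_MEMORY_XP);
// whereas passing EFI_MEMORY_WB | EFI_MEMORY_XP would also (re)set the memory
// type to write-back in the same pass.
//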

STATIC
EFI_STATUS
SetMemoryRegionAttribute (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length,
  IN UINT64                Attributes,
  IN UINT64                BlockEntryMask
  )
{
  return UpdateRegionMapping (BaseAddress, Length, Attributes, BlockEntryMask);
}

EFI_STATUS
ArmSetMemoryRegionNoExec (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  UINT64  Val;

  if (ArmReadCurrentEL () == AARCH64_EL1) {
    Val = TT_PXN_MASK | TT_UXN_MASK;
  } else {
    Val = TT_XN_MASK;
  }

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           Val,
           ~TT_ADDRESS_MASK_BLOCK_ENTRY);
}
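
//
// In the four ArmSet/ClearMemoryRegion* helpers here, the final mask argument
// selects which bits of an existing descriptor survive the update: passing
// ~TT_ADDRESS_MASK_BLOCK_ENTRY keeps every attribute bit and only ORs in the
// new value, while the Clear variants below additionally drop the XN or AP
// bits from the mask so that those fields are rewritten rather than merged.
//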

EFI_STATUS
ArmClearMemoryRegionNoExec (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  UINT64  Mask;

  // XN maps to UXN in the EL1&0 translation regime
  Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           0,
           Mask);
}

EFI_STATUS
ArmSetMemoryRegionReadOnly (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           TT_AP_RO_RO,
           ~TT_ADDRESS_MASK_BLOCK_ENTRY);
}

EFI_STATUS
ArmClearMemoryRegionReadOnly (
  IN EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64                Length
  )
{
  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           TT_AP_RW_RW,
           ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));
}

EFI_STATUS
EFIAPI
ArmConfigureMmu (
  IN ARM_MEMORY_REGION_DESCRIPTOR  *MemoryTable,
  OUT VOID                         **TranslationTableBase OPTIONAL,
  OUT UINTN                        *TranslationTableSize OPTIONAL
  )
{
  VOID*       TranslationTable;
  UINT64      MaxAddress;
  UINTN       T0SZ;
  UINTN       RootTableEntryCount;
  UINT64      TCR;
  EFI_STATUS  Status;

  if (MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return EFI_INVALID_PARAMETER;
  }

  //
  // Limit the virtual address space to what we can actually use: UEFI
  // mandates a 1:1 mapping, so no point in making the virtual address
  // space larger than the physical address space. We also have to take
  // into account the architectural limitations that result from UEFI's
  // use of 4 KB pages.
  //
  MaxAddress = MIN (LShiftU64 (1ULL, ArmGetPhysicalAddressBits ()) - 1,
                    MAX_ALLOC_ADDRESS);

  // Look up the root table level and entry count for this address range
  LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);

  //
  // Set TCR that allows us to retrieve T0SZ in the subsequent functions
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    // Note: Bits 23 and 31 are reserved (RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress));
      ASSERT (0); // Address spaces wider than 48 bits are not supported
      return EFI_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((DEBUG_ERROR,
        "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
        MaxAddress));
      ASSERT (0); // Address spaces wider than 48 bits are not supported
      return EFI_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return EFI_UNSUPPORTED;
  }

  //
  // Translation table walks are always cache coherent on ARMv8-A, so cache
  // maintenance on page tables is never needed. Since there is a risk of
  // loss of coherency when using mismatched attributes, and given that memory
  // is mapped cacheable except for extraordinary cases (such as non-coherent
  // DMA), have the page table walker perform cached accesses as well, and
  // assert below that this matches the attributes we use for CPU accesses to
  // the region.
  //
  TCR |= TCR_SH_INNER_SHAREABLE |
         TCR_RGN_OUTER_WRITE_BACK_ALLOC |
         TCR_RGN_INNER_WRITE_BACK_ALLOC;

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate a page for the root translation table
  TranslationTable = AllocatePages (1);
  if (TranslationTable == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }
  //
  // We set TTBR0 just after allocating the table to retrieve its location from
  // the subsequent functions without needing to pass this value across the
  // functions. The MMU is only enabled after the translation tables are
  // populated.
  //
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof (UINT64);
  }
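
  //
  // Note that only the root table may be smaller than a full 4 KB page: with a
  // 40-bit address space, for example, RootTableEntryCount is 2, so the size
  // reported back to the caller is just 16 bytes, even though a whole page was
  // allocated for it above. All lower-level tables are full pages.
  //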

  //
  // Make sure we are not inadvertently hitting in the caches
  // when populating the page tables.
  //
  InvalidateDataCacheRange (TranslationTable,
    RootTableEntryCount * sizeof (UINT64));
  ZeroMem (TranslationTable, RootTableEntryCount * sizeof (UINT64));

  while (MemoryTable->Length != 0) {
    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (EFI_ERROR (Status)) {
      goto FreeTranslationTable;
    }
    MemoryTable++;
  }

  //
  // EFI_MEMORY_UC ==> MAIR_ATTR_DEVICE_MEMORY
  // EFI_MEMORY_WC ==> MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE
  // EFI_MEMORY_WT ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH
  // EFI_MEMORY_WB ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
  //
  ArmSetMAIR (
    MAIR_ATTR (TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) |
    MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)
    );

  ArmDisableAlignmentCheck ();
  ArmEnableStackAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return EFI_SUCCESS;

FreeTranslationTable:
  FreePages (TranslationTable, 1);
  return Status;
}

RETURN_STATUS
EFIAPI
ArmMmuBaseLibConstructor (
  VOID
  )
{
  extern UINT32 ArmReplaceLiveTranslationEntrySize;

  //
  // The ArmReplaceLiveTranslationEntry () helper function may be invoked
  // with the MMU off, so we have to ensure that it gets cleaned to the PoC.
  //
  WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry,
    ArmReplaceLiveTranslationEntrySize);

  return RETURN_SUCCESS;
}