ArmPkg/ArmMmuLib AARCH64: fix out of bounds access
[mirror_edk2.git] / ArmPkg / Library / ArmMmuLib / AArch64 / ArmMmuLibCore.c
CommitLineData
d7f03464
AB
1/** @file\r
2* File managing the MMU for ARMv8 architecture\r
3*\r
4* Copyright (c) 2011-2014, ARM Limited. All rights reserved.\r
5* Copyright (c) 2016, Linaro Limited. All rights reserved.\r
b7a09b71 6* Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>\r
d7f03464
AB
7*\r
8* This program and the accompanying materials\r
9* are licensed and made available under the terms and conditions of the BSD License\r
10* which accompanies this distribution. The full text of the license may be found at\r
11* http://opensource.org/licenses/bsd-license.php\r
12*\r
13* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
14* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
15*\r
16**/\r
17\r
18#include <Uefi.h>\r
19#include <Chipset/AArch64.h>\r
20#include <Library/BaseMemoryLib.h>\r
21#include <Library/CacheMaintenanceLib.h>\r
22#include <Library/MemoryAllocationLib.h>\r
23#include <Library/ArmLib.h>\r
24#include <Library/ArmMmuLib.h>\r
25#include <Library/BaseLib.h>\r
26#include <Library/DebugLib.h>\r
27\r
28// We use this index definition to define an invalid block entry\r
29#define TT_ATTR_INDX_INVALID ((UINT32)~0)\r
30\r
31STATIC\r
32UINT64\r
33ArmMemoryAttributeToPageAttribute (\r
34 IN ARM_MEMORY_REGION_ATTRIBUTES Attributes\r
35 )\r
36{\r
37 switch (Attributes) {\r
829633e3
PL
38 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:\r
39 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:\r
40 return TT_ATTR_INDX_MEMORY_WRITE_BACK;\r
41\r
d7f03464
AB
42 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:\r
43 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:\r
44 return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;\r
45\r
46 case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:\r
47 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:\r
48 return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;\r
49\r
50 // Uncached and device mappings are treated as outer shareable by default,\r
51 case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:\r
52 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:\r
53 return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;\r
54\r
55 default:\r
56 ASSERT(0);\r
57 case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:\r
58 case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:\r
59 if (ArmReadCurrentEL () == AARCH64_EL2)\r
60 return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;\r
61 else\r
62 return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;\r
63 }\r
64}\r
65\r
66UINT64\r
67PageAttributeToGcdAttribute (\r
68 IN UINT64 PageAttributes\r
69 )\r
70{\r
71 UINT64 GcdAttributes;\r
72\r
73 switch (PageAttributes & TT_ATTR_INDX_MASK) {\r
74 case TT_ATTR_INDX_DEVICE_MEMORY:\r
75 GcdAttributes = EFI_MEMORY_UC;\r
76 break;\r
77 case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:\r
78 GcdAttributes = EFI_MEMORY_WC;\r
79 break;\r
80 case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:\r
81 GcdAttributes = EFI_MEMORY_WT;\r
82 break;\r
83 case TT_ATTR_INDX_MEMORY_WRITE_BACK:\r
84 GcdAttributes = EFI_MEMORY_WB;\r
85 break;\r
86 default:\r
87 DEBUG ((EFI_D_ERROR, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes));\r
88 ASSERT (0);\r
89 // The Global Coherency Domain (GCD) value is defined as a bit set.\r
90 // Returning 0 means no attribute has been set.\r
91 GcdAttributes = 0;\r
92 }\r
93\r
94 // Determine protection attributes\r
95 if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) || ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {\r
96 // Read only cases map to write-protect\r
b7a09b71 97 GcdAttributes |= EFI_MEMORY_RO;\r
d7f03464
AB
98 }\r
99\r
100 // Process eXecute Never attribute\r
101 if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0 ) {\r
102 GcdAttributes |= EFI_MEMORY_XP;\r
103 }\r
104\r
105 return GcdAttributes;\r
106}\r
107\r
e93cb72e
AB
108#define MIN_T0SZ 16\r
109#define BITS_PER_LEVEL 9\r
d7f03464
AB
110\r
111VOID\r
112GetRootTranslationTableInfo (\r
113 IN UINTN T0SZ,\r
114 OUT UINTN *TableLevel,\r
115 OUT UINTN *TableEntryCount\r
116 )\r
117{\r
d7f03464
AB
118 // Get the level of the root table\r
119 if (TableLevel) {\r
e93cb72e 120 *TableLevel = (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;\r
d7f03464
AB
121 }\r
122\r
d7f03464 123 if (TableEntryCount) {\r
e93cb72e 124 *TableEntryCount = 1UL << (BITS_PER_LEVEL - (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL);\r
d7f03464
AB
125 }\r
126}\r
127\r
128STATIC\r
129VOID\r
130ReplaceLiveEntry (\r
131 IN UINT64 *Entry,\r
132 IN UINT64 Value\r
133 )\r
134{\r
135 if (!ArmMmuEnabled ()) {\r
136 *Entry = Value;\r
137 } else {\r
138 ArmReplaceLiveTranslationEntry (Entry, Value);\r
139 }\r
140}\r
141\r
142STATIC\r
143VOID\r
144LookupAddresstoRootTable (\r
145 IN UINT64 MaxAddress,\r
146 OUT UINTN *T0SZ,\r
147 OUT UINTN *TableEntryCount\r
148 )\r
149{\r
150 UINTN TopBit;\r
151\r
152 // Check the parameters are not NULL\r
153 ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));\r
154\r
155 // Look for the highest bit set in MaxAddress\r
156 for (TopBit = 63; TopBit != 0; TopBit--) {\r
157 if ((1ULL << TopBit) & MaxAddress) {\r
158 // MaxAddress top bit is found\r
159 TopBit = TopBit + 1;\r
160 break;\r
161 }\r
162 }\r
163 ASSERT (TopBit != 0);\r
164\r
165 // Calculate T0SZ from the top bit of the MaxAddress\r
166 *T0SZ = 64 - TopBit;\r
167\r
168 // Get the Table info from T0SZ\r
169 GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);\r
170}\r
171\r
/**
  Locate (creating intermediate tables as needed) the translation table
  entry that maps RegionStart at the most suitable level.

  The level is chosen from the alignment of RegionStart (or of the size when
  RegionStart is 0) and lowered further until the block size fits within
  *BlockEntrySize. Existing block entries at higher levels are split into
  new lower-level tables; invalid entries get fresh zeroed tables.

  @param[in]      RootTable       Root translation table to walk.
  @param[in]      RegionStart     Start address; must be 4 KB aligned.
  @param[out]     TableLevel      Receives the level of the returned entry.
  @param[in,out]  BlockEntrySize  On input, the region size (4 KB aligned,
                                  non-zero); on output, the block size
                                  covered by one entry at *TableLevel.
  @param[out]     LastBlockEntry  Receives a pointer to the last entry of
                                  the table containing the returned entry.

  @return Pointer to the first matching entry, or NULL on bad parameters or
          when a table allocation fails.
**/
STATIC
UINT64*
GetBlockEntryListFromAddress (
  IN UINT64 *RootTable,
  IN UINT64 RegionStart,
  OUT UINTN *TableLevel,
  IN OUT UINT64 *BlockEntrySize,
  OUT UINT64 **LastBlockEntry
  )
{
  UINTN RootTableLevel;
  UINTN RootTableEntryCount;
  UINT64 *TranslationTable;
  UINT64 *BlockEntry;
  UINT64 *SubTableBlockEntry;
  UINT64 BlockEntryAddress;
  UINTN BaseAddressAlignment;
  UINTN PageLevel;
  UINTN Index;
  UINTN IndexLevel;
  UINTN T0SZ;
  UINT64 Attributes;
  UINT64 TableAttributes;

  // Initialize variable
  BlockEntry = NULL;

  // Ensure the parameters are valid
  if (!(TableLevel && BlockEntrySize && LastBlockEntry)) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // Ensure the Region is aligned on 4KB boundary
  if ((RegionStart & (SIZE_4KB - 1)) != 0) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // Ensure the required size is aligned on 4KB boundary and not 0
  if ((*BlockEntrySize & (SIZE_4KB - 1)) != 0 || *BlockEntrySize == 0) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // The active T0SZ (programmed by ArmConfigureMmu) defines the root geometry
  T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
  // Get the Table info from T0SZ
  GetRootTranslationTableInfo (T0SZ, &RootTableLevel, &RootTableEntryCount);

  // If the start address is 0x0 then we use the size of the region to identify the alignment
  if (RegionStart == 0) {
    // Identify the highest possible alignment for the Region Size
    BaseAddressAlignment = LowBitSet64 (*BlockEntrySize);
  } else {
    // Identify the highest possible alignment for the Base Address
    BaseAddressAlignment = LowBitSet64 (RegionStart);
  }

  // Identify the Page Level the RegionStart must belong to. Note that PageLevel
  // should be at least 1 since block translations are not supported at level 0
  PageLevel = MAX (3 - ((BaseAddressAlignment - 12) / 9), 1);

  // If the required size is smaller than the current block size then we need to go to the page below.
  // The PageLevel was calculated on the Base Address alignment but did not take in account the alignment
  // of the allocation size
  while (*BlockEntrySize < TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel)) {
    // It does not fit so we need to go a page level above
    PageLevel++;
  }

  //
  // Get the Table Descriptor for the corresponding PageLevel. We need to decompose RegionStart to get appropriate entries
  //

  TranslationTable = RootTable;
  for (IndexLevel = RootTableLevel; IndexLevel <= PageLevel; IndexLevel++) {
    BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel, RegionStart);

    if ((IndexLevel != 3) && ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY)) {
      // Go to the next table
      TranslationTable = (UINT64*)(*BlockEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE);

      // If we are at the last level then update the last level to next level
      if (IndexLevel == PageLevel) {
        // Enter the next level
        PageLevel++;
      }
    } else if ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
      // If we are not at the last level then we need to split this BlockEntry
      if (IndexLevel != PageLevel) {
        // Retrieve the attributes from the block entry
        Attributes = *BlockEntry & TT_ATTRIBUTES_MASK;

        // Convert the block entry attributes into Table descriptor attributes
        TableAttributes = TT_TABLE_AP_NO_PERMISSION;
        if (Attributes & TT_NS) {
          TableAttributes = TT_TABLE_NS;
        }

        // Get the address corresponding at this entry
        BlockEntryAddress = RegionStart;
        BlockEntryAddress = BlockEntryAddress >> TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
        // Shift back to right to set zero before the effective address
        BlockEntryAddress = BlockEntryAddress << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);

        // Set the correct entry type for the next page level
        if ((IndexLevel + 1) == 3) {
          Attributes |= TT_TYPE_BLOCK_ENTRY_LEVEL3;
        } else {
          Attributes |= TT_TYPE_BLOCK_ENTRY;
        }

        // Create a new translation table
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return NULL;
        }

        // Populate the newly created lower level table so that, entry by
        // entry, it reproduces the mapping of the block being split
        SubTableBlockEntry = TranslationTable;
        for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
          *SubTableBlockEntry = Attributes | (BlockEntryAddress + (Index << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel + 1)));
          SubTableBlockEntry++;
        }

        // Fill the BlockEntry with the new TranslationTable; use the
        // break-before-make helper since the entry may be live
        ReplaceLiveEntry (BlockEntry,
          ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TableAttributes | TT_TYPE_TABLE_ENTRY);
      }
    } else {
      if (IndexLevel != PageLevel) {
        //
        // Case when we have an Invalid Entry and we are at a page level above of the one targetted.
        //

        // Create a new translation table
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return NULL;
        }

        ZeroMem (TranslationTable, TT_ENTRY_COUNT * sizeof(UINT64));

        // Fill the new BlockEntry with the TranslationTable
        *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TT_TYPE_TABLE_ENTRY;
      }
    }
  }

  // Expose the found PageLevel to the caller
  *TableLevel = PageLevel;

  // Now, we have the Table Level we can get the Block Size associated to this table
  *BlockEntrySize = TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel);

  // The last block of the root table depends on the number of entry in this table,
  // otherwise it is always the (TT_ENTRY_COUNT - 1)th entry in the table.
  *LastBlockEntry = TT_LAST_BLOCK_ADDRESS(TranslationTable,
      (PageLevel == RootTableLevel) ? RootTableEntryCount : TT_ENTRY_COUNT);

  return BlockEntry;
}
334\r
/**
  Map or update the mapping of [RegionStart, RegionStart + RegionLength)
  in the given translation tables.

  For each entry covering part of the region, the bits selected by
  BlockEntryMask are preserved and Attributes (plus the output address and
  descriptor type) are OR-ed in. A mask of 0 fully overwrites entries.

  @param[in]  RootTable       Root translation table to update.
  @param[in]  RegionStart     Start of the region (4 KB aligned).
  @param[in]  RegionLength    Length of the region; must be a non-zero
                              multiple of 4 KB.
  @param[in]  Attributes      Attribute bits to set in each entry.
  @param[in]  BlockEntryMask  Bits of the existing entry to keep.

  @retval EFI_SUCCESS            The region was mapped.
  @retval EFI_INVALID_PARAMETER  RegionLength is zero or misaligned.
  @retval EFI_OUT_OF_RESOURCES   A translation table could not be allocated.
**/
STATIC
EFI_STATUS
UpdateRegionMapping (
  IN UINT64 *RootTable,
  IN UINT64 RegionStart,
  IN UINT64 RegionLength,
  IN UINT64 Attributes,
  IN UINT64 BlockEntryMask
  )
{
  UINT32 Type;
  UINT64 *BlockEntry;
  UINT64 *LastBlockEntry;
  UINT64 BlockEntrySize;
  UINTN TableLevel;

  // Ensure the Length is aligned on 4KB boundary
  if ((RegionLength == 0) || ((RegionLength & (SIZE_4KB - 1)) != 0)) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return EFI_INVALID_PARAMETER;
  }

  do {
    // Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor
    // such as the the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor
    BlockEntrySize = RegionLength;
    BlockEntry = GetBlockEntryListFromAddress (RootTable, RegionStart, &TableLevel, &BlockEntrySize, &LastBlockEntry);
    if (BlockEntry == NULL) {
      // GetBlockEntryListFromAddress() return NULL when it fails to allocate new pages from the Translation Tables
      return EFI_OUT_OF_RESOURCES;
    }

    // Level 3 uses a distinct descriptor type encoding from levels 1-2
    if (TableLevel != 3) {
      Type = TT_TYPE_BLOCK_ENTRY;
    } else {
      Type = TT_TYPE_BLOCK_ENTRY_LEVEL3;
    }

    do {
      // Fill the Block Entry with attribute and output block address
      *BlockEntry &= BlockEntryMask;
      *BlockEntry |= (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;

      // Go to the next BlockEntry
      RegionStart += BlockEntrySize;
      RegionLength -= BlockEntrySize;
      BlockEntry++;

      // Break the inner loop when next block is a table
      // Rerun GetBlockEntryListFromAddress to avoid page table memory leak
      // (the bounds check before dereferencing BlockEntry prevents reading
      // past the end of the current table)
      if (TableLevel != 3 && BlockEntry <= LastBlockEntry &&
          (*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
        break;
      }
    } while ((RegionLength >= BlockEntrySize) && (BlockEntry <= LastBlockEntry));
  } while (RegionLength != 0);

  return EFI_SUCCESS;
}
394\r
395STATIC\r
f49ea03d 396EFI_STATUS\r
d7f03464
AB
397FillTranslationTable (\r
398 IN UINT64 *RootTable,\r
399 IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion\r
400 )\r
401{\r
402 return UpdateRegionMapping (\r
403 RootTable,\r
404 MemoryRegion->VirtualBase,\r
405 MemoryRegion->Length,\r
406 ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,\r
407 0\r
408 );\r
409}\r
410\r
e0307a7d
AB
411STATIC\r
412UINT64\r
413GcdAttributeToPageAttribute (\r
414 IN UINT64 GcdAttributes\r
415 )\r
416{\r
417 UINT64 PageAttributes;\r
418\r
419 switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) {\r
420 case EFI_MEMORY_UC:\r
421 PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;\r
422 break;\r
423 case EFI_MEMORY_WC:\r
424 PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;\r
425 break;\r
426 case EFI_MEMORY_WT:\r
427 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;\r
428 break;\r
429 case EFI_MEMORY_WB:\r
430 PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;\r
431 break;\r
432 default:\r
433 PageAttributes = TT_ATTR_INDX_MASK;\r
434 break;\r
435 }\r
436\r
437 if ((GcdAttributes & EFI_MEMORY_XP) != 0 ||\r
438 (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC) {\r
439 if (ArmReadCurrentEL () == AARCH64_EL2) {\r
440 PageAttributes |= TT_XN_MASK;\r
441 } else {\r
442 PageAttributes |= TT_UXN_MASK | TT_PXN_MASK;\r
443 }\r
444 }\r
445\r
446 if ((GcdAttributes & EFI_MEMORY_RO) != 0) {\r
447 PageAttributes |= TT_AP_RO_RO;\r
448 }\r
449\r
450 return PageAttributes | TT_AF;\r
451}\r
452\r
f49ea03d 453EFI_STATUS\r
521f3ced 454ArmSetMemoryAttributes (\r
d7f03464
AB
455 IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
456 IN UINT64 Length,\r
d9c0d991 457 IN UINT64 Attributes\r
d7f03464
AB
458 )\r
459{\r
f49ea03d 460 EFI_STATUS Status;\r
d7f03464 461 UINT64 *TranslationTable;\r
e0307a7d
AB
462 UINT64 PageAttributes;\r
463 UINT64 PageAttributeMask;\r
464\r
465 PageAttributes = GcdAttributeToPageAttribute (Attributes);\r
466 PageAttributeMask = 0;\r
467\r
468 if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {\r
469 //\r
470 // No memory type was set in Attributes, so we are going to update the\r
471 // permissions only.\r
472 //\r
473 PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;\r
474 PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |\r
475 TT_PXN_MASK | TT_XN_MASK);\r
476 }\r
d7f03464
AB
477\r
478 TranslationTable = ArmGetTTBR0BaseAddress ();\r
479\r
e0307a7d
AB
480 Status = UpdateRegionMapping (\r
481 TranslationTable,\r
482 BaseAddress,\r
483 Length,\r
484 PageAttributes,\r
485 PageAttributeMask);\r
f49ea03d 486 if (EFI_ERROR (Status)) {\r
d7f03464
AB
487 return Status;\r
488 }\r
489\r
490 // Invalidate all TLB entries so changes are synced\r
491 ArmInvalidateTlb ();\r
492\r
f49ea03d 493 return EFI_SUCCESS;\r
d7f03464
AB
494}\r
495\r
496STATIC\r
f49ea03d 497EFI_STATUS\r
d7f03464
AB
498SetMemoryRegionAttribute (\r
499 IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
500 IN UINT64 Length,\r
501 IN UINT64 Attributes,\r
502 IN UINT64 BlockEntryMask\r
503 )\r
504{\r
f49ea03d 505 EFI_STATUS Status;\r
d7f03464
AB
506 UINT64 *RootTable;\r
507\r
508 RootTable = ArmGetTTBR0BaseAddress ();\r
509\r
510 Status = UpdateRegionMapping (RootTable, BaseAddress, Length, Attributes, BlockEntryMask);\r
f49ea03d 511 if (EFI_ERROR (Status)) {\r
d7f03464
AB
512 return Status;\r
513 }\r
514\r
515 // Invalidate all TLB entries so changes are synced\r
516 ArmInvalidateTlb ();\r
517\r
f49ea03d 518 return EFI_SUCCESS;\r
d7f03464
AB
519}\r
520\r
f49ea03d 521EFI_STATUS\r
d7f03464
AB
522ArmSetMemoryRegionNoExec (\r
523 IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
524 IN UINT64 Length\r
525 )\r
526{\r
527 UINT64 Val;\r
528\r
529 if (ArmReadCurrentEL () == AARCH64_EL1) {\r
530 Val = TT_PXN_MASK | TT_UXN_MASK;\r
531 } else {\r
532 Val = TT_XN_MASK;\r
533 }\r
534\r
535 return SetMemoryRegionAttribute (\r
536 BaseAddress,\r
537 Length,\r
538 Val,\r
539 ~TT_ADDRESS_MASK_BLOCK_ENTRY);\r
540}\r
541\r
f49ea03d 542EFI_STATUS\r
d7f03464
AB
543ArmClearMemoryRegionNoExec (\r
544 IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
545 IN UINT64 Length\r
546 )\r
547{\r
548 UINT64 Mask;\r
549\r
550 // XN maps to UXN in the EL1&0 translation regime\r
551 Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);\r
552\r
553 return SetMemoryRegionAttribute (\r
554 BaseAddress,\r
555 Length,\r
556 0,\r
557 Mask);\r
558}\r
559\r
f49ea03d 560EFI_STATUS\r
d7f03464
AB
561ArmSetMemoryRegionReadOnly (\r
562 IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
563 IN UINT64 Length\r
564 )\r
565{\r
566 return SetMemoryRegionAttribute (\r
567 BaseAddress,\r
568 Length,\r
569 TT_AP_RO_RO,\r
570 ~TT_ADDRESS_MASK_BLOCK_ENTRY);\r
571}\r
572\r
f49ea03d 573EFI_STATUS\r
d7f03464
AB
574ArmClearMemoryRegionReadOnly (\r
575 IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
576 IN UINT64 Length\r
577 )\r
578{\r
579 return SetMemoryRegionAttribute (\r
580 BaseAddress,\r
581 Length,\r
582 TT_AP_RW_RW,\r
583 ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));\r
584}\r
585\r
/**
  Configure and enable the AArch64 MMU: program TCR and MAIR, allocate the
  root translation table, populate it from MemoryTable, and turn on the MMU
  and caches.

  @param[in]  MemoryTable            Array of region descriptors, terminated
                                     by an entry whose Length is 0.
  @param[out] TranslationTableBase   OPTIONAL: receives the root table address.
  @param[out] TranslationTableSize   OPTIONAL: receives the root table size
                                     in bytes.

  @retval EFI_SUCCESS            MMU configured and enabled.
  @retval EFI_INVALID_PARAMETER  MemoryTable is NULL.
  @retval EFI_UNSUPPORTED        Address space too large, or not running at
                                 EL1/EL2.
  @retval EFI_OUT_OF_RESOURCES   Table allocation failed.
**/
EFI_STATUS
EFIAPI
ArmConfigureMmu (
  IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable,
  OUT VOID **TranslationTableBase OPTIONAL,
  OUT UINTN *TranslationTableSize OPTIONAL
  )
{
  VOID* TranslationTable;
  UINT32 TranslationTableAttribute;
  UINT64 MaxAddress;
  UINTN T0SZ;
  UINTN RootTableEntryCount;
  UINT64 TCR;
  EFI_STATUS Status;

  if(MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return EFI_INVALID_PARAMETER;
  }

  //
  // Limit the virtual address space to what we can actually use: UEFI
  // mandates a 1:1 mapping, so no point in making the virtual address
  // space larger than the physical address space. We also have to take
  // into account the architectural limitations that result from UEFI's
  // use of 4 KB pages.
  //
  MaxAddress = MIN (LShiftU64 (1ULL, ArmGetPhysicalAddressBits ()) - 1,
                    MAX_ALLOC_ADDRESS);

  // Lookup the Table Level to get the information
  LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);

  //
  // Set TCR that allows us to retrieve T0SZ in the subsequent functions
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    //Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return EFI_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return EFI_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return EFI_UNSUPPORTED;
  }

  //
  // Translation table walks are always cache coherent on ARMv8-A, so cache
  // maintenance on page tables is never needed. Since there is a risk of
  // loss of coherency when using mismatched attributes, and given that memory
  // is mapped cacheable except for extraordinary cases (such as non-coherent
  // DMA), have the page table walker perform cached accesses as well, and
  // assert below that that matches the attributes we use for CPU accesses to
  // the region.
  //
  TCR |= TCR_SH_INNER_SHAREABLE |
         TCR_RGN_OUTER_WRITE_BACK_ALLOC |
         TCR_RGN_INNER_WRITE_BACK_ALLOC;

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for translation table
  TranslationTable = AllocatePages (1);
  if (TranslationTable == NULL) {
    return EFI_OUT_OF_RESOURCES;
  }
  // We set TTBR0 just after allocating the table to retrieve its location from the subsequent
  // functions without needing to pass this value across the functions. The MMU is only enabled
  // after the translation tables are populated.
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntryCount * sizeof(UINT64);
  }

  // Only the entries actually used by the root level need clearing
  ZeroMem (TranslationTable, RootTableEntryCount * sizeof(UINT64));

  // Disable MMU and caches. ArmDisableMmu() also invalidates the TLBs
  ArmDisableMmu ();
  ArmDisableDataCache ();
  ArmDisableInstructionCache ();

  // Make sure nothing sneaked into the cache
  ArmCleanInvalidateDataCache ();
  ArmInvalidateInstructionCache ();

  TranslationTableAttribute = TT_ATTR_INDX_INVALID;
  while (MemoryTable->Length != 0) {

    DEBUG_CODE_BEGIN ();
    // Find the memory attribute for the Translation Table
    if ((UINTN)TranslationTable >= MemoryTable->PhysicalBase &&
        (UINTN)TranslationTable + EFI_PAGE_SIZE <= MemoryTable->PhysicalBase +
                                                   MemoryTable->Length) {
      TranslationTableAttribute = MemoryTable->Attributes;
    }
    DEBUG_CODE_END ();

    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (EFI_ERROR (Status)) {
      goto FREE_TRANSLATION_TABLE;
    }
    MemoryTable++;
  }

  // The region holding the root table must be mapped write-back, matching
  // the cacheable walker attributes programmed into TCR above
  ASSERT (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK ||
          TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK);

  ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |                      // mapped to EFI_MEMORY_UC
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) | // mapped to EFI_MEMORY_WC
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) | // mapped to EFI_MEMORY_WT
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK));       // mapped to EFI_MEMORY_WB

  ArmDisableAlignmentCheck ();
  ArmEnableStackAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return EFI_SUCCESS;

FREE_TRANSLATION_TABLE:
  FreePages (TranslationTable, 1);
  return Status;
}
758\r
/**
  Library constructor.

  Writes the code of ArmReplaceLiveTranslationEntry () back to the point of
  coherency, since that helper may later run with the MMU and caches off.

  @return RETURN_SUCCESS always.
**/
RETURN_STATUS
EFIAPI
ArmMmuBaseLibConstructor (
  VOID
  )
{
  // Size of the helper routine in bytes — presumably exported by the
  // accompanying assembly implementation (not visible here; confirm)
  extern UINT32 ArmReplaceLiveTranslationEntrySize;

  //
  // The ArmReplaceLiveTranslationEntry () helper function may be invoked
  // with the MMU off so we have to ensure that it gets cleaned to the PoC
  //
  WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry,
    ArmReplaceLiveTranslationEntrySize);

  return RETURN_SUCCESS;
}