]>
Commit | Line | Data |
---|---|---|
d7f03464 AB |
1 | /** @file\r |
2 | * File managing the MMU for ARMv8 architecture\r | |
3 | *\r | |
4 | * Copyright (c) 2011-2014, ARM Limited. All rights reserved.\r | |
5 | * Copyright (c) 2016, Linaro Limited. All rights reserved.\r | |
6 | *\r | |
7 | * This program and the accompanying materials\r | |
8 | * are licensed and made available under the terms and conditions of the BSD License\r | |
9 | * which accompanies this distribution. The full text of the license may be found at\r | |
10 | * http://opensource.org/licenses/bsd-license.php\r | |
11 | *\r | |
12 | * THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r | |
13 | * WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r | |
14 | *\r | |
15 | **/\r | |
16 | \r | |
17 | #include <Uefi.h>\r | |
18 | #include <Chipset/AArch64.h>\r | |
19 | #include <Library/BaseMemoryLib.h>\r | |
20 | #include <Library/CacheMaintenanceLib.h>\r | |
21 | #include <Library/MemoryAllocationLib.h>\r | |
22 | #include <Library/ArmLib.h>\r | |
23 | #include <Library/ArmMmuLib.h>\r | |
24 | #include <Library/BaseLib.h>\r | |
25 | #include <Library/DebugLib.h>\r | |
26 | \r | |
// Sentinel attribute index used to mark a block entry as invalid; all bits
// set is never a valid TT_ATTR_INDX_* value.
#define TT_ATTR_INDX_INVALID ((UINT32)~0)
29 | \r | |
30 | STATIC\r | |
31 | UINT64\r | |
32 | ArmMemoryAttributeToPageAttribute (\r | |
33 | IN ARM_MEMORY_REGION_ATTRIBUTES Attributes\r | |
34 | )\r | |
35 | {\r | |
36 | switch (Attributes) {\r | |
37 | case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:\r | |
38 | case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:\r | |
39 | return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;\r | |
40 | \r | |
41 | case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:\r | |
42 | case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:\r | |
43 | return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;\r | |
44 | \r | |
45 | // Uncached and device mappings are treated as outer shareable by default,\r | |
46 | case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:\r | |
47 | case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:\r | |
48 | return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;\r | |
49 | \r | |
50 | default:\r | |
51 | ASSERT(0);\r | |
52 | case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:\r | |
53 | case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:\r | |
54 | if (ArmReadCurrentEL () == AARCH64_EL2)\r | |
55 | return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;\r | |
56 | else\r | |
57 | return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;\r | |
58 | }\r | |
59 | }\r | |
60 | \r | |
61 | UINT64\r | |
62 | PageAttributeToGcdAttribute (\r | |
63 | IN UINT64 PageAttributes\r | |
64 | )\r | |
65 | {\r | |
66 | UINT64 GcdAttributes;\r | |
67 | \r | |
68 | switch (PageAttributes & TT_ATTR_INDX_MASK) {\r | |
69 | case TT_ATTR_INDX_DEVICE_MEMORY:\r | |
70 | GcdAttributes = EFI_MEMORY_UC;\r | |
71 | break;\r | |
72 | case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:\r | |
73 | GcdAttributes = EFI_MEMORY_WC;\r | |
74 | break;\r | |
75 | case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:\r | |
76 | GcdAttributes = EFI_MEMORY_WT;\r | |
77 | break;\r | |
78 | case TT_ATTR_INDX_MEMORY_WRITE_BACK:\r | |
79 | GcdAttributes = EFI_MEMORY_WB;\r | |
80 | break;\r | |
81 | default:\r | |
82 | DEBUG ((EFI_D_ERROR, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes));\r | |
83 | ASSERT (0);\r | |
84 | // The Global Coherency Domain (GCD) value is defined as a bit set.\r | |
85 | // Returning 0 means no attribute has been set.\r | |
86 | GcdAttributes = 0;\r | |
87 | }\r | |
88 | \r | |
89 | // Determine protection attributes\r | |
90 | if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) || ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {\r | |
91 | // Read only cases map to write-protect\r | |
92 | GcdAttributes |= EFI_MEMORY_WP;\r | |
93 | }\r | |
94 | \r | |
95 | // Process eXecute Never attribute\r | |
96 | if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0 ) {\r | |
97 | GcdAttributes |= EFI_MEMORY_XP;\r | |
98 | }\r | |
99 | \r | |
100 | return GcdAttributes;\r | |
101 | }\r | |
102 | \r | |
103 | ARM_MEMORY_REGION_ATTRIBUTES\r | |
104 | GcdAttributeToArmAttribute (\r | |
105 | IN UINT64 GcdAttributes\r | |
106 | )\r | |
107 | {\r | |
108 | switch (GcdAttributes & 0xFF) {\r | |
109 | case EFI_MEMORY_UC:\r | |
110 | return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;\r | |
111 | case EFI_MEMORY_WC:\r | |
112 | return ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED;\r | |
113 | case EFI_MEMORY_WT:\r | |
114 | return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH;\r | |
115 | case EFI_MEMORY_WB:\r | |
116 | return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK;\r | |
117 | default:\r | |
118 | DEBUG ((EFI_D_ERROR, "GcdAttributeToArmAttribute: 0x%lX attributes is not supported.\n", GcdAttributes));\r | |
119 | ASSERT (0);\r | |
120 | return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;\r | |
121 | }\r | |
122 | }\r | |
123 | \r | |
e93cb72e AB |
// T0SZ = 16 is the architectural minimum and corresponds to a 48-bit
// virtual address space with the 4 KB granule.
#define MIN_T0SZ 16
// Each 4 KB-granule translation table level resolves 9 address bits
// (512 entries of 8 bytes per table).
#define BITS_PER_LEVEL 9
d7f03464 AB |
126 | \r |
127 | VOID\r | |
128 | GetRootTranslationTableInfo (\r | |
129 | IN UINTN T0SZ,\r | |
130 | OUT UINTN *TableLevel,\r | |
131 | OUT UINTN *TableEntryCount\r | |
132 | )\r | |
133 | {\r | |
d7f03464 AB |
134 | // Get the level of the root table\r |
135 | if (TableLevel) {\r | |
e93cb72e | 136 | *TableLevel = (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;\r |
d7f03464 AB |
137 | }\r |
138 | \r | |
d7f03464 | 139 | if (TableEntryCount) {\r |
e93cb72e | 140 | *TableEntryCount = 1UL << (BITS_PER_LEVEL - (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL);\r |
d7f03464 AB |
141 | }\r |
142 | }\r | |
143 | \r | |
144 | STATIC\r | |
145 | VOID\r | |
146 | ReplaceLiveEntry (\r | |
147 | IN UINT64 *Entry,\r | |
148 | IN UINT64 Value\r | |
149 | )\r | |
150 | {\r | |
151 | if (!ArmMmuEnabled ()) {\r | |
152 | *Entry = Value;\r | |
153 | } else {\r | |
154 | ArmReplaceLiveTranslationEntry (Entry, Value);\r | |
155 | }\r | |
156 | }\r | |
157 | \r | |
158 | STATIC\r | |
159 | VOID\r | |
160 | LookupAddresstoRootTable (\r | |
161 | IN UINT64 MaxAddress,\r | |
162 | OUT UINTN *T0SZ,\r | |
163 | OUT UINTN *TableEntryCount\r | |
164 | )\r | |
165 | {\r | |
166 | UINTN TopBit;\r | |
167 | \r | |
168 | // Check the parameters are not NULL\r | |
169 | ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));\r | |
170 | \r | |
171 | // Look for the highest bit set in MaxAddress\r | |
172 | for (TopBit = 63; TopBit != 0; TopBit--) {\r | |
173 | if ((1ULL << TopBit) & MaxAddress) {\r | |
174 | // MaxAddress top bit is found\r | |
175 | TopBit = TopBit + 1;\r | |
176 | break;\r | |
177 | }\r | |
178 | }\r | |
179 | ASSERT (TopBit != 0);\r | |
180 | \r | |
181 | // Calculate T0SZ from the top bit of the MaxAddress\r | |
182 | *T0SZ = 64 - TopBit;\r | |
183 | \r | |
184 | // Get the Table info from T0SZ\r | |
185 | GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);\r | |
186 | }\r | |
187 | \r | |
// Locate (creating or splitting tables as required) the translation table
// entry that maps RegionStart at a level whose block size is compatible with
// both the alignment of RegionStart and the requested *BlockEntrySize.
//
//   RootTable       root translation table (TTBR0 hierarchy)
//   RegionStart     region start address; must be 4 KB aligned
//   TableLevel      OUT: level of the table holding the returned entry
//   BlockEntrySize  IN:  required region size (4 KB aligned, non-zero)
//                   OUT: size mapped by a single entry at that level
//   LastBlockEntry  OUT: last entry of the table holding the returned entry
//
// Returns a pointer to the entry covering RegionStart, or NULL if the
// parameters are invalid or a page for a new table could not be allocated.
STATIC
UINT64*
GetBlockEntryListFromAddress (
  IN UINT64 *RootTable,
  IN UINT64 RegionStart,
  OUT UINTN *TableLevel,
  IN OUT UINT64 *BlockEntrySize,
  OUT UINT64 **LastBlockEntry
  )
{
  UINTN RootTableLevel;
  UINTN RootTableEntryCount;
  UINT64 *TranslationTable;
  UINT64 *BlockEntry;
  UINT64 *SubTableBlockEntry;
  UINT64 BlockEntryAddress;
  UINTN BaseAddressAlignment;
  UINTN PageLevel;
  UINTN Index;
  UINTN IndexLevel;
  UINTN T0SZ;
  UINT64 Attributes;
  UINT64 TableAttributes;

  // Initialize variable
  BlockEntry = NULL;

  // Ensure the parameters are valid
  if (!(TableLevel && BlockEntrySize && LastBlockEntry)) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // Ensure the Region is aligned on 4KB boundary
  if ((RegionStart & (SIZE_4KB - 1)) != 0) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // Ensure the required size is aligned on 4KB boundary and not 0
  if ((*BlockEntrySize & (SIZE_4KB - 1)) != 0 || *BlockEntrySize == 0) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // T0SZ is read back from the live TCR so the root table geometry matches
  // what ArmConfigureMmu() programmed earlier.
  T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
  // Get the Table info from T0SZ
  GetRootTranslationTableInfo (T0SZ, &RootTableLevel, &RootTableEntryCount);

  // If the start address is 0x0 then we use the size of the region to identify the alignment
  if (RegionStart == 0) {
    // Identify the highest possible alignment for the Region Size
    BaseAddressAlignment = LowBitSet64 (*BlockEntrySize);
  } else {
    // Identify the highest possible alignment for the Base Address
    BaseAddressAlignment = LowBitSet64 (RegionStart);
  }

  // Identify the Page Level the RegionStart must belong to. Note that PageLevel
  // should be at least 1 since block translations are not supported at level 0
  // (12 = log2(4KB page), 9 = bits resolved per level).
  PageLevel = MAX (3 - ((BaseAddressAlignment - 12) / 9), 1);

  // If the required size is smaller than the current block size then we need to go to the page below.
  // The PageLevel was calculated on the Base Address alignment but did not take in account the alignment
  // of the allocation size
  while (*BlockEntrySize < TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel)) {
    // It does not fit so we need to go a page level above
    PageLevel++;
  }

  //
  // Get the Table Descriptor for the corresponding PageLevel. We need to decompose RegionStart to get appropriate entries
  //

  TranslationTable = RootTable;
  for (IndexLevel = RootTableLevel; IndexLevel <= PageLevel; IndexLevel++) {
    BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel, RegionStart);

    if ((IndexLevel != 3) && ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY)) {
      // Go to the next table
      TranslationTable = (UINT64*)(*BlockEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE);

      // If we are at the last level then update the last level to next level
      if (IndexLevel == PageLevel) {
        // Enter the next level
        PageLevel++;
      }
    } else if ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
      // If we are not at the last level then we need to split this BlockEntry
      if (IndexLevel != PageLevel) {
        // Retrieve the attributes from the block entry
        Attributes = *BlockEntry & TT_ATTRIBUTES_MASK;

        // Convert the block entry attributes into Table descriptor attributes
        TableAttributes = TT_TABLE_AP_NO_PERMISSION;
        if (Attributes & TT_NS) {
          TableAttributes = TT_TABLE_NS;
        }

        // Get the address corresponding at this entry
        BlockEntryAddress = RegionStart;
        BlockEntryAddress = BlockEntryAddress >> TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
        // Shift back to right to set zero before the effective address
        BlockEntryAddress = BlockEntryAddress << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);

        // Set the correct entry type for the next page level
        if ((IndexLevel + 1) == 3) {
          Attributes |= TT_TYPE_BLOCK_ENTRY_LEVEL3;
        } else {
          Attributes |= TT_TYPE_BLOCK_ENTRY;
        }

        // Create a new translation table
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return NULL;
        }

        // Populate the newly created lower level table so it maps exactly the
        // same range, with the same attributes, as the block it replaces.
        SubTableBlockEntry = TranslationTable;
        for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
          *SubTableBlockEntry = Attributes | (BlockEntryAddress + (Index << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel + 1)));
          SubTableBlockEntry++;
        }

        // Fill the BlockEntry with the new TranslationTable. This may replace
        // a live entry, hence the break-before-make aware helper.
        ReplaceLiveEntry (BlockEntry,
          ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TableAttributes | TT_TYPE_TABLE_ENTRY);
      }
    } else {
      if (IndexLevel != PageLevel) {
        //
        // Case when we have an Invalid Entry and we are at a page level above of the one targetted.
        //

        // Create a new translation table
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return NULL;
        }

        // New table starts out with all entries invalid
        ZeroMem (TranslationTable, TT_ENTRY_COUNT * sizeof(UINT64));

        // Fill the new BlockEntry with the TranslationTable
        *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TT_TYPE_TABLE_ENTRY;
      }
    }
  }

  // Expose the found PageLevel to the caller
  *TableLevel = PageLevel;

  // Now, we have the Table Level we can get the Block Size associated to this table
  *BlockEntrySize = TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel);

  // The last block of the root table depends on the number of entry in this table,
  // otherwise it is always the (TT_ENTRY_COUNT - 1)th entry in the table.
  *LastBlockEntry = TT_LAST_BLOCK_ADDRESS(TranslationTable,
      (PageLevel == RootTableLevel) ? RootTableEntryCount : TT_ENTRY_COUNT);

  return BlockEntry;
}
350 | \r | |
// Map [RegionStart, RegionStart + RegionLength) in the table hierarchy rooted
// at RootTable. For each covered entry, the bits selected by BlockEntryMask
// are preserved and Attributes (plus the entry type) are OR-ed in, so callers
// can either install fresh mappings (mask 0) or modify selected attribute
// bits of existing ones.
//
// Returns RETURN_INVALID_PARAMETER for a zero or non-4KB-aligned length,
// RETURN_OUT_OF_RESOURCES if a table page could not be allocated, and
// RETURN_SUCCESS otherwise.
STATIC
RETURN_STATUS
UpdateRegionMapping (
  IN UINT64 *RootTable,
  IN UINT64 RegionStart,
  IN UINT64 RegionLength,
  IN UINT64 Attributes,
  IN UINT64 BlockEntryMask
  )
{
  UINT32 Type;
  UINT64 *BlockEntry;
  UINT64 *LastBlockEntry;
  UINT64 BlockEntrySize;
  UINTN TableLevel;

  // Ensure the Length is aligned on 4KB boundary
  if ((RegionLength == 0) || ((RegionLength & (SIZE_4KB - 1)) != 0)) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return RETURN_INVALID_PARAMETER;
  }

  do {
    // Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor
    // such as the the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor
    BlockEntrySize = RegionLength;
    BlockEntry = GetBlockEntryListFromAddress (RootTable, RegionStart, &TableLevel, &BlockEntrySize, &LastBlockEntry);
    if (BlockEntry == NULL) {
      // GetBlockEntryListFromAddress() return NULL when it fails to allocate new pages from the Translation Tables
      return RETURN_OUT_OF_RESOURCES;
    }

    // Level-3 entries use a distinct descriptor type encoding
    if (TableLevel != 3) {
      Type = TT_TYPE_BLOCK_ENTRY;
    } else {
      Type = TT_TYPE_BLOCK_ENTRY_LEVEL3;
    }

    do {
      // Fill the Block Entry with attribute and output block address;
      // BlockEntryMask selects which pre-existing bits survive.
      *BlockEntry &= BlockEntryMask;
      *BlockEntry |= (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;

      // Go to the next BlockEntry
      RegionStart += BlockEntrySize;
      RegionLength -= BlockEntrySize;
      BlockEntry++;

      // Break the inner loop when next block is a table
      // Rerun GetBlockEntryListFromAddress to avoid page table memory leak
      // (descending into the existing table instead of overwriting it)
      if (TableLevel != 3 &&
          (*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
        break;
      }
    } while ((RegionLength >= BlockEntrySize) && (BlockEntry <= LastBlockEntry));
  } while (RegionLength != 0);

  return RETURN_SUCCESS;
}
410 | \r | |
411 | STATIC\r | |
412 | RETURN_STATUS\r | |
413 | FillTranslationTable (\r | |
414 | IN UINT64 *RootTable,\r | |
415 | IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion\r | |
416 | )\r | |
417 | {\r | |
418 | return UpdateRegionMapping (\r | |
419 | RootTable,\r | |
420 | MemoryRegion->VirtualBase,\r | |
421 | MemoryRegion->Length,\r | |
422 | ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,\r | |
423 | 0\r | |
424 | );\r | |
425 | }\r | |
426 | \r | |
427 | RETURN_STATUS\r | |
428 | SetMemoryAttributes (\r | |
429 | IN EFI_PHYSICAL_ADDRESS BaseAddress,\r | |
430 | IN UINT64 Length,\r | |
431 | IN UINT64 Attributes,\r | |
432 | IN EFI_PHYSICAL_ADDRESS VirtualMask\r | |
433 | )\r | |
434 | {\r | |
435 | RETURN_STATUS Status;\r | |
436 | ARM_MEMORY_REGION_DESCRIPTOR MemoryRegion;\r | |
437 | UINT64 *TranslationTable;\r | |
438 | \r | |
439 | MemoryRegion.PhysicalBase = BaseAddress;\r | |
440 | MemoryRegion.VirtualBase = BaseAddress;\r | |
441 | MemoryRegion.Length = Length;\r | |
442 | MemoryRegion.Attributes = GcdAttributeToArmAttribute (Attributes);\r | |
443 | \r | |
444 | TranslationTable = ArmGetTTBR0BaseAddress ();\r | |
445 | \r | |
446 | Status = FillTranslationTable (TranslationTable, &MemoryRegion);\r | |
447 | if (RETURN_ERROR (Status)) {\r | |
448 | return Status;\r | |
449 | }\r | |
450 | \r | |
451 | // Invalidate all TLB entries so changes are synced\r | |
452 | ArmInvalidateTlb ();\r | |
453 | \r | |
454 | return RETURN_SUCCESS;\r | |
455 | }\r | |
456 | \r | |
457 | STATIC\r | |
458 | RETURN_STATUS\r | |
459 | SetMemoryRegionAttribute (\r | |
460 | IN EFI_PHYSICAL_ADDRESS BaseAddress,\r | |
461 | IN UINT64 Length,\r | |
462 | IN UINT64 Attributes,\r | |
463 | IN UINT64 BlockEntryMask\r | |
464 | )\r | |
465 | {\r | |
466 | RETURN_STATUS Status;\r | |
467 | UINT64 *RootTable;\r | |
468 | \r | |
469 | RootTable = ArmGetTTBR0BaseAddress ();\r | |
470 | \r | |
471 | Status = UpdateRegionMapping (RootTable, BaseAddress, Length, Attributes, BlockEntryMask);\r | |
472 | if (RETURN_ERROR (Status)) {\r | |
473 | return Status;\r | |
474 | }\r | |
475 | \r | |
476 | // Invalidate all TLB entries so changes are synced\r | |
477 | ArmInvalidateTlb ();\r | |
478 | \r | |
479 | return RETURN_SUCCESS;\r | |
480 | }\r | |
481 | \r | |
482 | RETURN_STATUS\r | |
483 | ArmSetMemoryRegionNoExec (\r | |
484 | IN EFI_PHYSICAL_ADDRESS BaseAddress,\r | |
485 | IN UINT64 Length\r | |
486 | )\r | |
487 | {\r | |
488 | UINT64 Val;\r | |
489 | \r | |
490 | if (ArmReadCurrentEL () == AARCH64_EL1) {\r | |
491 | Val = TT_PXN_MASK | TT_UXN_MASK;\r | |
492 | } else {\r | |
493 | Val = TT_XN_MASK;\r | |
494 | }\r | |
495 | \r | |
496 | return SetMemoryRegionAttribute (\r | |
497 | BaseAddress,\r | |
498 | Length,\r | |
499 | Val,\r | |
500 | ~TT_ADDRESS_MASK_BLOCK_ENTRY);\r | |
501 | }\r | |
502 | \r | |
503 | RETURN_STATUS\r | |
504 | ArmClearMemoryRegionNoExec (\r | |
505 | IN EFI_PHYSICAL_ADDRESS BaseAddress,\r | |
506 | IN UINT64 Length\r | |
507 | )\r | |
508 | {\r | |
509 | UINT64 Mask;\r | |
510 | \r | |
511 | // XN maps to UXN in the EL1&0 translation regime\r | |
512 | Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);\r | |
513 | \r | |
514 | return SetMemoryRegionAttribute (\r | |
515 | BaseAddress,\r | |
516 | Length,\r | |
517 | 0,\r | |
518 | Mask);\r | |
519 | }\r | |
520 | \r | |
521 | RETURN_STATUS\r | |
522 | ArmSetMemoryRegionReadOnly (\r | |
523 | IN EFI_PHYSICAL_ADDRESS BaseAddress,\r | |
524 | IN UINT64 Length\r | |
525 | )\r | |
526 | {\r | |
527 | return SetMemoryRegionAttribute (\r | |
528 | BaseAddress,\r | |
529 | Length,\r | |
530 | TT_AP_RO_RO,\r | |
531 | ~TT_ADDRESS_MASK_BLOCK_ENTRY);\r | |
532 | }\r | |
533 | \r | |
534 | RETURN_STATUS\r | |
535 | ArmClearMemoryRegionReadOnly (\r | |
536 | IN EFI_PHYSICAL_ADDRESS BaseAddress,\r | |
537 | IN UINT64 Length\r | |
538 | )\r | |
539 | {\r | |
540 | return SetMemoryRegionAttribute (\r | |
541 | BaseAddress,\r | |
542 | Length,\r | |
543 | TT_AP_RW_RW,\r | |
544 | ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));\r | |
545 | }\r | |
546 | \r | |
// Build the stage-1 translation tables from MemoryTable, program TCR/MAIR/
// TTBR0 for the current exception level (EL1 or EL2), and enable the MMU
// and caches.
//
//   MemoryTable           NULL-length-terminated array of regions to map
//   TranslationTableBase  OUT (optional): address of the root table
//   TranslationTableSize  OUT (optional): size of the root table in bytes
//
// Returns RETURN_INVALID_PARAMETER, RETURN_UNSUPPORTED (address space too
// large or running at EL3), RETURN_OUT_OF_RESOURCES, or RETURN_SUCCESS.
RETURN_STATUS
EFIAPI
ArmConfigureMmu (
  IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable,
  OUT VOID **TranslationTableBase OPTIONAL,
  OUT UINTN *TranslationTableSize OPTIONAL
  )
{
  VOID* TranslationTable;
  VOID* TranslationTableBuffer;   // raw pool buffer when sub-page aligned allocation is used
  UINT32 TranslationTableAttribute;
  UINT64 MaxAddress;
  UINTN T0SZ;
  UINTN RootTableEntryCount;
  UINTN RootTableEntrySize;
  UINT64 TCR;
  RETURN_STATUS Status;

  if(MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return RETURN_INVALID_PARAMETER;
  }

  // Cover the entire GCD memory space
  MaxAddress = (1UL << PcdGet8 (PcdPrePiCpuMemorySize)) - 1;

  // Lookup the Table Level to get the information
  LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);

  //
  // Set TCR that allows us to retrieve T0SZ in the subsequent functions
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    //Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return RETURN_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Bigger than 48-bit memory space are not supported
      return RETURN_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return RETURN_UNSUPPORTED;
  }

  //
  // Translation table walks are always cache coherent on ARMv8-A, so cache
  // maintenance on page tables is never needed. Since there is a risk of
  // loss of coherency when using mismatched attributes, and given that memory
  // is mapped cacheable except for extraordinary cases (such as non-coherent
  // DMA), have the page table walker perform cached accesses as well, and
  // assert below that that matches the attributes we use for CPU accesses to
  // the region.
  //
  TCR |= TCR_SH_INNER_SHAREABLE |
         TCR_RGN_OUTER_WRITE_BACK_ALLOC |
         TCR_RGN_INNER_WRITE_BACK_ALLOC;

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate pages for translation table. Pool allocations are 8 byte aligned,
  // but we may require a higher alignment based on the size of the root table.
  RootTableEntrySize = RootTableEntryCount * sizeof(UINT64);
  if (RootTableEntrySize < EFI_PAGE_SIZE / 2) {
    // Over-allocate so a naturally aligned root table can be carved out of
    // the 8-byte-aligned pool buffer.
    TranslationTableBuffer = AllocatePool (2 * RootTableEntrySize - 8);
    //
    // Naturally align the root table. Preserves possible NULL value
    //
    TranslationTable = (VOID *)((UINTN)(TranslationTableBuffer - 1) | (RootTableEntrySize - 1)) + 1;
  } else {
    TranslationTable = AllocatePages (1);
    TranslationTableBuffer = NULL;
  }
  if (TranslationTable == NULL) {
    return RETURN_OUT_OF_RESOURCES;
  }
  // We set TTBR0 just after allocating the table to retrieve its location from the subsequent
  // functions without needing to pass this value across the functions. The MMU is only enabled
  // after the translation tables are populated.
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntrySize;
  }

  ZeroMem (TranslationTable, RootTableEntrySize);

  // Disable MMU and caches. ArmDisableMmu() also invalidates the TLBs
  ArmDisableMmu ();
  ArmDisableDataCache ();
  ArmDisableInstructionCache ();

  // Make sure nothing sneaked into the cache
  ArmCleanInvalidateDataCache ();
  ArmInvalidateInstructionCache ();

  TranslationTableAttribute = TT_ATTR_INDX_INVALID;
  while (MemoryTable->Length != 0) {

    DEBUG_CODE_BEGIN ();
    // Find the memory attribute for the Translation Table; used below to
    // assert that the tables live in write-back memory, matching the
    // cacheable walks configured in TCR.
    if ((UINTN)TranslationTable >= MemoryTable->PhysicalBase &&
        (UINTN)TranslationTable + RootTableEntrySize <= MemoryTable->PhysicalBase +
                                                        MemoryTable->Length) {
      TranslationTableAttribute = MemoryTable->Attributes;
    }
    DEBUG_CODE_END ();

    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (RETURN_ERROR (Status)) {
      goto FREE_TRANSLATION_TABLE;
    }
    MemoryTable++;
  }

  ASSERT (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK ||
          TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK);

  ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |                      // mapped to EFI_MEMORY_UC
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) | // mapped to EFI_MEMORY_WC
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) | // mapped to EFI_MEMORY_WT
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK));       // mapped to EFI_MEMORY_WB

  ArmDisableAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return RETURN_SUCCESS;

FREE_TRANSLATION_TABLE:
  // Free whichever allocation path was taken above
  if (TranslationTableBuffer != NULL) {
    FreePool (TranslationTableBuffer);
  } else {
    FreePages (TranslationTable, 1);
  }
  return Status;
}
728 | \r | |
729 | RETURN_STATUS\r | |
730 | EFIAPI\r | |
731 | ArmMmuBaseLibConstructor (\r | |
732 | VOID\r | |
733 | )\r | |
734 | {\r | |
735 | extern UINT32 ArmReplaceLiveTranslationEntrySize;\r | |
736 | \r | |
737 | //\r | |
738 | // The ArmReplaceLiveTranslationEntry () helper function may be invoked\r | |
739 | // with the MMU off so we have to ensure that it gets cleaned to the PoC\r | |
740 | //\r | |
741 | WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry,\r | |
742 | ArmReplaceLiveTranslationEntrySize);\r | |
743 | \r | |
744 | return RETURN_SUCCESS;\r | |
745 | }\r |