/** @file
*  File managing the MMU for ARMv7 architecture
*
*  Copyright (c) 2011-2016, ARM Limited. All rights reserved.
*
*  SPDX-License-Identifier: BSD-2-Clause-Patent
*
**/

#include <Uefi.h>
#include <Chipset/ArmV7.h>
#include <Library/BaseMemoryLib.h>
#include <Library/CacheMaintenanceLib.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/ArmLib.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>
#include <Library/PcdLib.h>

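//
// ID_MMFR0 fields used below to determine how many levels of shareability the
// implementation supports and whether they are backed by hardware coherency.
//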
#define ID_MMFR0_SHARELVL_SHIFT  12
#define ID_MMFR0_SHARELVL_MASK   0xf
#define ID_MMFR0_SHARELVL_ONE    0
#define ID_MMFR0_SHARELVL_TWO    1

#define ID_MMFR0_INNERSHR_SHIFT  28
#define ID_MMFR0_INNERSHR_MASK   0xf
#define ID_MMFR0_OUTERSHR_SHIFT  8
#define ID_MMFR0_OUTERSHR_MASK   0xf

#define ID_MMFR0_SHR_IMP_UNCACHED     0
#define ID_MMFR0_SHR_IMP_HW_COHERENT  1
#define ID_MMFR0_SHR_IGNORED          0xf

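/**
  Read the ID_MMFR0 (Memory Model Feature Register 0) system register.

  @return  The value of ID_MMFR0.
**/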
UINTN
EFIAPI
ArmReadIdMmfr0 (
  VOID
  );

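/**
  Return whether the CPU implements the ARM Multiprocessing Extensions.

  @retval TRUE   The MP Extensions are implemented.
  @retval FALSE  The MP Extensions are not implemented.
**/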
BOOLEAN
EFIAPI
ArmHasMpExtensions (
  VOID
  );

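/**
  Decide whether normal memory should be mapped as non-shareable.

  Returns TRUE if the platform overrides the default via
  PcdNormalMemoryNonshareableOverride, or if ID_MMFR0 indicates that the
  innermost implemented level of shareability is not hardware coherent.

  @retval TRUE   Map normal memory as non-shareable.
  @retval FALSE  Map normal memory as shareable.
**/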
STATIC
BOOLEAN
PreferNonshareableMemory (
  VOID
  )
{
  UINTN  Mmfr;
  UINTN  Val;

  if (FeaturePcdGet (PcdNormalMemoryNonshareableOverride)) {
    return TRUE;
  }

  //
  // Check whether the innermost level of shareability (the level we will use
  // by default to map normal memory) is implemented with hardware coherency
  // support. Otherwise, revert to mapping as non-shareable.
  //
  Mmfr = ArmReadIdMmfr0 ();
  switch ((Mmfr >> ID_MMFR0_SHARELVL_SHIFT) & ID_MMFR0_SHARELVL_MASK) {
    case ID_MMFR0_SHARELVL_ONE:
      // one level of shareability
      Val = (Mmfr >> ID_MMFR0_OUTERSHR_SHIFT) & ID_MMFR0_OUTERSHR_MASK;
      break;
    case ID_MMFR0_SHARELVL_TWO:
      // two levels of shareability
      Val = (Mmfr >> ID_MMFR0_INNERSHR_SHIFT) & ID_MMFR0_INNERSHR_MASK;
      break;
    default:
      // unexpected value -> shareable is the safe option
      ASSERT (FALSE);
      return FALSE;
  }

  return Val != ID_MMFR0_SHR_IMP_HW_COHERENT;
}

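/**
  Populate a Level 2 (page) translation table for part of a 1 MB section.

  If the section entry is empty, a new Level 2 table is allocated and attached
  to it. If the section entry already maps a section, that section is first
  converted into an equivalent Level 2 table before the new page entries are
  written.

  @param[in] SectionEntry  Pointer to the Level 1 section entry to populate.
  @param[in] PhysicalBase  Physical base address of the region to map.
  @param[in] RemainLength  Length, in bytes, of the region to map.
  @param[in] Attributes    Memory attributes of the region.
**/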
STATIC
VOID
PopulateLevel2PageTable (
  IN UINT32                         *SectionEntry,
  IN UINT32                         PhysicalBase,
  IN UINT32                         RemainLength,
  IN ARM_MEMORY_REGION_ATTRIBUTES   Attributes
  )
{
  UINT32  *PageEntry;
  UINT32  Pages;
  UINT32  Index;
  UINT32  PageAttributes;
  UINT32  SectionDescriptor;
  UINT32  TranslationTable;
  UINT32  BaseSectionAddress;
  UINT32  FirstPageOffset;

  switch (Attributes) {
    case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
      PageAttributes = TT_DESCRIPTOR_PAGE_WRITE_BACK;
      break;
    case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
      PageAttributes  = TT_DESCRIPTOR_PAGE_WRITE_BACK;
      PageAttributes &= ~TT_DESCRIPTOR_PAGE_S_SHARED;
      break;
    case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
      PageAttributes = TT_DESCRIPTOR_PAGE_WRITE_THROUGH;
      break;
    case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
      PageAttributes = TT_DESCRIPTOR_PAGE_DEVICE;
      break;
    case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
      PageAttributes = TT_DESCRIPTOR_PAGE_UNCACHED;
      break;
    default:
      PageAttributes = TT_DESCRIPTOR_PAGE_UNCACHED;
      break;
  }

  if (PreferNonshareableMemory ()) {
    PageAttributes &= ~TT_DESCRIPTOR_PAGE_S_SHARED;
  }

  // Check whether the Section Entry has already been populated. Otherwise,
  // attach a Level 2 Translation Table to it.
  if (*SectionEntry != 0) {
    // The entry must be a page table; otherwise the memory map contains overlapping regions.
    if (TT_DESCRIPTOR_SECTION_TYPE_IS_PAGE_TABLE (*SectionEntry)) {
      TranslationTable = *SectionEntry & TT_DESCRIPTOR_SECTION_PAGETABLE_ADDRESS_MASK;
    } else if ((*SectionEntry & TT_DESCRIPTOR_SECTION_TYPE_MASK) == TT_DESCRIPTOR_SECTION_TYPE_SECTION) {
      // Case where a virtual memory map descriptor overlapped a section entry

      // Allocate a Level2 Page Table for this Section
      TranslationTable = (UINTN)AllocateAlignedPages (
                                  EFI_SIZE_TO_PAGES (TRANSLATION_TABLE_PAGE_SIZE),
                                  TRANSLATION_TABLE_PAGE_ALIGNMENT
                                  );

      // Translate the Section Descriptor into Page Descriptor
      SectionDescriptor = TT_DESCRIPTOR_PAGE_TYPE_PAGE | ConvertSectionAttributesToPageAttributes (*SectionEntry, FALSE);

      BaseSectionAddress = TT_DESCRIPTOR_SECTION_BASE_ADDRESS (*SectionEntry);

      //
      // Make sure we are not inadvertently hitting in the caches
      // when populating the page tables
      //
      InvalidateDataCacheRange (
        (VOID *)TranslationTable,
        TRANSLATION_TABLE_PAGE_SIZE
        );

      // Populate the new Level2 Page Table for the section
      PageEntry = (UINT32 *)TranslationTable;
      for (Index = 0; Index < TRANSLATION_TABLE_PAGE_COUNT; Index++) {
        PageEntry[Index] = TT_DESCRIPTOR_PAGE_BASE_ADDRESS (BaseSectionAddress + (Index << 12)) | SectionDescriptor;
      }

      // Overwrite the section entry to point to the new Level2 Translation Table
      *SectionEntry = (TranslationTable & TT_DESCRIPTOR_SECTION_PAGETABLE_ADDRESS_MASK) |
                      (IS_ARM_MEMORY_REGION_ATTRIBUTES_SECURE (Attributes) ? (1 << 3) : 0) |
                      TT_DESCRIPTOR_SECTION_TYPE_PAGE_TABLE;
    } else {
      // We do not support the other section type (16MB Section)
      ASSERT (0);
      return;
    }
  } else {
    TranslationTable = (UINTN)AllocateAlignedPages (
                                EFI_SIZE_TO_PAGES (TRANSLATION_TABLE_PAGE_SIZE),
                                TRANSLATION_TABLE_PAGE_ALIGNMENT
                                );
    //
    // Make sure we are not inadvertently hitting in the caches
    // when populating the page tables
    //
    InvalidateDataCacheRange (
      (VOID *)TranslationTable,
      TRANSLATION_TABLE_PAGE_SIZE
      );
    ZeroMem ((VOID *)TranslationTable, TRANSLATION_TABLE_PAGE_SIZE);

    *SectionEntry = (TranslationTable & TT_DESCRIPTOR_SECTION_PAGETABLE_ADDRESS_MASK) |
                    (IS_ARM_MEMORY_REGION_ATTRIBUTES_SECURE (Attributes) ? (1 << 3) : 0) |
                    TT_DESCRIPTOR_SECTION_TYPE_PAGE_TABLE;
  }

  FirstPageOffset = (PhysicalBase & TT_DESCRIPTOR_PAGE_INDEX_MASK) >> TT_DESCRIPTOR_PAGE_BASE_SHIFT;
  PageEntry       = (UINT32 *)TranslationTable + FirstPageOffset;
  Pages           = RemainLength / TT_DESCRIPTOR_PAGE_SIZE;

  ASSERT (FirstPageOffset + Pages <= TRANSLATION_TABLE_PAGE_COUNT);

  for (Index = 0; Index < Pages; Index++) {
    *PageEntry++  = TT_DESCRIPTOR_PAGE_BASE_ADDRESS (PhysicalBase) | PageAttributes;
    PhysicalBase += TT_DESCRIPTOR_PAGE_SIZE;
  }

  //
  // Invalidate again to ensure that any line fetches that may have occurred
  // [speculatively] since the previous invalidate are evicted again.
  //
  ArmDataMemoryBarrier ();
  InvalidateDataCacheRange (
    (UINT32 *)TranslationTable + FirstPageOffset,
    RemainLength / TT_DESCRIPTOR_PAGE_SIZE * sizeof (*PageEntry)
    );
}

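/**
  Fill the translation table with entries describing one memory region.

  Regions starting at or above 4 GB are ignored, and regions extending beyond
  4 GB are truncated, since the short-descriptor format only covers a 32-bit
  address space. Section-aligned, section-sized chunks are mapped with 1 MB
  section entries; everything else is mapped through Level 2 page tables.

  @param[in] TranslationTable  Pointer to the Level 1 translation table.
  @param[in] MemoryRegion      Memory region descriptor to map.
**/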
STATIC
VOID
FillTranslationTable (
  IN UINT32                         *TranslationTable,
  IN ARM_MEMORY_REGION_DESCRIPTOR   *MemoryRegion
  )
{
  UINT32  *SectionEntry;
  UINT32  Attributes;
  UINT32  PhysicalBase;
  UINT64  RemainLength;
  UINT32  PageMapLength;

  ASSERT (MemoryRegion->Length > 0);

  if (MemoryRegion->PhysicalBase >= SIZE_4GB) {
    return;
  }

  PhysicalBase = (UINT32)MemoryRegion->PhysicalBase;
  RemainLength = MIN (MemoryRegion->Length, SIZE_4GB - PhysicalBase);

  switch (MemoryRegion->Attributes) {
    case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
      Attributes = TT_DESCRIPTOR_SECTION_WRITE_BACK (0);
      break;
    case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
      Attributes  = TT_DESCRIPTOR_SECTION_WRITE_BACK (0);
      Attributes &= ~TT_DESCRIPTOR_SECTION_S_SHARED;
      break;
    case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
      Attributes = TT_DESCRIPTOR_SECTION_WRITE_THROUGH (0);
      break;
    case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
      Attributes = TT_DESCRIPTOR_SECTION_DEVICE (0);
      break;
    case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
      Attributes = TT_DESCRIPTOR_SECTION_UNCACHED (0);
      break;
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
      Attributes = TT_DESCRIPTOR_SECTION_WRITE_BACK (1);
      break;
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
      Attributes  = TT_DESCRIPTOR_SECTION_WRITE_BACK (1);
      Attributes &= ~TT_DESCRIPTOR_SECTION_S_SHARED;
      break;
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
      Attributes = TT_DESCRIPTOR_SECTION_WRITE_THROUGH (1);
      break;
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
      Attributes = TT_DESCRIPTOR_SECTION_DEVICE (1);
      break;
    case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
      Attributes = TT_DESCRIPTOR_SECTION_UNCACHED (1);
      break;
    default:
      Attributes = TT_DESCRIPTOR_SECTION_UNCACHED (0);
      break;
  }

  if (PreferNonshareableMemory ()) {
    Attributes &= ~TT_DESCRIPTOR_SECTION_S_SHARED;
  }

  // Get the first section entry for this mapping
  SectionEntry = TRANSLATION_TABLE_ENTRY_FOR_VIRTUAL_ADDRESS (TranslationTable, MemoryRegion->VirtualBase);

  while (RemainLength != 0) {
    if ((PhysicalBase % TT_DESCRIPTOR_SECTION_SIZE == 0) &&
        (RemainLength >= TT_DESCRIPTOR_SECTION_SIZE))
    {
      // Case: Physical address aligned on the Section Size (1MB) && the length
      // is greater than or equal to the Section Size
      *SectionEntry = TT_DESCRIPTOR_SECTION_BASE_ADDRESS (PhysicalBase) | Attributes;

      //
      // Issue a DMB to ensure that the page table entry update made it to
      // memory before we issue the invalidate, otherwise, a subsequent
      // speculative fetch could observe the old value.
      //
      ArmDataMemoryBarrier ();
      ArmInvalidateDataCacheEntryByMVA ((UINTN)SectionEntry++);

      PhysicalBase += TT_DESCRIPTOR_SECTION_SIZE;
      RemainLength -= TT_DESCRIPTOR_SECTION_SIZE;
    } else {
      PageMapLength = MIN (
                        (UINT32)RemainLength,
                        TT_DESCRIPTOR_SECTION_SIZE -
                        (PhysicalBase % TT_DESCRIPTOR_SECTION_SIZE)
                        );

      // Case: Physical address aligned on the Section Size (1MB) && the length
      //       does not fill a section
      // Case: Physical address NOT aligned on the Section Size (1MB)
      PopulateLevel2PageTable (
        SectionEntry,
        PhysicalBase,
        PageMapLength,
        MemoryRegion->Attributes
        );

      //
      // Issue a DMB to ensure that the page table entry update made it to
      // memory before we issue the invalidate, otherwise, a subsequent
      // speculative fetch could observe the old value.
      //
      ArmDataMemoryBarrier ();
      ArmInvalidateDataCacheEntryByMVA ((UINTN)SectionEntry++);

      // If it is the last entry
      if (RemainLength < TT_DESCRIPTOR_SECTION_SIZE) {
        break;
      }

      PhysicalBase += PageMapLength;
      RemainLength -= PageMapLength;
    }
  }
}

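/**
  Allocate and populate the translation table, then configure the TTBR0,
  TTBCR and domain access control registers and enable the caches and MMU.

  @param[in]  MemoryTable           Array of memory region descriptors,
                                    terminated by an entry with Length == 0.
  @param[out] TranslationTableBase  If not NULL, receives the base address of
                                    the allocated translation table.
  @param[out] TranslationTableSize  If not NULL, receives the size, in bytes,
                                    of the allocated translation table.

  @retval RETURN_SUCCESS           The MMU was configured and enabled.
  @retval RETURN_OUT_OF_RESOURCES  The translation table could not be allocated.
**/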
RETURN_STATUS
EFIAPI
ArmConfigureMmu (
  IN  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryTable,
  OUT VOID                          **TranslationTableBase OPTIONAL,
  OUT UINTN                         *TranslationTableSize OPTIONAL
  )
{
  VOID    *TranslationTable;
  UINT32  TTBRAttributes;

  TranslationTable = AllocateAlignedPages (
                       EFI_SIZE_TO_PAGES (TRANSLATION_TABLE_SECTION_SIZE),
                       TRANSLATION_TABLE_SECTION_ALIGNMENT
                       );
  if (TranslationTable == NULL) {
    return RETURN_OUT_OF_RESOURCES;
  }

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = TRANSLATION_TABLE_SECTION_SIZE;
  }

  //
  // Make sure we are not inadvertently hitting in the caches
  // when populating the page tables
  //
  InvalidateDataCacheRange (TranslationTable, TRANSLATION_TABLE_SECTION_SIZE);
  ZeroMem (TranslationTable, TRANSLATION_TABLE_SECTION_SIZE);

  while (MemoryTable->Length != 0) {
    FillTranslationTable (TranslationTable, MemoryTable);
    MemoryTable++;
  }

  TTBRAttributes = ArmHasMpExtensions () ? TTBR_MP_WRITE_BACK_ALLOC
                                         : TTBR_WRITE_BACK_ALLOC;
  if (TTBRAttributes & TTBR_SHAREABLE) {
    if (PreferNonshareableMemory ()) {
      TTBRAttributes ^= TTBR_SHAREABLE;
    } else {
      //
      // Unlike the S bit in the short descriptors, which implies inner shareable
      // on an implementation that supports two levels, the meaning of the S bit
      // in the TTBR depends on the NOS bit, which defaults to Outer Shareable.
      // However, we should only set this bit after we have confirmed that the
      // implementation supports multiple levels, or else the NOS bit is UNK/SBZP
      //
      if (((ArmReadIdMmfr0 () >> ID_MMFR0_SHARELVL_SHIFT) & ID_MMFR0_SHARELVL_MASK) != 0) {
        TTBRAttributes |= TTBR_NOT_OUTER_SHAREABLE;
      }
    }
  }

  ArmSetTTBR0 ((VOID *)((UINTN)TranslationTable | TTBRAttributes));

  //
  // The TTBCR register value is undefined at reset in the Non-Secure world.
  // Writing 0 has the effect of:
  //   Clearing EAE: Use short descriptors, as mandated by specification.
  //   Clearing PD0 and PD1: Translation Table Walk Disable is off.
  //   Clearing N: Perform all translation table walks through TTBR0.
  //               (0 is the default reset value in systems not implementing
  //               the Security Extensions.)
  //
  ArmSetTTBCR (0);

  ArmSetDomainAccessControl (
    DOMAIN_ACCESS_CONTROL_NONE (15) |
    DOMAIN_ACCESS_CONTROL_NONE (14) |
    DOMAIN_ACCESS_CONTROL_NONE (13) |
    DOMAIN_ACCESS_CONTROL_NONE (12) |
    DOMAIN_ACCESS_CONTROL_NONE (11) |
    DOMAIN_ACCESS_CONTROL_NONE (10) |
    DOMAIN_ACCESS_CONTROL_NONE (9)  |
    DOMAIN_ACCESS_CONTROL_NONE (8)  |
    DOMAIN_ACCESS_CONTROL_NONE (7)  |
    DOMAIN_ACCESS_CONTROL_NONE (6)  |
    DOMAIN_ACCESS_CONTROL_NONE (5)  |
    DOMAIN_ACCESS_CONTROL_NONE (4)  |
    DOMAIN_ACCESS_CONTROL_NONE (3)  |
    DOMAIN_ACCESS_CONTROL_NONE (2)  |
    DOMAIN_ACCESS_CONTROL_NONE (1)  |
    DOMAIN_ACCESS_CONTROL_CLIENT (0)
    );

  ArmEnableInstructionCache ();
  ArmEnableDataCache ();
  ArmEnableMmu ();
  return RETURN_SUCCESS;
}