]> git.proxmox.com Git - mirror_edk2.git/blob - UefiCpuPkg/CpuMpPei/CpuPaging.c
UefiCpuPkg: Correct some typos.
[mirror_edk2.git] / UefiCpuPkg / CpuMpPei / CpuPaging.c
1 /** @file
2 Basic paging support for the CPU to enable Stack Guard.
3
4 Copyright (c) 2018 - 2019, Intel Corporation. All rights reserved.<BR>
5
6 SPDX-License-Identifier: BSD-2-Clause-Patent
7
8 **/
9
10 #include <Register/Intel/Cpuid.h>
11 #include <Register/Intel/Msr.h>
12 #include <Library/MemoryAllocationLib.h>
13 #include <Library/CpuLib.h>
14 #include <Library/BaseLib.h>
15 #include <Guid/MigratedFvInfo.h>
16
17 #include "CpuMpPei.h"
18
//
// IA32/X64 architectural page table entry bits.
//
#define IA32_PG_P                   BIT0    // Present
#define IA32_PG_RW                  BIT1    // Read/Write
#define IA32_PG_U                   BIT2    // User/Supervisor
#define IA32_PG_A                   BIT5    // Accessed
#define IA32_PG_D                   BIT6    // Dirty
#define IA32_PG_PS                  BIT7    // Page Size (large page leaf)
#define IA32_PG_NX                  BIT63   // No-Execute

#define PAGE_ATTRIBUTE_BITS         (IA32_PG_RW | IA32_PG_P)
//
// Attribute bits propagated from a parent (large-page) entry to the child
// entries created when the page is split.
//
#define PAGE_PROPAGATE_BITS         (IA32_PG_D | IA32_PG_A | IA32_PG_NX | IA32_PG_U |\
                                     PAGE_ATTRIBUTE_BITS)
//
// Misspelled legacy name retained for backward compatibility with existing
// users; new code should use PAGE_PROPAGATE_BITS.
//
#define PAGE_PROGATE_BITS           PAGE_PROPAGATE_BITS

//
// 9-bit table index mask and per-level page-frame address masks
// (bits 51:12 max physical address, as used by 4-level/PAE paging).
//
#define PAGING_PAE_INDEX_MASK       0x1FF
#define PAGING_4K_ADDRESS_MASK_64   0x000FFFFFFFFFF000ull
#define PAGING_2M_ADDRESS_MASK_64   0x000FFFFFFFE00000ull
#define PAGING_1G_ADDRESS_MASK_64   0x000FFFFFC0000000ull
#define PAGING_512G_ADDRESS_MASK_64 0x000FFF8000000000ull
36
//
// Page granularity levels, smallest to largest. The numeric value of each
// level is also its row index in mPageAttributeTable and its depth when
// walking the page table top-down (Page512G = PML4 level, Page4K = PTE).
//
typedef enum {
  PageNone = 0,
  PageMin = 1,
  Page4K = PageMin,
  Page2M = 2,
  Page1G = 3,
  Page512G = 4,
  PageMax = Page512G
} PAGE_ATTRIBUTE;
46
//
// Describes one paging level: the page size it maps, the mask extracting
// the page-frame address from an entry, and the linear-address bit field
// used as the table index at that level.
//
typedef struct {
  PAGE_ATTRIBUTE   Attribute;          // Level this row describes
  UINT64           Length;             // Page size in bytes at this level
  UINT64           AddressMask;        // Page-frame address mask for entries
  UINTN            AddressBitOffset;   // Low bit of the index field in a linear address
  UINTN            AddressBitLength;   // Width of the index field (9 at every level)
} PAGE_ATTRIBUTE_TABLE;

//
// Indexed by PAGE_ATTRIBUTE; row 0 (PageNone) is a placeholder.
//
PAGE_ATTRIBUTE_TABLE mPageAttributeTable[] = {
  {PageNone, 0, 0, 0, 0},
  {Page4K, SIZE_4KB, PAGING_4K_ADDRESS_MASK_64, 12, 9},
  {Page2M, SIZE_2MB, PAGING_2M_ADDRESS_MASK_64, 21, 9},
  {Page1G, SIZE_1GB, PAGING_1G_ADDRESS_MASK_64, 30, 9},
  {Page512G, SIZE_512GB, PAGING_512G_ADDRESS_MASK_64, 39, 9},
};
62
//
// Notification descriptor: run MemoryDiscoveredPpiNotifyCallback once
// permanent memory is discovered (gEfiPeiMemoryDiscoveredPpiGuid installed).
//
EFI_PEI_NOTIFY_DESCRIPTOR mPostMemNotifyList[] = {
  {
    (EFI_PEI_PPI_DESCRIPTOR_NOTIFY_CALLBACK | EFI_PEI_PPI_DESCRIPTOR_TERMINATE_LIST),
    &gEfiPeiMemoryDiscoveredPpiGuid,
    MemoryDiscoveredPpiNotifyCallback
  }
};
70
71 /**
72 The function will check if IA32 PAE is supported.
73
74 @retval TRUE IA32 PAE is supported.
75 @retval FALSE IA32 PAE is not supported.
76
77 **/
78 BOOLEAN
79 IsIa32PaeSupported (
80 VOID
81 )
82 {
83 UINT32 RegEax;
84 CPUID_VERSION_INFO_EDX RegEdx;
85
86 AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);
87 if (RegEax >= CPUID_VERSION_INFO) {
88 AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
89 if (RegEdx.Bits.PAE != 0) {
90 return TRUE;
91 }
92 }
93
94 return FALSE;
95 }
96
97 /**
98 This API provides a way to allocate memory for page table.
99
100 @param Pages The number of 4 KB pages to allocate.
101
102 @return A pointer to the allocated buffer or NULL if allocation fails.
103
104 **/
105 VOID *
106 AllocatePageTableMemory (
107 IN UINTN Pages
108 )
109 {
110 VOID *Address;
111
112 Address = AllocatePages(Pages);
113 if (Address != NULL) {
114 ZeroMem(Address, EFI_PAGES_TO_SIZE (Pages));
115 }
116
117 return Address;
118 }
119
120 /**
121 Get the address width supported by current processor.
122
123 @retval 32 If processor is in 32-bit mode.
124 @retval 36-48 If processor is in 64-bit mode.
125
126 **/
127 UINTN
128 GetPhysicalAddressWidth (
129 VOID
130 )
131 {
132 UINT32 RegEax;
133
134 if (sizeof(UINTN) == 4) {
135 return 32;
136 }
137
138 AsmCpuid(CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
139 if (RegEax >= CPUID_VIR_PHY_ADDRESS_SIZE) {
140 AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &RegEax, NULL, NULL, NULL);
141 RegEax &= 0xFF;
142 if (RegEax > 48) {
143 return 48;
144 }
145
146 return (UINTN)RegEax;
147 }
148
149 return 36;
150 }
151
152 /**
153 Get the type of top level page table.
154
155 @retval Page512G PML4 paging.
156 @retval Page1G PAE paging.
157
158 **/
159 PAGE_ATTRIBUTE
160 GetPageTableTopLevelType (
161 VOID
162 )
163 {
164 MSR_IA32_EFER_REGISTER MsrEfer;
165
166 MsrEfer.Uint64 = AsmReadMsr64 (MSR_CORE_IA32_EFER);
167
168 return (MsrEfer.Bits.LMA == 1) ? Page512G : Page1G;
169 }
170
171 /**
172 Return page table entry matching the address.
173
174 @param[in] Address The address to be checked.
175 @param[out] PageAttributes The page attribute of the page entry.
176
177 @return The page entry.
178 **/
/**
  Return page table entry matching the address.

  Walks the page-table hierarchy from CR3, top level (PML4 or PDPT) down,
  until it reaches an entry that maps a page directly (PS bit set, or the
  4K leaf level) or finds no mapping.

  @param[in]   Address        The address to be checked.
  @param[out]  PageAttribute  The page attribute (size level) of the page
                              entry; PageNone if the address is not mapped.

  @return The page entry, or NULL if Address has no mapping.
**/
VOID *
GetPageTableEntry (
  IN  PHYSICAL_ADDRESS  Address,
  OUT PAGE_ATTRIBUTE    *PageAttribute
  )
{
  INTN    Level;
  UINTN   Index;
  UINT64  *PageTable;
  UINT64  AddressEncMask;

  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask);
  PageTable = (UINT64 *)(UINTN)(AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64);
  for (Level = (INTN)GetPageTableTopLevelType (); Level > 0; --Level) {
    //
    // Extract this level's 9-bit index from the linear address.
    //
    Index = (UINTN)RShiftU64 (Address, mPageAttributeTable[Level].AddressBitOffset);
    Index &= PAGING_PAE_INDEX_MASK;

    //
    // No mapping?
    //
    if (PageTable[Index] == 0) {
      *PageAttribute = PageNone;
      return NULL;
    }

    //
    // Page memory? (large page via PS bit, or a 4K leaf entry)
    //
    if ((PageTable[Index] & IA32_PG_PS) != 0 || Level == PageMin) {
      *PageAttribute = (PAGE_ATTRIBUTE)Level;
      return &PageTable[Index];
    }

    //
    // Page directory or table: strip attribute and memory-encryption bits
    // to get the physical address of the next-level table.
    //
    PageTable = (UINT64 *)(UINTN)(PageTable[Index] &
                                  ~AddressEncMask &
                                  PAGING_4K_ADDRESS_MASK_64);
  }

  *PageAttribute = PageNone;
  return NULL;
}
223
224 /**
225 This function splits one page entry to smaller page entries.
226
227 @param[in] PageEntry The page entry to be splitted.
228 @param[in] PageAttribute The page attribute of the page entry.
229 @param[in] SplitAttribute How to split the page entry.
230 @param[in] Recursively Do the split recursively or not.
231
232 @retval RETURN_SUCCESS The page entry is splitted.
233 @retval RETURN_INVALID_PARAMETER If target page attribute is invalid
234 @retval RETURN_OUT_OF_RESOURCES No resource to split page entry.
235 **/
/**
  This function splits one page entry to smaller page entries.

  The entry is always split exactly one level down (e.g. 1G -> 512 x 2M);
  when Recursively is TRUE the new child entries are themselves split until
  SplitAttribute granularity is reached.

  @param[in]  PageEntry       The page entry to be splitted.
  @param[in]  PageAttribute   The page attribute of the page entry.
  @param[in]  SplitAttribute  How to split the page entry.
  @param[in]  Recursively     Do the split recursively or not.

  @retval RETURN_SUCCESS            The page entry is splitted.
  @retval RETURN_INVALID_PARAMETER  If target page attribute is invalid
  @retval RETURN_OUT_OF_RESOURCES   No resource to split page entry.
**/
RETURN_STATUS
SplitPage (
  IN UINT64          *PageEntry,
  IN PAGE_ATTRIBUTE  PageAttribute,
  IN PAGE_ATTRIBUTE  SplitAttribute,
  IN BOOLEAN         Recursively
  )
{
  UINT64          BaseAddress;
  UINT64          *NewPageEntry;
  UINTN           Index;
  UINT64          AddressEncMask;
  PAGE_ATTRIBUTE  SplitTo;

  //
  // Target granularity must be valid and strictly smaller than current.
  //
  if (SplitAttribute == PageNone || SplitAttribute >= PageAttribute) {
    ASSERT (SplitAttribute != PageNone);
    ASSERT (SplitAttribute < PageAttribute);
    return RETURN_INVALID_PARAMETER;
  }

  NewPageEntry = AllocatePageTableMemory (1);
  if (NewPageEntry == NULL) {
    ASSERT (NewPageEntry != NULL);
    return RETURN_OUT_OF_RESOURCES;
  }

  //
  // One level down each step to achieve more compact page table.
  //
  SplitTo = PageAttribute - 1;
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
                   mPageAttributeTable[SplitTo].AddressMask;
  BaseAddress = *PageEntry &
                ~PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
                mPageAttributeTable[PageAttribute].AddressMask;
  for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {
    //
    // Child entries inherit the parent's propagated attribute bits.
    //
    NewPageEntry[Index] = BaseAddress | AddressEncMask |
                          ((*PageEntry) & PAGE_PROGATE_BITS);

    //
    // PS marks a large-page leaf; a 4K (PageMin) entry must not set it.
    //
    if (SplitTo != PageMin) {
      NewPageEntry[Index] |= IA32_PG_PS;
    }

    if (Recursively && SplitTo > SplitAttribute) {
      SplitPage (&NewPageEntry[Index], SplitTo, SplitAttribute, Recursively);
    }

    BaseAddress += mPageAttributeTable[SplitTo].Length;
  }

  //
  // Re-point the original entry at the newly created child table.
  //
  (*PageEntry) = (UINT64)(UINTN)NewPageEntry | AddressEncMask | PAGE_ATTRIBUTE_BITS;

  return RETURN_SUCCESS;
}
290
291 /**
292 This function modifies the page attributes for the memory region specified
293 by BaseAddress and Length from their current attributes to the attributes
294 specified by Attributes.
295
296 Caller should make sure BaseAddress and Length is at page boundary.
297
298 @param[in] BaseAddress Start address of a memory region.
299 @param[in] Length Size in bytes of the memory region.
300 @param[in] Attributes Bit mask of attributes to modify.
301
302 @retval RETURN_SUCCESS The attributes were modified for the memory
303 region.
304 @retval RETURN_INVALID_PARAMETER Length is zero; or,
305 Attributes specified an illegal combination
306 of attributes that cannot be set together; or
307 Addressis not 4KB aligned.
308 @retval RETURN_OUT_OF_RESOURCES There are not enough system resources to modify
309 the attributes.
310 @retval RETURN_UNSUPPORTED Cannot modify the attributes of given memory.
311
312 **/
/**
  This function modifies the page attributes for the memory region specified
  by BaseAddress and Length from their current attributes to the attributes
  specified by Attributes.

  Caller should make sure BaseAddress and Length is at page boundary.

  Note: only the 'present' (IA32_PG_P) bit of Attributes is applied; other
  attribute bits are ignored. This is sufficient for Stack Guard usage.

  @param[in]  BaseAddress  Start address of a memory region.
  @param[in]  Length       Size in bytes of the memory region.
  @param[in]  Attributes   Bit mask of attributes to modify.

  @retval RETURN_SUCCESS            The attributes were modified for the memory
                                    region.
  @retval RETURN_INVALID_PARAMETER  Length is zero; or,
                                    Attributes specified an illegal combination
                                    of attributes that cannot be set together; or
                                    Address is not 4KB aligned.
  @retval RETURN_OUT_OF_RESOURCES   There are not enough system resources to modify
                                    the attributes.
  @retval RETURN_UNSUPPORTED        Cannot modify the attributes of given memory.

**/
RETURN_STATUS
EFIAPI
ConvertMemoryPageAttributes (
  IN  PHYSICAL_ADDRESS  BaseAddress,
  IN  UINT64            Length,
  IN  UINT64            Attributes
  )
{
  UINT64                *PageEntry;
  PAGE_ATTRIBUTE        PageAttribute;
  RETURN_STATUS         Status;
  EFI_PHYSICAL_ADDRESS  MaximumAddress;

  if (Length == 0 ||
      (BaseAddress & (SIZE_4KB - 1)) != 0 ||
      (Length & (SIZE_4KB - 1)) != 0) {

    ASSERT (Length > 0);
    ASSERT ((BaseAddress & (SIZE_4KB - 1)) == 0);
    ASSERT ((Length & (SIZE_4KB - 1)) == 0);

    return RETURN_INVALID_PARAMETER;
  }

  //
  // Regions extending above 4GB are rejected (the last check also guards
  // against BaseAddress + Length overflow).
  //
  MaximumAddress = (EFI_PHYSICAL_ADDRESS)MAX_UINT32;
  if (BaseAddress > MaximumAddress ||
      Length > MaximumAddress ||
      (BaseAddress > MaximumAddress - (Length - 1))) {
    return RETURN_UNSUPPORTED;
  }

  //
  // Below logic is to check 2M/4K page to make sure we do not waste memory.
  //
  while (Length != 0) {
    PageEntry = GetPageTableEntry (BaseAddress, &PageAttribute);
    if (PageEntry == NULL) {
      return RETURN_UNSUPPORTED;
    }

    if (PageAttribute != Page4K) {
      Status = SplitPage (PageEntry, PageAttribute, Page4K, FALSE);
      if (RETURN_ERROR (Status)) {
        return Status;
      }
      //
      // Do it again until the page is 4K.
      //
      continue;
    }

    //
    // Just take care of 'present' bit for Stack Guard.
    //
    if ((Attributes & IA32_PG_P) != 0) {
      *PageEntry |= (UINT64)IA32_PG_P;
    } else {
      *PageEntry &= ~((UINT64)IA32_PG_P);
    }

    //
    // Convert success, move to next
    //
    BaseAddress += SIZE_4KB;
    Length -= SIZE_4KB;
  }

  return RETURN_SUCCESS;
}
382
383 /**
384 Get maximum size of page memory supported by current processor.
385
386 @param[in] TopLevelType The type of top level page entry.
387
388 @retval Page1G If processor supports 1G page and PML4.
389 @retval Page2M For all other situations.
390
391 **/
392 PAGE_ATTRIBUTE
393 GetMaxMemoryPage (
394 IN PAGE_ATTRIBUTE TopLevelType
395 )
396 {
397 UINT32 RegEax;
398 UINT32 RegEdx;
399
400 if (TopLevelType == Page512G) {
401 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
402 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
403 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
404 if ((RegEdx & BIT26) != 0) {
405 return Page1G;
406 }
407 }
408 }
409
410 return Page2M;
411 }
412
413 /**
414 Create PML4 or PAE page table.
415
416 @return The address of page table.
417
418 **/
/**
  Create PML4 or PAE page table.

  Builds an identity mapping covering the physical address space reported
  by CPUID, splitting top-level entries down to the largest page size the
  processor supports.

  @return The address of page table, or 0 if page table memory could not
          be allocated.

**/
UINTN
CreatePageTable (
  VOID
  )
{
  RETURN_STATUS         Status;
  UINTN                 PhysicalAddressBits;
  UINTN                 NumberOfEntries;
  PAGE_ATTRIBUTE        TopLevelPageAttr;
  UINTN                 PageTable;
  PAGE_ATTRIBUTE        MaxMemoryPage;
  UINTN                 Index;
  UINT64                AddressEncMask;
  UINT64                *PageEntry;
  EFI_PHYSICAL_ADDRESS  PhysicalAddress;

  TopLevelPageAttr = (PAGE_ATTRIBUTE)GetPageTableTopLevelType ();
  PhysicalAddressBits = GetPhysicalAddressWidth ();
  //
  // Top-level entries needed to span the whole physical address space.
  //
  NumberOfEntries = (UINTN)1 << (PhysicalAddressBits -
                                 mPageAttributeTable[TopLevelPageAttr].AddressBitOffset);

  PageTable = (UINTN) AllocatePageTableMemory (1);
  if (PageTable == 0) {
    return 0;
  }

  //
  // Only the address bits of the encryption mask apply to page entries.
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask);
  AddressEncMask &= mPageAttributeTable[TopLevelPageAttr].AddressMask;
  MaxMemoryPage = GetMaxMemoryPage (TopLevelPageAttr);
  PageEntry = (UINT64 *)PageTable;

  PhysicalAddress = 0;
  for (Index = 0; Index < NumberOfEntries; ++Index) {
    *PageEntry = PhysicalAddress | AddressEncMask | PAGE_ATTRIBUTE_BITS;

    //
    // Split the top page table down to the maximum page size supported
    //
    if (MaxMemoryPage < TopLevelPageAttr) {
      Status = SplitPage(PageEntry, TopLevelPageAttr, MaxMemoryPage, TRUE);
      ASSERT_EFI_ERROR (Status);
    }

    if (TopLevelPageAttr == Page1G) {
      //
      // PDPTE[2:1] (PAE Paging) must be 0. SplitPage() might change them to 1.
      //
      *PageEntry &= ~(UINT64)(IA32_PG_RW | IA32_PG_U);
    }

    PageEntry += 1;
    PhysicalAddress += mPageAttributeTable[TopLevelPageAttr].Length;
  }


  return PageTable;
}
476
477 /**
478 Setup page tables and make them work.
479
480 **/
481 VOID
482 EnablePaging (
483 VOID
484 )
485 {
486 UINTN PageTable;
487
488 PageTable = CreatePageTable ();
489 ASSERT (PageTable != 0);
490 if (PageTable != 0) {
491 AsmWriteCr3(PageTable);
492 AsmWriteCr4 (AsmReadCr4 () | BIT5); // CR4.PAE
493 AsmWriteCr0 (AsmReadCr0 () | BIT31); // CR0.PG
494 }
495 }
496
497 /**
498 Get the base address of current AP's stack.
499
500 This function is called in AP's context and assumes that whole calling stacks
501 (till this function) consumed by AP's wakeup procedure will not exceed 4KB.
502
503 PcdCpuApStackSize must be configured with value taking the Guard page into
504 account.
505
506 @param[in,out] Buffer The pointer to private data buffer.
507
508 **/
509 VOID
510 EFIAPI
511 GetStackBase (
512 IN OUT VOID *Buffer
513 )
514 {
515 EFI_PHYSICAL_ADDRESS StackBase;
516
517 StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)&StackBase;
518 StackBase += BASE_4KB;
519 StackBase &= ~((EFI_PHYSICAL_ADDRESS)BASE_4KB - 1);
520 StackBase -= PcdGet32(PcdCpuApStackSize);
521
522 *(EFI_PHYSICAL_ADDRESS *)Buffer = StackBase;
523 }
524
525 /**
526 Setup stack Guard page at the stack base of each processor. BSP and APs have
527 different way to get stack base address.
528
529 **/
/**
  Setup stack Guard page at the stack base of each processor. BSP and APs have
  different way to get stack base address.

  The BSP stack base is read from the stack-allocation HOB; each AP reports
  its own stack base by running GetStackBase on itself. The page at the
  stack base is then marked not-present so an overflow faults.

**/
VOID
SetupStackGuardPage (
  VOID
  )
{
  EFI_PEI_HOB_POINTERS    Hob;
  EFI_PHYSICAL_ADDRESS    StackBase;
  UINTN                   NumberOfProcessors;
  UINTN                   Bsp;
  UINTN                   Index;

  //
  // One extra page at the bottom of the stack is needed for Guard page.
  //
  if (PcdGet32(PcdCpuApStackSize) <= EFI_PAGE_SIZE) {
    DEBUG ((DEBUG_ERROR, "PcdCpuApStackSize is not big enough for Stack Guard!\n"));
    ASSERT (FALSE);
  }

  MpInitLibGetNumberOfProcessors(&NumberOfProcessors, NULL);
  MpInitLibWhoAmI (&Bsp);
  for (Index = 0; Index < NumberOfProcessors; ++Index) {
    StackBase = 0;

    if (Index == Bsp) {
      //
      // BSP stack base comes from the memory-allocation HOB tagged with
      // the stack GUID.
      //
      Hob.Raw = GetHobList ();
      while ((Hob.Raw = GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION, Hob.Raw)) != NULL) {
        if (CompareGuid (&gEfiHobMemoryAllocStackGuid,
                         &(Hob.MemoryAllocationStack->AllocDescriptor.Name))) {
          StackBase = Hob.MemoryAllocationStack->AllocDescriptor.MemoryBaseAddress;
          break;
        }
        Hob.Raw = GET_NEXT_HOB (Hob);
      }
    } else {
      //
      // Ask AP to return its stack base address.
      //
      MpInitLibStartupThisAP(GetStackBase, Index, NULL, 0, (VOID *)&StackBase, NULL);
    }
    ASSERT (StackBase != 0);
    //
    // Set Guard page at stack base address.
    //
    ConvertMemoryPageAttributes(StackBase, EFI_PAGE_SIZE, 0);
    DEBUG ((DEBUG_INFO, "Stack Guard set at %lx [cpu%lu]!\n",
            (UINT64)StackBase, (UINT64)Index));
  }

  //
  // Publish the changes of page table.
  //
  CpuFlushTlb ();
}
584
585 /**
586 Enable/setup stack guard for each processor if PcdCpuStackGuard is set to TRUE.
587
588 Doing this in the memory-discovered callback is to make sure the Stack Guard
589 feature to cover as most PEI code as possible.
590
591 @param[in] PeiServices General purpose services available to every PEIM.
592 @param[in] NotifyDescriptor The notification structure this PEIM registered on install.
593 @param[in] Ppi The memory discovered PPI. Not used.
594
595 @retval EFI_SUCCESS The function completed successfully.
596 @retval others There's error in MP initialization.
597 **/
/**
  Enable/setup stack guard for each processor if PcdCpuStackGuard is set to TRUE.

  Doing this in the memory-discovered callback is to make sure the Stack Guard
  feature to cover as most PEI code as possible.

  Also, when temporary-RAM FV migration is enabled, the original FV ranges
  are marked not-present so stale accesses to them trigger a #PF.

  @param[in] PeiServices       General purpose services available to every PEIM.
  @param[in] NotifyDescriptor  The notification structure this PEIM registered on install.
  @param[in] Ppi               The memory discovered PPI. Not used.

  @retval EFI_SUCCESS  The function completed successfully.
  @retval others       There's error in MP initialization.
**/
EFI_STATUS
EFIAPI
MemoryDiscoveredPpiNotifyCallback (
  IN EFI_PEI_SERVICES           **PeiServices,
  IN EFI_PEI_NOTIFY_DESCRIPTOR  *NotifyDescriptor,
  IN VOID                       *Ppi
  )
{
  EFI_STATUS              Status;
  BOOLEAN                 InitStackGuard;
  BOOLEAN                 InterruptState;
  EDKII_MIGRATED_FV_INFO  *MigratedFvInfo;
  EFI_PEI_HOB_POINTERS    Hob;

  //
  // Migrate the GDT out of temporary RAM with interrupts disabled.
  //
  if (PcdGetBool (PcdMigrateTemporaryRamFirmwareVolumes)) {
    InterruptState = SaveAndDisableInterrupts ();
    Status = MigrateGdt ();
    ASSERT_EFI_ERROR (Status);
    SetInterruptState (InterruptState);
  }

  //
  // Paging must be setup first. Otherwise the exception TSS setup during MP
  // initialization later will not contain paging information and then fail
  // the task switch (for the sake of stack switch).
  //
  InitStackGuard = FALSE;
  Hob.Raw = NULL;
  if (IsIa32PaeSupported ()) {
    Hob.Raw = GetFirstGuidHob (&gEdkiiMigratedFvInfoGuid);
    InitStackGuard = PcdGetBool (PcdCpuStackGuard);
  }

  //
  // Paging is needed both for Stack Guard and for marking migrated FV
  // ranges not-present below.
  //
  if (InitStackGuard || Hob.Raw != NULL) {
    EnablePaging ();
  }

  Status = InitializeCpuMpWorker ((CONST EFI_PEI_SERVICES **)PeiServices);
  ASSERT_EFI_ERROR (Status);

  if (InitStackGuard) {
    SetupStackGuardPage ();
  }

  //
  // Walk every migrated-FV HOB and un-map the original FV range.
  //
  while (Hob.Raw != NULL) {
    MigratedFvInfo = GET_GUID_HOB_DATA (Hob);

    //
    // Enable #PF exception, so if the code access SPI after disable NEM, it will generate
    // the exception to avoid potential vulnerability.
    //
    ConvertMemoryPageAttributes (MigratedFvInfo->FvOrgBase, MigratedFvInfo->FvLength, 0);

    Hob.Raw = GET_NEXT_HOB (Hob);
    Hob.Raw = GetNextGuidHob (&gEdkiiMigratedFvInfoGuid, Hob.Raw);
  }
  CpuFlushTlb ();

  return Status;
}
658