/** @file
  Basic paging support for the CPU to enable Stack Guard.

Copyright (c) 2018 - 2019, Intel Corporation. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/
10 #include <Register/Intel/Cpuid.h>
11 #include <Register/Intel/Msr.h>
12 #include <Library/MemoryAllocationLib.h>
13 #include <Library/CpuLib.h>
14 #include <Library/BaseLib.h>
15 #include <Guid/MigratedFvInfo.h>
//
// IA32 page table entry bit definitions (Intel SDM Vol. 3A, Chapter 4).
//
#define IA32_PG_P   BIT0
#define IA32_PG_RW  BIT1
#define IA32_PG_U   BIT2
#define IA32_PG_A   BIT5
#define IA32_PG_D   BIT6
#define IA32_PG_PS  BIT7
#define IA32_PG_NX  BIT63

#define PAGE_ATTRIBUTE_BITS  (IA32_PG_RW | IA32_PG_P)
//
// Bits carried over from a large-page entry to the smaller entries it is
// split into.
//
#define PAGE_PROGATE_BITS    (IA32_PG_D | IA32_PG_A | IA32_PG_NX | IA32_PG_U | \
                              PAGE_ATTRIBUTE_BITS)

#define PAGING_PAE_INDEX_MASK        0x1FF
#define PAGING_4K_ADDRESS_MASK_64    0x000FFFFFFFFFF000ull
#define PAGING_2M_ADDRESS_MASK_64    0x000FFFFFFFE00000ull
#define PAGING_1G_ADDRESS_MASK_64    0x000FFFFFC0000000ull
#define PAGING_512G_ADDRESS_MASK_64  0x000FFF8000000000ull
48 PAGE_ATTRIBUTE Attribute
;
51 UINTN AddressBitOffset
;
52 UINTN AddressBitLength
;
53 } PAGE_ATTRIBUTE_TABLE
;
55 PAGE_ATTRIBUTE_TABLE mPageAttributeTable
[] = {
56 { PageNone
, 0, 0, 0, 0 },
57 { Page4K
, SIZE_4KB
, PAGING_4K_ADDRESS_MASK_64
, 12, 9 },
58 { Page2M
, SIZE_2MB
, PAGING_2M_ADDRESS_MASK_64
, 21, 9 },
59 { Page1G
, SIZE_1GB
, PAGING_1G_ADDRESS_MASK_64
, 30, 9 },
60 { Page512G
, SIZE_512GB
, PAGING_512G_ADDRESS_MASK_64
, 39, 9 },
63 EFI_PEI_NOTIFY_DESCRIPTOR mPostMemNotifyList
[] = {
65 (EFI_PEI_PPI_DESCRIPTOR_NOTIFY_CALLBACK
| EFI_PEI_PPI_DESCRIPTOR_TERMINATE_LIST
),
66 &gEfiPeiMemoryDiscoveredPpiGuid
,
67 MemoryDiscoveredPpiNotifyCallback
72 The function will check if IA32 PAE is supported.
74 @retval TRUE IA32 PAE is supported.
75 @retval FALSE IA32 PAE is not supported.
84 CPUID_VERSION_INFO_EDX RegEdx
;
86 AsmCpuid (CPUID_SIGNATURE
, &RegEax
, NULL
, NULL
, NULL
);
87 if (RegEax
>= CPUID_VERSION_INFO
) {
88 AsmCpuid (CPUID_VERSION_INFO
, NULL
, NULL
, NULL
, &RegEdx
.Uint32
);
89 if (RegEdx
.Bits
.PAE
!= 0) {
98 This API provides a way to allocate memory for page table.
100 @param Pages The number of 4 KB pages to allocate.
102 @return A pointer to the allocated buffer or NULL if allocation fails.
106 AllocatePageTableMemory (
112 Address
= AllocatePages (Pages
);
113 if (Address
!= NULL
) {
114 ZeroMem (Address
, EFI_PAGES_TO_SIZE (Pages
));
121 Get the address width supported by current processor.
123 @retval 32 If processor is in 32-bit mode.
124 @retval 36-48 If processor is in 64-bit mode.
128 GetPhysicalAddressWidth (
134 if (sizeof (UINTN
) == 4) {
138 AsmCpuid (CPUID_EXTENDED_FUNCTION
, &RegEax
, NULL
, NULL
, NULL
);
139 if (RegEax
>= CPUID_VIR_PHY_ADDRESS_SIZE
) {
140 AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE
, &RegEax
, NULL
, NULL
, NULL
);
146 return (UINTN
)RegEax
;
153 Get the type of top level page table.
155 @retval Page512G PML4 paging.
156 @retval Page1G PAE paging.
160 GetPageTableTopLevelType (
164 MSR_IA32_EFER_REGISTER MsrEfer
;
166 MsrEfer
.Uint64
= AsmReadMsr64 (MSR_CORE_IA32_EFER
);
168 return (MsrEfer
.Bits
.LMA
== 1) ? Page512G
: Page1G
;
172 Return page table entry matching the address.
174 @param[in] Address The address to be checked.
175 @param[out] PageAttributes The page attribute of the page entry.
177 @return The page entry.
181 IN PHYSICAL_ADDRESS Address
,
182 OUT PAGE_ATTRIBUTE
*PageAttribute
188 UINT64 AddressEncMask
;
190 AddressEncMask
= PcdGet64 (PcdPteMemoryEncryptionAddressOrMask
);
191 PageTable
= (UINT64
*)(UINTN
)(AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64
);
192 for (Level
= (INTN
)GetPageTableTopLevelType (); Level
> 0; --Level
) {
193 Index
= (UINTN
)RShiftU64 (Address
, mPageAttributeTable
[Level
].AddressBitOffset
);
194 Index
&= PAGING_PAE_INDEX_MASK
;
199 if (PageTable
[Index
] == 0) {
200 *PageAttribute
= PageNone
;
207 if (((PageTable
[Index
] & IA32_PG_PS
) != 0) || (Level
== PageMin
)) {
208 *PageAttribute
= (PAGE_ATTRIBUTE
)Level
;
209 return &PageTable
[Index
];
213 // Page directory or table
215 PageTable
= (UINT64
*)(UINTN
)(PageTable
[Index
] &
217 PAGING_4K_ADDRESS_MASK_64
);
220 *PageAttribute
= PageNone
;
225 This function splits one page entry to smaller page entries.
227 @param[in] PageEntry The page entry to be splitted.
228 @param[in] PageAttribute The page attribute of the page entry.
229 @param[in] SplitAttribute How to split the page entry.
230 @param[in] Recursively Do the split recursively or not.
232 @retval RETURN_SUCCESS The page entry is splitted.
233 @retval RETURN_INVALID_PARAMETER If target page attribute is invalid
234 @retval RETURN_OUT_OF_RESOURCES No resource to split page entry.
238 IN UINT64
*PageEntry
,
239 IN PAGE_ATTRIBUTE PageAttribute
,
240 IN PAGE_ATTRIBUTE SplitAttribute
,
241 IN BOOLEAN Recursively
245 UINT64
*NewPageEntry
;
247 UINT64 AddressEncMask
;
248 PAGE_ATTRIBUTE SplitTo
;
250 if ((SplitAttribute
== PageNone
) || (SplitAttribute
>= PageAttribute
)) {
251 ASSERT (SplitAttribute
!= PageNone
);
252 ASSERT (SplitAttribute
< PageAttribute
);
253 return RETURN_INVALID_PARAMETER
;
256 NewPageEntry
= AllocatePageTableMemory (1);
257 if (NewPageEntry
== NULL
) {
258 ASSERT (NewPageEntry
!= NULL
);
259 return RETURN_OUT_OF_RESOURCES
;
263 // One level down each step to achieve more compact page table.
265 SplitTo
= PageAttribute
- 1;
266 AddressEncMask
= PcdGet64 (PcdPteMemoryEncryptionAddressOrMask
) &
267 mPageAttributeTable
[SplitTo
].AddressMask
;
268 BaseAddress
= *PageEntry
&
269 ~PcdGet64 (PcdPteMemoryEncryptionAddressOrMask
) &
270 mPageAttributeTable
[PageAttribute
].AddressMask
;
271 for (Index
= 0; Index
< SIZE_4KB
/ sizeof (UINT64
); Index
++) {
272 NewPageEntry
[Index
] = BaseAddress
| AddressEncMask
|
273 ((*PageEntry
) & PAGE_PROGATE_BITS
);
275 if (SplitTo
!= PageMin
) {
276 NewPageEntry
[Index
] |= IA32_PG_PS
;
279 if (Recursively
&& (SplitTo
> SplitAttribute
)) {
280 SplitPage (&NewPageEntry
[Index
], SplitTo
, SplitAttribute
, Recursively
);
283 BaseAddress
+= mPageAttributeTable
[SplitTo
].Length
;
286 (*PageEntry
) = (UINT64
)(UINTN
)NewPageEntry
| AddressEncMask
| PAGE_ATTRIBUTE_BITS
;
288 return RETURN_SUCCESS
;
292 This function modifies the page attributes for the memory region specified
293 by BaseAddress and Length from their current attributes to the attributes
294 specified by Attributes.
296 Caller should make sure BaseAddress and Length is at page boundary.
298 @param[in] BaseAddress Start address of a memory region.
299 @param[in] Length Size in bytes of the memory region.
300 @param[in] Attributes Bit mask of attributes to modify.
302 @retval RETURN_SUCCESS The attributes were modified for the memory
304 @retval RETURN_INVALID_PARAMETER Length is zero; or,
305 Attributes specified an illegal combination
306 of attributes that cannot be set together; or
307 Addressis not 4KB aligned.
308 @retval RETURN_OUT_OF_RESOURCES There are not enough system resources to modify
310 @retval RETURN_UNSUPPORTED Cannot modify the attributes of given memory.
315 ConvertMemoryPageAttributes (
316 IN PHYSICAL_ADDRESS BaseAddress
,
322 PAGE_ATTRIBUTE PageAttribute
;
323 RETURN_STATUS Status
;
324 EFI_PHYSICAL_ADDRESS MaximumAddress
;
327 ((BaseAddress
& (SIZE_4KB
- 1)) != 0) ||
328 ((Length
& (SIZE_4KB
- 1)) != 0))
331 ASSERT ((BaseAddress
& (SIZE_4KB
- 1)) == 0);
332 ASSERT ((Length
& (SIZE_4KB
- 1)) == 0);
334 return RETURN_INVALID_PARAMETER
;
337 MaximumAddress
= (EFI_PHYSICAL_ADDRESS
)MAX_UINT32
;
338 if ((BaseAddress
> MaximumAddress
) ||
339 (Length
> MaximumAddress
) ||
340 (BaseAddress
> MaximumAddress
- (Length
- 1)))
342 return RETURN_UNSUPPORTED
;
346 // Below logic is to check 2M/4K page to make sure we do not waste memory.
348 while (Length
!= 0) {
349 PageEntry
= GetPageTableEntry (BaseAddress
, &PageAttribute
);
350 if (PageEntry
== NULL
) {
351 return RETURN_UNSUPPORTED
;
354 if (PageAttribute
!= Page4K
) {
355 Status
= SplitPage (PageEntry
, PageAttribute
, Page4K
, FALSE
);
356 if (RETURN_ERROR (Status
)) {
361 // Do it again until the page is 4K.
367 // Just take care of 'present' bit for Stack Guard.
369 if ((Attributes
& IA32_PG_P
) != 0) {
370 *PageEntry
|= (UINT64
)IA32_PG_P
;
372 *PageEntry
&= ~((UINT64
)IA32_PG_P
);
376 // Convert success, move to next
378 BaseAddress
+= SIZE_4KB
;
382 return RETURN_SUCCESS
;
386 Get maximum size of page memory supported by current processor.
388 @param[in] TopLevelType The type of top level page entry.
390 @retval Page1G If processor supports 1G page and PML4.
391 @retval Page2M For all other situations.
396 IN PAGE_ATTRIBUTE TopLevelType
402 if (TopLevelType
== Page512G
) {
403 AsmCpuid (CPUID_EXTENDED_FUNCTION
, &RegEax
, NULL
, NULL
, NULL
);
404 if (RegEax
>= CPUID_EXTENDED_CPU_SIG
) {
405 AsmCpuid (CPUID_EXTENDED_CPU_SIG
, NULL
, NULL
, NULL
, &RegEdx
);
406 if ((RegEdx
& BIT26
) != 0) {
416 Create PML4 or PAE page table.
418 @return The address of page table.
426 RETURN_STATUS Status
;
427 UINTN PhysicalAddressBits
;
428 UINTN NumberOfEntries
;
429 PAGE_ATTRIBUTE TopLevelPageAttr
;
431 PAGE_ATTRIBUTE MaxMemoryPage
;
433 UINT64 AddressEncMask
;
435 EFI_PHYSICAL_ADDRESS PhysicalAddress
;
437 TopLevelPageAttr
= (PAGE_ATTRIBUTE
)GetPageTableTopLevelType ();
438 PhysicalAddressBits
= GetPhysicalAddressWidth ();
439 NumberOfEntries
= (UINTN
)1 << (PhysicalAddressBits
-
440 mPageAttributeTable
[TopLevelPageAttr
].AddressBitOffset
);
442 PageTable
= (UINTN
)AllocatePageTableMemory (1);
443 if (PageTable
== 0) {
447 AddressEncMask
= PcdGet64 (PcdPteMemoryEncryptionAddressOrMask
);
448 AddressEncMask
&= mPageAttributeTable
[TopLevelPageAttr
].AddressMask
;
449 MaxMemoryPage
= GetMaxMemoryPage (TopLevelPageAttr
);
450 PageEntry
= (UINT64
*)PageTable
;
453 for (Index
= 0; Index
< NumberOfEntries
; ++Index
) {
454 *PageEntry
= PhysicalAddress
| AddressEncMask
| PAGE_ATTRIBUTE_BITS
;
457 // Split the top page table down to the maximum page size supported
459 if (MaxMemoryPage
< TopLevelPageAttr
) {
460 Status
= SplitPage (PageEntry
, TopLevelPageAttr
, MaxMemoryPage
, TRUE
);
461 ASSERT_EFI_ERROR (Status
);
464 if (TopLevelPageAttr
== Page1G
) {
466 // PDPTE[2:1] (PAE Paging) must be 0. SplitPage() might change them to 1.
468 *PageEntry
&= ~(UINT64
)(IA32_PG_RW
| IA32_PG_U
);
472 PhysicalAddress
+= mPageAttributeTable
[TopLevelPageAttr
].Length
;
479 Setup page tables and make them work.
489 PageTable
= CreatePageTable ();
490 ASSERT (PageTable
!= 0);
491 if (PageTable
!= 0) {
492 AsmWriteCr3 (PageTable
);
493 AsmWriteCr4 (AsmReadCr4 () | BIT5
); // CR4.PAE
494 AsmWriteCr0 (AsmReadCr0 () | BIT31
); // CR0.PG
499 Get the base address of current AP's stack.
501 This function is called in AP's context and assumes that whole calling stacks
502 (till this function) consumed by AP's wakeup procedure will not exceed 4KB.
504 PcdCpuApStackSize must be configured with value taking the Guard page into
507 @param[in,out] Buffer The pointer to private data buffer.
516 EFI_PHYSICAL_ADDRESS StackBase
;
518 StackBase
= (EFI_PHYSICAL_ADDRESS
)(UINTN
)&StackBase
;
519 StackBase
+= BASE_4KB
;
520 StackBase
&= ~((EFI_PHYSICAL_ADDRESS
)BASE_4KB
- 1);
521 StackBase
-= PcdGet32 (PcdCpuApStackSize
);
523 *(EFI_PHYSICAL_ADDRESS
*)Buffer
= StackBase
;
527 Setup stack Guard page at the stack base of each processor. BSP and APs have
528 different way to get stack base address.
532 SetupStackGuardPage (
536 EFI_PEI_HOB_POINTERS Hob
;
537 EFI_PHYSICAL_ADDRESS StackBase
;
538 UINTN NumberOfProcessors
;
543 // One extra page at the bottom of the stack is needed for Guard page.
545 if (PcdGet32 (PcdCpuApStackSize
) <= EFI_PAGE_SIZE
) {
546 DEBUG ((DEBUG_ERROR
, "PcdCpuApStackSize is not big enough for Stack Guard!\n"));
550 MpInitLibGetNumberOfProcessors (&NumberOfProcessors
, NULL
);
551 MpInitLibWhoAmI (&Bsp
);
552 for (Index
= 0; Index
< NumberOfProcessors
; ++Index
) {
556 Hob
.Raw
= GetHobList ();
557 while ((Hob
.Raw
= GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION
, Hob
.Raw
)) != NULL
) {
559 &gEfiHobMemoryAllocStackGuid
,
560 &(Hob
.MemoryAllocationStack
->AllocDescriptor
.Name
)
563 StackBase
= Hob
.MemoryAllocationStack
->AllocDescriptor
.MemoryBaseAddress
;
567 Hob
.Raw
= GET_NEXT_HOB (Hob
);
571 // Ask AP to return is stack base address.
573 MpInitLibStartupThisAP (GetStackBase
, Index
, NULL
, 0, (VOID
*)&StackBase
, NULL
);
576 ASSERT (StackBase
!= 0);
578 // Set Guard page at stack base address.
580 ConvertMemoryPageAttributes (StackBase
, EFI_PAGE_SIZE
, 0);
583 "Stack Guard set at %lx [cpu%lu]!\n",
590 // Publish the changes of page table.
596 Enable/setup stack guard for each processor if PcdCpuStackGuard is set to TRUE.
598 Doing this in the memory-discovered callback is to make sure the Stack Guard
599 feature to cover as most PEI code as possible.
601 @param[in] PeiServices General purpose services available to every PEIM.
602 @param[in] NotifyDescriptor The notification structure this PEIM registered on install.
603 @param[in] Ppi The memory discovered PPI. Not used.
605 @retval EFI_SUCCESS The function completed successfully.
606 @retval others There's error in MP initialization.
610 MemoryDiscoveredPpiNotifyCallback (
611 IN EFI_PEI_SERVICES
**PeiServices
,
612 IN EFI_PEI_NOTIFY_DESCRIPTOR
*NotifyDescriptor
,
617 BOOLEAN InitStackGuard
;
618 EDKII_MIGRATED_FV_INFO
*MigratedFvInfo
;
619 EFI_PEI_HOB_POINTERS Hob
;
622 // Paging must be setup first. Otherwise the exception TSS setup during MP
623 // initialization later will not contain paging information and then fail
624 // the task switch (for the sake of stack switch).
626 InitStackGuard
= FALSE
;
628 if (IsIa32PaeSupported ()) {
629 Hob
.Raw
= GetFirstGuidHob (&gEdkiiMigratedFvInfoGuid
);
630 InitStackGuard
= PcdGetBool (PcdCpuStackGuard
);
633 if (InitStackGuard
|| (Hob
.Raw
!= NULL
)) {
637 Status
= InitializeCpuMpWorker ((CONST EFI_PEI_SERVICES
**)PeiServices
);
638 ASSERT_EFI_ERROR (Status
);
640 if (InitStackGuard
) {
641 SetupStackGuardPage ();
644 while (Hob
.Raw
!= NULL
) {
645 MigratedFvInfo
= GET_GUID_HOB_DATA (Hob
);
648 // Enable #PF exception, so if the code access SPI after disable NEM, it will generate
649 // the exception to avoid potential vulnerability.
651 ConvertMemoryPageAttributes (MigratedFvInfo
->FvOrgBase
, MigratedFvInfo
->FvLength
, 0);
653 Hob
.Raw
= GET_NEXT_HOB (Hob
);
654 Hob
.Raw
= GetNextGuidHob (&gEdkiiMigratedFvInfoGuid
, Hob
.Raw
);