]> git.proxmox.com Git - mirror_edk2.git/blame - UefiCpuPkg/CpuMpPei/CpuPaging.c
UefiCpuPkg/CpuMpPei: Add GDT migration support (CVE-2019-11098)
[mirror_edk2.git] / UefiCpuPkg / CpuMpPei / CpuPaging.c
CommitLineData
0a0d5296
JW
1/** @file\r
2 Basic paging support for the CPU to enable Stack Guard.\r
3\r
01acb06c 4Copyright (c) 2018 - 2019, Intel Corporation. All rights reserved.<BR>\r
0a0d5296 5\r
0acd8697 6SPDX-License-Identifier: BSD-2-Clause-Patent\r
0a0d5296
JW
7\r
8**/\r
9\r
01acb06c
RN
10#include <Register/Intel/Cpuid.h>\r
11#include <Register/Intel/Msr.h>\r
0a0d5296
JW
12#include <Library/MemoryAllocationLib.h>\r
13#include <Library/CpuLib.h>\r
14#include <Library/BaseLib.h>\r
15\r
16#include "CpuMpPei.h"\r
17\r
//
// IA32 page table entry bit definitions.
//
#define IA32_PG_P                    BIT0    // Present
#define IA32_PG_RW                   BIT1    // Read/write
#define IA32_PG_U                    BIT2    // User/supervisor
#define IA32_PG_A                    BIT5    // Accessed
#define IA32_PG_D                    BIT6    // Dirty
#define IA32_PG_PS                   BIT7    // Page size (maps a large page)
#define IA32_PG_NX                   BIT63   // No-execute

//
// Default attributes for a new page entry, and the set of bits propagated
// from a parent entry into child entries when a page is split.
//
#define PAGE_ATTRIBUTE_BITS          (IA32_PG_RW | IA32_PG_P)
#define PAGE_PROGATE_BITS            (IA32_PG_D | IA32_PG_A | IA32_PG_NX | IA32_PG_U |\
                                      PAGE_ATTRIBUTE_BITS)

//
// Table index mask and physical-address masks for each paging level.
//
#define PAGING_PAE_INDEX_MASK        0x1FF
#define PAGING_4K_ADDRESS_MASK_64    0x000FFFFFFFFFF000ull
#define PAGING_2M_ADDRESS_MASK_64    0x000FFFFFFFE00000ull
#define PAGING_1G_ADDRESS_MASK_64    0x000FFFFFC0000000ull
#define PAGING_512G_ADDRESS_MASK_64  0x000FFF8000000000ull

//
// Page table levels / page sizes, ordered from smallest to largest.
// The numeric value doubles as an index into mPageAttributeTable.
//
typedef enum {
  PageNone = 0,         // No mapping / invalid
  PageMin  = 1,         // Lowest splittable level
  Page4K   = PageMin,   // 4KB page
  Page2M   = 2,         // 2MB page
  Page1G   = 3,         // 1GB page
  Page512G = 4,         // 512GB region covered by one PML4 entry
  PageMax  = Page512G
} PAGE_ATTRIBUTE;
45\r
46typedef struct {\r
47 PAGE_ATTRIBUTE Attribute;\r
48 UINT64 Length;\r
49 UINT64 AddressMask;\r
50 UINTN AddressBitOffset;\r
51 UINTN AddressBitLength;\r
52} PAGE_ATTRIBUTE_TABLE;\r
53\r
54PAGE_ATTRIBUTE_TABLE mPageAttributeTable[] = {\r
55 {PageNone, 0, 0, 0, 0},\r
56 {Page4K, SIZE_4KB, PAGING_4K_ADDRESS_MASK_64, 12, 9},\r
57 {Page2M, SIZE_2MB, PAGING_2M_ADDRESS_MASK_64, 21, 9},\r
58 {Page1G, SIZE_1GB, PAGING_1G_ADDRESS_MASK_64, 30, 9},\r
59 {Page512G, SIZE_512GB, PAGING_512G_ADDRESS_MASK_64, 39, 9},\r
60};\r
61\r
62EFI_PEI_NOTIFY_DESCRIPTOR mPostMemNotifyList[] = {\r
63 {\r
64 (EFI_PEI_PPI_DESCRIPTOR_NOTIFY_CALLBACK | EFI_PEI_PPI_DESCRIPTOR_TERMINATE_LIST),\r
65 &gEfiPeiMemoryDiscoveredPpiGuid,\r
66 MemoryDiscoveredPpiNotifyCallback\r
67 }\r
68};\r
69\r
70/**\r
71 The function will check if IA32 PAE is supported.\r
72\r
73 @retval TRUE IA32 PAE is supported.\r
74 @retval FALSE IA32 PAE is not supported.\r
75\r
76**/\r
77BOOLEAN\r
78IsIa32PaeSupported (\r
79 VOID\r
80 )\r
81{\r
82 UINT32 RegEax;\r
83 CPUID_VERSION_INFO_EDX RegEdx;\r
84\r
85 AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);\r
86 if (RegEax >= CPUID_VERSION_INFO) {\r
87 AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);\r
88 if (RegEdx.Bits.PAE != 0) {\r
89 return TRUE;\r
90 }\r
91 }\r
92\r
93 return FALSE;\r
94}\r
95\r
96/**\r
97 This API provides a way to allocate memory for page table.\r
98\r
99 @param Pages The number of 4 KB pages to allocate.\r
100\r
101 @return A pointer to the allocated buffer or NULL if allocation fails.\r
102\r
103**/\r
104VOID *\r
105AllocatePageTableMemory (\r
106 IN UINTN Pages\r
107 )\r
108{\r
109 VOID *Address;\r
110\r
111 Address = AllocatePages(Pages);\r
112 if (Address != NULL) {\r
113 ZeroMem(Address, EFI_PAGES_TO_SIZE (Pages));\r
114 }\r
115\r
116 return Address;\r
117}\r
118\r
119/**\r
120 Get the address width supported by current processor.\r
121\r
122 @retval 32 If processor is in 32-bit mode.\r
123 @retval 36-48 If processor is in 64-bit mode.\r
124\r
125**/\r
126UINTN\r
127GetPhysicalAddressWidth (\r
128 VOID\r
129 )\r
130{\r
131 UINT32 RegEax;\r
132\r
133 if (sizeof(UINTN) == 4) {\r
134 return 32;\r
135 }\r
136\r
137 AsmCpuid(CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);\r
138 if (RegEax >= CPUID_VIR_PHY_ADDRESS_SIZE) {\r
139 AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &RegEax, NULL, NULL, NULL);\r
140 RegEax &= 0xFF;\r
141 if (RegEax > 48) {\r
142 return 48;\r
143 }\r
144\r
145 return (UINTN)RegEax;\r
146 }\r
147\r
148 return 36;\r
149}\r
150\r
151/**\r
152 Get the type of top level page table.\r
153\r
154 @retval Page512G PML4 paging.\r
155 @retval Page1G PAE paing.\r
156\r
157**/\r
158PAGE_ATTRIBUTE\r
159GetPageTableTopLevelType (\r
160 VOID\r
161 )\r
162{\r
163 MSR_IA32_EFER_REGISTER MsrEfer;\r
164\r
165 MsrEfer.Uint64 = AsmReadMsr64 (MSR_CORE_IA32_EFER);\r
166\r
167 return (MsrEfer.Bits.LMA == 1) ? Page512G : Page1G;\r
168}\r
169\r
170/**\r
171 Return page table entry matching the address.\r
172\r
173 @param[in] Address The address to be checked.\r
174 @param[out] PageAttributes The page attribute of the page entry.\r
175\r
176 @return The page entry.\r
177**/\r
178VOID *\r
179GetPageTableEntry (\r
180 IN PHYSICAL_ADDRESS Address,\r
181 OUT PAGE_ATTRIBUTE *PageAttribute\r
182 )\r
183{\r
184 INTN Level;\r
185 UINTN Index;\r
186 UINT64 *PageTable;\r
187 UINT64 AddressEncMask;\r
188\r
189 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask);\r
190 PageTable = (UINT64 *)(UINTN)(AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64);\r
191 for (Level = (INTN)GetPageTableTopLevelType (); Level > 0; --Level) {\r
192 Index = (UINTN)RShiftU64 (Address, mPageAttributeTable[Level].AddressBitOffset);\r
193 Index &= PAGING_PAE_INDEX_MASK;\r
194\r
195 //\r
196 // No mapping?\r
197 //\r
198 if (PageTable[Index] == 0) {\r
199 *PageAttribute = PageNone;\r
200 return NULL;\r
201 }\r
202\r
203 //\r
204 // Page memory?\r
205 //\r
206 if ((PageTable[Index] & IA32_PG_PS) != 0 || Level == PageMin) {\r
207 *PageAttribute = (PAGE_ATTRIBUTE)Level;\r
208 return &PageTable[Index];\r
209 }\r
210\r
211 //\r
212 // Page directory or table\r
213 //\r
214 PageTable = (UINT64 *)(UINTN)(PageTable[Index] &\r
215 ~AddressEncMask &\r
216 PAGING_4K_ADDRESS_MASK_64);\r
217 }\r
218\r
219 *PageAttribute = PageNone;\r
220 return NULL;\r
221}\r
222\r
223/**\r
224 This function splits one page entry to smaller page entries.\r
225\r
226 @param[in] PageEntry The page entry to be splitted.\r
227 @param[in] PageAttribute The page attribute of the page entry.\r
228 @param[in] SplitAttribute How to split the page entry.\r
229 @param[in] Recursively Do the split recursively or not.\r
230\r
231 @retval RETURN_SUCCESS The page entry is splitted.\r
232 @retval RETURN_INVALID_PARAMETER If target page attribute is invalid\r
233 @retval RETURN_OUT_OF_RESOURCES No resource to split page entry.\r
234**/\r
235RETURN_STATUS\r
236SplitPage (\r
237 IN UINT64 *PageEntry,\r
238 IN PAGE_ATTRIBUTE PageAttribute,\r
239 IN PAGE_ATTRIBUTE SplitAttribute,\r
240 IN BOOLEAN Recursively\r
241 )\r
242{\r
243 UINT64 BaseAddress;\r
244 UINT64 *NewPageEntry;\r
245 UINTN Index;\r
246 UINT64 AddressEncMask;\r
247 PAGE_ATTRIBUTE SplitTo;\r
248\r
249 if (SplitAttribute == PageNone || SplitAttribute >= PageAttribute) {\r
250 ASSERT (SplitAttribute != PageNone);\r
251 ASSERT (SplitAttribute < PageAttribute);\r
252 return RETURN_INVALID_PARAMETER;\r
253 }\r
254\r
255 NewPageEntry = AllocatePageTableMemory (1);\r
256 if (NewPageEntry == NULL) {\r
257 ASSERT (NewPageEntry != NULL);\r
258 return RETURN_OUT_OF_RESOURCES;\r
259 }\r
260\r
261 //\r
262 // One level down each step to achieve more compact page table.\r
263 //\r
264 SplitTo = PageAttribute - 1;\r
265 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &\r
266 mPageAttributeTable[SplitTo].AddressMask;\r
267 BaseAddress = *PageEntry &\r
268 ~PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &\r
269 mPageAttributeTable[PageAttribute].AddressMask;\r
270 for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {\r
271 NewPageEntry[Index] = BaseAddress | AddressEncMask |\r
272 ((*PageEntry) & PAGE_PROGATE_BITS);\r
273\r
274 if (SplitTo != PageMin) {\r
275 NewPageEntry[Index] |= IA32_PG_PS;\r
276 }\r
277\r
278 if (Recursively && SplitTo > SplitAttribute) {\r
279 SplitPage (&NewPageEntry[Index], SplitTo, SplitAttribute, Recursively);\r
280 }\r
281\r
282 BaseAddress += mPageAttributeTable[SplitTo].Length;\r
283 }\r
284\r
285 (*PageEntry) = (UINT64)(UINTN)NewPageEntry | AddressEncMask | PAGE_ATTRIBUTE_BITS;\r
286\r
287 return RETURN_SUCCESS;\r
288}\r
289\r
290/**\r
291 This function modifies the page attributes for the memory region specified\r
292 by BaseAddress and Length from their current attributes to the attributes\r
293 specified by Attributes.\r
294\r
295 Caller should make sure BaseAddress and Length is at page boundary.\r
296\r
297 @param[in] BaseAddress Start address of a memory region.\r
298 @param[in] Length Size in bytes of the memory region.\r
299 @param[in] Attributes Bit mask of attributes to modify.\r
300\r
301 @retval RETURN_SUCCESS The attributes were modified for the memory\r
302 region.\r
303 @retval RETURN_INVALID_PARAMETER Length is zero; or,\r
304 Attributes specified an illegal combination\r
305 of attributes that cannot be set together; or\r
306 Addressis not 4KB aligned.\r
307 @retval RETURN_OUT_OF_RESOURCES There are not enough system resources to modify\r
308 the attributes.\r
309 @retval RETURN_UNSUPPORTED Cannot modify the attributes of given memory.\r
310\r
311**/\r
312RETURN_STATUS\r
313EFIAPI\r
314ConvertMemoryPageAttributes (\r
315 IN PHYSICAL_ADDRESS BaseAddress,\r
316 IN UINT64 Length,\r
317 IN UINT64 Attributes\r
318 )\r
319{\r
320 UINT64 *PageEntry;\r
321 PAGE_ATTRIBUTE PageAttribute;\r
322 RETURN_STATUS Status;\r
323 EFI_PHYSICAL_ADDRESS MaximumAddress;\r
324\r
325 if (Length == 0 ||\r
326 (BaseAddress & (SIZE_4KB - 1)) != 0 ||\r
327 (Length & (SIZE_4KB - 1)) != 0) {\r
328\r
329 ASSERT (Length > 0);\r
330 ASSERT ((BaseAddress & (SIZE_4KB - 1)) == 0);\r
331 ASSERT ((Length & (SIZE_4KB - 1)) == 0);\r
332\r
333 return RETURN_INVALID_PARAMETER;\r
334 }\r
335\r
336 MaximumAddress = (EFI_PHYSICAL_ADDRESS)MAX_UINT32;\r
337 if (BaseAddress > MaximumAddress ||\r
338 Length > MaximumAddress ||\r
339 (BaseAddress > MaximumAddress - (Length - 1))) {\r
340 return RETURN_UNSUPPORTED;\r
341 }\r
342\r
343 //\r
344 // Below logic is to check 2M/4K page to make sure we do not waste memory.\r
345 //\r
346 while (Length != 0) {\r
347 PageEntry = GetPageTableEntry (BaseAddress, &PageAttribute);\r
348 if (PageEntry == NULL) {\r
349 return RETURN_UNSUPPORTED;\r
350 }\r
351\r
352 if (PageAttribute != Page4K) {\r
353 Status = SplitPage (PageEntry, PageAttribute, Page4K, FALSE);\r
354 if (RETURN_ERROR (Status)) {\r
355 return Status;\r
356 }\r
357 //\r
358 // Do it again until the page is 4K.\r
359 //\r
360 continue;\r
361 }\r
362\r
363 //\r
364 // Just take care of 'present' bit for Stack Guard.\r
365 //\r
366 if ((Attributes & IA32_PG_P) != 0) {\r
367 *PageEntry |= (UINT64)IA32_PG_P;\r
368 } else {\r
369 *PageEntry &= ~((UINT64)IA32_PG_P);\r
370 }\r
371\r
372 //\r
373 // Convert success, move to next\r
374 //\r
375 BaseAddress += SIZE_4KB;\r
376 Length -= SIZE_4KB;\r
377 }\r
378\r
379 return RETURN_SUCCESS;\r
380}\r
381\r
382/**\r
383 Get maximum size of page memory supported by current processor.\r
384\r
385 @param[in] TopLevelType The type of top level page entry.\r
386\r
387 @retval Page1G If processor supports 1G page and PML4.\r
388 @retval Page2M For all other situations.\r
389\r
390**/\r
391PAGE_ATTRIBUTE\r
392GetMaxMemoryPage (\r
393 IN PAGE_ATTRIBUTE TopLevelType\r
394 )\r
395{\r
396 UINT32 RegEax;\r
397 UINT32 RegEdx;\r
398\r
399 if (TopLevelType == Page512G) {\r
400 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);\r
401 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {\r
402 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);\r
403 if ((RegEdx & BIT26) != 0) {\r
404 return Page1G;\r
405 }\r
406 }\r
407 }\r
408\r
409 return Page2M;\r
410}\r
411\r
412/**\r
413 Create PML4 or PAE page table.\r
414\r
415 @return The address of page table.\r
416\r
417**/\r
418UINTN\r
419CreatePageTable (\r
420 VOID\r
421 )\r
422{\r
423 RETURN_STATUS Status;\r
424 UINTN PhysicalAddressBits;\r
425 UINTN NumberOfEntries;\r
426 PAGE_ATTRIBUTE TopLevelPageAttr;\r
427 UINTN PageTable;\r
428 PAGE_ATTRIBUTE MaxMemoryPage;\r
429 UINTN Index;\r
430 UINT64 AddressEncMask;\r
431 UINT64 *PageEntry;\r
432 EFI_PHYSICAL_ADDRESS PhysicalAddress;\r
433\r
434 TopLevelPageAttr = (PAGE_ATTRIBUTE)GetPageTableTopLevelType ();\r
435 PhysicalAddressBits = GetPhysicalAddressWidth ();\r
436 NumberOfEntries = (UINTN)1 << (PhysicalAddressBits -\r
437 mPageAttributeTable[TopLevelPageAttr].AddressBitOffset);\r
438\r
439 PageTable = (UINTN) AllocatePageTableMemory (1);\r
440 if (PageTable == 0) {\r
441 return 0;\r
442 }\r
443\r
444 AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask);\r
445 AddressEncMask &= mPageAttributeTable[TopLevelPageAttr].AddressMask;\r
446 MaxMemoryPage = GetMaxMemoryPage (TopLevelPageAttr);\r
447 PageEntry = (UINT64 *)PageTable;\r
448\r
449 PhysicalAddress = 0;\r
450 for (Index = 0; Index < NumberOfEntries; ++Index) {\r
451 *PageEntry = PhysicalAddress | AddressEncMask | PAGE_ATTRIBUTE_BITS;\r
452\r
453 //\r
454 // Split the top page table down to the maximum page size supported\r
455 //\r
456 if (MaxMemoryPage < TopLevelPageAttr) {\r
457 Status = SplitPage(PageEntry, TopLevelPageAttr, MaxMemoryPage, TRUE);\r
458 ASSERT_EFI_ERROR (Status);\r
459 }\r
460\r
461 if (TopLevelPageAttr == Page1G) {\r
462 //\r
463 // PDPTE[2:1] (PAE Paging) must be 0. SplitPage() might change them to 1.\r
464 //\r
465 *PageEntry &= ~(UINT64)(IA32_PG_RW | IA32_PG_U);\r
466 }\r
467\r
468 PageEntry += 1;\r
469 PhysicalAddress += mPageAttributeTable[TopLevelPageAttr].Length;\r
470 }\r
471\r
472\r
473 return PageTable;\r
474}\r
475\r
476/**\r
477 Setup page tables and make them work.\r
478\r
479**/\r
480VOID\r
481EnablePaging (\r
482 VOID\r
483 )\r
484{\r
485 UINTN PageTable;\r
486\r
487 PageTable = CreatePageTable ();\r
488 ASSERT (PageTable != 0);\r
489 if (PageTable != 0) {\r
490 AsmWriteCr3(PageTable);\r
491 AsmWriteCr4 (AsmReadCr4 () | BIT5); // CR4.PAE\r
492 AsmWriteCr0 (AsmReadCr0 () | BIT31); // CR0.PG\r
493 }\r
494}\r
495\r
496/**\r
497 Get the base address of current AP's stack.\r
498\r
499 This function is called in AP's context and assumes that whole calling stacks\r
500 (till this function) consumed by AP's wakeup procedure will not exceed 4KB.\r
501\r
502 PcdCpuApStackSize must be configured with value taking the Guard page into\r
503 account.\r
504\r
505 @param[in,out] Buffer The pointer to private data buffer.\r
506\r
507**/\r
508VOID\r
509EFIAPI\r
510GetStackBase (\r
511 IN OUT VOID *Buffer\r
512 )\r
513{\r
514 EFI_PHYSICAL_ADDRESS StackBase;\r
515\r
516 StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)&StackBase;\r
517 StackBase += BASE_4KB;\r
518 StackBase &= ~((EFI_PHYSICAL_ADDRESS)BASE_4KB - 1);\r
519 StackBase -= PcdGet32(PcdCpuApStackSize);\r
520\r
521 *(EFI_PHYSICAL_ADDRESS *)Buffer = StackBase;\r
522}\r
523\r
524/**\r
525 Setup stack Guard page at the stack base of each processor. BSP and APs have\r
526 different way to get stack base address.\r
527\r
528**/\r
529VOID\r
530SetupStackGuardPage (\r
531 VOID\r
532 )\r
533{\r
534 EFI_PEI_HOB_POINTERS Hob;\r
535 EFI_PHYSICAL_ADDRESS StackBase;\r
536 UINTN NumberOfProcessors;\r
537 UINTN Bsp;\r
538 UINTN Index;\r
539\r
540 //\r
541 // One extra page at the bottom of the stack is needed for Guard page.\r
542 //\r
543 if (PcdGet32(PcdCpuApStackSize) <= EFI_PAGE_SIZE) {\r
544 DEBUG ((DEBUG_ERROR, "PcdCpuApStackSize is not big enough for Stack Guard!\n"));\r
545 ASSERT (FALSE);\r
546 }\r
547\r
548 MpInitLibGetNumberOfProcessors(&NumberOfProcessors, NULL);\r
549 MpInitLibWhoAmI (&Bsp);\r
550 for (Index = 0; Index < NumberOfProcessors; ++Index) {\r
2939283f
JW
551 StackBase = 0;\r
552\r
0a0d5296
JW
553 if (Index == Bsp) {\r
554 Hob.Raw = GetHobList ();\r
555 while ((Hob.Raw = GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION, Hob.Raw)) != NULL) {\r
556 if (CompareGuid (&gEfiHobMemoryAllocStackGuid,\r
557 &(Hob.MemoryAllocationStack->AllocDescriptor.Name))) {\r
558 StackBase = Hob.MemoryAllocationStack->AllocDescriptor.MemoryBaseAddress;\r
559 break;\r
560 }\r
561 Hob.Raw = GET_NEXT_HOB (Hob);\r
562 }\r
563 } else {\r
564 //\r
565 // Ask AP to return is stack base address.\r
566 //\r
567 MpInitLibStartupThisAP(GetStackBase, Index, NULL, 0, (VOID *)&StackBase, NULL);\r
568 }\r
2939283f 569 ASSERT (StackBase != 0);\r
0a0d5296
JW
570 //\r
571 // Set Guard page at stack base address.\r
572 //\r
573 ConvertMemoryPageAttributes(StackBase, EFI_PAGE_SIZE, 0);\r
574 DEBUG ((DEBUG_INFO, "Stack Guard set at %lx [cpu%lu]!\n",\r
575 (UINT64)StackBase, (UINT64)Index));\r
576 }\r
577\r
578 //\r
579 // Publish the changes of page table.\r
580 //\r
581 CpuFlushTlb ();\r
582}\r
583\r
584/**\r
585 Enabl/setup stack guard for each processor if PcdCpuStackGuard is set to TRUE.\r
586\r
587 Doing this in the memory-discovered callback is to make sure the Stack Guard\r
588 feature to cover as most PEI code as possible.\r
589\r
590 @param[in] PeiServices General purpose services available to every PEIM.\r
591 @param[in] NotifyDescriptor The notification structure this PEIM registered on install.\r
592 @param[in] Ppi The memory discovered PPI. Not used.\r
593\r
594 @retval EFI_SUCCESS The function completed successfully.\r
595 @retval others There's error in MP initialization.\r
596**/\r
597EFI_STATUS\r
598EFIAPI\r
599MemoryDiscoveredPpiNotifyCallback (\r
600 IN EFI_PEI_SERVICES **PeiServices,\r
601 IN EFI_PEI_NOTIFY_DESCRIPTOR *NotifyDescriptor,\r
602 IN VOID *Ppi\r
603 )\r
604{\r
60b12e69
MK
605 EFI_STATUS Status;\r
606 BOOLEAN InitStackGuard;\r
607 BOOLEAN InterruptState;\r
608\r
609 if (PcdGetBool (PcdMigrateTemporaryRamFirmwareVolumes)) {\r
610 InterruptState = SaveAndDisableInterrupts ();\r
611 Status = MigrateGdt ();\r
612 ASSERT_EFI_ERROR (Status);\r
613 SetInterruptState (InterruptState);\r
614 }\r
0a0d5296
JW
615\r
616 //\r
617 // Paging must be setup first. Otherwise the exception TSS setup during MP\r
618 // initialization later will not contain paging information and then fail\r
619 // the task switch (for the sake of stack switch).\r
620 //\r
621 InitStackGuard = FALSE;\r
622 if (IsIa32PaeSupported () && PcdGetBool (PcdCpuStackGuard)) {\r
623 EnablePaging ();\r
624 InitStackGuard = TRUE;\r
625 }\r
626\r
627 Status = InitializeCpuMpWorker ((CONST EFI_PEI_SERVICES **)PeiServices);\r
628 ASSERT_EFI_ERROR (Status);\r
629\r
630 if (InitStackGuard) {\r
631 SetupStackGuardPage ();\r
632 }\r
633\r
634 return Status;\r
635}\r
636\r