]> git.proxmox.com Git - mirror_edk2.git/blame - UefiCpuPkg/CpuMpPei/CpuPaging.c
MdeModulePkg/DxeIpl: support more NX related PCDs
[mirror_edk2.git] / UefiCpuPkg / CpuMpPei / CpuPaging.c
CommitLineData
0a0d5296
JW
1/** @file\r
2 Basic paging support for the CPU to enable Stack Guard.\r
3\r
4Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>\r
5\r
6This program and the accompanying materials\r
7are licensed and made available under the terms and conditions of the BSD License\r
8which accompanies this distribution. The full text of the license may be found at\r
9http://opensource.org/licenses/bsd-license.php\r
10\r
11THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
12WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
13\r
14**/\r
15\r
16#include <Register/Cpuid.h>\r
17#include <Register/Msr.h>\r
18#include <Library/MemoryAllocationLib.h>\r
19#include <Library/CpuLib.h>\r
20#include <Library/BaseLib.h>\r
21\r
22#include "CpuMpPei.h"\r
23\r
//
// IA32/X64 page table entry bit definitions (PAE and 4-level paging).
//
#define IA32_PG_P                   BIT0    // Present
#define IA32_PG_RW                  BIT1    // Read/Write
#define IA32_PG_U                   BIT2    // User/Supervisor
#define IA32_PG_A                   BIT5    // Accessed
#define IA32_PG_D                   BIT6    // Dirty
#define IA32_PG_PS                  BIT7    // Page Size (entry maps a 2M/1G page)
#define IA32_PG_NX                  BIT63   // No-eXecute

#define PAGE_ATTRIBUTE_BITS         (IA32_PG_RW | IA32_PG_P)
//
// Attribute bits copied from a large-page entry into the smaller entries it
// is split into. (NOTE: macro name is a historical typo of "PROPAGATE";
// kept as-is to avoid touching all users.)
//
#define PAGE_PROGATE_BITS           (IA32_PG_D | IA32_PG_A | IA32_PG_NX | IA32_PG_U |\
                                     PAGE_ATTRIBUTE_BITS)

// Each paging level indexes 512 entries with 9 address bits.
#define PAGING_PAE_INDEX_MASK       0x1FF

//
// Masks extracting the page-frame address from an entry, per mapping size
// (bits 51:12 are the architectural maximum physical address field).
//
#define PAGING_4K_ADDRESS_MASK_64   0x000FFFFFFFFFF000ull
#define PAGING_2M_ADDRESS_MASK_64   0x000FFFFFFFE00000ull
#define PAGING_1G_ADDRESS_MASK_64   0x000FFFFFC0000000ull
#define PAGING_512G_ADDRESS_MASK_64 0x000FFF8000000000ull
//
// Paging levels / mapping granularities, ordered from leaf (4K) up to the
// span covered by one top-level (PML4) entry. The numeric values double as
// indices into mPageAttributeTable and as page-walk level numbers.
//
typedef enum {
  PageNone = 0,          // No mapping / invalid
  PageMin  = 1,
  Page4K   = PageMin,    // 4KB page (PTE leaf)
  Page2M   = 2,          // 2MB page (PDE with PS set)
  Page1G   = 3,          // 1GB page (PDPTE with PS set)
  Page512G = 4,          // Span of one PML4 entry (never a leaf)
  PageMax  = Page512G
} PAGE_ATTRIBUTE;
51\r
//
// Per-level description of a paging granularity; see mPageAttributeTable.
//
typedef struct {
  PAGE_ATTRIBUTE   Attribute;          // Which level this row describes
  UINT64           Length;             // Bytes mapped by one entry at this level
  UINT64           AddressMask;        // Mask selecting the page-frame address bits
  UINTN            AddressBitOffset;   // Lowest address bit indexing this level
  UINTN            AddressBitLength;   // Number of index bits (9 for all levels)
} PAGE_ATTRIBUTE_TABLE;
59\r
//
// Level-indexed table of paging granularities; row N corresponds to
// PAGE_ATTRIBUTE value N (row 0 is the PageNone placeholder).
//
PAGE_ATTRIBUTE_TABLE mPageAttributeTable[] = {
  {PageNone,    0,          0,                           0,  0},
  {Page4K,      SIZE_4KB,   PAGING_4K_ADDRESS_MASK_64,   12, 9},
  {Page2M,      SIZE_2MB,   PAGING_2M_ADDRESS_MASK_64,   21, 9},
  {Page1G,      SIZE_1GB,   PAGING_1G_ADDRESS_MASK_64,   30, 9},
  {Page512G,    SIZE_512GB, PAGING_512G_ADDRESS_MASK_64, 39, 9},
};
67\r
//
// Notification descriptor: run MemoryDiscoveredPpiNotifyCallback once
// permanent memory has been discovered (post-memory PEI phase).
//
EFI_PEI_NOTIFY_DESCRIPTOR mPostMemNotifyList[] = {
  {
    (EFI_PEI_PPI_DESCRIPTOR_NOTIFY_CALLBACK | EFI_PEI_PPI_DESCRIPTOR_TERMINATE_LIST),
    &gEfiPeiMemoryDiscoveredPpiGuid,
    MemoryDiscoveredPpiNotifyCallback
  }
};
75\r
76/**\r
77 The function will check if IA32 PAE is supported.\r
78\r
79 @retval TRUE IA32 PAE is supported.\r
80 @retval FALSE IA32 PAE is not supported.\r
81\r
82**/\r
83BOOLEAN\r
84IsIa32PaeSupported (\r
85 VOID\r
86 )\r
87{\r
88 UINT32 RegEax;\r
89 CPUID_VERSION_INFO_EDX RegEdx;\r
90\r
91 AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);\r
92 if (RegEax >= CPUID_VERSION_INFO) {\r
93 AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);\r
94 if (RegEdx.Bits.PAE != 0) {\r
95 return TRUE;\r
96 }\r
97 }\r
98\r
99 return FALSE;\r
100}\r
101\r
102/**\r
103 This API provides a way to allocate memory for page table.\r
104\r
105 @param Pages The number of 4 KB pages to allocate.\r
106\r
107 @return A pointer to the allocated buffer or NULL if allocation fails.\r
108\r
109**/\r
110VOID *\r
111AllocatePageTableMemory (\r
112 IN UINTN Pages\r
113 )\r
114{\r
115 VOID *Address;\r
116\r
117 Address = AllocatePages(Pages);\r
118 if (Address != NULL) {\r
119 ZeroMem(Address, EFI_PAGES_TO_SIZE (Pages));\r
120 }\r
121\r
122 return Address;\r
123}\r
124\r
125/**\r
126 Get the address width supported by current processor.\r
127\r
128 @retval 32 If processor is in 32-bit mode.\r
129 @retval 36-48 If processor is in 64-bit mode.\r
130\r
131**/\r
132UINTN\r
133GetPhysicalAddressWidth (\r
134 VOID\r
135 )\r
136{\r
137 UINT32 RegEax;\r
138\r
139 if (sizeof(UINTN) == 4) {\r
140 return 32;\r
141 }\r
142\r
143 AsmCpuid(CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);\r
144 if (RegEax >= CPUID_VIR_PHY_ADDRESS_SIZE) {\r
145 AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &RegEax, NULL, NULL, NULL);\r
146 RegEax &= 0xFF;\r
147 if (RegEax > 48) {\r
148 return 48;\r
149 }\r
150\r
151 return (UINTN)RegEax;\r
152 }\r
153\r
154 return 36;\r
155}\r
156\r
157/**\r
158 Get the type of top level page table.\r
159\r
160 @retval Page512G PML4 paging.\r
161 @retval Page1G PAE paing.\r
162\r
163**/\r
164PAGE_ATTRIBUTE\r
165GetPageTableTopLevelType (\r
166 VOID\r
167 )\r
168{\r
169 MSR_IA32_EFER_REGISTER MsrEfer;\r
170\r
171 MsrEfer.Uint64 = AsmReadMsr64 (MSR_CORE_IA32_EFER);\r
172\r
173 return (MsrEfer.Bits.LMA == 1) ? Page512G : Page1G;\r
174}\r
175\r
/**
  Return page table entry matching the address.

  Walks the page tables pointed to by CR3 from the top level downwards until
  a leaf entry (large page or 4K PTE) covering Address is found.

  @param[in]  Address         The address to be checked.
  @param[out] PageAttribute   The page attribute (mapping size) of the entry,
                              or PageNone when Address is not mapped.

  @return The page entry, or NULL if there is no mapping for Address.
**/
VOID *
GetPageTableEntry (
  IN  PHYSICAL_ADDRESS      Address,
  OUT PAGE_ATTRIBUTE        *PageAttribute
  )
{
  INTN                  Level;
  UINTN                 Index;
  UINT64                *PageTable;
  UINT64                AddressEncMask;

  //
  // Memory-encryption mask bits (if any) must be stripped before an entry
  // can be dereferenced as the physical address of the next-level table.
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask);
  PageTable = (UINT64 *)(UINTN)(AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64);
  for (Level = (INTN)GetPageTableTopLevelType (); Level > 0; --Level) {
    //
    // 9-bit index of this level's entry covering Address.
    //
    Index = (UINTN)RShiftU64 (Address, mPageAttributeTable[Level].AddressBitOffset);
    Index &= PAGING_PAE_INDEX_MASK;

    //
    // No mapping?
    //
    if (PageTable[Index] == 0) {
      *PageAttribute = PageNone;
      return NULL;
    }

    //
    // Page memory? PS set means a large-page leaf; Level == PageMin means
    // we reached the 4K PTE level, which is always a leaf.
    //
    if ((PageTable[Index] & IA32_PG_PS) != 0 || Level == PageMin) {
      *PageAttribute = (PAGE_ATTRIBUTE)Level;
      return &PageTable[Index];
    }

    //
    // Page directory or table: descend one level.
    //
    PageTable = (UINT64 *)(UINTN)(PageTable[Index] &
                                  ~AddressEncMask &
                                  PAGING_4K_ADDRESS_MASK_64);
  }

  // Loop exhausted without finding a leaf (defensive; should not happen).
  *PageAttribute = PageNone;
  return NULL;
}
228\r
/**
  This function splits one page entry to smaller page entries.

  The entry is replaced by a newly allocated table of 512 entries, each one
  level smaller, covering the same range with the same attribute bits. When
  Recursively is TRUE, splitting continues until SplitAttribute is reached.

  @param[in] PageEntry        The page entry to be splitted.
  @param[in] PageAttribute    The page attribute of the page entry.
  @param[in] SplitAttribute   How to split the page entry.
  @param[in] Recursively      Do the split recursively or not.

  @retval RETURN_SUCCESS            The page entry is splitted.
  @retval RETURN_INVALID_PARAMETER  If target page attribute is invalid
  @retval RETURN_OUT_OF_RESOURCES   No resource to split page entry.
**/
RETURN_STATUS
SplitPage (
  IN  UINT64            *PageEntry,
  IN  PAGE_ATTRIBUTE    PageAttribute,
  IN  PAGE_ATTRIBUTE    SplitAttribute,
  IN  BOOLEAN           Recursively
  )
{
  UINT64            BaseAddress;
  UINT64            *NewPageEntry;
  UINTN             Index;
  UINT64            AddressEncMask;
  PAGE_ATTRIBUTE    SplitTo;

  //
  // Can only split to a strictly smaller, valid granularity.
  //
  if (SplitAttribute == PageNone || SplitAttribute >= PageAttribute) {
    ASSERT (SplitAttribute != PageNone);
    ASSERT (SplitAttribute < PageAttribute);
    return RETURN_INVALID_PARAMETER;
  }

  NewPageEntry = AllocatePageTableMemory (1);
  if (NewPageEntry == NULL) {
    ASSERT (NewPageEntry != NULL);
    return RETURN_OUT_OF_RESOURCES;
  }

  //
  // One level down each step to achieve more compact page table.
  //
  SplitTo = PageAttribute - 1;
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
                   mPageAttributeTable[SplitTo].AddressMask;
  //
  // Physical base mapped by the original entry, with encryption bits removed.
  //
  BaseAddress  = *PageEntry &
                 ~PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
                 mPageAttributeTable[PageAttribute].AddressMask;
  for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {
    //
    // Each new entry inherits the original's propagated attribute bits.
    //
    NewPageEntry[Index] = BaseAddress | AddressEncMask |
                          ((*PageEntry) & PAGE_PROGATE_BITS);

    if (SplitTo != PageMin) {
      // Still a large page at the lower level: mark it as a leaf.
      NewPageEntry[Index] |= IA32_PG_PS;
    }

    if (Recursively && SplitTo > SplitAttribute) {
      SplitPage (&NewPageEntry[Index], SplitTo, SplitAttribute, Recursively);
    }

    BaseAddress += mPageAttributeTable[SplitTo].Length;
  }

  //
  // Point the original entry at the new table (non-leaf: no PS bit).
  //
  (*PageEntry) = (UINT64)(UINTN)NewPageEntry | AddressEncMask | PAGE_ATTRIBUTE_BITS;

  return RETURN_SUCCESS;
}
295\r
/**
  This function modifies the page attributes for the memory region specified
  by BaseAddress and Length from their current attributes to the attributes
  specified by Attributes.

  Caller should make sure BaseAddress and Length is at page boundary.

  NOTE: only the Present bit is honored here (sufficient for Stack Guard);
  the region must also fall entirely below 4GB.

  @param[in]  BaseAddress  Start address of a memory region.
  @param[in]  Length       Size in bytes of the memory region.
  @param[in]  Attributes   Bit mask of attributes to modify.

  @retval RETURN_SUCCESS            The attributes were modified for the memory
                                    region.
  @retval RETURN_INVALID_PARAMETER  Length is zero; or,
                                    Attributes specified an illegal combination
                                    of attributes that cannot be set together; or
                                    Address is not 4KB aligned.
  @retval RETURN_OUT_OF_RESOURCES   There are not enough system resources to modify
                                    the attributes.
  @retval RETURN_UNSUPPORTED        Cannot modify the attributes of given memory.

**/
RETURN_STATUS
EFIAPI
ConvertMemoryPageAttributes (
  IN  PHYSICAL_ADDRESS      BaseAddress,
  IN  UINT64                Length,
  IN  UINT64                Attributes
  )
{
  UINT64                *PageEntry;
  PAGE_ATTRIBUTE        PageAttribute;
  RETURN_STATUS         Status;
  EFI_PHYSICAL_ADDRESS  MaximumAddress;

  if (Length == 0 ||
      (BaseAddress & (SIZE_4KB - 1)) != 0 ||
      (Length & (SIZE_4KB - 1)) != 0) {

    ASSERT (Length > 0);
    ASSERT ((BaseAddress & (SIZE_4KB - 1)) == 0);
    ASSERT ((Length & (SIZE_4KB - 1)) == 0);

    return RETURN_INVALID_PARAMETER;
  }

  //
  // The whole region must fit below 4GB (overflow-safe range check).
  //
  MaximumAddress = (EFI_PHYSICAL_ADDRESS)MAX_UINT32;
  if (BaseAddress > MaximumAddress ||
      Length > MaximumAddress ||
      (BaseAddress > MaximumAddress - (Length - 1))) {
    return RETURN_UNSUPPORTED;
  }

  //
  // Below logic is to check 2M/4K page to make sure we do not waste memory.
  //
  while (Length != 0) {
    PageEntry = GetPageTableEntry (BaseAddress, &PageAttribute);
    if (PageEntry == NULL) {
      return RETURN_UNSUPPORTED;
    }

    if (PageAttribute != Page4K) {
      //
      // Entry maps a large page: split one level at a time, then re-walk.
      //
      Status = SplitPage (PageEntry, PageAttribute, Page4K, FALSE);
      if (RETURN_ERROR (Status)) {
        return Status;
      }
      //
      // Do it again until the page is 4K.
      //
      continue;
    }

    //
    // Just take care of 'present' bit for Stack Guard.
    //
    if ((Attributes & IA32_PG_P) != 0) {
      *PageEntry |= (UINT64)IA32_PG_P;
    } else {
      *PageEntry &= ~((UINT64)IA32_PG_P);
    }

    //
    // Convert success, move to next
    //
    BaseAddress += SIZE_4KB;
    Length -= SIZE_4KB;
  }

  return RETURN_SUCCESS;
}
387\r
388/**\r
389 Get maximum size of page memory supported by current processor.\r
390\r
391 @param[in] TopLevelType The type of top level page entry.\r
392\r
393 @retval Page1G If processor supports 1G page and PML4.\r
394 @retval Page2M For all other situations.\r
395\r
396**/\r
397PAGE_ATTRIBUTE\r
398GetMaxMemoryPage (\r
399 IN PAGE_ATTRIBUTE TopLevelType\r
400 )\r
401{\r
402 UINT32 RegEax;\r
403 UINT32 RegEdx;\r
404\r
405 if (TopLevelType == Page512G) {\r
406 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);\r
407 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {\r
408 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);\r
409 if ((RegEdx & BIT26) != 0) {\r
410 return Page1G;\r
411 }\r
412 }\r
413 }\r
414\r
415 return Page2M;\r
416}\r
417\r
/**
  Create PML4 or PAE page table.

  Builds an identity mapping covering the whole physical address space
  reported by the processor, using the largest supported page size.

  @return The address of page table, or 0 when allocation fails.

**/
UINTN
CreatePageTable (
  VOID
  )
{
  RETURN_STATUS           Status;
  UINTN                   PhysicalAddressBits;
  UINTN                   NumberOfEntries;
  PAGE_ATTRIBUTE          TopLevelPageAttr;
  UINTN                   PageTable;
  PAGE_ATTRIBUTE          MaxMemoryPage;
  UINTN                   Index;
  UINT64                  AddressEncMask;
  UINT64                  *PageEntry;
  EFI_PHYSICAL_ADDRESS    PhysicalAddress;

  TopLevelPageAttr = (PAGE_ATTRIBUTE)GetPageTableTopLevelType ();
  PhysicalAddressBits = GetPhysicalAddressWidth ();
  //
  // Number of top-level entries needed to cover the full address width.
  //
  NumberOfEntries = (UINTN)1 << (PhysicalAddressBits -
                                 mPageAttributeTable[TopLevelPageAttr].AddressBitOffset);

  PageTable = (UINTN) AllocatePageTableMemory (1);
  if (PageTable == 0) {
    return 0;
  }

  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask);
  AddressEncMask &= mPageAttributeTable[TopLevelPageAttr].AddressMask;
  MaxMemoryPage  = GetMaxMemoryPage (TopLevelPageAttr);
  PageEntry      = (UINT64 *)PageTable;

  PhysicalAddress = 0;
  for (Index = 0; Index < NumberOfEntries; ++Index) {
    *PageEntry = PhysicalAddress | AddressEncMask | PAGE_ATTRIBUTE_BITS;

    //
    // Split the top page table down to the maximum page size supported
    //
    if (MaxMemoryPage < TopLevelPageAttr) {
      Status = SplitPage(PageEntry, TopLevelPageAttr, MaxMemoryPage, TRUE);
      ASSERT_EFI_ERROR (Status);
    }

    if (TopLevelPageAttr == Page1G) {
      //
      // PDPTE[2:1] (PAE Paging) must be 0. SplitPage() might change them to 1.
      //
      *PageEntry &= ~(UINT64)(IA32_PG_RW | IA32_PG_U);
    }

    PageEntry += 1;
    PhysicalAddress += mPageAttributeTable[TopLevelPageAttr].Length;
  }


  return PageTable;
}
481\r
482/**\r
483 Setup page tables and make them work.\r
484\r
485**/\r
486VOID\r
487EnablePaging (\r
488 VOID\r
489 )\r
490{\r
491 UINTN PageTable;\r
492\r
493 PageTable = CreatePageTable ();\r
494 ASSERT (PageTable != 0);\r
495 if (PageTable != 0) {\r
496 AsmWriteCr3(PageTable);\r
497 AsmWriteCr4 (AsmReadCr4 () | BIT5); // CR4.PAE\r
498 AsmWriteCr0 (AsmReadCr0 () | BIT31); // CR0.PG\r
499 }\r
500}\r
501\r
502/**\r
503 Get the base address of current AP's stack.\r
504\r
505 This function is called in AP's context and assumes that whole calling stacks\r
506 (till this function) consumed by AP's wakeup procedure will not exceed 4KB.\r
507\r
508 PcdCpuApStackSize must be configured with value taking the Guard page into\r
509 account.\r
510\r
511 @param[in,out] Buffer The pointer to private data buffer.\r
512\r
513**/\r
514VOID\r
515EFIAPI\r
516GetStackBase (\r
517 IN OUT VOID *Buffer\r
518 )\r
519{\r
520 EFI_PHYSICAL_ADDRESS StackBase;\r
521\r
522 StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)&StackBase;\r
523 StackBase += BASE_4KB;\r
524 StackBase &= ~((EFI_PHYSICAL_ADDRESS)BASE_4KB - 1);\r
525 StackBase -= PcdGet32(PcdCpuApStackSize);\r
526\r
527 *(EFI_PHYSICAL_ADDRESS *)Buffer = StackBase;\r
528}\r
529\r
530/**\r
531 Setup stack Guard page at the stack base of each processor. BSP and APs have\r
532 different way to get stack base address.\r
533\r
534**/\r
535VOID\r
536SetupStackGuardPage (\r
537 VOID\r
538 )\r
539{\r
540 EFI_PEI_HOB_POINTERS Hob;\r
541 EFI_PHYSICAL_ADDRESS StackBase;\r
542 UINTN NumberOfProcessors;\r
543 UINTN Bsp;\r
544 UINTN Index;\r
545\r
546 //\r
547 // One extra page at the bottom of the stack is needed for Guard page.\r
548 //\r
549 if (PcdGet32(PcdCpuApStackSize) <= EFI_PAGE_SIZE) {\r
550 DEBUG ((DEBUG_ERROR, "PcdCpuApStackSize is not big enough for Stack Guard!\n"));\r
551 ASSERT (FALSE);\r
552 }\r
553\r
554 MpInitLibGetNumberOfProcessors(&NumberOfProcessors, NULL);\r
555 MpInitLibWhoAmI (&Bsp);\r
556 for (Index = 0; Index < NumberOfProcessors; ++Index) {\r
557 if (Index == Bsp) {\r
558 Hob.Raw = GetHobList ();\r
559 while ((Hob.Raw = GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION, Hob.Raw)) != NULL) {\r
560 if (CompareGuid (&gEfiHobMemoryAllocStackGuid,\r
561 &(Hob.MemoryAllocationStack->AllocDescriptor.Name))) {\r
562 StackBase = Hob.MemoryAllocationStack->AllocDescriptor.MemoryBaseAddress;\r
563 break;\r
564 }\r
565 Hob.Raw = GET_NEXT_HOB (Hob);\r
566 }\r
567 } else {\r
568 //\r
569 // Ask AP to return is stack base address.\r
570 //\r
571 MpInitLibStartupThisAP(GetStackBase, Index, NULL, 0, (VOID *)&StackBase, NULL);\r
572 }\r
573 //\r
574 // Set Guard page at stack base address.\r
575 //\r
576 ConvertMemoryPageAttributes(StackBase, EFI_PAGE_SIZE, 0);\r
577 DEBUG ((DEBUG_INFO, "Stack Guard set at %lx [cpu%lu]!\n",\r
578 (UINT64)StackBase, (UINT64)Index));\r
579 }\r
580\r
581 //\r
582 // Publish the changes of page table.\r
583 //\r
584 CpuFlushTlb ();\r
585}\r
586\r
587/**\r
588 Enabl/setup stack guard for each processor if PcdCpuStackGuard is set to TRUE.\r
589\r
590 Doing this in the memory-discovered callback is to make sure the Stack Guard\r
591 feature to cover as most PEI code as possible.\r
592\r
593 @param[in] PeiServices General purpose services available to every PEIM.\r
594 @param[in] NotifyDescriptor The notification structure this PEIM registered on install.\r
595 @param[in] Ppi The memory discovered PPI. Not used.\r
596\r
597 @retval EFI_SUCCESS The function completed successfully.\r
598 @retval others There's error in MP initialization.\r
599**/\r
600EFI_STATUS\r
601EFIAPI\r
602MemoryDiscoveredPpiNotifyCallback (\r
603 IN EFI_PEI_SERVICES **PeiServices,\r
604 IN EFI_PEI_NOTIFY_DESCRIPTOR *NotifyDescriptor,\r
605 IN VOID *Ppi\r
606 )\r
607{\r
608 EFI_STATUS Status;\r
609 BOOLEAN InitStackGuard;\r
610\r
611 //\r
612 // Paging must be setup first. Otherwise the exception TSS setup during MP\r
613 // initialization later will not contain paging information and then fail\r
614 // the task switch (for the sake of stack switch).\r
615 //\r
616 InitStackGuard = FALSE;\r
617 if (IsIa32PaeSupported () && PcdGetBool (PcdCpuStackGuard)) {\r
618 EnablePaging ();\r
619 InitStackGuard = TRUE;\r
620 }\r
621\r
622 Status = InitializeCpuMpWorker ((CONST EFI_PEI_SERVICES **)PeiServices);\r
623 ASSERT_EFI_ERROR (Status);\r
624\r
625 if (InitStackGuard) {\r
626 SetupStackGuardPage ();\r
627 }\r
628\r
629 return Status;\r
630}\r
631\r