]>
Commit | Line | Data |
---|---|---|
0a0d5296 JW |
1 | /** @file\r |
2 | Basic paging support for the CPU to enable Stack Guard.\r | |
3 | \r | |
01acb06c | 4 | Copyright (c) 2018 - 2019, Intel Corporation. All rights reserved.<BR>\r |
0a0d5296 | 5 | \r |
0acd8697 | 6 | SPDX-License-Identifier: BSD-2-Clause-Patent\r |
0a0d5296 JW |
7 | \r |
8 | **/\r | |
9 | \r | |
01acb06c RN |
10 | #include <Register/Intel/Cpuid.h>\r |
11 | #include <Register/Intel/Msr.h>\r | |
0a0d5296 JW |
12 | #include <Library/MemoryAllocationLib.h>\r |
13 | #include <Library/CpuLib.h>\r | |
14 | #include <Library/BaseLib.h>\r | |
d7c9de51 | 15 | #include <Guid/MigratedFvInfo.h>\r |
0a0d5296 JW |
16 | \r |
17 | #include "CpuMpPei.h"\r | |
18 | \r | |
19 | #define IA32_PG_P BIT0\r | |
20 | #define IA32_PG_RW BIT1\r | |
21 | #define IA32_PG_U BIT2\r | |
22 | #define IA32_PG_A BIT5\r | |
23 | #define IA32_PG_D BIT6\r | |
24 | #define IA32_PG_PS BIT7\r | |
25 | #define IA32_PG_NX BIT63\r | |
26 | \r | |
27 | #define PAGE_ATTRIBUTE_BITS (IA32_PG_RW | IA32_PG_P)\r | |
28 | #define PAGE_PROGATE_BITS (IA32_PG_D | IA32_PG_A | IA32_PG_NX | IA32_PG_U |\\r | |
29 | PAGE_ATTRIBUTE_BITS)\r | |
30 | \r | |
31 | #define PAGING_PAE_INDEX_MASK 0x1FF\r | |
32 | #define PAGING_4K_ADDRESS_MASK_64 0x000FFFFFFFFFF000ull\r | |
33 | #define PAGING_2M_ADDRESS_MASK_64 0x000FFFFFFFE00000ull\r | |
34 | #define PAGING_1G_ADDRESS_MASK_64 0x000FFFFFC0000000ull\r | |
35 | #define PAGING_512G_ADDRESS_MASK_64 0x000FFF8000000000ull\r | |
36 | \r | |
//
// Page mapping granularity levels, smallest to largest. The numeric value of
// each level doubles as the index into mPageAttributeTable below, and as the
// paging-structure level counted from the leaf (1 = PTE ... 4 = PML4).
//
typedef enum {
  PageNone = 0,
  PageMin = 1,
  Page4K = PageMin,
  Page2M = 2,
  Page1G = 3,
  Page512G = 4,
  PageMax = Page512G
} PAGE_ATTRIBUTE;
46 | \r | |
//
// Per-level description of a paging structure entry.
//
typedef struct {
  PAGE_ATTRIBUTE   Attribute;         // Granularity level this row describes.
  UINT64           Length;            // Bytes mapped by one entry at this level.
  UINT64           AddressMask;       // Mask selecting the address bits of an entry.
  UINTN            AddressBitOffset;  // Bit offset of this level's index in a linear address.
  UINTN            AddressBitLength;  // Width in bits of this level's index field.
} PAGE_ATTRIBUTE_TABLE;
54 | \r | |
//
// Lookup table indexed by PAGE_ATTRIBUTE value (row 0 is a placeholder for
// PageNone). Each row gives the mapped length, address mask, and linear
// address index position for that paging level.
//
PAGE_ATTRIBUTE_TABLE mPageAttributeTable[] = {
  {PageNone,    0,          0,                           0,  0},
  {Page4K,      SIZE_4KB,   PAGING_4K_ADDRESS_MASK_64,   12, 9},
  {Page2M,      SIZE_2MB,   PAGING_2M_ADDRESS_MASK_64,   21, 9},
  {Page1G,      SIZE_1GB,   PAGING_1G_ADDRESS_MASK_64,   30, 9},
  {Page512G,    SIZE_512GB, PAGING_512G_ADDRESS_MASK_64, 39, 9},
};
62 | \r | |
//
// Notification descriptor: run MemoryDiscoveredPpiNotifyCallback once
// permanent memory is installed (gEfiPeiMemoryDiscoveredPpiGuid).
//
EFI_PEI_NOTIFY_DESCRIPTOR mPostMemNotifyList[] = {
  {
    (EFI_PEI_PPI_DESCRIPTOR_NOTIFY_CALLBACK | EFI_PEI_PPI_DESCRIPTOR_TERMINATE_LIST),
    &gEfiPeiMemoryDiscoveredPpiGuid,
    MemoryDiscoveredPpiNotifyCallback
  }
};
70 | \r | |
71 | /**\r | |
72 | The function will check if IA32 PAE is supported.\r | |
73 | \r | |
74 | @retval TRUE IA32 PAE is supported.\r | |
75 | @retval FALSE IA32 PAE is not supported.\r | |
76 | \r | |
77 | **/\r | |
78 | BOOLEAN\r | |
79 | IsIa32PaeSupported (\r | |
80 | VOID\r | |
81 | )\r | |
82 | {\r | |
83 | UINT32 RegEax;\r | |
84 | CPUID_VERSION_INFO_EDX RegEdx;\r | |
85 | \r | |
86 | AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);\r | |
87 | if (RegEax >= CPUID_VERSION_INFO) {\r | |
88 | AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);\r | |
89 | if (RegEdx.Bits.PAE != 0) {\r | |
90 | return TRUE;\r | |
91 | }\r | |
92 | }\r | |
93 | \r | |
94 | return FALSE;\r | |
95 | }\r | |
96 | \r | |
97 | /**\r | |
98 | This API provides a way to allocate memory for page table.\r | |
99 | \r | |
100 | @param Pages The number of 4 KB pages to allocate.\r | |
101 | \r | |
102 | @return A pointer to the allocated buffer or NULL if allocation fails.\r | |
103 | \r | |
104 | **/\r | |
105 | VOID *\r | |
106 | AllocatePageTableMemory (\r | |
107 | IN UINTN Pages\r | |
108 | )\r | |
109 | {\r | |
110 | VOID *Address;\r | |
111 | \r | |
112 | Address = AllocatePages(Pages);\r | |
113 | if (Address != NULL) {\r | |
114 | ZeroMem(Address, EFI_PAGES_TO_SIZE (Pages));\r | |
115 | }\r | |
116 | \r | |
117 | return Address;\r | |
118 | }\r | |
119 | \r | |
120 | /**\r | |
121 | Get the address width supported by current processor.\r | |
122 | \r | |
123 | @retval 32 If processor is in 32-bit mode.\r | |
124 | @retval 36-48 If processor is in 64-bit mode.\r | |
125 | \r | |
126 | **/\r | |
127 | UINTN\r | |
128 | GetPhysicalAddressWidth (\r | |
129 | VOID\r | |
130 | )\r | |
131 | {\r | |
132 | UINT32 RegEax;\r | |
133 | \r | |
134 | if (sizeof(UINTN) == 4) {\r | |
135 | return 32;\r | |
136 | }\r | |
137 | \r | |
138 | AsmCpuid(CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);\r | |
139 | if (RegEax >= CPUID_VIR_PHY_ADDRESS_SIZE) {\r | |
140 | AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &RegEax, NULL, NULL, NULL);\r | |
141 | RegEax &= 0xFF;\r | |
142 | if (RegEax > 48) {\r | |
143 | return 48;\r | |
144 | }\r | |
145 | \r | |
146 | return (UINTN)RegEax;\r | |
147 | }\r | |
148 | \r | |
149 | return 36;\r | |
150 | }\r | |
151 | \r | |
152 | /**\r | |
153 | Get the type of top level page table.\r | |
154 | \r | |
155 | @retval Page512G PML4 paging.\r | |
92c19c68 | 156 | @retval Page1G PAE paging.\r |
0a0d5296 JW |
157 | \r |
158 | **/\r | |
159 | PAGE_ATTRIBUTE\r | |
160 | GetPageTableTopLevelType (\r | |
161 | VOID\r | |
162 | )\r | |
163 | {\r | |
164 | MSR_IA32_EFER_REGISTER MsrEfer;\r | |
165 | \r | |
166 | MsrEfer.Uint64 = AsmReadMsr64 (MSR_CORE_IA32_EFER);\r | |
167 | \r | |
168 | return (MsrEfer.Bits.LMA == 1) ? Page512G : Page1G;\r | |
169 | }\r | |
170 | \r | |
/**
  Return page table entry matching the address.

  Walks the page table referenced by CR3 from the top level downwards until a
  leaf entry (large page or 4K page) or an unmapped hole is found.

  @param[in]  Address        The address to be checked.
  @param[out] PageAttribute  The granularity level of the returned entry, or
                             PageNone when the address is not mapped.

  @return The page entry covering Address, or NULL when there is no mapping.
**/
VOID *
GetPageTableEntry (
  IN  PHYSICAL_ADDRESS      Address,
  OUT PAGE_ATTRIBUTE        *PageAttribute
  )
{
  INTN                  Level;
  UINTN                 Index;
  UINT64                *PageTable;
  UINT64                AddressEncMask;

  //
  // The memory-encryption mask (e.g. AMD SEV) must be stripped from an entry
  // before its address bits can be used to reach the next-level table.
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask);
  PageTable = (UINT64 *)(UINTN)(AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64);
  for (Level = (INTN)GetPageTableTopLevelType (); Level > 0; --Level) {
    //
    // Extract this level's 9-bit index from the linear address.
    //
    Index = (UINTN)RShiftU64 (Address, mPageAttributeTable[Level].AddressBitOffset);
    Index &= PAGING_PAE_INDEX_MASK;

    //
    // No mapping?
    //
    if (PageTable[Index] == 0) {
      *PageAttribute = PageNone;
      return NULL;
    }

    //
    // Page memory? (PS bit set means a large-page leaf; level 1 entries are
    // always 4K leaves.)
    //
    if ((PageTable[Index] & IA32_PG_PS) != 0 || Level == PageMin) {
      *PageAttribute = (PAGE_ATTRIBUTE)Level;
      return &PageTable[Index];
    }

    //
    // Page directory or table
    //
    PageTable = (UINT64 *)(UINTN)(PageTable[Index] &
                                  ~AddressEncMask &
                                  PAGING_4K_ADDRESS_MASK_64);
  }

  *PageAttribute = PageNone;
  return NULL;
}
223 | \r | |
/**
  This function splits one page entry to smaller page entries.

  The entry is replaced with a pointer to a newly allocated lower-level table
  whose 512 entries together map the same range with the same attributes.

  @param[in] PageEntry        The page entry to be split.
  @param[in] PageAttribute    The page attribute of the page entry.
  @param[in] SplitAttribute   How to split the page entry.
  @param[in] Recursively      Do the split recursively or not.

  @retval RETURN_SUCCESS            The page entry is split.
  @retval RETURN_INVALID_PARAMETER  If target page attribute is invalid.
  @retval RETURN_OUT_OF_RESOURCES   No resource to split page entry.
**/
RETURN_STATUS
SplitPage (
  IN  UINT64            *PageEntry,
  IN  PAGE_ATTRIBUTE    PageAttribute,
  IN  PAGE_ATTRIBUTE    SplitAttribute,
  IN  BOOLEAN           Recursively
  )
{
  UINT64            BaseAddress;
  UINT64            *NewPageEntry;
  UINTN             Index;
  UINT64            AddressEncMask;
  PAGE_ATTRIBUTE    SplitTo;

  //
  // The target granularity must be strictly smaller than the current one.
  //
  if (SplitAttribute == PageNone || SplitAttribute >= PageAttribute) {
    ASSERT (SplitAttribute != PageNone);
    ASSERT (SplitAttribute < PageAttribute);
    return RETURN_INVALID_PARAMETER;
  }

  NewPageEntry = AllocatePageTableMemory (1);
  if (NewPageEntry == NULL) {
    ASSERT (NewPageEntry != NULL);
    return RETURN_OUT_OF_RESOURCES;
  }

  //
  // One level down each step to achieve more compact page table.
  //
  SplitTo = PageAttribute - 1;
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
                   mPageAttributeTable[SplitTo].AddressMask;
  BaseAddress = *PageEntry &
                ~PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
                mPageAttributeTable[PageAttribute].AddressMask;
  for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {
    //
    // Each new entry inherits the attribute bits of the entry being split.
    //
    NewPageEntry[Index] = BaseAddress | AddressEncMask |
                          ((*PageEntry) & PAGE_PROGATE_BITS);

    if (SplitTo != PageMin) {
      //
      // Above the leaf level the PS bit is required to map a large page.
      //
      NewPageEntry[Index] |= IA32_PG_PS;
    }

    if (Recursively && SplitTo > SplitAttribute) {
      SplitPage (&NewPageEntry[Index], SplitTo, SplitAttribute, Recursively);
    }

    BaseAddress += mPageAttributeTable[SplitTo].Length;
  }

  //
  // Point the original entry at the newly populated lower-level table.
  //
  (*PageEntry) = (UINT64)(UINTN)NewPageEntry | AddressEncMask | PAGE_ATTRIBUTE_BITS;

  return RETURN_SUCCESS;
}
290 | \r | |
291 | /**\r | |
292 | This function modifies the page attributes for the memory region specified\r | |
293 | by BaseAddress and Length from their current attributes to the attributes\r | |
294 | specified by Attributes.\r | |
295 | \r | |
296 | Caller should make sure BaseAddress and Length is at page boundary.\r | |
297 | \r | |
298 | @param[in] BaseAddress Start address of a memory region.\r | |
299 | @param[in] Length Size in bytes of the memory region.\r | |
300 | @param[in] Attributes Bit mask of attributes to modify.\r | |
301 | \r | |
302 | @retval RETURN_SUCCESS The attributes were modified for the memory\r | |
303 | region.\r | |
304 | @retval RETURN_INVALID_PARAMETER Length is zero; or,\r | |
305 | Attributes specified an illegal combination\r | |
306 | of attributes that cannot be set together; or\r | |
307 | Addressis not 4KB aligned.\r | |
308 | @retval RETURN_OUT_OF_RESOURCES There are not enough system resources to modify\r | |
309 | the attributes.\r | |
310 | @retval RETURN_UNSUPPORTED Cannot modify the attributes of given memory.\r | |
311 | \r | |
312 | **/\r | |
313 | RETURN_STATUS\r | |
314 | EFIAPI\r | |
315 | ConvertMemoryPageAttributes (\r | |
316 | IN PHYSICAL_ADDRESS BaseAddress,\r | |
317 | IN UINT64 Length,\r | |
318 | IN UINT64 Attributes\r | |
319 | )\r | |
320 | {\r | |
321 | UINT64 *PageEntry;\r | |
322 | PAGE_ATTRIBUTE PageAttribute;\r | |
323 | RETURN_STATUS Status;\r | |
324 | EFI_PHYSICAL_ADDRESS MaximumAddress;\r | |
325 | \r | |
326 | if (Length == 0 ||\r | |
327 | (BaseAddress & (SIZE_4KB - 1)) != 0 ||\r | |
328 | (Length & (SIZE_4KB - 1)) != 0) {\r | |
329 | \r | |
330 | ASSERT (Length > 0);\r | |
331 | ASSERT ((BaseAddress & (SIZE_4KB - 1)) == 0);\r | |
332 | ASSERT ((Length & (SIZE_4KB - 1)) == 0);\r | |
333 | \r | |
334 | return RETURN_INVALID_PARAMETER;\r | |
335 | }\r | |
336 | \r | |
337 | MaximumAddress = (EFI_PHYSICAL_ADDRESS)MAX_UINT32;\r | |
338 | if (BaseAddress > MaximumAddress ||\r | |
339 | Length > MaximumAddress ||\r | |
340 | (BaseAddress > MaximumAddress - (Length - 1))) {\r | |
341 | return RETURN_UNSUPPORTED;\r | |
342 | }\r | |
343 | \r | |
344 | //\r | |
345 | // Below logic is to check 2M/4K page to make sure we do not waste memory.\r | |
346 | //\r | |
347 | while (Length != 0) {\r | |
348 | PageEntry = GetPageTableEntry (BaseAddress, &PageAttribute);\r | |
349 | if (PageEntry == NULL) {\r | |
350 | return RETURN_UNSUPPORTED;\r | |
351 | }\r | |
352 | \r | |
353 | if (PageAttribute != Page4K) {\r | |
354 | Status = SplitPage (PageEntry, PageAttribute, Page4K, FALSE);\r | |
355 | if (RETURN_ERROR (Status)) {\r | |
356 | return Status;\r | |
357 | }\r | |
358 | //\r | |
359 | // Do it again until the page is 4K.\r | |
360 | //\r | |
361 | continue;\r | |
362 | }\r | |
363 | \r | |
364 | //\r | |
365 | // Just take care of 'present' bit for Stack Guard.\r | |
366 | //\r | |
367 | if ((Attributes & IA32_PG_P) != 0) {\r | |
368 | *PageEntry |= (UINT64)IA32_PG_P;\r | |
369 | } else {\r | |
370 | *PageEntry &= ~((UINT64)IA32_PG_P);\r | |
371 | }\r | |
372 | \r | |
373 | //\r | |
374 | // Convert success, move to next\r | |
375 | //\r | |
376 | BaseAddress += SIZE_4KB;\r | |
377 | Length -= SIZE_4KB;\r | |
378 | }\r | |
379 | \r | |
380 | return RETURN_SUCCESS;\r | |
381 | }\r | |
382 | \r | |
383 | /**\r | |
384 | Get maximum size of page memory supported by current processor.\r | |
385 | \r | |
386 | @param[in] TopLevelType The type of top level page entry.\r | |
387 | \r | |
388 | @retval Page1G If processor supports 1G page and PML4.\r | |
389 | @retval Page2M For all other situations.\r | |
390 | \r | |
391 | **/\r | |
392 | PAGE_ATTRIBUTE\r | |
393 | GetMaxMemoryPage (\r | |
394 | IN PAGE_ATTRIBUTE TopLevelType\r | |
395 | )\r | |
396 | {\r | |
397 | UINT32 RegEax;\r | |
398 | UINT32 RegEdx;\r | |
399 | \r | |
400 | if (TopLevelType == Page512G) {\r | |
401 | AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);\r | |
402 | if (RegEax >= CPUID_EXTENDED_CPU_SIG) {\r | |
403 | AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);\r | |
404 | if ((RegEdx & BIT26) != 0) {\r | |
405 | return Page1G;\r | |
406 | }\r | |
407 | }\r | |
408 | }\r | |
409 | \r | |
410 | return Page2M;\r | |
411 | }\r | |
412 | \r | |
/**
  Create PML4 or PAE page table.

  Builds an identity mapping that covers the processor's full physical address
  space, using the largest page size the processor supports.

  @return The address of page table, or 0 if memory allocation failed.

**/
UINTN
CreatePageTable (
  VOID
  )
{
  RETURN_STATUS           Status;
  UINTN                   PhysicalAddressBits;
  UINTN                   NumberOfEntries;
  PAGE_ATTRIBUTE          TopLevelPageAttr;
  UINTN                   PageTable;
  PAGE_ATTRIBUTE          MaxMemoryPage;
  UINTN                   Index;
  UINT64                  AddressEncMask;
  UINT64                  *PageEntry;
  EFI_PHYSICAL_ADDRESS    PhysicalAddress;

  TopLevelPageAttr = (PAGE_ATTRIBUTE)GetPageTableTopLevelType ();
  PhysicalAddressBits = GetPhysicalAddressWidth ();
  //
  // Number of top-level entries needed to span the whole physical address
  // space, each covering 2^AddressBitOffset bytes.
  //
  NumberOfEntries = (UINTN)1 << (PhysicalAddressBits -
                                 mPageAttributeTable[TopLevelPageAttr].AddressBitOffset);

  PageTable = (UINTN) AllocatePageTableMemory (1);
  if (PageTable == 0) {
    return 0;
  }

  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask);
  AddressEncMask &= mPageAttributeTable[TopLevelPageAttr].AddressMask;
  MaxMemoryPage = GetMaxMemoryPage (TopLevelPageAttr);
  PageEntry = (UINT64 *)PageTable;

  //
  // Identity-map the address space one top-level entry at a time.
  //
  PhysicalAddress = 0;
  for (Index = 0; Index < NumberOfEntries; ++Index) {
    *PageEntry = PhysicalAddress | AddressEncMask | PAGE_ATTRIBUTE_BITS;

    //
    // Split the top page table down to the maximum page size supported
    //
    if (MaxMemoryPage < TopLevelPageAttr) {
      Status = SplitPage(PageEntry, TopLevelPageAttr, MaxMemoryPage, TRUE);
      ASSERT_EFI_ERROR (Status);
    }

    if (TopLevelPageAttr == Page1G) {
      //
      // PDPTE[2:1] (PAE Paging) must be 0. SplitPage() might change them to 1.
      //
      *PageEntry &= ~(UINT64)(IA32_PG_RW | IA32_PG_U);
    }

    PageEntry += 1;
    PhysicalAddress += mPageAttributeTable[TopLevelPageAttr].Length;
  }


  return PageTable;
}
476 | \r | |
477 | /**\r | |
478 | Setup page tables and make them work.\r | |
479 | \r | |
480 | **/\r | |
481 | VOID\r | |
482 | EnablePaging (\r | |
483 | VOID\r | |
484 | )\r | |
485 | {\r | |
486 | UINTN PageTable;\r | |
487 | \r | |
488 | PageTable = CreatePageTable ();\r | |
489 | ASSERT (PageTable != 0);\r | |
490 | if (PageTable != 0) {\r | |
491 | AsmWriteCr3(PageTable);\r | |
492 | AsmWriteCr4 (AsmReadCr4 () | BIT5); // CR4.PAE\r | |
493 | AsmWriteCr0 (AsmReadCr0 () | BIT31); // CR0.PG\r | |
494 | }\r | |
495 | }\r | |
496 | \r | |
/**
  Get the base address of current AP's stack.

  This function is called in AP's context and assumes that whole calling stacks
  (till this function) consumed by AP's wakeup procedure will not exceed 4KB.

  PcdCpuApStackSize must be configured with value taking the Guard page into
  account.

  @param[in,out] Buffer  The pointer to private data buffer; receives the
                         stack base as an EFI_PHYSICAL_ADDRESS.

**/
VOID
EFIAPI
GetStackBase (
  IN OUT VOID *Buffer
  )
{
  EFI_PHYSICAL_ADDRESS  StackBase;

  //
  // Use the address of this local variable as an approximation of the current
  // stack pointer, round up to the next 4KB boundary (assumed to be at or
  // above the stack top given the <=4KB consumption noted above), then back
  // off the configured stack size to reach the stack base.
  //
  StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)&StackBase;
  StackBase += BASE_4KB;
  StackBase &= ~((EFI_PHYSICAL_ADDRESS)BASE_4KB - 1);
  StackBase -= PcdGet32(PcdCpuApStackSize);

  *(EFI_PHYSICAL_ADDRESS *)Buffer = StackBase;
}
524 | \r | |
/**
  Setup stack Guard page at the stack base of each processor. BSP and APs have
  different way to get stack base address.

  The BSP's stack base comes from the stack-allocation HOB; each AP reports its
  own stack base by executing GetStackBase() in its own context.

**/
VOID
SetupStackGuardPage (
  VOID
  )
{
  EFI_PEI_HOB_POINTERS    Hob;
  EFI_PHYSICAL_ADDRESS    StackBase;
  UINTN                   NumberOfProcessors;
  UINTN                   Bsp;
  UINTN                   Index;

  //
  // One extra page at the bottom of the stack is needed for Guard page.
  //
  if (PcdGet32(PcdCpuApStackSize) <= EFI_PAGE_SIZE) {
    DEBUG ((DEBUG_ERROR, "PcdCpuApStackSize is not big enough for Stack Guard!\n"));
    ASSERT (FALSE);
  }

  MpInitLibGetNumberOfProcessors(&NumberOfProcessors, NULL);
  MpInitLibWhoAmI (&Bsp);
  for (Index = 0; Index < NumberOfProcessors; ++Index) {
    StackBase = 0;

    if (Index == Bsp) {
      //
      // BSP: find the stack-allocation HOB published by the PEI core.
      //
      Hob.Raw = GetHobList ();
      while ((Hob.Raw = GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION, Hob.Raw)) != NULL) {
        if (CompareGuid (&gEfiHobMemoryAllocStackGuid,
                         &(Hob.MemoryAllocationStack->AllocDescriptor.Name))) {
          StackBase = Hob.MemoryAllocationStack->AllocDescriptor.MemoryBaseAddress;
          break;
        }
        Hob.Raw = GET_NEXT_HOB (Hob);
      }
    } else {
      //
      // Ask AP to return its stack base address.
      //
      MpInitLibStartupThisAP(GetStackBase, Index, NULL, 0, (VOID *)&StackBase, NULL);
    }
    ASSERT (StackBase != 0);
    //
    // Set Guard page at stack base address.
    //
    ConvertMemoryPageAttributes(StackBase, EFI_PAGE_SIZE, 0);
    DEBUG ((DEBUG_INFO, "Stack Guard set at %lx [cpu%lu]!\n",
            (UINT64)StackBase, (UINT64)Index));
  }

  //
  // Publish the changes of page table.
  //
  CpuFlushTlb ();
}
584 | \r | |
/**
  Enable/setup stack guard for each processor if PcdCpuStackGuard is set to TRUE.

  Doing this in the memory-discovered callback is to make sure the Stack Guard
  feature to cover as most PEI code as possible.

  @param[in] PeiServices       General purpose services available to every PEIM.
  @param[in] NotifyDescriptor  The notification structure this PEIM registered on install.
  @param[in] Ppi               The memory discovered PPI. Not used.

  @retval EFI_SUCCESS  The function completed successfully.
  @retval others       There's error in MP initialization.
**/
EFI_STATUS
EFIAPI
MemoryDiscoveredPpiNotifyCallback (
  IN EFI_PEI_SERVICES           **PeiServices,
  IN EFI_PEI_NOTIFY_DESCRIPTOR  *NotifyDescriptor,
  IN VOID                       *Ppi
  )
{
  EFI_STATUS              Status;
  BOOLEAN                 InitStackGuard;
  BOOLEAN                 InterruptState;
  EDKII_MIGRATED_FV_INFO  *MigratedFvInfo;
  EFI_PEI_HOB_POINTERS    Hob;

  if (PcdGetBool (PcdMigrateTemporaryRamFirmwareVolumes)) {
    //
    // The GDT may still live in temporary RAM; move it to permanent memory
    // with interrupts disabled so no interrupt uses a stale descriptor.
    //
    InterruptState = SaveAndDisableInterrupts ();
    Status = MigrateGdt ();
    ASSERT_EFI_ERROR (Status);
    SetInterruptState (InterruptState);
  }

  //
  // Paging must be setup first. Otherwise the exception TSS setup during MP
  // initialization later will not contain paging information and then fail
  // the task switch (for the sake of stack switch).
  //
  InitStackGuard = FALSE;
  Hob.Raw = NULL;
  if (IsIa32PaeSupported ()) {
    //
    // A migrated-FV HOB also requires paging (to unmap the original FV range
    // below), so look for it under the same PAE-capability gate.
    //
    Hob.Raw = GetFirstGuidHob (&gEdkiiMigratedFvInfoGuid);
    InitStackGuard = PcdGetBool (PcdCpuStackGuard);
  }

  if (InitStackGuard || Hob.Raw != NULL) {
    EnablePaging ();
  }

  Status = InitializeCpuMpWorker ((CONST EFI_PEI_SERVICES **)PeiServices);
  ASSERT_EFI_ERROR (Status);

  if (InitStackGuard) {
    SetupStackGuardPage ();
  }

  while (Hob.Raw != NULL) {
    MigratedFvInfo = GET_GUID_HOB_DATA (Hob);

    //
    // Enable #PF exception, so if the code access SPI after disable NEM, it will generate
    // the exception to avoid potential vulnerability.
    //
    ConvertMemoryPageAttributes (MigratedFvInfo->FvOrgBase, MigratedFvInfo->FvLength, 0);

    Hob.Raw = GET_NEXT_HOB (Hob);
    Hob.Raw = GetNextGuidHob (&gEdkiiMigratedFvInfoGuid, Hob.Raw);
  }
  CpuFlushTlb ();

  return Status;
}
658 | \r |