]> git.proxmox.com Git - mirror_edk2.git/blob - UefiCpuPkg/CpuMpPei/CpuPaging.c
UefiCpuPkg/CpuMpPei: fix vs2012 build error
[mirror_edk2.git] / UefiCpuPkg / CpuMpPei / CpuPaging.c
1 /** @file
2 Basic paging support for the CPU to enable Stack Guard.
3
4 Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>
5
6 This program and the accompanying materials
7 are licensed and made available under the terms and conditions of the BSD License
8 which accompanies this distribution. The full text of the license may be found at
9 http://opensource.org/licenses/bsd-license.php
10
11 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
12 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
13
14 **/
15
16 #include <Register/Cpuid.h>
17 #include <Register/Msr.h>
18 #include <Library/MemoryAllocationLib.h>
19 #include <Library/CpuLib.h>
20 #include <Library/BaseLib.h>
21
22 #include "CpuMpPei.h"
23
//
// IA32/X64 page table entry attribute bits.
//
#define IA32_PG_P                   BIT0    // Present
#define IA32_PG_RW                  BIT1    // Read/write
#define IA32_PG_U                   BIT2    // User/supervisor
#define IA32_PG_A                   BIT5    // Accessed
#define IA32_PG_D                   BIT6    // Dirty
#define IA32_PG_PS                  BIT7    // Page size (entry maps a large page)
#define IA32_PG_NX                  BIT63   // No-execute

#define PAGE_ATTRIBUTE_BITS         (IA32_PG_RW | IA32_PG_P)
//
// Attribute bits propagated from a parent entry to its children when a large
// page is split. (NOTE: "PROGATE" is a historical misspelling of "PROPAGATE";
// the name is kept as-is because it is part of the existing code.)
//
#define PAGE_PROGATE_BITS           (IA32_PG_D | IA32_PG_A | IA32_PG_NX | IA32_PG_U |\
                                     PAGE_ATTRIBUTE_BITS)

//
// 9-bit index mask for one level of PAE/4-level paging (512 entries per table).
//
#define PAGING_PAE_INDEX_MASK       0x1FF
//
// Masks extracting the physical frame address from an entry at each page size.
//
#define PAGING_4K_ADDRESS_MASK_64   0x000FFFFFFFFFF000ull
#define PAGING_2M_ADDRESS_MASK_64   0x000FFFFFFFE00000ull
#define PAGING_1G_ADDRESS_MASK_64   0x000FFFFFC0000000ull
#define PAGING_512G_ADDRESS_MASK_64 0x000FFF8000000000ull

//
// Page mapping size levels, ordered from smallest (4K) to largest (512G).
// Also used as the index into mPageAttributeTable below.
//
typedef enum {
  PageNone = 0,
  PageMin = 1,
  Page4K = PageMin,
  Page2M = 2,
  Page1G = 3,
  Page512G = 4,
  PageMax = Page512G
} PAGE_ATTRIBUTE;

//
// Describes one page-size level: how many bytes one entry maps, the address
// mask for that entry, and which linear-address bits index entries at that
// level.
//
typedef struct {
  PAGE_ATTRIBUTE   Attribute;         // Page size level
  UINT64           Length;            // Bytes mapped by one entry at this level
  UINT64           AddressMask;       // Physical address bits within the entry
  UINTN            AddressBitOffset;  // First linear-address bit of the index
  UINTN            AddressBitLength;  // Number of index bits (9 per level)
} PAGE_ATTRIBUTE_TABLE;

//
// Per-level paging parameters, indexed by PAGE_ATTRIBUTE.
//
PAGE_ATTRIBUTE_TABLE mPageAttributeTable[] = {
  {PageNone,  0,          0,                           0,  0},
  {Page4K,    SIZE_4KB,   PAGING_4K_ADDRESS_MASK_64,   12, 9},
  {Page2M,    SIZE_2MB,   PAGING_2M_ADDRESS_MASK_64,   21, 9},
  {Page1G,    SIZE_1GB,   PAGING_1G_ADDRESS_MASK_64,   30, 9},
  {Page512G,  SIZE_512GB, PAGING_512G_ADDRESS_MASK_64, 39, 9},
};

//
// Register MemoryDiscoveredPpiNotifyCallback to run once permanent memory
// has been installed.
//
EFI_PEI_NOTIFY_DESCRIPTOR mPostMemNotifyList[] = {
  {
    (EFI_PEI_PPI_DESCRIPTOR_NOTIFY_CALLBACK | EFI_PEI_PPI_DESCRIPTOR_TERMINATE_LIST),
    &gEfiPeiMemoryDiscoveredPpiGuid,
    MemoryDiscoveredPpiNotifyCallback
  }
};
75
76 /**
77 The function will check if IA32 PAE is supported.
78
79 @retval TRUE IA32 PAE is supported.
80 @retval FALSE IA32 PAE is not supported.
81
82 **/
83 BOOLEAN
84 IsIa32PaeSupported (
85 VOID
86 )
87 {
88 UINT32 RegEax;
89 CPUID_VERSION_INFO_EDX RegEdx;
90
91 AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);
92 if (RegEax >= CPUID_VERSION_INFO) {
93 AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
94 if (RegEdx.Bits.PAE != 0) {
95 return TRUE;
96 }
97 }
98
99 return FALSE;
100 }
101
102 /**
103 This API provides a way to allocate memory for page table.
104
105 @param Pages The number of 4 KB pages to allocate.
106
107 @return A pointer to the allocated buffer or NULL if allocation fails.
108
109 **/
110 VOID *
111 AllocatePageTableMemory (
112 IN UINTN Pages
113 )
114 {
115 VOID *Address;
116
117 Address = AllocatePages(Pages);
118 if (Address != NULL) {
119 ZeroMem(Address, EFI_PAGES_TO_SIZE (Pages));
120 }
121
122 return Address;
123 }
124
125 /**
126 Get the address width supported by current processor.
127
128 @retval 32 If processor is in 32-bit mode.
129 @retval 36-48 If processor is in 64-bit mode.
130
131 **/
132 UINTN
133 GetPhysicalAddressWidth (
134 VOID
135 )
136 {
137 UINT32 RegEax;
138
139 if (sizeof(UINTN) == 4) {
140 return 32;
141 }
142
143 AsmCpuid(CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
144 if (RegEax >= CPUID_VIR_PHY_ADDRESS_SIZE) {
145 AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &RegEax, NULL, NULL, NULL);
146 RegEax &= 0xFF;
147 if (RegEax > 48) {
148 return 48;
149 }
150
151 return (UINTN)RegEax;
152 }
153
154 return 36;
155 }
156
157 /**
158 Get the type of top level page table.
159
160 @retval Page512G PML4 paging.
161 @retval Page1G PAE paing.
162
163 **/
164 PAGE_ATTRIBUTE
165 GetPageTableTopLevelType (
166 VOID
167 )
168 {
169 MSR_IA32_EFER_REGISTER MsrEfer;
170
171 MsrEfer.Uint64 = AsmReadMsr64 (MSR_CORE_IA32_EFER);
172
173 return (MsrEfer.Bits.LMA == 1) ? Page512G : Page1G;
174 }
175
/**
  Return the page table entry matching the address.

  Walks the paging hierarchy referenced by CR3, from the top level down,
  until reaching either a leaf entry (a large page, or a 4K entry at the
  lowest level) or a hole in the mapping.

  @param[in]  Address        The address to be checked.
  @param[out] PageAttribute  The page size level of the returned entry, or
                             PageNone if the address is not mapped.

  @return Pointer to the page table entry mapping Address, or NULL if the
          address has no mapping.
**/
VOID *
GetPageTableEntry (
  IN  PHYSICAL_ADDRESS      Address,
  OUT PAGE_ATTRIBUTE        *PageAttribute
  )
{
  INTN    Level;
  UINTN   Index;
  UINT64  *PageTable;
  UINT64  AddressEncMask;

  //
  // The memory-encryption mask must be stripped from entries before they can
  // be dereferenced as physical pointers.
  //
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask);
  PageTable = (UINT64 *)(UINTN)(AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64);
  //
  // Walk from the top level (PML4 or PDPT) toward level 1 (4K page table).
  //
  for (Level = (INTN)GetPageTableTopLevelType (); Level > 0; --Level) {
    //
    // Extract this level's 9-bit index from the linear address.
    //
    Index = (UINTN)RShiftU64 (Address, mPageAttributeTable[Level].AddressBitOffset);
    Index &= PAGING_PAE_INDEX_MASK;

    //
    // No mapping?
    //
    if (PageTable[Index] == 0) {
      *PageAttribute = PageNone;
      return NULL;
    }

    //
    // Page memory? A set PS bit means a leaf large page; level PageMin (4K)
    // entries are always leaves.
    //
    if ((PageTable[Index] & IA32_PG_PS) != 0 || Level == PageMin) {
      *PageAttribute = (PAGE_ATTRIBUTE)Level;
      return &PageTable[Index];
    }

    //
    // Page directory or table: descend one level.
    //
    PageTable = (UINT64 *)(UINTN)(PageTable[Index] &
                                  ~AddressEncMask &
                                  PAGING_4K_ADDRESS_MASK_64);
  }

  *PageAttribute = PageNone;
  return NULL;
}
228
/**
  This function splits one page entry into smaller page entries.

  The entry is split one level down per call; if Recursively is TRUE,
  splitting continues until entries of SplitAttribute size are reached.

  @param[in] PageEntry       The page entry to be split.
  @param[in] PageAttribute   The current page size level of the page entry.
  @param[in] SplitAttribute  The target page size level to split down to.
  @param[in] Recursively     Do the split recursively or not.

  @retval RETURN_SUCCESS            The page entry was split.
  @retval RETURN_INVALID_PARAMETER  If target page attribute is invalid
                                    (PageNone, or not smaller than the
                                    current attribute).
  @retval RETURN_OUT_OF_RESOURCES   No resource to split the page entry.
**/
RETURN_STATUS
SplitPage (
  IN  UINT64            *PageEntry,
  IN  PAGE_ATTRIBUTE    PageAttribute,
  IN  PAGE_ATTRIBUTE    SplitAttribute,
  IN  BOOLEAN           Recursively
  )
{
  UINT64          BaseAddress;
  UINT64          *NewPageEntry;
  UINTN           Index;
  UINT64          AddressEncMask;
  PAGE_ATTRIBUTE  SplitTo;

  //
  // The target must be a real page size strictly smaller than the current one.
  //
  if (SplitAttribute == PageNone || SplitAttribute >= PageAttribute) {
    ASSERT (SplitAttribute != PageNone);
    ASSERT (SplitAttribute < PageAttribute);
    return RETURN_INVALID_PARAMETER;
  }

  NewPageEntry = AllocatePageTableMemory (1);
  if (NewPageEntry == NULL) {
    ASSERT (NewPageEntry != NULL);
    return RETURN_OUT_OF_RESOURCES;
  }

  //
  // One level down each step to achieve more compact page table.
  //
  SplitTo = PageAttribute - 1;
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
                   mPageAttributeTable[SplitTo].AddressMask;
  //
  // Recover the plain physical base address mapped by the entry being split.
  //
  BaseAddress = *PageEntry &
                ~PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
                mPageAttributeTable[PageAttribute].AddressMask;
  //
  // Fill all 512 child entries, carrying over the parent's attribute bits.
  //
  for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {
    NewPageEntry[Index] = BaseAddress | AddressEncMask |
                          ((*PageEntry) & PAGE_PROGATE_BITS);

    //
    // PS must be set on large-page leaves; 4K (PageMin) entries have no PS bit.
    //
    if (SplitTo != PageMin) {
      NewPageEntry[Index] |= IA32_PG_PS;
    }

    if (Recursively && SplitTo > SplitAttribute) {
      SplitPage (&NewPageEntry[Index], SplitTo, SplitAttribute, Recursively);
    }

    BaseAddress += mPageAttributeTable[SplitTo].Length;
  }

  //
  // Repoint the original entry at the newly-built lower-level table.
  //
  (*PageEntry) = (UINT64)(UINTN)NewPageEntry | AddressEncMask | PAGE_ATTRIBUTE_BITS;

  return RETURN_SUCCESS;
}
295
/**
  This function modifies the page attributes for the memory region specified
  by BaseAddress and Length from their current attributes to the attributes
  specified by Attributes.

  Caller should make sure BaseAddress and Length is at page boundary.

  @param[in] BaseAddress    Start address of a memory region.
  @param[in] Length         Size in bytes of the memory region.
  @param[in] Attributes     Bit mask of attributes to modify.

  @retval RETURN_SUCCESS            The attributes were modified for the memory
                                    region.
  @retval RETURN_INVALID_PARAMETER  Length is zero; or,
                                    Attributes specified an illegal combination
                                    of attributes that cannot be set together; or
                                    Address is not 4KB aligned.
  @retval RETURN_OUT_OF_RESOURCES   There are not enough system resources to modify
                                    the attributes.
  @retval RETURN_UNSUPPORTED        Cannot modify the attributes of given memory.

**/
RETURN_STATUS
EFIAPI
ConvertMemoryPageAttributes (
  IN  PHYSICAL_ADDRESS      BaseAddress,
  IN  UINT64                Length,
  IN  UINT64                Attributes
  )
{
  UINT64                *PageEntry;
  PAGE_ATTRIBUTE        PageAttribute;
  RETURN_STATUS         Status;
  EFI_PHYSICAL_ADDRESS  MaximumAddress;

  if (Length == 0 ||
      (BaseAddress & (SIZE_4KB - 1)) != 0 ||
      (Length & (SIZE_4KB - 1)) != 0) {

    ASSERT (Length > 0);
    ASSERT ((BaseAddress & (SIZE_4KB - 1)) == 0);
    ASSERT ((Length & (SIZE_4KB - 1)) == 0);

    return RETURN_INVALID_PARAMETER;
  }

  //
  // Only ranges entirely within the lower 4GB are handled; anything beyond
  // is rejected as unsupported.
  // NOTE(review): presumably sufficient for PEI-phase stacks — confirm.
  //
  MaximumAddress = (EFI_PHYSICAL_ADDRESS)MAX_UINT32;
  if (BaseAddress > MaximumAddress ||
      Length > MaximumAddress ||
      (BaseAddress > MaximumAddress - (Length - 1))) {
    return RETURN_UNSUPPORTED;
  }

  //
  // Below logic is to check 2M/4K page to make sure we do not waste memory.
  //
  while (Length != 0) {
    PageEntry = GetPageTableEntry (BaseAddress, &PageAttribute);
    if (PageEntry == NULL) {
      return RETURN_UNSUPPORTED;
    }

    //
    // Split any larger page covering this address down to 4K, one level per
    // pass, then re-walk the tables.
    //
    if (PageAttribute != Page4K) {
      Status = SplitPage (PageEntry, PageAttribute, Page4K, FALSE);
      if (RETURN_ERROR (Status)) {
        return Status;
      }
      //
      // Do it again until the page is 4K.
      //
      continue;
    }

    //
    // Just take care of 'present' bit for Stack Guard.
    //
    if ((Attributes & IA32_PG_P) != 0) {
      *PageEntry |= (UINT64)IA32_PG_P;
    } else {
      *PageEntry &= ~((UINT64)IA32_PG_P);
    }

    //
    // Convert success, move to next
    //
    BaseAddress += SIZE_4KB;
    Length -= SIZE_4KB;
  }

  return RETURN_SUCCESS;
}
387
388 /**
389 Get maximum size of page memory supported by current processor.
390
391 @param[in] TopLevelType The type of top level page entry.
392
393 @retval Page1G If processor supports 1G page and PML4.
394 @retval Page2M For all other situations.
395
396 **/
397 PAGE_ATTRIBUTE
398 GetMaxMemoryPage (
399 IN PAGE_ATTRIBUTE TopLevelType
400 )
401 {
402 UINT32 RegEax;
403 UINT32 RegEdx;
404
405 if (TopLevelType == Page512G) {
406 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
407 if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
408 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
409 if ((RegEdx & BIT26) != 0) {
410 return Page1G;
411 }
412 }
413 }
414
415 return Page2M;
416 }
417
/**
  Create PML4 or PAE page table.

  Builds an identity mapping covering the processor's full physical address
  width, using the largest page size the processor supports.

  @return The address of the page table, or 0 if allocation failed.

**/
UINTN
CreatePageTable (
  VOID
  )
{
  RETURN_STATUS         Status;
  UINTN                 PhysicalAddressBits;
  UINTN                 NumberOfEntries;
  PAGE_ATTRIBUTE        TopLevelPageAttr;
  UINTN                 PageTable;
  PAGE_ATTRIBUTE        MaxMemoryPage;
  UINTN                 Index;
  UINT64                AddressEncMask;
  UINT64                *PageEntry;
  EFI_PHYSICAL_ADDRESS  PhysicalAddress;

  TopLevelPageAttr = (PAGE_ATTRIBUTE)GetPageTableTopLevelType ();
  PhysicalAddressBits = GetPhysicalAddressWidth ();
  //
  // Number of top-level entries needed to span the whole physical address
  // space (each maps mPageAttributeTable[TopLevelPageAttr].Length bytes).
  //
  NumberOfEntries = (UINTN)1 << (PhysicalAddressBits -
                                 mPageAttributeTable[TopLevelPageAttr].AddressBitOffset);

  PageTable = (UINTN) AllocatePageTableMemory (1);
  if (PageTable == 0) {
    return 0;
  }

  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask);
  AddressEncMask &= mPageAttributeTable[TopLevelPageAttr].AddressMask;
  MaxMemoryPage = GetMaxMemoryPage (TopLevelPageAttr);
  PageEntry = (UINT64 *)PageTable;

  //
  // Identity-map the address space one top-level entry at a time.
  //
  PhysicalAddress = 0;
  for (Index = 0; Index < NumberOfEntries; ++Index) {
    *PageEntry = PhysicalAddress | AddressEncMask | PAGE_ATTRIBUTE_BITS;

    //
    // Split the top page table down to the maximum page size supported
    //
    if (MaxMemoryPage < TopLevelPageAttr) {
      Status = SplitPage(PageEntry, TopLevelPageAttr, MaxMemoryPage, TRUE);
      ASSERT_EFI_ERROR (Status);
    }

    if (TopLevelPageAttr == Page1G) {
      //
      // PDPTE[2:1] (PAE Paging) must be 0. SplitPage() might change them to 1.
      //
      *PageEntry &= ~(UINT64)(IA32_PG_RW | IA32_PG_U);
    }

    PageEntry += 1;
    PhysicalAddress += mPageAttributeTable[TopLevelPageAttr].Length;
  }


  return PageTable;
}
481
482 /**
483 Setup page tables and make them work.
484
485 **/
486 VOID
487 EnablePaging (
488 VOID
489 )
490 {
491 UINTN PageTable;
492
493 PageTable = CreatePageTable ();
494 ASSERT (PageTable != 0);
495 if (PageTable != 0) {
496 AsmWriteCr3(PageTable);
497 AsmWriteCr4 (AsmReadCr4 () | BIT5); // CR4.PAE
498 AsmWriteCr0 (AsmReadCr0 () | BIT31); // CR0.PG
499 }
500 }
501
502 /**
503 Get the base address of current AP's stack.
504
505 This function is called in AP's context and assumes that whole calling stacks
506 (till this function) consumed by AP's wakeup procedure will not exceed 4KB.
507
508 PcdCpuApStackSize must be configured with value taking the Guard page into
509 account.
510
511 @param[in,out] Buffer The pointer to private data buffer.
512
513 **/
514 VOID
515 EFIAPI
516 GetStackBase (
517 IN OUT VOID *Buffer
518 )
519 {
520 EFI_PHYSICAL_ADDRESS StackBase;
521
522 StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)&StackBase;
523 StackBase += BASE_4KB;
524 StackBase &= ~((EFI_PHYSICAL_ADDRESS)BASE_4KB - 1);
525 StackBase -= PcdGet32(PcdCpuApStackSize);
526
527 *(EFI_PHYSICAL_ADDRESS *)Buffer = StackBase;
528 }
529
/**
  Setup stack Guard page at the stack base of each processor. BSP and APs have
  different ways to get the stack base address: the BSP's comes from the stack
  memory allocation HOB, while each AP reports its own via GetStackBase().

**/
VOID
SetupStackGuardPage (
  VOID
  )
{
  EFI_PEI_HOB_POINTERS  Hob;
  EFI_PHYSICAL_ADDRESS  StackBase;
  UINTN                 NumberOfProcessors;
  UINTN                 Bsp;
  UINTN                 Index;

  //
  // One extra page at the bottom of the stack is needed for Guard page.
  //
  if (PcdGet32(PcdCpuApStackSize) <= EFI_PAGE_SIZE) {
    DEBUG ((DEBUG_ERROR, "PcdCpuApStackSize is not big enough for Stack Guard!\n"));
    ASSERT (FALSE);
  }

  MpInitLibGetNumberOfProcessors(&NumberOfProcessors, NULL);
  MpInitLibWhoAmI (&Bsp);
  for (Index = 0; Index < NumberOfProcessors; ++Index) {
    StackBase = 0;

    if (Index == Bsp) {
      //
      // BSP stack base: search the HOB list for the stack allocation HOB.
      //
      Hob.Raw = GetHobList ();
      while ((Hob.Raw = GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION, Hob.Raw)) != NULL) {
        if (CompareGuid (&gEfiHobMemoryAllocStackGuid,
                         &(Hob.MemoryAllocationStack->AllocDescriptor.Name))) {
          StackBase = Hob.MemoryAllocationStack->AllocDescriptor.MemoryBaseAddress;
          break;
        }
        Hob.Raw = GET_NEXT_HOB (Hob);
      }
    } else {
      //
      // Ask AP to return its stack base address.
      //
      MpInitLibStartupThisAP(GetStackBase, Index, NULL, 0, (VOID *)&StackBase, NULL);
    }
    ASSERT (StackBase != 0);
    //
    // Set Guard page at stack base address (clear the 'present' bit).
    //
    ConvertMemoryPageAttributes(StackBase, EFI_PAGE_SIZE, 0);
    DEBUG ((DEBUG_INFO, "Stack Guard set at %lx [cpu%lu]!\n",
            (UINT64)StackBase, (UINT64)Index));
  }

  //
  // Publish the changes of page table.
  //
  CpuFlushTlb ();
}
589
590 /**
591 Enabl/setup stack guard for each processor if PcdCpuStackGuard is set to TRUE.
592
593 Doing this in the memory-discovered callback is to make sure the Stack Guard
594 feature to cover as most PEI code as possible.
595
596 @param[in] PeiServices General purpose services available to every PEIM.
597 @param[in] NotifyDescriptor The notification structure this PEIM registered on install.
598 @param[in] Ppi The memory discovered PPI. Not used.
599
600 @retval EFI_SUCCESS The function completed successfully.
601 @retval others There's error in MP initialization.
602 **/
603 EFI_STATUS
604 EFIAPI
605 MemoryDiscoveredPpiNotifyCallback (
606 IN EFI_PEI_SERVICES **PeiServices,
607 IN EFI_PEI_NOTIFY_DESCRIPTOR *NotifyDescriptor,
608 IN VOID *Ppi
609 )
610 {
611 EFI_STATUS Status;
612 BOOLEAN InitStackGuard;
613
614 //
615 // Paging must be setup first. Otherwise the exception TSS setup during MP
616 // initialization later will not contain paging information and then fail
617 // the task switch (for the sake of stack switch).
618 //
619 InitStackGuard = FALSE;
620 if (IsIa32PaeSupported () && PcdGetBool (PcdCpuStackGuard)) {
621 EnablePaging ();
622 InitStackGuard = TRUE;
623 }
624
625 Status = InitializeCpuMpWorker ((CONST EFI_PEI_SERVICES **)PeiServices);
626 ASSERT_EFI_ERROR (Status);
627
628 if (InitStackGuard) {
629 SetupStackGuardPage ();
630 }
631
632 return Status;
633 }
634