/** @file
Enable SMM profile.

Copyright (c) 2012 - 2019, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017 - 2020, AMD Incorporated. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include "PiSmmCpuDxeSmm.h"
#include "SmmProfileInternal.h"

UINT32              mSmmProfileCr3;

SMM_PROFILE_HEADER  *mSmmProfileBase;
MSR_DS_AREA_STRUCT  *mMsrDsAreaBase;
//
// The size of the buffer that stores SMM profile data.
//
UINTN               mSmmProfileSize;

//
// The size of the buffer used to enable branch trace store.
//
UINTN               mMsrDsAreaSize = SMM_PROFILE_DTS_SIZE;

//
// The flag indicates if execute-disable is supported by processor.
//
BOOLEAN  mXdSupported = TRUE;

//
// The flag indicates if execute-disable is enabled on processor.
//
BOOLEAN  mXdEnabled = FALSE;

//
// The flag indicates if BTS is supported by processor.
//
BOOLEAN  mBtsSupported = TRUE;

//
// The flag indicates if SMM profile starts to record data.
//
BOOLEAN  mSmmProfileStart = FALSE;

//
// The flag indicates if #DB will be setup in #PF handler.
//
BOOLEAN  mSetupDebugTrap = FALSE;

//
// Record the page fault exception count for one instruction execution.
//
UINTN  *mPFEntryCount;

UINT64  (*mLastPFEntryValue)[MAX_PF_ENTRY_COUNT];
UINT64  *(*mLastPFEntryPointer)[MAX_PF_ENTRY_COUNT];

MSR_DS_AREA_STRUCT   **mMsrDsArea;
BRANCH_TRACE_RECORD  **mMsrBTSRecord;
UINTN                mBTSRecordNumber;
PEBS_RECORD          **mMsrPEBSRecord;

//
// These memory ranges are always present; they do not generate the access type of page fault exception,
// but they may generate the instruction fetch type of page fault exception.
//
MEMORY_PROTECTION_RANGE  *mProtectionMemRange     = NULL;
UINTN                    mProtectionMemRangeCount = 0;

//
// Some predefined memory ranges.
//
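// Each initializer in the template below is { { Base, Top }, Present, Nx },
// assuming the MEMORY_PROTECTION_RANGE field order matches how the fields are
// consumed later in this file (Range.Base, Range.Top, Present, Nx).
//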
MEMORY_PROTECTION_RANGE  mProtectionMemRangeTemplate[] = {
  //
  // SMRAM range (to be fixed in runtime).
  // It is always present and instruction fetches are allowed.
  //
  { { 0x00000000, 0x00000000 }, TRUE, FALSE },

  //
  // SMM profile data range (to be fixed in runtime).
  // It is always present and instruction fetches are not allowed.
  //
  { { 0x00000000, 0x00000000 }, TRUE, TRUE },

  //
  // SMRAM ranges not covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSize (to be fixed in runtime).
  // They are always present and instruction fetches are allowed.
  // {{0x00000000, 0x00000000},TRUE,FALSE},
  //

  //
  // Future extended range could be added here.
  //

  //
  // PCI MMIO ranges (to be added in runtime).
  // They are always present and instruction fetches are not allowed.
  //
};

//
// These memory ranges are mapped by 4KB-page instead of 2MB-page.
//
MEMORY_RANGE  *mSplitMemRange     = NULL;
UINTN         mSplitMemRangeCount = 0;

//
// SMI command port.
//
UINT32  mSmiCommandPort;

/**
  Disable branch trace store.

**/
VOID
DisableBTS (
  VOID
  )
{
  AsmMsrAnd64 (MSR_DEBUG_CTL, ~((UINT64)(MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR)));
}

/**
  Enable branch trace store.

**/
VOID
EnableBTS (
  VOID
  )
{
  AsmMsrOr64 (MSR_DEBUG_CTL, (MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR));
}

/**
  Get CPU Index from APIC ID.

  @return The index of the CPU whose APIC ID matches the current processor.

**/
UINTN
GetCpuIndex (
  VOID
  )
{
  UINTN   Index;
  UINT32  ApicId;

  ApicId = GetApicId ();

  for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
    if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == ApicId) {
      return Index;
    }
  }
  ASSERT (FALSE);
  return 0;
}

/**
  Get the source IP for a destination IP after an execute-disable exception is triggered.

  @param CpuIndex         The index of CPU.
  @param DestinationIP    The destination address.

  @return The source address that branched to DestinationIP, or 0 if it is not found.

**/
UINT64
GetSourceFromDestinationOnBts (
  UINTN   CpuIndex,
  UINT64  DestinationIP
  )
{
  BRANCH_TRACE_RECORD  *CurrentBTSRecord;
  UINTN                Index;
  BOOLEAN              FirstMatch;

  FirstMatch = FALSE;

  CurrentBTSRecord = (BRANCH_TRACE_RECORD *)mMsrDsArea[CpuIndex]->BTSIndex;
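  //
  // Walk backward through the circular BTS record buffer, starting from the
  // current BTS index and wrapping to the end of the buffer on underflow,
  // looking for records whose LastBranchTo matches DestinationIP.
  //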
  for (Index = 0; Index < mBTSRecordNumber; Index++) {
    if ((UINTN)CurrentBTSRecord < (UINTN)mMsrBTSRecord[CpuIndex]) {
      //
      // Underflow
      //
      CurrentBTSRecord = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[CpuIndex]->BTSAbsoluteMaximum - 1);
      CurrentBTSRecord--;
    }
    if (CurrentBTSRecord->LastBranchTo == DestinationIP) {
      //
      // Found a match: skip the first one, then take the second one.
      //
      if (!FirstMatch) {
        //
        // The first match is the DEBUG exception.
        //
        FirstMatch = TRUE;
      } else {
        //
        // Found the proper record.
        //
        return CurrentBTSRecord->LastBranchFrom;
      }
    }
    CurrentBTSRecord--;
  }

  return 0;
}

/**
  SMM profile specific INT 1 (single-step) exception handler.

  @param InterruptType    Defines the type of interrupt or exception that
                          occurred on the processor. This parameter is processor architecture specific.
  @param SystemContext    A pointer to the processor context when
                          the interrupt occurred on the processor.
**/
VOID
EFIAPI
DebugExceptionHandler (
  IN EFI_EXCEPTION_TYPE  InterruptType,
  IN EFI_SYSTEM_CONTEXT  SystemContext
  )
{
  UINTN  CpuIndex;
  UINTN  PFEntry;

  if (!mSmmProfileStart &&
      !HEAP_GUARD_NONSTOP_MODE &&
      !NULL_DETECTION_NONSTOP_MODE) {
    return;
  }
  CpuIndex = GetCpuIndex ();

  //
  // Restore the last PF entries to their original values so that the
  // corresponding pages will trigger a page fault again on the next access.
  //
  for (PFEntry = 0; PFEntry < mPFEntryCount[CpuIndex]; PFEntry++) {
    *mLastPFEntryPointer[CpuIndex][PFEntry] = mLastPFEntryValue[CpuIndex][PFEntry];
  }

  //
  // Reset page fault exception count for next page fault.
  //
  mPFEntryCount[CpuIndex] = 0;

  //
  // Flush TLB
  //
  CpuFlushTlb ();

  //
  // Clear TF in EFLAGS
  //
  ClearTrapFlag (SystemContext);
}

/**
  Check if the input address is in SMM ranges.

  @param[in] Address    The input address.

  @retval TRUE     The input address is in SMM.
  @retval FALSE    The input address is not in SMM.
**/
BOOLEAN
IsInSmmRanges (
  IN EFI_PHYSICAL_ADDRESS  Address
  )
{
  UINTN  Index;

  if ((Address >= mCpuHotPlugData.SmrrBase) && (Address < mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    return TRUE;
  }
  for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
    if (Address >= mSmmCpuSmramRanges[Index].CpuStart &&
        Address < mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) {
      return TRUE;
    }
  }
  return FALSE;
}

/**
  Check if the memory address should be marked present in the page table.

  @param Address    The address of memory.
  @param Nx         On return, indicates if the memory is execute-disable.

  @return TRUE if the address should be marked present, FALSE otherwise.

**/
BOOLEAN
IsAddressValid (
  IN EFI_PHYSICAL_ADDRESS  Address,
  IN BOOLEAN               *Nx
  )
{
  UINTN  Index;

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Check configuration
    //
    for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
      if ((Address >= mProtectionMemRange[Index].Range.Base) && (Address < mProtectionMemRange[Index].Range.Top)) {
        *Nx = mProtectionMemRange[Index].Nx;
        return mProtectionMemRange[Index].Present;
      }
    }
    *Nx = TRUE;
    return FALSE;

  } else {
    *Nx = TRUE;
    if (IsInSmmRanges (Address)) {
      *Nx = FALSE;
    }
    return TRUE;
  }
}

/**
  Check if the memory address will be mapped by 4KB-page.

  @param Address    The address of memory.

  @return TRUE if the 2MB page containing the address must be split into 4KB pages.

**/
BOOLEAN
IsAddressSplit (
  IN EFI_PHYSICAL_ADDRESS  Address
  )
{
  UINTN  Index;

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Check configuration
    //
    for (Index = 0; Index < mSplitMemRangeCount; Index++) {
      if ((Address >= mSplitMemRange[Index].Base) && (Address < mSplitMemRange[Index].Top)) {
        return TRUE;
      }
    }
  } else {
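    //
    // Without SMM profile, only the 2MB pages straddling the bottom and the
    // top of the SMRR-covered SMRAM region need to be split into 4KB pages.
    //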
    if (Address < mCpuHotPlugData.SmrrBase) {
      if ((mCpuHotPlugData.SmrrBase - Address) < BASE_2MB) {
        return TRUE;
      }
    } else if (Address > (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize - BASE_2MB)) {
      if ((Address - (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize - BASE_2MB)) < BASE_2MB) {
        return TRUE;
      }
    }
  }
  //
  // Return default
  //
  return FALSE;
}

/**
  Initialize the protected memory ranges and the 4KB-page mapped memory ranges.

**/
VOID
InitProtectedMemRange (
  VOID
  )
{
  UINTN                            Index;
  UINTN                            NumberOfDescriptors;
  UINTN                            NumberOfAddedDescriptors;
  UINTN                            NumberOfProtectRange;
  UINTN                            NumberOfSpliteRange;
  EFI_GCD_MEMORY_SPACE_DESCRIPTOR  *MemorySpaceMap;
  UINTN                            TotalSize;
  EFI_PHYSICAL_ADDRESS             ProtectBaseAddress;
  EFI_PHYSICAL_ADDRESS             ProtectEndAddress;
  EFI_PHYSICAL_ADDRESS             Top2MBAlignedAddress;
  EFI_PHYSICAL_ADDRESS             Base2MBAlignedAddress;
  UINT64                           High4KBPageSize;
  UINT64                           Low4KBPageSize;

  NumberOfDescriptors      = 0;
  NumberOfAddedDescriptors = mSmmCpuSmramRangeCount;
  NumberOfSpliteRange      = 0;
  MemorySpaceMap           = NULL;

  //
  // Get MMIO ranges from GCD and add them into protected memory ranges.
  //
  gDS->GetMemorySpaceMap (
         &NumberOfDescriptors,
         &MemorySpaceMap
         );
  for (Index = 0; Index < NumberOfDescriptors; Index++) {
    if (MemorySpaceMap[Index].GcdMemoryType == EfiGcdMemoryTypeMemoryMappedIo) {
      NumberOfAddedDescriptors++;
    }
  }

  if (NumberOfAddedDescriptors != 0) {
    TotalSize = NumberOfAddedDescriptors * sizeof (MEMORY_PROTECTION_RANGE) + sizeof (mProtectionMemRangeTemplate);
    mProtectionMemRange = (MEMORY_PROTECTION_RANGE *) AllocateZeroPool (TotalSize);
    ASSERT (mProtectionMemRange != NULL);
    mProtectionMemRangeCount = TotalSize / sizeof (MEMORY_PROTECTION_RANGE);

    //
    // Copy existing ranges.
    //
    CopyMem (mProtectionMemRange, mProtectionMemRangeTemplate, sizeof (mProtectionMemRangeTemplate));

    //
    // Create split ranges which come from protected ranges.
    //
    TotalSize = (TotalSize / sizeof (MEMORY_PROTECTION_RANGE)) * sizeof (MEMORY_RANGE);
    mSplitMemRange = (MEMORY_RANGE *) AllocateZeroPool (TotalSize);
    ASSERT (mSplitMemRange != NULL);

    //
    // Create SMM ranges which are set to present and execution-enabled.
    //
    NumberOfProtectRange = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);
    for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
      if (mSmmCpuSmramRanges[Index].CpuStart >= mProtectionMemRange[0].Range.Base &&
          mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize < mProtectionMemRange[0].Range.Top) {
        //
        // The address has already been covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSize.
        //
        break;
      }
      mProtectionMemRange[NumberOfProtectRange].Range.Base = mSmmCpuSmramRanges[Index].CpuStart;
      mProtectionMemRange[NumberOfProtectRange].Range.Top  = mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize;
      mProtectionMemRange[NumberOfProtectRange].Present    = TRUE;
      mProtectionMemRange[NumberOfProtectRange].Nx         = FALSE;
      NumberOfProtectRange++;
    }

    //
    // Create MMIO ranges which are set to present and execute-disable.
    //
    for (Index = 0; Index < NumberOfDescriptors; Index++) {
      if (MemorySpaceMap[Index].GcdMemoryType != EfiGcdMemoryTypeMemoryMappedIo) {
        continue;
      }
      mProtectionMemRange[NumberOfProtectRange].Range.Base = MemorySpaceMap[Index].BaseAddress;
      mProtectionMemRange[NumberOfProtectRange].Range.Top  = MemorySpaceMap[Index].BaseAddress + MemorySpaceMap[Index].Length;
      mProtectionMemRange[NumberOfProtectRange].Present    = TRUE;
      mProtectionMemRange[NumberOfProtectRange].Nx         = TRUE;
      NumberOfProtectRange++;
    }

    //
    // Check and update the actual protected memory range count.
    //
    ASSERT (NumberOfProtectRange <= mProtectionMemRangeCount);
    mProtectionMemRangeCount = NumberOfProtectRange;
  }

  //
  // According to the protected ranges, create the ranges which will be mapped by 4KB pages.
  //
  NumberOfSpliteRange  = 0;
  NumberOfProtectRange = mProtectionMemRangeCount;
  for (Index = 0; Index < NumberOfProtectRange; Index++) {
    //
    // If the base or top address is not 2MB aligned, round the range out to 2MB alignment so that 4KB pages can be created in the page table.
    //
    ProtectBaseAddress = mProtectionMemRange[Index].Range.Base;
    ProtectEndAddress  = mProtectionMemRange[Index].Range.Top;
    if (((ProtectBaseAddress & (SIZE_2MB - 1)) != 0) || ((ProtectEndAddress & (SIZE_2MB - 1)) != 0)) {
      //
      // Check if it is possible to create 4KB-page for not 2MB-aligned range and to create 2MB-page for 2MB-aligned range.
      // A mix of 4KB and 2MB page could save SMRAM space.
      //
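      // Illustrative example (hypothetical addresses): a protected range
      // [0x80100000, 0x80500000) produces split ranges [0x80000000, 0x80200000)
      // and [0x80400000, 0x80600000) below, while [0x80200000, 0x80400000)
      // keeps its 2MB mapping.
      //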
      Top2MBAlignedAddress  = ProtectEndAddress & ~(SIZE_2MB - 1);
      Base2MBAlignedAddress = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
      if ((Top2MBAlignedAddress > Base2MBAlignedAddress) &&
          ((Top2MBAlignedAddress - Base2MBAlignedAddress) >= SIZE_2MB)) {
        //
        // There is a range which could be mapped by 2MB-page.
        //
        High4KBPageSize = ((ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectEndAddress & ~(SIZE_2MB - 1));
        Low4KBPageSize  = ((ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectBaseAddress & ~(SIZE_2MB - 1));
        if (High4KBPageSize != 0) {
          //
          // Add the not 2MB-aligned range to be mapped by 4KB-page.
          //
          mSplitMemRange[NumberOfSpliteRange].Base = ProtectEndAddress & ~(SIZE_2MB - 1);
          mSplitMemRange[NumberOfSpliteRange].Top  = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
          NumberOfSpliteRange++;
        }
        if (Low4KBPageSize != 0) {
          //
          // Add the not 2MB-aligned range to be mapped by 4KB-page.
          //
          mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1);
          mSplitMemRange[NumberOfSpliteRange].Top  = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
          NumberOfSpliteRange++;
        }
      } else {
        //
        // The range could only be mapped by 4KB-page.
        //
        mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1);
        mSplitMemRange[NumberOfSpliteRange].Top  = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
        NumberOfSpliteRange++;
      }
    }
  }

  mSplitMemRangeCount = NumberOfSpliteRange;

  DEBUG ((DEBUG_INFO, "SMM Profile Memory Ranges:\n"));
  for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
    DEBUG ((DEBUG_INFO, "mProtectionMemRange[%d].Base = %lx\n", Index, mProtectionMemRange[Index].Range.Base));
    DEBUG ((DEBUG_INFO, "mProtectionMemRange[%d].Top = %lx\n", Index, mProtectionMemRange[Index].Range.Top));
  }
  for (Index = 0; Index < mSplitMemRangeCount; Index++) {
    DEBUG ((DEBUG_INFO, "mSplitMemRange[%d].Base = %lx\n", Index, mSplitMemRange[Index].Base));
    DEBUG ((DEBUG_INFO, "mSplitMemRange[%d].Top = %lx\n", Index, mSplitMemRange[Index].Top));
  }
}

/**
  Update the page table according to the protected memory ranges and the 4KB-page mapped memory ranges.

**/
VOID
InitPaging (
  VOID
  )
{
  UINT64    Pml5Entry;
  UINT64    Pml4Entry;
  UINT64    *Pml5;
  UINT64    *Pml4;
  UINT64    *Pdpt;
  UINT64    *Pd;
  UINT64    *Pt;
  UINTN     Address;
  UINTN     Pml5Index;
  UINTN     Pml4Index;
  UINTN     PdptIndex;
  UINTN     PdIndex;
  UINTN     PtIndex;
  UINTN     NumberOfPdptEntries;
  UINTN     NumberOfPml4Entries;
  UINTN     NumberOfPml5Entries;
  UINTN     SizeOfMemorySpace;
  BOOLEAN   Nx;
  IA32_CR4  Cr4;
  BOOLEAN   Enable5LevelPaging;

  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);

  if (sizeof (UINTN) == sizeof (UINT64)) {
    if (!Enable5LevelPaging) {
      Pml5Entry = (UINTN) mSmmProfileCr3 | IA32_PG_P;
      Pml5      = &Pml5Entry;
    } else {
      Pml5 = (UINT64*) (UINTN) mSmmProfileCr3;
    }
    SizeOfMemorySpace = HighBitSet64 (gPhyMask) + 1;
    //
    // Calculate the table entries of PML4E and PDPTE.
    //
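    // For example, a 46-bit physical address space yields one PML5 entry,
    // 2^(46 - 39) = 128 PML4 entries, and 512 PDPT entries per PML4 entry.
    //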
    NumberOfPml5Entries = 1;
    if (SizeOfMemorySpace > 48) {
      NumberOfPml5Entries = (UINTN) LShiftU64 (1, SizeOfMemorySpace - 48);
      SizeOfMemorySpace   = 48;
    }

    NumberOfPml4Entries = 1;
    if (SizeOfMemorySpace > 39) {
      NumberOfPml4Entries = (UINTN) LShiftU64 (1, SizeOfMemorySpace - 39);
      SizeOfMemorySpace   = 39;
    }

    NumberOfPdptEntries = 1;
    ASSERT (SizeOfMemorySpace > 30);
    NumberOfPdptEntries = (UINTN) LShiftU64 (1, SizeOfMemorySpace - 30);
  } else {
    Pml4Entry = (UINTN) mSmmProfileCr3 | IA32_PG_P;
    Pml4      = &Pml4Entry;
    Pml5Entry = (UINTN) Pml4 | IA32_PG_P;
    Pml5      = &Pml5Entry;
    NumberOfPml5Entries = 1;
    NumberOfPml4Entries = 1;
    NumberOfPdptEntries = 4;
  }

  //
  // Go through the page table and change 2MB-pages into 4KB-pages.
  //
  for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries; Pml5Index++) {
    if ((Pml5[Pml5Index] & IA32_PG_P) == 0) {
      //
      // If PML5 entry does not exist, skip it
      //
      continue;
    }
    Pml4 = (UINT64 *) (UINTN) (Pml5[Pml5Index] & PHYSICAL_ADDRESS_MASK);
    for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) {
      if ((Pml4[Pml4Index] & IA32_PG_P) == 0) {
        //
        // If PML4 entry does not exist, skip it
        //
        continue;
      }
      Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++, Pdpt++) {
        if ((*Pdpt & IA32_PG_P) == 0) {
          //
          // If PDPT entry does not exist, skip it
          //
          continue;
        }
        if ((*Pdpt & IA32_PG_PS) != 0) {
          //
          // This is a 1G entry, skip it
          //
          continue;
        }
        Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
        if (Pd == 0) {
          continue;
        }
        for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) {
          if ((*Pd & IA32_PG_P) == 0) {
            //
            // If PD entry does not exist, skip it
            //
            continue;
          }
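          //
          // Reconstruct the 2MB-aligned linear address covered by this PD entry:
          // each paging level contributes 9 index bits and a PD entry maps 2MB,
          // i.e. Address = ((((Pml5Index * 512 + Pml4Index) * 512 + PdptIndex) * 512) + PdIndex) * SIZE_2MB.
          //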
          Address = (UINTN) LShiftU64 (
                              LShiftU64 (
                                LShiftU64 ((Pml5Index << 9) + Pml4Index, 9) + PdptIndex,
                                9
                                ) + PdIndex,
                              21
                              );

          //
          // If it is 2M page, check IsAddressSplit()
          //
          if (((*Pd & IA32_PG_PS) != 0) && IsAddressSplit (Address)) {
            //
            // Based on current page table, create 4KB page table for split area.
            //
            ASSERT (Address == (*Pd & PHYSICAL_ADDRESS_MASK));

            Pt = AllocatePageTableMemory (1);
            ASSERT (Pt != NULL);

            // Split it
            for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof(*Pt); PtIndex++) {
              Pt[PtIndex] = Address + ((PtIndex << 12) | mAddressEncMask | PAGE_ATTRIBUTE_BITS);
            } // end for PT
            *Pd = (UINT64)(UINTN)Pt | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
          } // end if IsAddressSplit
        } // end for PD
      } // end for PDPT
    } // end for PML4
  } // end for PML5

  //
  // Go through the page table and set several page table entries to absent or execute-disable.
  //
  DEBUG ((DEBUG_INFO, "Patch page table start ...\n"));
  for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries; Pml5Index++) {
    if ((Pml5[Pml5Index] & IA32_PG_P) == 0) {
      //
      // If PML5 entry does not exist, skip it
      //
      continue;
    }
    Pml4 = (UINT64 *) (UINTN) (Pml5[Pml5Index] & PHYSICAL_ADDRESS_MASK);
    for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) {
      if ((Pml4[Pml4Index] & IA32_PG_P) == 0) {
        //
        // If PML4 entry does not exist, skip it
        //
        continue;
      }
      Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++, Pdpt++) {
        if ((*Pdpt & IA32_PG_P) == 0) {
          //
          // If PDPT entry does not exist, skip it
          //
          continue;
        }
        if ((*Pdpt & IA32_PG_PS) != 0) {
          //
          // This is a 1G entry, set the NX bit and skip it
          //
          if (mXdSupported) {
            *Pdpt = *Pdpt | IA32_PG_NX;
          }
          continue;
        }
        Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
        if (Pd == 0) {
          continue;
        }
        for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) {
          if ((*Pd & IA32_PG_P) == 0) {
            //
            // If PD entry does not exist, skip it
            //
            continue;
          }
          Address = (UINTN) LShiftU64 (
                              LShiftU64 (
                                LShiftU64 ((Pml5Index << 9) + Pml4Index, 9) + PdptIndex,
                                9
                                ) + PdIndex,
                              21
                              );

          if ((*Pd & IA32_PG_PS) != 0) {
            // 2MB page

            if (!IsAddressValid (Address, &Nx)) {
              //
              // Patch to remove Present flag and RW flag
              //
              *Pd = *Pd & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
            }
            if (Nx && mXdSupported) {
              *Pd = *Pd | IA32_PG_NX;
            }
          } else {
            // 4KB page
            Pt = (UINT64 *)(UINTN)(*Pd & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
            if (Pt == 0) {
              continue;
            }
            for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof(*Pt); PtIndex++, Pt++) {
              if (!IsAddressValid (Address, &Nx)) {
                *Pt = *Pt & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
              }
              if (Nx && mXdSupported) {
                *Pt = *Pt | IA32_PG_NX;
              }
              Address += SIZE_4KB;
            } // end for PT
          } // end if PS
        } // end for PD
      } // end for PDPT
    } // end for PML4
  } // end for PML5

  //
  // Flush TLB
  //
  CpuFlushTlb ();
  DEBUG ((DEBUG_INFO, "Patch page table done!\n"));
  //
  // Set execute-disable flag
  //
  mXdEnabled = TRUE;

  return;
}

/**
  Get the system port address of the SMI Command Port from the FADT table.

**/
VOID
GetSmiCommandPort (
  VOID
  )
{
  EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE  *Fadt;

  Fadt = (EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *) EfiLocateFirstAcpiTable (
                                                         EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE
                                                         );
  ASSERT (Fadt != NULL);

  mSmiCommandPort = Fadt->SmiCmd;
  DEBUG ((DEBUG_INFO, "mSmiCommandPort = %x\n", mSmiCommandPort));
}

/**
  Updates the page table to make some memory ranges (like system memory) absent
  and other memory ranges (like MMIO) present and execute-disable. It also
  updates 2MB pages to 4KB pages for some memory ranges.

**/
VOID
SmmProfileStart (
  VOID
  )
{
  //
  // The flag indicates SMM profile starts to work.
  //
  mSmmProfileStart = TRUE;
}

/**
  Initialize SMM profile in the SmmReadyToLock protocol callback function.

  @param Protocol     Points to the protocol's unique identifier.
  @param Interface    Points to the interface instance.
  @param Handle       The handle on which the interface was installed.

  @retval EFI_SUCCESS    SmmReadyToLock protocol callback runs successfully.
**/
EFI_STATUS
EFIAPI
InitSmmProfileCallBack (
  IN CONST EFI_GUID  *Protocol,
  IN VOID            *Interface,
  IN EFI_HANDLE      Handle
  )
{
  //
  // Save to a variable so that SMM profile data can be found.
  //
  gRT->SetVariable (
         SMM_PROFILE_NAME,
         &gEfiCallerIdGuid,
         EFI_VARIABLE_BOOTSERVICE_ACCESS | EFI_VARIABLE_RUNTIME_ACCESS,
         sizeof(mSmmProfileBase),
         &mSmmProfileBase
         );

  //
  // Get Software SMI from FADT
  //
  GetSmiCommandPort ();

  //
  // Initialize protected memory range for patching page table later.
  //
  InitProtectedMemRange ();

  return EFI_SUCCESS;
}

/**
  Initialize SMM profile data structures.

**/
VOID
InitSmmProfileInternal (
  VOID
  )
{
  EFI_STATUS            Status;
  EFI_PHYSICAL_ADDRESS  Base;
  VOID                  *Registration;
  UINTN                 Index;
  UINTN                 MsrDsAreaSizePerCpu;
  UINTN                 TotalSize;

  mPFEntryCount = (UINTN *)AllocateZeroPool (sizeof (UINTN) * mMaxNumberOfCpus);
  ASSERT (mPFEntryCount != NULL);
  mLastPFEntryValue = (UINT64 (*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (
                        sizeof (mLastPFEntryValue[0]) * mMaxNumberOfCpus);
  ASSERT (mLastPFEntryValue != NULL);
  mLastPFEntryPointer = (UINT64 *(*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (
                          sizeof (mLastPFEntryPointer[0]) * mMaxNumberOfCpus);
  ASSERT (mLastPFEntryPointer != NULL);

  //
  // Allocate memory for SmmProfile below 4GB.
  // The base address is saved to a UEFI variable in InitSmmProfileCallBack so that the profile data can be located.
  //
  mSmmProfileSize = PcdGet32 (PcdCpuSmmProfileSize);
  ASSERT ((mSmmProfileSize & 0xFFF) == 0);

  if (mBtsSupported) {
    TotalSize = mSmmProfileSize + mMsrDsAreaSize;
  } else {
    TotalSize = mSmmProfileSize;
  }

  Base   = 0xFFFFFFFF;
  Status = gBS->AllocatePages (
                  AllocateMaxAddress,
                  EfiReservedMemoryType,
                  EFI_SIZE_TO_PAGES (TotalSize),
                  &Base
                  );
  ASSERT_EFI_ERROR (Status);
  ZeroMem ((VOID *)(UINTN)Base, TotalSize);
  mSmmProfileBase = (SMM_PROFILE_HEADER *)(UINTN)Base;

  //
  // Initialize SMM profile data header.
  //
  mSmmProfileBase->HeaderSize     = sizeof (SMM_PROFILE_HEADER);
  mSmmProfileBase->MaxDataEntries = (UINT64)((mSmmProfileSize - sizeof(SMM_PROFILE_HEADER)) / sizeof (SMM_PROFILE_ENTRY));
  mSmmProfileBase->MaxDataSize    = MultU64x64 (mSmmProfileBase->MaxDataEntries, sizeof(SMM_PROFILE_ENTRY));
  mSmmProfileBase->CurDataEntries = 0;
  mSmmProfileBase->CurDataSize    = 0;
  mSmmProfileBase->TsegStart      = mCpuHotPlugData.SmrrBase;
  mSmmProfileBase->TsegSize       = mCpuHotPlugData.SmrrSize;
  mSmmProfileBase->NumSmis        = 0;
  mSmmProfileBase->NumCpus        = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;

  if (mBtsSupported) {
    mMsrDsArea = (MSR_DS_AREA_STRUCT **)AllocateZeroPool (sizeof (MSR_DS_AREA_STRUCT *) * mMaxNumberOfCpus);
    ASSERT (mMsrDsArea != NULL);
    mMsrBTSRecord = (BRANCH_TRACE_RECORD **)AllocateZeroPool (sizeof (BRANCH_TRACE_RECORD *) * mMaxNumberOfCpus);
    ASSERT (mMsrBTSRecord != NULL);
    mMsrPEBSRecord = (PEBS_RECORD **)AllocateZeroPool (sizeof (PEBS_RECORD *) * mMaxNumberOfCpus);
    ASSERT (mMsrPEBSRecord != NULL);

    mMsrDsAreaBase      = (MSR_DS_AREA_STRUCT *)((UINTN)Base + mSmmProfileSize);
    MsrDsAreaSizePerCpu = mMsrDsAreaSize / mMaxNumberOfCpus;
    mBTSRecordNumber    = (MsrDsAreaSizePerCpu - sizeof(PEBS_RECORD) * PEBS_RECORD_NUMBER - sizeof(MSR_DS_AREA_STRUCT)) / sizeof(BRANCH_TRACE_RECORD);
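    //
    // Each CPU's slice of the DS save area is laid out as the MSR_DS_AREA_STRUCT
    // header, followed by mBTSRecordNumber BTS records, with PEBS_RECORD_NUMBER
    // PEBS records placed at the end of the slice.
    //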
    for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
      mMsrDsArea[Index]     = (MSR_DS_AREA_STRUCT *)((UINTN)mMsrDsAreaBase + MsrDsAreaSizePerCpu * Index);
      mMsrBTSRecord[Index]  = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[Index] + sizeof(MSR_DS_AREA_STRUCT));
      mMsrPEBSRecord[Index] = (PEBS_RECORD *)((UINTN)mMsrDsArea[Index] + MsrDsAreaSizePerCpu - sizeof(PEBS_RECORD) * PEBS_RECORD_NUMBER);

      mMsrDsArea[Index]->BTSBufferBase         = (UINTN)mMsrBTSRecord[Index];
      mMsrDsArea[Index]->BTSIndex              = mMsrDsArea[Index]->BTSBufferBase;
      mMsrDsArea[Index]->BTSAbsoluteMaximum    = mMsrDsArea[Index]->BTSBufferBase + mBTSRecordNumber * sizeof(BRANCH_TRACE_RECORD) + 1;
      mMsrDsArea[Index]->BTSInterruptThreshold = mMsrDsArea[Index]->BTSAbsoluteMaximum + 1;

      mMsrDsArea[Index]->PEBSBufferBase         = (UINTN)mMsrPEBSRecord[Index];
      mMsrDsArea[Index]->PEBSIndex              = mMsrDsArea[Index]->PEBSBufferBase;
      mMsrDsArea[Index]->PEBSAbsoluteMaximum    = mMsrDsArea[Index]->PEBSBufferBase + PEBS_RECORD_NUMBER * sizeof(PEBS_RECORD) + 1;
      mMsrDsArea[Index]->PEBSInterruptThreshold = mMsrDsArea[Index]->PEBSAbsoluteMaximum + 1;
    }
  }

  mProtectionMemRange      = mProtectionMemRangeTemplate;
  mProtectionMemRangeCount = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);

  //
  // Update TSeg entry.
  //
  mProtectionMemRange[0].Range.Base = mCpuHotPlugData.SmrrBase;
  mProtectionMemRange[0].Range.Top  = mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize;

  //
  // Update SMM profile entry.
  //
  mProtectionMemRange[1].Range.Base = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase;
  mProtectionMemRange[1].Range.Top  = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase + TotalSize;

  //
  // Allocate memory reserved for creating 4KB pages.
  //
  InitPagesForPFHandler ();

  //
  // Start SMM profile when SmmReadyToLock protocol is installed.
  //
  Status = gSmst->SmmRegisterProtocolNotify (
                    &gEfiSmmReadyToLockProtocolGuid,
                    InitSmmProfileCallBack,
                    &Registration
                    );
  ASSERT_EFI_ERROR (Status);

  return;
}

/**
  Check if the features used by SMM profile are supported by the processor.

**/
VOID
CheckFeatureSupported (
  VOID
  )
{
  UINT32                         RegEax;
  UINT32                         RegEcx;
  UINT32                         RegEdx;
  MSR_IA32_MISC_ENABLE_REGISTER  MiscEnableMsr;

  if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
    AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);
    if (RegEax >= CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS) {
      AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, NULL);
      if ((RegEcx & CPUID_CET_SS) == 0) {
        mCetSupported = FALSE;
        PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
      }
    } else {
      mCetSupported = FALSE;
      PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
    }
  }

  if (mXdSupported) {
    AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
    if (RegEax <= CPUID_EXTENDED_FUNCTION) {
      //
      // Extended CPUID functions are not supported on this processor.
      //
      mXdSupported = FALSE;
      PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
    }

    AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & CPUID1_EDX_XD_SUPPORT) == 0) {
      //
      // Execute Disable Bit feature is not supported on this processor.
      //
      mXdSupported = FALSE;
      PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
    }

    if (StandardSignatureIsAuthenticAMD ()) {
      //
      // AMD processors do not support MSR_IA32_MISC_ENABLE
      //
      PatchInstructionX86 (gPatchMsrIa32MiscEnableSupported, FALSE, 1);
    }
  }

  if (mBtsSupported) {
    AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & CPUID1_EDX_BTS_AVAILABLE) != 0) {
      //
      // Per IA32 manuals:
      // When CPUID.1:EDX[21] is set, the following BTS facilities are available:
      // 1. The BTS_UNAVAILABLE flag in the IA32_MISC_ENABLE MSR indicates the
      //    availability of the BTS facilities, including the ability to set the BTS and
      //    BTINT bits in the MSR_DEBUGCTLA MSR.
      // 2. The IA32_DS_AREA MSR can be programmed to point to the DS save area.
      //
      MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);
      if (MiscEnableMsr.Bits.BTS == 1) {
        //
        // BTS facilities are not supported if the MSR_IA32_MISC_ENABLE.BTS bit is set.
        //
        mBtsSupported = FALSE;
      }
    }
  }
}

/**
  Enable single step.

**/
VOID
ActivateSingleStepDB (
  VOID
  )
{
  UINTN  Dr6;

  Dr6 = AsmReadDr6 ();
  if ((Dr6 & DR6_SINGLE_STEP) != 0) {
    return;
  }
  Dr6 |= DR6_SINGLE_STEP;
  AsmWriteDr6 (Dr6);
}

/**
  Enable last branch.

**/
VOID
ActivateLBR (
  VOID
  )
{
  UINT64  DebugCtl;

  DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);
  if ((DebugCtl & MSR_DEBUG_CTL_LBR) != 0) {
    return;
  }
  DebugCtl |= MSR_DEBUG_CTL_LBR;
  AsmWriteMsr64 (MSR_DEBUG_CTL, DebugCtl);
}

/**
  Enable branch trace store.

  @param CpuIndex    The index of the processor.

**/
VOID
ActivateBTS (
  IN UINTN  CpuIndex
  )
{
  UINT64  DebugCtl;

  DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);
  if ((DebugCtl & MSR_DEBUG_CTL_BTS) != 0) {
    return;
  }

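  //
  // Point IA32_DS_AREA at this CPU's DS save area, then set the TR and BTS bits
  // in IA32_DEBUGCTL so branch records are stored there. BTINT is cleared so the
  // BTS buffer operates in circular mode instead of raising an interrupt when it
  // fills up.
  //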
  AsmWriteMsr64 (MSR_DS_AREA, (UINT64)(UINTN)mMsrDsArea[CpuIndex]);
  DebugCtl |= (UINT64)(MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR);
  DebugCtl &= ~((UINT64)MSR_DEBUG_CTL_BTINT);
  AsmWriteMsr64 (MSR_DEBUG_CTL, DebugCtl);
}

/**
  Increase the SMI count recorded in the SMM profile header on each SMI entry.

**/
VOID
SmmProfileRecordSmiNum (
  VOID
  )
{
  if (mSmmProfileStart) {
    mSmmProfileBase->NumSmis++;
  }
}

/**
  Initialize processor environment for SMM profile.

  @param CpuIndex    The index of the processor.

**/
VOID
ActivateSmmProfile (
  IN UINTN  CpuIndex
  )
{
  //
  // Enable Single Step #DB
  //
  ActivateSingleStepDB ();

  if (mBtsSupported) {
    //
    // We cannot get useful information from LER, so we have to use BTS.
    //
    ActivateLBR ();

    //
    // Enable BTS
    //
    ActivateBTS (CpuIndex);
  }
}

/**
  Initialize SMM profile in SMM CPU entry point.

  @param[in] Cr3    The base address of the page tables to use in SMM.

**/
VOID
InitSmmProfile (
  UINT32  Cr3
  )
{
  //
  // Save Cr3
  //
  mSmmProfileCr3 = Cr3;

  //
  // Skip SMM profile initialization if feature is disabled
  //
  if (!FeaturePcdGet (PcdCpuSmmProfileEnable) &&
      !HEAP_GUARD_NONSTOP_MODE &&
      !NULL_DETECTION_NONSTOP_MODE) {
    return;
  }

  //
  // Initialize SmmProfile here
  //
  InitSmmProfileInternal ();

  //
  // Initialize profile IDT.
  //
  InitIdtr ();

  //
  // Tell #PF handler to prepare a #DB subsequently.
  //
  mSetupDebugTrap = TRUE;
}

/**
  Update the page table to map the memory correctly so that the instruction
  which caused the page fault can execute successfully. It also saves the original
  page table entries so they can be restored in the single-step exception handler.

  @param PageTable    PageTable Address.
  @param PFAddress    The memory address which caused the page fault exception.
  @param CpuIndex     The index of the processor.
  @param ErrorCode    The Error code of exception.

**/
VOID
RestorePageTableBelow4G (
  UINT64  *PageTable,
  UINT64  PFAddress,
  UINTN   CpuIndex,
  UINTN   ErrorCode
  )
{
  UINTN     PTIndex;
  UINTN     PFIndex;
  IA32_CR4  Cr4;
  BOOLEAN   Enable5LevelPaging;

  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);

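  //
  // Walk the page tables for PFAddress (9 index bits per level), record the
  // original PD or PT entry so DebugExceptionHandler can restore it, then
  // temporarily map the page as present and writable so the faulting access
  // can complete under the single-step trap.
  //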
  //
  // PML5
  //
  if (Enable5LevelPaging) {
    PTIndex = (UINTN)BitFieldRead64 (PFAddress, 48, 56);
    ASSERT (PageTable[PTIndex] != 0);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
  }

  //
  // PML4
  //
  if (sizeof(UINT64) == sizeof(UINTN)) {
    PTIndex = (UINTN)BitFieldRead64 (PFAddress, 39, 47);
    ASSERT (PageTable[PTIndex] != 0);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
  }

  //
  // PDPTE
  //
  PTIndex = (UINTN)BitFieldRead64 (PFAddress, 30, 38);
  ASSERT (PageTable[PTIndex] != 0);
  PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);

  //
  // PD
  //
  PTIndex = (UINTN)BitFieldRead64 (PFAddress, 21, 29);
  if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
    //
    // Large page
    //

    //
    // Record the old entry with its non-present status.
    // The recorded entries cover both the memory the instruction resides in and the memory it accesses.
    //
    ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
    if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
      PFIndex = mPFEntryCount[CpuIndex];
      mLastPFEntryValue[CpuIndex][PFIndex]   = PageTable[PTIndex];
      mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
      mPFEntryCount[CpuIndex]++;
    }

    //
    // Set new entry
    //
    PageTable[PTIndex] = (PFAddress & ~((1ull << 21) - 1));
    PageTable[PTIndex] |= (UINT64)IA32_PG_PS;
    PageTable[PTIndex] |= (UINT64)PAGE_ATTRIBUTE_BITS;
    if ((ErrorCode & IA32_PF_EC_ID) != 0) {
      PageTable[PTIndex] &= ~IA32_PG_NX;
    }
  } else {
    //
    // Small page
    //
    ASSERT (PageTable[PTIndex] != 0);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);

    //
    // 4K PTE
    //
    PTIndex = (UINTN)BitFieldRead64 (PFAddress, 12, 20);

    //
    // Record the old entry with its non-present status.
    // The recorded entries cover both the memory the instruction resides in and the memory it accesses.
    //
    ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
    if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
      PFIndex = mPFEntryCount[CpuIndex];
      mLastPFEntryValue[CpuIndex][PFIndex]   = PageTable[PTIndex];
      mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
      mPFEntryCount[CpuIndex]++;
    }

    //
    // Set new entry
    //
    PageTable[PTIndex] = (PFAddress & ~((1ull << 12) - 1));
    PageTable[PTIndex] |= (UINT64)PAGE_ATTRIBUTE_BITS;
    if ((ErrorCode & IA32_PF_EC_ID) != 0) {
      PageTable[PTIndex] &= ~IA32_PG_NX;
    }
  }
}

/**
  Handler for Page Fault triggered by a Guard page.

  @param ErrorCode    The Error code of exception.

**/
VOID
GuardPagePFHandler (
  UINTN  ErrorCode
  )
{
  UINT64  *PageTable;
  UINT64  PFAddress;
  UINT64  RestoreAddress;
  UINTN   RestorePageNumber;
  UINTN   CpuIndex;

  PageTable = (UINT64 *)AsmReadCr3 ();
  PFAddress = AsmReadCr2 ();
  CpuIndex  = GetCpuIndex ();

  //
  // A memory operation that crosses pages, like a "rep mov" instruction, would cause an
  // infinite loop between this handler and the Debug Trap handler. We have to make sure
  // that the current page and the page following it are both in the PRESENT state.
  //
  RestorePageNumber = 2;
  RestoreAddress    = PFAddress;
  while (RestorePageNumber > 0) {
    RestorePageTableBelow4G (PageTable, RestoreAddress, CpuIndex, ErrorCode);
    RestoreAddress += EFI_PAGE_SIZE;
    RestorePageNumber--;
  }

  //
  // Flush TLB
  //
  CpuFlushTlb ();
}

/**
  The Page fault handler to save SMM profile data.

  @param Rip          The RIP when the exception happens.
  @param ErrorCode    The Error code of exception.

**/
VOID
SmmProfilePFHandler (
  UINTN  Rip,
  UINTN  ErrorCode
  )
{
  UINT64                      *PageTable;
  UINT64                      PFAddress;
  UINT64                      RestoreAddress;
  UINTN                       RestorePageNumber;
  UINTN                       CpuIndex;
  UINTN                       Index;
  UINT64                      InstructionAddress;
  UINTN                       MaxEntryNumber;
  UINTN                       CurrentEntryNumber;
  BOOLEAN                     IsValidPFAddress;
  SMM_PROFILE_ENTRY           *SmmProfileEntry;
  UINT64                      SmiCommand;
  EFI_STATUS                  Status;
  UINT8                       SoftSmiValue;
  EFI_SMM_SAVE_STATE_IO_INFO  IoInfo;

  if (!mSmmProfileStart) {
    //
    // If SMM profile has not started, call the original page fault handler.
    //
    SmiDefaultPFHandler ();
    return;
  }

  if (mBtsSupported) {
    DisableBTS ();
  }

  IsValidPFAddress = FALSE;
  PageTable        = (UINT64 *)AsmReadCr3 ();
  PFAddress        = AsmReadCr2 ();
  CpuIndex         = GetCpuIndex ();

  //
  // A memory operation that crosses pages, like a "rep mov" instruction, would cause an
  // infinite loop between this handler and the Debug Trap handler. We have to make sure
  // that the current page and the page following it are both in the PRESENT state.
  //
  RestorePageNumber = 2;
  RestoreAddress    = PFAddress;
  while (RestorePageNumber > 0) {
    if (RestoreAddress <= 0xFFFFFFFF) {
      RestorePageTableBelow4G (PageTable, RestoreAddress, CpuIndex, ErrorCode);
    } else {
      RestorePageTableAbove4G (PageTable, RestoreAddress, CpuIndex, ErrorCode, &IsValidPFAddress);
    }
    RestoreAddress += EFI_PAGE_SIZE;
    RestorePageNumber--;
  }

  if (!IsValidPFAddress) {
    InstructionAddress = Rip;
    if ((ErrorCode & IA32_PF_EC_ID) != 0 && (mBtsSupported)) {
      //
      // If it is an instruction fetch failure, get the correct IP from BTS.
      //
      InstructionAddress = GetSourceFromDestinationOnBts (CpuIndex, Rip);
      if (InstructionAddress == 0) {
        //
        // It indicates the instruction which caused the page fault is not a jump instruction;
        // set the instruction address to the page fault address.
        //
        InstructionAddress = PFAddress;
      }
    }

    //
    // Indicate it is not a software SMI
    //
    SmiCommand = 0xFFFFFFFFFFFFFFFFULL;
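    //
    // Check each CPU's save state for an I/O access to the SMI command port;
    // if one is found, the value read back from the port is recorded as the
    // software SMI command.
    //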
    for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
      Status = SmmReadSaveState(&mSmmCpu, sizeof(IoInfo), EFI_SMM_SAVE_STATE_REGISTER_IO, Index, &IoInfo);
      if (EFI_ERROR (Status)) {
        continue;
      }
      if (IoInfo.IoPort == mSmiCommandPort) {
        //
        // A software SMI triggered by the SMI command port has been found; get SmiCommand from the SMI command port.
        //
        SoftSmiValue = IoRead8 (mSmiCommandPort);
        SmiCommand   = (UINT64)SoftSmiValue;
        break;
      }
    }

    SmmProfileEntry = (SMM_PROFILE_ENTRY *)(UINTN)(mSmmProfileBase + 1);
    //
    // Check if there is already a same entry in profile data.
    //
    for (Index = 0; Index < (UINTN) mSmmProfileBase->CurDataEntries; Index++) {
      if ((SmmProfileEntry[Index].ErrorCode   == (UINT64)ErrorCode) &&
          (SmmProfileEntry[Index].Address     == PFAddress) &&
          (SmmProfileEntry[Index].CpuNum      == (UINT64)CpuIndex) &&
          (SmmProfileEntry[Index].Instruction == InstructionAddress) &&
          (SmmProfileEntry[Index].SmiCmd      == SmiCommand)) {
        //
        // The same record already exists; no need to save it again.
        //
        break;
      }
    }
    if (Index == mSmmProfileBase->CurDataEntries) {
      CurrentEntryNumber = (UINTN) mSmmProfileBase->CurDataEntries;
      MaxEntryNumber     = (UINTN) mSmmProfileBase->MaxDataEntries;
      if (FeaturePcdGet (PcdCpuSmmProfileRingBuffer)) {
        CurrentEntryNumber = CurrentEntryNumber % MaxEntryNumber;
      }
      if (CurrentEntryNumber < MaxEntryNumber) {
        //
        // Log the new entry
        //
        SmmProfileEntry[CurrentEntryNumber].SmiNum      = mSmmProfileBase->NumSmis;
        SmmProfileEntry[CurrentEntryNumber].ErrorCode   = (UINT64)ErrorCode;
        SmmProfileEntry[CurrentEntryNumber].ApicId      = (UINT64)GetApicId ();
        SmmProfileEntry[CurrentEntryNumber].CpuNum      = (UINT64)CpuIndex;
        SmmProfileEntry[CurrentEntryNumber].Address     = PFAddress;
        SmmProfileEntry[CurrentEntryNumber].Instruction = InstructionAddress;
        SmmProfileEntry[CurrentEntryNumber].SmiCmd      = SmiCommand;
        //
        // Update current entry index and data size in the header.
        //
        mSmmProfileBase->CurDataEntries++;
        mSmmProfileBase->CurDataSize = MultU64x64 (mSmmProfileBase->CurDataEntries, sizeof (SMM_PROFILE_ENTRY));
      }
    }
  }
  //
  // Flush TLB
  //
  CpuFlushTlb ();

  if (mBtsSupported) {
    EnableBTS ();
  }
}

/**
  Replace the INT1 exception handler with one that restores the page table to the
  absent/execute-disable state, so that subsequent accesses trigger page faults again
  and SMM profile data keeps being saved.

**/
VOID
InitIdtr (
  VOID
  )
{
  EFI_STATUS  Status;

  Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_DEBUG, DebugExceptionHandler);
  ASSERT_EFI_ERROR (Status);
}