]> git.proxmox.com Git - mirror_edk2.git/blob - UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
UefiCpuPkg/PiSmmCpu: Add Shadow Stack Support for X86 SMM.
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / SmmProfile.c
1 /** @file
2 Enable SMM profile.
3
4 Copyright (c) 2012 - 2019, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 This program and the accompanying materials
8 are licensed and made available under the terms and conditions of the BSD License
9 which accompanies this distribution. The full text of the license may be found at
10 http://opensource.org/licenses/bsd-license.php
11
12 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
13 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
14
15 **/
16
17 #include "PiSmmCpuDxeSmm.h"
18 #include "SmmProfileInternal.h"
19
//
// CR3 value of the SMM page tables, captured in InitSmmProfile().
//
UINT32 mSmmProfileCr3;

//
// The buffer to store SMM profile data (header followed by entries).
//
SMM_PROFILE_HEADER *mSmmProfileBase;

//
// Base of the per-CPU Debug Store (DS) save areas used for branch trace store.
//
MSR_DS_AREA_STRUCT *mMsrDsAreaBase;

//
// The size of the buffer that stores SMM profile data.
//
UINTN mSmmProfileSize;

//
// The total size of the buffer used to enable branch trace store
// (split evenly among CPUs in InitSmmProfileInternal()).
//
UINTN mMsrDsAreaSize = SMM_PROFILE_DTS_SIZE;

//
// The flag indicates if execute-disable is supported by processor.
//
BOOLEAN mXdSupported = TRUE;

//
// The flag indicates if execute-disable is enabled on processor.
//
BOOLEAN mXdEnabled = FALSE;

//
// The flag indicates if BTS is supported by processor.
//
BOOLEAN mBtsSupported = TRUE;

//
// The flag indicates if SMM profile starts to record data.
//
BOOLEAN mSmmProfileStart = FALSE;

//
// The flag indicates if #DB will be setup in #PF handler.
//
BOOLEAN mSetupDebugTrap = FALSE;

//
// Per-CPU count of page fault entries recorded for one instruction execution.
//
UINTN *mPFEntryCount;

//
// Per-CPU saved page table entry values and pointers; the #DB handler uses
// them to restore the entries the #PF handler temporarily modified.
//
UINT64 (*mLastPFEntryValue)[MAX_PF_ENTRY_COUNT];
UINT64 *(*mLastPFEntryPointer)[MAX_PF_ENTRY_COUNT];

//
// Per-CPU pointers into the DS save area and its BTS/PEBS record buffers.
//
MSR_DS_AREA_STRUCT **mMsrDsArea;
BRANCH_TRACE_RECORD **mMsrBTSRecord;
UINTN mBTSRecordNumber;   // Number of BTS records per CPU.
PEBS_RECORD **mMsrPEBSRecord;

//
// These memory ranges are always present; they do not generate the access type of page fault exception,
// but they possibly generate instruction fetch type of page fault exception.
//
MEMORY_PROTECTION_RANGE *mProtectionMemRange = NULL;
UINTN mProtectionMemRangeCount = 0;

//
// Some predefined memory ranges.
// Each entry is {{Base, Top}, Present, Nx}.
//
MEMORY_PROTECTION_RANGE mProtectionMemRangeTemplate[] = {
  //
  // SMRAM range (to be fixed in runtime).
  // It is always present and instruction fetches are allowed.
  //
  {{0x00000000, 0x00000000},TRUE,FALSE},

  //
  // SMM profile data range( to be fixed in runtime).
  // It is always present and instruction fetches are not allowed.
  //
  {{0x00000000, 0x00000000},TRUE,TRUE},

  //
  // SMRAM ranges not covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSize (to be fixed in runtime).
  // It is always present and instruction fetches are allowed.
  // {{0x00000000, 0x00000000},TRUE,FALSE},
  //

  //
  // Future extended range could be added here.
  //

  //
  // PCI MMIO ranges (to be added in runtime).
  // They are always present and instruction fetches are not allowed.
  //
};

//
// These memory ranges are mapped by 4KB-page instead of 2MB-page.
//
MEMORY_RANGE *mSplitMemRange = NULL;
UINTN mSplitMemRangeCount = 0;

//
// SMI command port read from the ACPI FADT (SmiCmd field) in GetSmiCommandPort().
//
UINT32 mSmiCommandPort;
122 /**
123 Disable branch trace store.
124
125 **/
126 VOID
127 DisableBTS (
128 VOID
129 )
130 {
131 AsmMsrAnd64 (MSR_DEBUG_CTL, ~((UINT64)(MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR)));
132 }
133
134 /**
135 Enable branch trace store.
136
137 **/
138 VOID
139 EnableBTS (
140 VOID
141 )
142 {
143 AsmMsrOr64 (MSR_DEBUG_CTL, (MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR));
144 }
145
146 /**
147 Get CPU Index from APIC ID.
148
149 **/
150 UINTN
151 GetCpuIndex (
152 VOID
153 )
154 {
155 UINTN Index;
156 UINT32 ApicId;
157
158 ApicId = GetApicId ();
159
160 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
161 if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == ApicId) {
162 return Index;
163 }
164 }
165 ASSERT (FALSE);
166 return 0;
167 }
168
/**
  Get the source of IP after execute-disable exception is triggered.

  Walks the per-CPU BTS record ring backwards from the current BTS index.
  The first record whose LastBranchTo matches the destination corresponds
  to the #DB exception itself; the second match is the branch of interest.

  @param CpuIndex      The index of CPU.
  @param DestinationIP The destination address.

  @return The source IP of the matching branch, or 0 if none is found.

**/
UINT64
GetSourceFromDestinationOnBts (
  UINTN CpuIndex,
  UINT64 DestinationIP
  )
{
  BRANCH_TRACE_RECORD *CurrentBTSRecord;
  UINTN Index;
  BOOLEAN FirstMatch;

  FirstMatch = FALSE;

  //
  // BTSIndex points at the slot the processor will write next, i.e. just
  // past the newest record.
  //
  CurrentBTSRecord = (BRANCH_TRACE_RECORD *)mMsrDsArea[CpuIndex]->BTSIndex;
  for (Index = 0; Index < mBTSRecordNumber; Index++) {
    if ((UINTN)CurrentBTSRecord < (UINTN)mMsrBTSRecord[CpuIndex]) {
      //
      // Underflow: wrap to the last record in the ring.
      // (BTSAbsoluteMaximum is set one byte past the buffer end in
      // InitSmmProfileInternal, so Maximum - 1 is the end of the buffer
      // and the decrement below lands on the last record.)
      //
      CurrentBTSRecord = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[CpuIndex]->BTSAbsoluteMaximum - 1);
      CurrentBTSRecord --;
    }
    if (CurrentBTSRecord->LastBranchTo == DestinationIP) {
      //
      // Good! find 1st one, then find 2nd one.
      //
      if (!FirstMatch) {
        //
        // The first one is DEBUG exception
        //
        FirstMatch = TRUE;
      } else {
        //
        // Good find proper one.
        //
        return CurrentBTSRecord->LastBranchFrom;
      }
    }
    CurrentBTSRecord--;
  }

  //
  // No matching branch pair was found in the ring.
  //
  return 0;
}
218
/**
  SMM profile specific INT 1 (single-step) exception handler.

  Restores the page table entries that the #PF handler temporarily changed
  so the faulting instruction could execute, flushes the TLB, and clears
  the trap flag so normal execution resumes.

  @param InterruptType Defines the type of interrupt or exception that
                       occurred on the processor. This parameter is processor architecture specific.
  @param SystemContext A pointer to the processor context when
                       the interrupt occurred on the processor.
**/
VOID
EFIAPI
DebugExceptionHandler (
  IN EFI_EXCEPTION_TYPE InterruptType,
  IN EFI_SYSTEM_CONTEXT SystemContext
  )
{
  UINTN CpuIndex;
  UINTN PFEntry;

  //
  // Ignore this #DB unless SMM profile or one of the non-stop modes armed it.
  //
  if (!mSmmProfileStart &&
      !HEAP_GUARD_NONSTOP_MODE &&
      !NULL_DETECTION_NONSTOP_MODE) {
    return;
  }
  CpuIndex = GetCpuIndex ();

  //
  // Clear last PF entries: write back the original values saved by the
  // #PF handler.
  //
  for (PFEntry = 0; PFEntry < mPFEntryCount[CpuIndex]; PFEntry++) {
    *mLastPFEntryPointer[CpuIndex][PFEntry] = mLastPFEntryValue[CpuIndex][PFEntry];
  }

  //
  // Reset page fault exception count for next page fault.
  //
  mPFEntryCount[CpuIndex] = 0;

  //
  // Flush TLB so the restored mappings take effect immediately.
  //
  CpuFlushTlb ();

  //
  // Clear TF in EFLAGS to stop single-stepping.
  //
  ClearTrapFlag (SystemContext);
}
266
267 /**
268 Check if the input address is in SMM ranges.
269
270 @param[in] Address The input address.
271
272 @retval TRUE The input address is in SMM.
273 @retval FALSE The input address is not in SMM.
274 **/
275 BOOLEAN
276 IsInSmmRanges (
277 IN EFI_PHYSICAL_ADDRESS Address
278 )
279 {
280 UINTN Index;
281
282 if ((Address >= mCpuHotPlugData.SmrrBase) && (Address < mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
283 return TRUE;
284 }
285 for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
286 if (Address >= mSmmCpuSmramRanges[Index].CpuStart &&
287 Address < mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) {
288 return TRUE;
289 }
290 }
291 return FALSE;
292 }
293
294 /**
295 Check if the memory address will be mapped by 4KB-page.
296
297 @param Address The address of Memory.
298 @param Nx The flag indicates if the memory is execute-disable.
299
300 **/
301 BOOLEAN
302 IsAddressValid (
303 IN EFI_PHYSICAL_ADDRESS Address,
304 IN BOOLEAN *Nx
305 )
306 {
307 UINTN Index;
308
309 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
310 //
311 // Check configuration
312 //
313 for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
314 if ((Address >= mProtectionMemRange[Index].Range.Base) && (Address < mProtectionMemRange[Index].Range.Top)) {
315 *Nx = mProtectionMemRange[Index].Nx;
316 return mProtectionMemRange[Index].Present;
317 }
318 }
319 *Nx = TRUE;
320 return FALSE;
321
322 } else {
323 *Nx = TRUE;
324 if (IsInSmmRanges (Address)) {
325 *Nx = FALSE;
326 }
327 return TRUE;
328 }
329 }
330
331 /**
332 Check if the memory address will be mapped by 4KB-page.
333
334 @param Address The address of Memory.
335
336 **/
337 BOOLEAN
338 IsAddressSplit (
339 IN EFI_PHYSICAL_ADDRESS Address
340 )
341 {
342 UINTN Index;
343
344 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
345 //
346 // Check configuration
347 //
348 for (Index = 0; Index < mSplitMemRangeCount; Index++) {
349 if ((Address >= mSplitMemRange[Index].Base) && (Address < mSplitMemRange[Index].Top)) {
350 return TRUE;
351 }
352 }
353 } else {
354 if (Address < mCpuHotPlugData.SmrrBase) {
355 if ((mCpuHotPlugData.SmrrBase - Address) < BASE_2MB) {
356 return TRUE;
357 }
358 } else if (Address > (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize - BASE_2MB)) {
359 if ((Address - (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize - BASE_2MB)) < BASE_2MB) {
360 return TRUE;
361 }
362 }
363 }
364 //
365 // Return default
366 //
367 return FALSE;
368 }
369
/**
  Initialize the protected memory ranges and the 4KB-page mapped memory ranges.

  Builds mProtectionMemRange from the fixed template plus SMRAM ranges and
  GCD MMIO ranges, then derives mSplitMemRange: the sub-ranges that are not
  2MB aligned and therefore must be mapped with 4KB pages.

**/
VOID
InitProtectedMemRange (
  VOID
  )
{
  UINTN                           Index;
  UINTN                           NumberOfDescriptors;
  UINTN                           NumberOfAddedDescriptors;
  UINTN                           NumberOfProtectRange;
  UINTN                           NumberOfSpliteRange;
  EFI_GCD_MEMORY_SPACE_DESCRIPTOR *MemorySpaceMap;
  UINTN                           TotalSize;
  EFI_PHYSICAL_ADDRESS            ProtectBaseAddress;
  EFI_PHYSICAL_ADDRESS            ProtectEndAddress;
  EFI_PHYSICAL_ADDRESS            Top2MBAlignedAddress;
  EFI_PHYSICAL_ADDRESS            Base2MBAlignedAddress;
  UINT64                          High4KBPageSize;
  UINT64                          Low4KBPageSize;

  NumberOfDescriptors = 0;
  NumberOfAddedDescriptors = mSmmCpuSmramRangeCount;
  NumberOfSpliteRange = 0;
  MemorySpaceMap = NULL;

  //
  // Get MMIO ranges from GCD and add them into protected memory ranges.
  //
  gDS->GetMemorySpaceMap (
         &NumberOfDescriptors,
         &MemorySpaceMap
         );
  for (Index = 0; Index < NumberOfDescriptors; Index++) {
    if (MemorySpaceMap[Index].GcdMemoryType == EfiGcdMemoryTypeMemoryMappedIo) {
      NumberOfAddedDescriptors++;
    }
  }

  if (NumberOfAddedDescriptors != 0) {
    //
    // Allocate a new range array sized for template + SMRAM + MMIO entries.
    //
    TotalSize = NumberOfAddedDescriptors * sizeof (MEMORY_PROTECTION_RANGE) + sizeof (mProtectionMemRangeTemplate);
    mProtectionMemRange = (MEMORY_PROTECTION_RANGE *) AllocateZeroPool (TotalSize);
    ASSERT (mProtectionMemRange != NULL);
    mProtectionMemRangeCount = TotalSize / sizeof (MEMORY_PROTECTION_RANGE);

    //
    // Copy existing ranges.
    //
    CopyMem (mProtectionMemRange, mProtectionMemRangeTemplate, sizeof (mProtectionMemRangeTemplate));

    //
    // Create split ranges which come from protected ranges.
    // (At most one split range per protected range, so the same count fits.)
    //
    TotalSize = (TotalSize / sizeof (MEMORY_PROTECTION_RANGE)) * sizeof (MEMORY_RANGE);
    mSplitMemRange = (MEMORY_RANGE *) AllocateZeroPool (TotalSize);
    ASSERT (mSplitMemRange != NULL);

    //
    // Create SMM ranges which are set to present and execution-enable.
    //
    NumberOfProtectRange = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);
    for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
      if (mSmmCpuSmramRanges[Index].CpuStart >= mProtectionMemRange[0].Range.Base &&
          mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize < mProtectionMemRange[0].Range.Top) {
        //
        // If the address have been already covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSize
        //
        break;
      }
      mProtectionMemRange[NumberOfProtectRange].Range.Base = mSmmCpuSmramRanges[Index].CpuStart;
      mProtectionMemRange[NumberOfProtectRange].Range.Top = mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize;
      mProtectionMemRange[NumberOfProtectRange].Present = TRUE;
      mProtectionMemRange[NumberOfProtectRange].Nx = FALSE;
      NumberOfProtectRange++;
    }

    //
    // Create MMIO ranges which are set to present and execution-disable.
    //
    for (Index = 0; Index < NumberOfDescriptors; Index++) {
      if (MemorySpaceMap[Index].GcdMemoryType != EfiGcdMemoryTypeMemoryMappedIo) {
        continue;
      }
      mProtectionMemRange[NumberOfProtectRange].Range.Base = MemorySpaceMap[Index].BaseAddress;
      mProtectionMemRange[NumberOfProtectRange].Range.Top = MemorySpaceMap[Index].BaseAddress + MemorySpaceMap[Index].Length;
      mProtectionMemRange[NumberOfProtectRange].Present = TRUE;
      mProtectionMemRange[NumberOfProtectRange].Nx = TRUE;
      NumberOfProtectRange++;
    }

    //
    // Check and updated actual protected memory ranges count
    //
    ASSERT (NumberOfProtectRange <= mProtectionMemRangeCount);
    mProtectionMemRangeCount = NumberOfProtectRange;
  }

  //
  // According to protected ranges, create the ranges which will be mapped by 4KB page.
  //
  NumberOfSpliteRange = 0;
  NumberOfProtectRange = mProtectionMemRangeCount;
  for (Index = 0; Index < NumberOfProtectRange; Index++) {
    //
    // If MMIO base address is not 2MB alignment, make 2MB alignment for create 4KB page in page table.
    //
    ProtectBaseAddress = mProtectionMemRange[Index].Range.Base;
    ProtectEndAddress = mProtectionMemRange[Index].Range.Top;
    if (((ProtectBaseAddress & (SIZE_2MB - 1)) != 0) || ((ProtectEndAddress & (SIZE_2MB - 1)) != 0)) {
      //
      // Check if it is possible to create 4KB-page for not 2MB-aligned range and to create 2MB-page for 2MB-aligned range.
      // A mix of 4KB and 2MB page could save SMRAM space.
      //
      Top2MBAlignedAddress = ProtectEndAddress & ~(SIZE_2MB - 1);
      Base2MBAlignedAddress = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
      if ((Top2MBAlignedAddress > Base2MBAlignedAddress) &&
          ((Top2MBAlignedAddress - Base2MBAlignedAddress) >= SIZE_2MB)) {
        //
        // There is an range which could be mapped by 2MB-page.
        //
        High4KBPageSize = ((ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectEndAddress & ~(SIZE_2MB - 1));
        Low4KBPageSize = ((ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectBaseAddress & ~(SIZE_2MB - 1));
        if (High4KBPageSize != 0) {
          //
          // Add not 2MB-aligned range to be mapped by 4KB-page.
          //
          mSplitMemRange[NumberOfSpliteRange].Base = ProtectEndAddress & ~(SIZE_2MB - 1);
          mSplitMemRange[NumberOfSpliteRange].Top = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
          NumberOfSpliteRange++;
        }
        if (Low4KBPageSize != 0) {
          //
          // Add not 2MB-aligned range to be mapped by 4KB-page.
          //
          mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1);
          mSplitMemRange[NumberOfSpliteRange].Top = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
          NumberOfSpliteRange++;
        }
      } else {
        //
        // The range could only be mapped by 4KB-page.
        //
        mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1);
        mSplitMemRange[NumberOfSpliteRange].Top = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
        NumberOfSpliteRange++;
      }
    }
  }

  mSplitMemRangeCount = NumberOfSpliteRange;

  //
  // Dump the final range configuration for debugging.
  //
  DEBUG ((EFI_D_INFO, "SMM Profile Memory Ranges:\n"));
  for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
    DEBUG ((EFI_D_INFO, "mProtectionMemRange[%d].Base = %lx\n", Index, mProtectionMemRange[Index].Range.Base));
    DEBUG ((EFI_D_INFO, "mProtectionMemRange[%d].Top = %lx\n", Index, mProtectionMemRange[Index].Range.Top));
  }
  for (Index = 0; Index < mSplitMemRangeCount; Index++) {
    DEBUG ((EFI_D_INFO, "mSplitMemRange[%d].Base = %lx\n", Index, mSplitMemRange[Index].Base));
    DEBUG ((EFI_D_INFO, "mSplitMemRange[%d].Top = %lx\n", Index, mSplitMemRange[Index].Top));
  }
}
533
/**
  Update page table according to protected memory ranges and the 4KB-page mapped memory ranges.

  Pass 1 splits 2MB pages that overlap split ranges into 4KB page tables.
  Pass 2 marks pages absent or execute-disabled according to IsAddressValid().
  Finally flushes the TLB and records that XD is in effect.

**/
VOID
InitPaging (
  VOID
  )
{
  UINT64 *Pml4;
  UINT64 *Pde;
  UINT64 *Pte;
  UINT64 *Pt;
  UINTN Address;
  UINTN Level1;
  UINTN Level2;
  UINTN Level3;
  UINTN Level4;
  UINTN NumberOfPdpEntries;
  UINTN NumberOfPml4Entries;
  UINTN SizeOfMemorySpace;
  BOOLEAN Nx;

  if (sizeof (UINTN) == sizeof (UINT64)) {
    //
    // 64-bit: walk starts at the PML4 pointed to by the profile CR3.
    //
    Pml4 = (UINT64*)(UINTN)mSmmProfileCr3;
    SizeOfMemorySpace = HighBitSet64 (gPhyMask) + 1;
    //
    // Calculate the table entries of PML4E and PDPTE.
    //
    if (SizeOfMemorySpace <= 39 ) {
      NumberOfPml4Entries = 1;
      NumberOfPdpEntries = (UINT32)LShiftU64 (1, (SizeOfMemorySpace - 30));
    } else {
      NumberOfPml4Entries = (UINT32)LShiftU64 (1, (SizeOfMemorySpace - 39));
      NumberOfPdpEntries = 512;
    }
  } else {
    //
    // 32-bit PAE: 4 PDPT entries, no PML4 level.
    //
    NumberOfPml4Entries = 1;
    NumberOfPdpEntries = 4;
  }

  //
  // Go through page table and change 2MB-page into 4KB-page.
  //
  for (Level1 = 0; Level1 < NumberOfPml4Entries; Level1++) {
    if (sizeof (UINTN) == sizeof (UINT64)) {
      if ((Pml4[Level1] & IA32_PG_P) == 0) {
        //
        // If Pml4 entry does not exist, skip it
        //
        continue;
      }
      Pde = (UINT64 *)(UINTN)(Pml4[Level1] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    } else {
      Pde = (UINT64*)(UINTN)mSmmProfileCr3;
    }
    for (Level2 = 0; Level2 < NumberOfPdpEntries; Level2++, Pde++) {
      if ((*Pde & IA32_PG_P) == 0) {
        //
        // If PDE entry does not exist, skip it
        //
        continue;
      }
      if ((*Pde & IA32_PG_PS) != 0) {
        //
        // This is 1G entry, skip it
        //
        continue;
      }
      Pte = (UINT64 *)(UINTN)(*Pde & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      if (Pte == 0) {
        continue;
      }
      for (Level3 = 0; Level3 < SIZE_4KB / sizeof (*Pte); Level3++, Pte++) {
        if ((*Pte & IA32_PG_P) == 0) {
          //
          // If PTE entry does not exist, skip it
          //
          continue;
        }
        //
        // Linear address of the 2MB region this entry maps.
        //
        Address = (((Level2 << 9) + Level3) << 21);

        //
        // If it is 2M page, check IsAddressSplit()
        //
        if (((*Pte & IA32_PG_PS) != 0) && IsAddressSplit (Address)) {
          //
          // Based on current page table, create 4KB page table for split area.
          //
          ASSERT (Address == (*Pte & PHYSICAL_ADDRESS_MASK));

          Pt = AllocatePageTableMemory (1);
          ASSERT (Pt != NULL);

          // Split it: fill the new table with 512 identity-mapped 4KB entries.
          for (Level4 = 0; Level4 < SIZE_4KB / sizeof(*Pt); Level4++) {
            Pt[Level4] = Address + ((Level4 << 12) | mAddressEncMask | PAGE_ATTRIBUTE_BITS);
          } // end for PT
          *Pte = (UINT64)(UINTN)Pt | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        } // end if IsAddressSplit
      } // end for PTE
    } // end for PDE
  }

  //
  // Go through page table and set several page table entries to absent or execute-disable.
  //
  DEBUG ((EFI_D_INFO, "Patch page table start ...\n"));
  for (Level1 = 0; Level1 < NumberOfPml4Entries; Level1++) {
    if (sizeof (UINTN) == sizeof (UINT64)) {
      if ((Pml4[Level1] & IA32_PG_P) == 0) {
        //
        // If Pml4 entry does not exist, skip it
        //
        continue;
      }
      Pde = (UINT64 *)(UINTN)(Pml4[Level1] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    } else {
      Pde = (UINT64*)(UINTN)mSmmProfileCr3;
    }
    for (Level2 = 0; Level2 < NumberOfPdpEntries; Level2++, Pde++) {
      if ((*Pde & IA32_PG_P) == 0) {
        //
        // If PDE entry does not exist, skip it
        //
        continue;
      }
      if ((*Pde & IA32_PG_PS) != 0) {
        //
        // This is 1G entry, set NX bit and skip it
        //
        if (mXdSupported) {
          *Pde = *Pde | IA32_PG_NX;
        }
        continue;
      }
      Pte = (UINT64 *)(UINTN)(*Pde & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      if (Pte == 0) {
        continue;
      }
      for (Level3 = 0; Level3 < SIZE_4KB / sizeof (*Pte); Level3++, Pte++) {
        if ((*Pte & IA32_PG_P) == 0) {
          //
          // If PTE entry does not exist, skip it
          //
          continue;
        }
        Address = (((Level2 << 9) + Level3) << 21);

        if ((*Pte & IA32_PG_PS) != 0) {
          // 2MB page

          if (!IsAddressValid (Address, &Nx)) {
            //
            // Patch to remove Present flag and RW flag
            //
            *Pte = *Pte & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
          }
          if (Nx && mXdSupported) {
            *Pte = *Pte | IA32_PG_NX;
          }
        } else {
          // 4KB page: patch each of the 512 entries individually.
          Pt = (UINT64 *)(UINTN)(*Pte & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
          if (Pt == 0) {
            continue;
          }
          for (Level4 = 0; Level4 < SIZE_4KB / sizeof(*Pt); Level4++, Pt++) {
            if (!IsAddressValid (Address, &Nx)) {
              *Pt = *Pt & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
            }
            if (Nx && mXdSupported) {
              *Pt = *Pt | IA32_PG_NX;
            }
            Address += SIZE_4KB;
          } // end for PT
        } // end if PS
      } // end for PTE
    } // end for PDE
  }

  //
  // Flush TLB
  //
  CpuFlushTlb ();
  DEBUG ((EFI_D_INFO, "Patch page table done!\n"));
  //
  // Set execute-disable flag
  //
  mXdEnabled = TRUE;

  return ;
}
727
728 /**
729 To get system port address of the SMI Command Port in FADT table.
730
731 **/
732 VOID
733 GetSmiCommandPort (
734 VOID
735 )
736 {
737 EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *Fadt;
738
739 Fadt = (EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *) EfiLocateFirstAcpiTable (
740 EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE
741 );
742 ASSERT (Fadt != NULL);
743
744 mSmiCommandPort = Fadt->SmiCmd;
745 DEBUG ((EFI_D_INFO, "mSmiCommandPort = %x\n", mSmiCommandPort));
746 }
747
748 /**
749 Updates page table to make some memory ranges (like system memory) absent
750 and make some memory ranges (like MMIO) present and execute disable. It also
751 update 2MB-page to 4KB-page for some memory ranges.
752
753 **/
754 VOID
755 SmmProfileStart (
756 VOID
757 )
758 {
759 //
760 // The flag indicates SMM profile starts to work.
761 //
762 mSmmProfileStart = TRUE;
763 }
764
/**
  Initialize SMM profile in SmmReadyToLock protocol callback function.

  Publishes the profile buffer address in a UEFI variable, caches the SMI
  command port from the FADT, and builds the protected memory ranges that
  InitPaging() will apply.

  @param Protocol  Points to the protocol's unique identifier.
  @param Interface Points to the interface instance.
  @param Handle    The handle on which the interface was installed.

  @retval EFI_SUCCESS SmmReadyToLock protocol callback runs successfully.
**/
EFI_STATUS
EFIAPI
InitSmmProfileCallBack (
  IN CONST EFI_GUID *Protocol,
  IN VOID           *Interface,
  IN EFI_HANDLE     Handle
  )
{
  //
  // Save to variable so that SMM profile data can be found.
  // (Best effort: a failure here only makes the data harder to locate.)
  //
  gRT->SetVariable (
         SMM_PROFILE_NAME,
         &gEfiCallerIdGuid,
         EFI_VARIABLE_BOOTSERVICE_ACCESS | EFI_VARIABLE_RUNTIME_ACCESS,
         sizeof(mSmmProfileBase),
         &mSmmProfileBase
         );

  //
  // Get Software SMI from FADT
  //
  GetSmiCommandPort ();

  //
  // Initialize protected memory range for patching page table later.
  //
  InitProtectedMemRange ();

  return EFI_SUCCESS;
}
805
/**
  Initialize SMM profile data structures.

  Allocates the per-CPU page-fault bookkeeping arrays, the profile data
  buffer (below 4GB, reserved memory), and — when BTS is supported — the
  per-CPU Debug Store areas.  Registers a callback on SmmReadyToLock to
  finish initialization.

**/
VOID
InitSmmProfileInternal (
  VOID
  )
{
  EFI_STATUS Status;
  EFI_PHYSICAL_ADDRESS Base;
  VOID *Registration;
  UINTN Index;
  UINTN MsrDsAreaSizePerCpu;
  UINTN TotalSize;

  //
  // Per-CPU arrays used by the #PF/#DB handlers to save and restore
  // temporarily modified page table entries.
  //
  mPFEntryCount = (UINTN *)AllocateZeroPool (sizeof (UINTN) * mMaxNumberOfCpus);
  ASSERT (mPFEntryCount != NULL);
  mLastPFEntryValue = (UINT64 (*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (
                        sizeof (mLastPFEntryValue[0]) * mMaxNumberOfCpus);
  ASSERT (mLastPFEntryValue != NULL);
  mLastPFEntryPointer = (UINT64 *(*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (
                          sizeof (mLastPFEntryPointer[0]) * mMaxNumberOfCpus);
  ASSERT (mLastPFEntryPointer != NULL);

  //
  // Allocate memory for SmmProfile below 4GB.
  // The base address must be page aligned (size is checked to be so).
  //
  mSmmProfileSize = PcdGet32 (PcdCpuSmmProfileSize);
  ASSERT ((mSmmProfileSize & 0xFFF) == 0);

  if (mBtsSupported) {
    TotalSize = mSmmProfileSize + mMsrDsAreaSize;
  } else {
    TotalSize = mSmmProfileSize;
  }

  //
  // AllocateMaxAddress with 0xFFFFFFFF keeps the buffer below 4GB.
  //
  Base = 0xFFFFFFFF;
  Status = gBS->AllocatePages (
                  AllocateMaxAddress,
                  EfiReservedMemoryType,
                  EFI_SIZE_TO_PAGES (TotalSize),
                  &Base
                  );
  ASSERT_EFI_ERROR (Status);
  ZeroMem ((VOID *)(UINTN)Base, TotalSize);
  mSmmProfileBase = (SMM_PROFILE_HEADER *)(UINTN)Base;

  //
  // Initialize SMM profile data header.
  //
  mSmmProfileBase->HeaderSize = sizeof (SMM_PROFILE_HEADER);
  mSmmProfileBase->MaxDataEntries = (UINT64)((mSmmProfileSize - sizeof(SMM_PROFILE_HEADER)) / sizeof (SMM_PROFILE_ENTRY));
  mSmmProfileBase->MaxDataSize = MultU64x64 (mSmmProfileBase->MaxDataEntries, sizeof(SMM_PROFILE_ENTRY));
  mSmmProfileBase->CurDataEntries = 0;
  mSmmProfileBase->CurDataSize = 0;
  mSmmProfileBase->TsegStart = mCpuHotPlugData.SmrrBase;
  mSmmProfileBase->TsegSize = mCpuHotPlugData.SmrrSize;
  mSmmProfileBase->NumSmis = 0;
  mSmmProfileBase->NumCpus = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;

  if (mBtsSupported) {
    //
    // Carve the DS area (appended after the profile buffer) into equal
    // per-CPU slices: DS header, then BTS records, then PEBS records.
    //
    mMsrDsArea = (MSR_DS_AREA_STRUCT **)AllocateZeroPool (sizeof (MSR_DS_AREA_STRUCT *) * mMaxNumberOfCpus);
    ASSERT (mMsrDsArea != NULL);
    mMsrBTSRecord = (BRANCH_TRACE_RECORD **)AllocateZeroPool (sizeof (BRANCH_TRACE_RECORD *) * mMaxNumberOfCpus);
    ASSERT (mMsrBTSRecord != NULL);
    mMsrPEBSRecord = (PEBS_RECORD **)AllocateZeroPool (sizeof (PEBS_RECORD *) * mMaxNumberOfCpus);
    ASSERT (mMsrPEBSRecord != NULL);

    mMsrDsAreaBase = (MSR_DS_AREA_STRUCT *)((UINTN)Base + mSmmProfileSize);
    MsrDsAreaSizePerCpu = mMsrDsAreaSize / mMaxNumberOfCpus;
    mBTSRecordNumber = (MsrDsAreaSizePerCpu - sizeof(PEBS_RECORD) * PEBS_RECORD_NUMBER - sizeof(MSR_DS_AREA_STRUCT)) / sizeof(BRANCH_TRACE_RECORD);
    for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
      mMsrDsArea[Index] = (MSR_DS_AREA_STRUCT *)((UINTN)mMsrDsAreaBase + MsrDsAreaSizePerCpu * Index);
      mMsrBTSRecord[Index] = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[Index] + sizeof(MSR_DS_AREA_STRUCT));
      mMsrPEBSRecord[Index] = (PEBS_RECORD *)((UINTN)mMsrDsArea[Index] + MsrDsAreaSizePerCpu - sizeof(PEBS_RECORD) * PEBS_RECORD_NUMBER);

      //
      // Maximum is set one byte past the buffer end; the interrupt
      // threshold is placed beyond it so tracing wraps without interrupts.
      //
      mMsrDsArea[Index]->BTSBufferBase = (UINTN)mMsrBTSRecord[Index];
      mMsrDsArea[Index]->BTSIndex = mMsrDsArea[Index]->BTSBufferBase;
      mMsrDsArea[Index]->BTSAbsoluteMaximum = mMsrDsArea[Index]->BTSBufferBase + mBTSRecordNumber * sizeof(BRANCH_TRACE_RECORD) + 1;
      mMsrDsArea[Index]->BTSInterruptThreshold = mMsrDsArea[Index]->BTSAbsoluteMaximum + 1;

      mMsrDsArea[Index]->PEBSBufferBase = (UINTN)mMsrPEBSRecord[Index];
      mMsrDsArea[Index]->PEBSIndex = mMsrDsArea[Index]->PEBSBufferBase;
      mMsrDsArea[Index]->PEBSAbsoluteMaximum = mMsrDsArea[Index]->PEBSBufferBase + PEBS_RECORD_NUMBER * sizeof(PEBS_RECORD) + 1;
      mMsrDsArea[Index]->PEBSInterruptThreshold = mMsrDsArea[Index]->PEBSAbsoluteMaximum + 1;
    }
  }

  //
  // Start with the static template; InitProtectedMemRange() replaces it
  // later from the SmmReadyToLock callback.
  //
  mProtectionMemRange = mProtectionMemRangeTemplate;
  mProtectionMemRangeCount = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);

  //
  // Update TSeg entry.
  //
  mProtectionMemRange[0].Range.Base = mCpuHotPlugData.SmrrBase;
  mProtectionMemRange[0].Range.Top = mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize;

  //
  // Update SMM profile entry.
  //
  mProtectionMemRange[1].Range.Base = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase;
  mProtectionMemRange[1].Range.Top = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase + TotalSize;

  //
  // Allocate memory reserved for creating 4KB pages.
  //
  InitPagesForPFHandler ();

  //
  // Start SMM profile when SmmReadyToLock protocol is installed.
  //
  Status = gSmst->SmmRegisterProtocolNotify (
                    &gEfiSmmReadyToLockProtocolGuid,
                    InitSmmProfileCallBack,
                    &Registration
                    );
  ASSERT_EFI_ERROR (Status);

  return ;
}
928
/**
  Check if feature is supported by a processor.

  Probes CPUID/MSR state and downgrades the CET shadow stack, XD, and BTS
  capability flags (with matching instruction patches where the assembly
  code tests them) when the executing processor lacks the feature.

**/
VOID
CheckFeatureSupported (
  VOID
  )
{
  UINT32 RegEax;
  UINT32 RegEcx;
  UINT32 RegEdx;
  MSR_IA32_MISC_ENABLE_REGISTER MiscEnableMsr;

  if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
    //
    // CET shadow stack requires the structured extended feature leaf;
    // verify the leaf exists, then check the CET_SS bit.
    //
    AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
    if (RegEax <= CPUID_EXTENDED_FUNCTION) {
      mCetSupported = FALSE;
      PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
    }
    AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, NULL);
    if ((RegEcx & CPUID_CET_SS) == 0) {
      mCetSupported = FALSE;
      PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
    }
  }

  if (mXdSupported) {
    AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
    if (RegEax <= CPUID_EXTENDED_FUNCTION) {
      //
      // Extended CPUID functions are not supported on this processor.
      //
      mXdSupported = FALSE;
      PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
    }

    AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & CPUID1_EDX_XD_SUPPORT) == 0) {
      //
      // Execute Disable Bit feature is not supported on this processor.
      //
      mXdSupported = FALSE;
      PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
    }
  }

  if (mBtsSupported) {
    AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & CPUID1_EDX_BTS_AVAILABLE) != 0) {
      //
      // Per IA32 manuals:
      // When CPUID.1:EDX[21] is set, the following BTS facilities are available:
      // 1. The BTS_UNAVAILABLE flag in the IA32_MISC_ENABLE MSR indicates the
      //    availability of the BTS facilities, including the ability to set the BTS and
      //    BTINT bits in the MSR_DEBUGCTLA MSR.
      // 2. The IA32_DS_AREA MSR can be programmed to point to the DS save area.
      //
      MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);
      if (MiscEnableMsr.Bits.BTS == 1) {
        //
        // BTS facilities is not supported if MSR_IA32_MISC_ENABLE.BTS bit is set.
        //
        mBtsSupported = FALSE;
      }
    }
  }
}
997
998 /**
999 Enable single step.
1000
1001 **/
1002 VOID
1003 ActivateSingleStepDB (
1004 VOID
1005 )
1006 {
1007 UINTN Dr6;
1008
1009 Dr6 = AsmReadDr6 ();
1010 if ((Dr6 & DR6_SINGLE_STEP) != 0) {
1011 return;
1012 }
1013 Dr6 |= DR6_SINGLE_STEP;
1014 AsmWriteDr6 (Dr6);
1015 }
1016
1017 /**
1018 Enable last branch.
1019
1020 **/
1021 VOID
1022 ActivateLBR (
1023 VOID
1024 )
1025 {
1026 UINT64 DebugCtl;
1027
1028 DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);
1029 if ((DebugCtl & MSR_DEBUG_CTL_LBR) != 0) {
1030 return ;
1031 }
1032 DebugCtl |= MSR_DEBUG_CTL_LBR;
1033 AsmWriteMsr64 (MSR_DEBUG_CTL, DebugCtl);
1034 }
1035
1036 /**
1037 Enable branch trace store.
1038
1039 @param CpuIndex The index of the processor.
1040
1041 **/
1042 VOID
1043 ActivateBTS (
1044 IN UINTN CpuIndex
1045 )
1046 {
1047 UINT64 DebugCtl;
1048
1049 DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);
1050 if ((DebugCtl & MSR_DEBUG_CTL_BTS) != 0) {
1051 return ;
1052 }
1053
1054 AsmWriteMsr64 (MSR_DS_AREA, (UINT64)(UINTN)mMsrDsArea[CpuIndex]);
1055 DebugCtl |= (UINT64)(MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR);
1056 DebugCtl &= ~((UINT64)MSR_DEBUG_CTL_BTINT);
1057 AsmWriteMsr64 (MSR_DEBUG_CTL, DebugCtl);
1058 }
1059
1060 /**
1061 Increase SMI number in each SMI entry.
1062
1063 **/
1064 VOID
1065 SmmProfileRecordSmiNum (
1066 VOID
1067 )
1068 {
1069 if (mSmmProfileStart) {
1070 mSmmProfileBase->NumSmis++;
1071 }
1072 }
1073
1074 /**
1075 Initialize processor environment for SMM profile.
1076
1077 @param CpuIndex The index of the processor.
1078
1079 **/
1080 VOID
1081 ActivateSmmProfile (
1082 IN UINTN CpuIndex
1083 )
1084 {
1085 //
1086 // Enable Single Step DB#
1087 //
1088 ActivateSingleStepDB ();
1089
1090 if (mBtsSupported) {
1091 //
1092 // We can not get useful information from LER, so we have to use BTS.
1093 //
1094 ActivateLBR ();
1095
1096 //
1097 // Enable BTS
1098 //
1099 ActivateBTS (CpuIndex);
1100 }
1101 }
1102
/**
  Initialize SMM profile in SMM CPU entry point.

  @param[in] Cr3 The base address of the page tables to use in SMM.

**/
VOID
InitSmmProfile (
  UINT32 Cr3
  )
{
  //
  // Save Cr3 so InitPaging() can locate the SMM page tables later.
  //
  mSmmProfileCr3 = Cr3;

  //
  // Skip SMM profile initialization if feature is disabled and neither
  // non-stop heap-guard nor NULL-detection mode needs the handlers.
  //
  if (!FeaturePcdGet (PcdCpuSmmProfileEnable) &&
      !HEAP_GUARD_NONSTOP_MODE &&
      !NULL_DETECTION_NONSTOP_MODE) {
    return;
  }

  //
  // Initialize SmmProfile here (buffers, DS areas, ReadyToLock callback).
  //
  InitSmmProfileInternal ();

  //
  // Initialize profile IDT (installs the profile #PF/#DB handlers).
  //
  InitIdtr ();

  //
  // Tell #PF handler to prepare a #DB subsequently.
  //
  mSetupDebugTrap = TRUE;
}
1143
/**
  Update page table to map the memory correctly in order to make the instruction
  which caused page fault execute successfully. And it also save the original page
  table to be restored in single-step exception.

  The walk descends PML4 (64-bit only) -> PDPTE -> PD, then either patches a
  2MB large-page entry or descends one more level to patch a 4KB PTE.  The
  entry's previous value and address are recorded in the per-CPU
  mLastPFEntryValue/mLastPFEntryPointer arrays so the #DB handler can restore
  it after the faulting instruction has been single-stepped.

  @param PageTable PageTable Address.
  @param PFAddress The memory address which caused page fault exception.
  @param CpuIndex  The index of the processor.
  @param ErrorCode The Error code of exception.

**/
VOID
RestorePageTableBelow4G (
  UINT64        *PageTable,
  UINT64        PFAddress,
  UINTN         CpuIndex,
  UINTN         ErrorCode
  )
{
  UINTN         PTIndex;
  UINTN         PFIndex;

  //
  // PML4 - only present when UINTN is 64-bit (i.e. X64 build).
  //
  if (sizeof(UINT64) == sizeof(UINTN)) {
    PTIndex = (UINTN)BitFieldRead64 (PFAddress, 39, 47);
    ASSERT (PageTable[PTIndex] != 0);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
  }

  //
  // PDPTE
  //
  PTIndex = (UINTN)BitFieldRead64 (PFAddress, 30, 38);
  ASSERT (PageTable[PTIndex] != 0);
  PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);

  //
  // PD
  //
  PTIndex = (UINTN)BitFieldRead64 (PFAddress, 21, 29);
  if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
    //
    // Large page
    //

    //
    // Record old entries with non-present status
    // Old entries include the memory which instruction is at and the memory which instruction access.
    //
    ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
    if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
      PFIndex = mPFEntryCount[CpuIndex];
      mLastPFEntryValue[CpuIndex][PFIndex]   = PageTable[PTIndex];
      mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
      mPFEntryCount[CpuIndex]++;
    }

    //
    // Set new entry: map the 2MB-aligned region as present/writable, and
    // clear XD when the fault was an instruction fetch (IA32_PF_EC_ID).
    //
    PageTable[PTIndex] = (PFAddress & ~((1ull << 21) - 1));
    PageTable[PTIndex] |= (UINT64)IA32_PG_PS;
    PageTable[PTIndex] |= (UINT64)PAGE_ATTRIBUTE_BITS;
    if ((ErrorCode & IA32_PF_EC_ID) != 0) {
      PageTable[PTIndex] &= ~IA32_PG_NX;
    }
  } else {
    //
    // Small page
    //
    ASSERT (PageTable[PTIndex] != 0);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);

    //
    // 4K PTE
    //
    PTIndex = (UINTN)BitFieldRead64 (PFAddress, 12, 20);

    //
    // Record old entries with non-present status
    // Old entries include the memory which instruction is at and the memory which instruction access.
    //
    ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
    if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
      PFIndex = mPFEntryCount[CpuIndex];
      mLastPFEntryValue[CpuIndex][PFIndex]   = PageTable[PTIndex];
      mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
      mPFEntryCount[CpuIndex]++;
    }

    //
    // Set new entry: map the 4KB page as present/writable, and clear XD
    // when the fault was an instruction fetch (IA32_PF_EC_ID).
    //
    PageTable[PTIndex] = (PFAddress & ~((1ull << 12) - 1));
    PageTable[PTIndex] |= (UINT64)PAGE_ATTRIBUTE_BITS;
    if ((ErrorCode & IA32_PF_EC_ID) != 0) {
      PageTable[PTIndex] &= ~IA32_PG_NX;
    }
  }
}
1248
1249 /**
1250 Handler for Page Fault triggered by Guard page.
1251
1252 @param ErrorCode The Error code of exception.
1253
1254 **/
1255 VOID
1256 GuardPagePFHandler (
1257 UINTN ErrorCode
1258 )
1259 {
1260 UINT64 *PageTable;
1261 UINT64 PFAddress;
1262 UINT64 RestoreAddress;
1263 UINTN RestorePageNumber;
1264 UINTN CpuIndex;
1265
1266 PageTable = (UINT64 *)AsmReadCr3 ();
1267 PFAddress = AsmReadCr2 ();
1268 CpuIndex = GetCpuIndex ();
1269
1270 //
1271 // Memory operation cross pages, like "rep mov" instruction, will cause
1272 // infinite loop between this and Debug Trap handler. We have to make sure
1273 // that current page and the page followed are both in PRESENT state.
1274 //
1275 RestorePageNumber = 2;
1276 RestoreAddress = PFAddress;
1277 while (RestorePageNumber > 0) {
1278 RestorePageTableBelow4G (PageTable, RestoreAddress, CpuIndex, ErrorCode);
1279 RestoreAddress += EFI_PAGE_SIZE;
1280 RestorePageNumber--;
1281 }
1282
1283 //
1284 // Flush TLB
1285 //
1286 CpuFlushTlb ();
1287 }
1288
/**
  The Page fault handler to save SMM profile data.

  Makes the faulting page(s) present again via the Restore* helpers, then
  (unless the fault address was judged valid) records one SMM_PROFILE_ENTRY
  describing the access: error code, fault address, CPU, instruction address,
  and the SMI command byte if the SMI was software-triggered.  BTS is
  suspended for the duration of the handler so it does not trace the handler
  itself.

  @param Rip       The RIP when exception happens.
  @param ErrorCode The Error code of exception.

**/
VOID
SmmProfilePFHandler (
  UINTN Rip,
  UINTN ErrorCode
  )
{
  UINT64                      *PageTable;
  UINT64                      PFAddress;
  UINT64                      RestoreAddress;
  UINTN                       RestorePageNumber;
  UINTN                       CpuIndex;
  UINTN                       Index;
  UINT64                      InstructionAddress;
  UINTN                       MaxEntryNumber;
  UINTN                       CurrentEntryNumber;
  BOOLEAN                     IsValidPFAddress;
  SMM_PROFILE_ENTRY           *SmmProfileEntry;
  UINT64                      SmiCommand;
  EFI_STATUS                  Status;
  UINT8                       SoftSmiValue;
  EFI_SMM_SAVE_STATE_IO_INFO  IoInfo;

  if (!mSmmProfileStart) {
    //
    // If SMM profile does not start, call original page fault handler.
    //
    SmiDefaultPFHandler ();
    return;
  }

  //
  // Suspend branch tracing while we run, so the handler's own branches are
  // not recorded in the BTS buffer.
  //
  if (mBtsSupported) {
    DisableBTS ();
  }

  IsValidPFAddress  = FALSE;
  PageTable         = (UINT64 *)AsmReadCr3 ();
  PFAddress         = AsmReadCr2 ();
  CpuIndex          = GetCpuIndex ();

  //
  // Memory operation cross pages, like "rep mov" instruction, will cause
  // infinite loop between this and Debug Trap handler. We have to make sure
  // that current page and the page followed are both in PRESENT state.
  //
  RestorePageNumber = 2;
  RestoreAddress = PFAddress;
  while (RestorePageNumber > 0) {
    if (RestoreAddress <= 0xFFFFFFFF) {
      RestorePageTableBelow4G (PageTable, RestoreAddress, CpuIndex, ErrorCode);
    } else {
      RestorePageTableAbove4G (PageTable, RestoreAddress, CpuIndex, ErrorCode, &IsValidPFAddress);
    }
    RestoreAddress += EFI_PAGE_SIZE;
    RestorePageNumber--;
  }

  //
  // Only log the fault when the above-4G path did not declare the address
  // a valid (expected) access.
  //
  if (!IsValidPFAddress) {
    InstructionAddress = Rip;
    if ((ErrorCode & IA32_PF_EC_ID) != 0 && (mBtsSupported)) {
      //
      // If it is instruction fetch failure, get the correct IP from BTS.
      //
      InstructionAddress = GetSourceFromDestinationOnBts (CpuIndex, Rip);
      if (InstructionAddress == 0) {
        //
        // It indicates the instruction which caused page fault is not a jump instruction,
        // set instruction address same as the page fault address.
        //
        InstructionAddress = PFAddress;
      }
    }

    //
    // Indicate it is not software SMI
    //
    SmiCommand = 0xFFFFFFFFFFFFFFFFULL;
    //
    // Scan each CPU's save state for an I/O access on the SMI command port,
    // which would mean this SMI was triggered by software.
    //
    for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
      Status = SmmReadSaveState(&mSmmCpu, sizeof(IoInfo), EFI_SMM_SAVE_STATE_REGISTER_IO, Index, &IoInfo);
      if (EFI_ERROR (Status)) {
        continue;
      }
      if (IoInfo.IoPort == mSmiCommandPort) {
        //
        // A software SMI triggered by SMI command port has been found, get SmiCommand from SMI command port.
        //
        SoftSmiValue = IoRead8 (mSmiCommandPort);
        SmiCommand = (UINT64)SoftSmiValue;
        break;
      }
    }

    //
    // Profile entries start immediately after the SMM_PROFILE_HEADER.
    //
    SmmProfileEntry = (SMM_PROFILE_ENTRY *)(UINTN)(mSmmProfileBase + 1);
    //
    // Check if there is already a same entry in profile data.
    //
    for (Index = 0; Index < (UINTN) mSmmProfileBase->CurDataEntries; Index++) {
      if ((SmmProfileEntry[Index].ErrorCode   == (UINT64)ErrorCode) &&
          (SmmProfileEntry[Index].Address     == PFAddress) &&
          (SmmProfileEntry[Index].CpuNum      == (UINT64)CpuIndex) &&
          (SmmProfileEntry[Index].Instruction == InstructionAddress) &&
          (SmmProfileEntry[Index].SmiCmd      == SmiCommand)) {
        //
        // Same record exist, need not save again.
        //
        break;
      }
    }
    if (Index == mSmmProfileBase->CurDataEntries) {
      CurrentEntryNumber = (UINTN) mSmmProfileBase->CurDataEntries;
      MaxEntryNumber     = (UINTN) mSmmProfileBase->MaxDataEntries;
      if (FeaturePcdGet (PcdCpuSmmProfileRingBuffer)) {
        //
        // In ring-buffer mode, wrap around and overwrite the oldest entry.
        //
        CurrentEntryNumber = CurrentEntryNumber % MaxEntryNumber;
      }
      if (CurrentEntryNumber < MaxEntryNumber) {
        //
        // Log the new entry
        //
        SmmProfileEntry[CurrentEntryNumber].SmiNum      = mSmmProfileBase->NumSmis;
        SmmProfileEntry[CurrentEntryNumber].ErrorCode   = (UINT64)ErrorCode;
        SmmProfileEntry[CurrentEntryNumber].ApicId      = (UINT64)GetApicId ();
        SmmProfileEntry[CurrentEntryNumber].CpuNum      = (UINT64)CpuIndex;
        SmmProfileEntry[CurrentEntryNumber].Address     = PFAddress;
        SmmProfileEntry[CurrentEntryNumber].Instruction = InstructionAddress;
        SmmProfileEntry[CurrentEntryNumber].SmiCmd      = SmiCommand;
        //
        // Update current entry index and data size in the header.
        //
        mSmmProfileBase->CurDataEntries++;
        mSmmProfileBase->CurDataSize = MultU64x64 (mSmmProfileBase->CurDataEntries, sizeof (SMM_PROFILE_ENTRY));
      }
    }
  }
  //
  // Flush TLB
  //
  CpuFlushTlb ();

  if (mBtsSupported) {
    EnableBTS ();
  }
}
1437
1438 /**
1439 Replace INT1 exception handler to restore page table to absent/execute-disable state
1440 in order to trigger page fault again to save SMM profile data..
1441
1442 **/
1443 VOID
1444 InitIdtr (
1445 VOID
1446 )
1447 {
1448 EFI_STATUS Status;
1449
1450 Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_DEBUG, DebugExceptionHandler);
1451 ASSERT_EFI_ERROR (Status);
1452 }