]> git.proxmox.com Git - mirror_edk2.git/blob - UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
UefiCpuPkg: Replace BSD License with BSD+Patent License
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / SmmProfile.c
1 /** @file
2 Enable SMM profile.
3
4 Copyright (c) 2012 - 2019, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 SPDX-License-Identifier: BSD-2-Clause-Patent
8
9 **/
10
11 #include "PiSmmCpuDxeSmm.h"
12 #include "SmmProfileInternal.h"
13
//
// CR3 value of the SMM page tables, saved by InitSmmProfile() and used by
// InitPaging() and RestorePageTableBelow4G() to walk/patch the page tables.
//
UINT32 mSmmProfileCr3;

//
// Base of the SMM profile data buffer and base of the per-CPU Debug Store
// (DS) save area used by branch trace store; both are allocated together in
// InitSmmProfileInternal().
//
SMM_PROFILE_HEADER *mSmmProfileBase;
MSR_DS_AREA_STRUCT *mMsrDsAreaBase;
//
// The buffer to store SMM profile data.
//
UINTN mSmmProfileSize;

//
// The buffer to enable branch trace store.
//
UINTN mMsrDsAreaSize = SMM_PROFILE_DTS_SIZE;

//
// The flag indicates if execute-disable is supported by processor.
// Assumed TRUE until CheckFeatureSupported() proves otherwise.
//
BOOLEAN mXdSupported = TRUE;

//
// The flag indicates if execute-disable is enabled on processor.
//
BOOLEAN mXdEnabled = FALSE;

//
// The flag indicates if BTS is supported by processor.
// Assumed TRUE until CheckFeatureSupported() proves otherwise.
//
BOOLEAN mBtsSupported = TRUE;

//
// The flag indicates if SMM profile starts to record data.
//
BOOLEAN mSmmProfileStart = FALSE;

//
// The flag indicates if #DB will be setup in #PF handler.
//
BOOLEAN mSetupDebugTrap = FALSE;

//
// Record the page fault exception count for one instruction execution.
//
UINTN *mPFEntryCount;

//
// Per-CPU saved page table entry values and their addresses; the #DB handler
// (DebugExceptionHandler) writes the saved values back after the faulting
// instruction has been single-stepped.
//
UINT64 (*mLastPFEntryValue)[MAX_PF_ENTRY_COUNT];
UINT64 *(*mLastPFEntryPointer)[MAX_PF_ENTRY_COUNT];

//
// Per-CPU DS save area, BTS record buffer, and PEBS record buffer pointers;
// laid out inside the mMsrDsAreaBase region by InitSmmProfileInternal().
//
MSR_DS_AREA_STRUCT **mMsrDsArea;
BRANCH_TRACE_RECORD **mMsrBTSRecord;
UINTN mBTSRecordNumber;
PEBS_RECORD **mMsrPEBSRecord;

//
// These memory ranges are always present, they do not generate the access type of page fault exception,
// but they possibly generate instruction fetch type of page fault exception.
//
MEMORY_PROTECTION_RANGE *mProtectionMemRange = NULL;
UINTN mProtectionMemRangeCount = 0;

//
// Some predefined memory ranges.
// Each entry is {{Base, Top}, Present, Nx}.
//
MEMORY_PROTECTION_RANGE mProtectionMemRangeTemplate[] = {
  //
  // SMRAM range (to be fixed in runtime).
  // It is always present and instruction fetches are allowed.
  //
  {{0x00000000, 0x00000000},TRUE,FALSE},

  //
  // SMM profile data range( to be fixed in runtime).
  // It is always present and instruction fetches are not allowed.
  //
  {{0x00000000, 0x00000000},TRUE,TRUE},

  //
  // SMRAM ranges not covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSize (to be fixed in runtime).
  // It is always present and instruction fetches are allowed.
  // {{0x00000000, 0x00000000},TRUE,FALSE},
  //

  //
  // Future extended range could be added here.
  //

  //
  // PCI MMIO ranges (to be added in runtime).
  // They are always present and instruction fetches are not allowed.
  //
};

//
// These memory ranges are mapped by 4KB-page instead of 2MB-page.
//
MEMORY_RANGE *mSplitMemRange = NULL;
UINTN mSplitMemRangeCount = 0;

//
// SMI command port, read from the ACPI FADT by GetSmiCommandPort().
//
UINT32 mSmiCommandPort;
116 /**
117 Disable branch trace store.
118
119 **/
120 VOID
121 DisableBTS (
122 VOID
123 )
124 {
125 AsmMsrAnd64 (MSR_DEBUG_CTL, ~((UINT64)(MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR)));
126 }
127
128 /**
129 Enable branch trace store.
130
131 **/
132 VOID
133 EnableBTS (
134 VOID
135 )
136 {
137 AsmMsrOr64 (MSR_DEBUG_CTL, (MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR));
138 }
139
140 /**
141 Get CPU Index from APIC ID.
142
143 **/
144 UINTN
145 GetCpuIndex (
146 VOID
147 )
148 {
149 UINTN Index;
150 UINT32 ApicId;
151
152 ApicId = GetApicId ();
153
154 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
155 if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == ApicId) {
156 return Index;
157 }
158 }
159 ASSERT (FALSE);
160 return 0;
161 }
162
/**
  Get the source of IP after execute-disable exception is triggered.

  Scans the per-CPU BTS record buffer backwards from the processor's current
  BTS write index, looking for branch records whose target equals
  DestinationIP.  The first match corresponds to the #DB exception entry
  itself; the second match is the branch that actually transferred control to
  the faulting address, so its LastBranchFrom is returned.

  @param CpuIndex        The index of CPU.
  @param DestinationIP   The destination address.

  @return Source IP of the matched branch record, or 0 if no second match
          is found within mBTSRecordNumber records.

**/
UINT64
GetSourceFromDestinationOnBts (
  UINTN     CpuIndex,
  UINT64    DestinationIP
  )
{
  BRANCH_TRACE_RECORD  *CurrentBTSRecord;
  UINTN                Index;
  BOOLEAN              FirstMatch;

  FirstMatch = FALSE;

  //
  // Start at the processor's current write position and walk backwards.
  //
  CurrentBTSRecord = (BRANCH_TRACE_RECORD *)mMsrDsArea[CpuIndex]->BTSIndex;
  for (Index = 0; Index < mBTSRecordNumber; Index++) {
    if ((UINTN)CurrentBTSRecord < (UINTN)mMsrBTSRecord[CpuIndex]) {
      //
      // Underflow: wrap to the last record of the circular buffer.
      // BTSAbsoluteMaximum is set one byte past the end of the buffer
      // (see InitSmmProfileInternal), so -1 lands at the buffer end.
      //
      CurrentBTSRecord = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[CpuIndex]->BTSAbsoluteMaximum - 1);
      CurrentBTSRecord --;
    }
    if (CurrentBTSRecord->LastBranchTo == DestinationIP) {
      //
      // Good! find 1st one, then find 2nd one.
      //
      if (!FirstMatch) {
        //
        // The first one is DEBUG exception
        //
        FirstMatch = TRUE;
      } else {
        //
        // Good find proper one.
        //
        return CurrentBTSRecord->LastBranchFrom;
      }
    }
    CurrentBTSRecord--;
  }

  //
  // No second matching record found.
  //
  return 0;
}
212
/**
  SMM profile specific INT 1 (single-step) exception handler.

  Restores the page table entries that the #PF handler temporarily modified
  so the faulting instruction could execute, flushes the TLB so the restored
  entries take effect, and clears the trap flag to stop single-stepping.

  @param InterruptType Defines the type of interrupt or exception that
                       occurred on the processor.This parameter is processor architecture specific.
  @param SystemContext A pointer to the processor context when
                       the interrupt occurred on the processor.
**/
VOID
EFIAPI
DebugExceptionHandler (
  IN EFI_EXCEPTION_TYPE  InterruptType,
  IN EFI_SYSTEM_CONTEXT  SystemContext
  )
{
  UINTN  CpuIndex;
  UINTN  PFEntry;

  //
  // Nothing to do unless SMM profile or one of the non-stop guard modes
  // is active.
  //
  if (!mSmmProfileStart &&
      !HEAP_GUARD_NONSTOP_MODE &&
      !NULL_DETECTION_NONSTOP_MODE) {
    return;
  }
  CpuIndex = GetCpuIndex ();

  //
  // Clear last PF entries: write the saved original values back through the
  // saved entry pointers recorded by the #PF handler.
  //
  for (PFEntry = 0; PFEntry < mPFEntryCount[CpuIndex]; PFEntry++) {
    *mLastPFEntryPointer[CpuIndex][PFEntry] = mLastPFEntryValue[CpuIndex][PFEntry];
  }

  //
  // Reset page fault exception count for next page fault.
  //
  mPFEntryCount[CpuIndex] = 0;

  //
  // Flush TLB
  //
  CpuFlushTlb ();

  //
  // Clear TF in EFLAGS
  //
  ClearTrapFlag (SystemContext);
}
260
261 /**
262 Check if the input address is in SMM ranges.
263
264 @param[in] Address The input address.
265
266 @retval TRUE The input address is in SMM.
267 @retval FALSE The input address is not in SMM.
268 **/
269 BOOLEAN
270 IsInSmmRanges (
271 IN EFI_PHYSICAL_ADDRESS Address
272 )
273 {
274 UINTN Index;
275
276 if ((Address >= mCpuHotPlugData.SmrrBase) && (Address < mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
277 return TRUE;
278 }
279 for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
280 if (Address >= mSmmCpuSmramRanges[Index].CpuStart &&
281 Address < mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) {
282 return TRUE;
283 }
284 }
285 return FALSE;
286 }
287
288 /**
289 Check if the memory address will be mapped by 4KB-page.
290
291 @param Address The address of Memory.
292 @param Nx The flag indicates if the memory is execute-disable.
293
294 **/
295 BOOLEAN
296 IsAddressValid (
297 IN EFI_PHYSICAL_ADDRESS Address,
298 IN BOOLEAN *Nx
299 )
300 {
301 UINTN Index;
302
303 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
304 //
305 // Check configuration
306 //
307 for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
308 if ((Address >= mProtectionMemRange[Index].Range.Base) && (Address < mProtectionMemRange[Index].Range.Top)) {
309 *Nx = mProtectionMemRange[Index].Nx;
310 return mProtectionMemRange[Index].Present;
311 }
312 }
313 *Nx = TRUE;
314 return FALSE;
315
316 } else {
317 *Nx = TRUE;
318 if (IsInSmmRanges (Address)) {
319 *Nx = FALSE;
320 }
321 return TRUE;
322 }
323 }
324
325 /**
326 Check if the memory address will be mapped by 4KB-page.
327
328 @param Address The address of Memory.
329
330 **/
331 BOOLEAN
332 IsAddressSplit (
333 IN EFI_PHYSICAL_ADDRESS Address
334 )
335 {
336 UINTN Index;
337
338 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
339 //
340 // Check configuration
341 //
342 for (Index = 0; Index < mSplitMemRangeCount; Index++) {
343 if ((Address >= mSplitMemRange[Index].Base) && (Address < mSplitMemRange[Index].Top)) {
344 return TRUE;
345 }
346 }
347 } else {
348 if (Address < mCpuHotPlugData.SmrrBase) {
349 if ((mCpuHotPlugData.SmrrBase - Address) < BASE_2MB) {
350 return TRUE;
351 }
352 } else if (Address > (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize - BASE_2MB)) {
353 if ((Address - (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize - BASE_2MB)) < BASE_2MB) {
354 return TRUE;
355 }
356 }
357 }
358 //
359 // Return default
360 //
361 return FALSE;
362 }
363
/**
  Initialize the protected memory ranges and the 4KB-page mapped memory ranges.

  Builds mProtectionMemRange from the predefined template, plus SMRAM ranges
  not covered by SMRR and MMIO ranges discovered via the GCD memory space
  map.  Then derives mSplitMemRange: the 2MB-unaligned edges of each
  protected range, which must be mapped with 4KB pages by InitPaging().

**/
VOID
InitProtectedMemRange (
  VOID
  )
{
  UINTN                            Index;
  UINTN                            NumberOfDescriptors;
  UINTN                            NumberOfAddedDescriptors;
  UINTN                            NumberOfProtectRange;
  UINTN                            NumberOfSpliteRange;
  EFI_GCD_MEMORY_SPACE_DESCRIPTOR  *MemorySpaceMap;
  UINTN                            TotalSize;
  EFI_PHYSICAL_ADDRESS             ProtectBaseAddress;
  EFI_PHYSICAL_ADDRESS             ProtectEndAddress;
  EFI_PHYSICAL_ADDRESS             Top2MBAlignedAddress;
  EFI_PHYSICAL_ADDRESS             Base2MBAlignedAddress;
  UINT64                           High4KBPageSize;
  UINT64                           Low4KBPageSize;

  NumberOfDescriptors      = 0;
  NumberOfAddedDescriptors = mSmmCpuSmramRangeCount;
  NumberOfSpliteRange      = 0;
  MemorySpaceMap           = NULL;

  //
  // Get MMIO ranges from GCD and add them into protected memory ranges.
  //
  gDS->GetMemorySpaceMap (
       &NumberOfDescriptors,
       &MemorySpaceMap
       );
  for (Index = 0; Index < NumberOfDescriptors; Index++) {
    if (MemorySpaceMap[Index].GcdMemoryType == EfiGcdMemoryTypeMemoryMappedIo) {
      NumberOfAddedDescriptors++;
    }
  }

  if (NumberOfAddedDescriptors != 0) {
    //
    // Allocate one array sized for the template entries plus every SMRAM
    // and MMIO range to be added below.
    //
    TotalSize = NumberOfAddedDescriptors * sizeof (MEMORY_PROTECTION_RANGE) + sizeof (mProtectionMemRangeTemplate);
    mProtectionMemRange = (MEMORY_PROTECTION_RANGE *) AllocateZeroPool (TotalSize);
    ASSERT (mProtectionMemRange != NULL);
    mProtectionMemRangeCount = TotalSize / sizeof (MEMORY_PROTECTION_RANGE);

    //
    // Copy existing ranges.
    //
    CopyMem (mProtectionMemRange, mProtectionMemRangeTemplate, sizeof (mProtectionMemRangeTemplate));

    //
    // Create split ranges which come from protected ranges.
    //
    TotalSize = (TotalSize / sizeof (MEMORY_PROTECTION_RANGE)) * sizeof (MEMORY_RANGE);
    mSplitMemRange = (MEMORY_RANGE *) AllocateZeroPool (TotalSize);
    ASSERT (mSplitMemRange != NULL);

    //
    // Create SMM ranges which are set to present and execution-enable.
    //
    NumberOfProtectRange = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);
    for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
      if (mSmmCpuSmramRanges[Index].CpuStart >= mProtectionMemRange[0].Range.Base &&
          mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize < mProtectionMemRange[0].Range.Top) {
        //
        // If the address have been already covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSize
        //
        break;
      }
      mProtectionMemRange[NumberOfProtectRange].Range.Base = mSmmCpuSmramRanges[Index].CpuStart;
      mProtectionMemRange[NumberOfProtectRange].Range.Top = mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize;
      mProtectionMemRange[NumberOfProtectRange].Present = TRUE;
      mProtectionMemRange[NumberOfProtectRange].Nx = FALSE;
      NumberOfProtectRange++;
    }

    //
    // Create MMIO ranges which are set to present and execution-disable.
    //
    for (Index = 0; Index < NumberOfDescriptors; Index++) {
      if (MemorySpaceMap[Index].GcdMemoryType != EfiGcdMemoryTypeMemoryMappedIo) {
        continue;
      }
      mProtectionMemRange[NumberOfProtectRange].Range.Base = MemorySpaceMap[Index].BaseAddress;
      mProtectionMemRange[NumberOfProtectRange].Range.Top = MemorySpaceMap[Index].BaseAddress + MemorySpaceMap[Index].Length;
      mProtectionMemRange[NumberOfProtectRange].Present = TRUE;
      mProtectionMemRange[NumberOfProtectRange].Nx = TRUE;
      NumberOfProtectRange++;
    }

    //
    // Check and updated actual protected memory ranges count
    //
    ASSERT (NumberOfProtectRange <= mProtectionMemRangeCount);
    mProtectionMemRangeCount = NumberOfProtectRange;
  }

  //
  // According to protected ranges, create the ranges which will be mapped by 4KB page.
  //
  NumberOfSpliteRange = 0;
  NumberOfProtectRange = mProtectionMemRangeCount;
  for (Index = 0; Index < NumberOfProtectRange; Index++) {
    //
    // If MMIO base address is not 2MB alignment, make 2MB alignment for create 4KB page in page table.
    //
    ProtectBaseAddress = mProtectionMemRange[Index].Range.Base;
    ProtectEndAddress = mProtectionMemRange[Index].Range.Top;
    if (((ProtectBaseAddress & (SIZE_2MB - 1)) != 0) || ((ProtectEndAddress & (SIZE_2MB - 1)) != 0)) {
      //
      // Check if it is possible to create 4KB-page for not 2MB-aligned range and to create 2MB-page for 2MB-aligned range.
      // A mix of 4KB and 2MB page could save SMRAM space.
      //
      Top2MBAlignedAddress = ProtectEndAddress & ~(SIZE_2MB - 1);
      Base2MBAlignedAddress = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
      if ((Top2MBAlignedAddress > Base2MBAlignedAddress) &&
          ((Top2MBAlignedAddress - Base2MBAlignedAddress) >= SIZE_2MB)) {
        //
        // There is an range which could be mapped by 2MB-page.
        //
        High4KBPageSize = ((ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectEndAddress & ~(SIZE_2MB - 1));
        Low4KBPageSize = ((ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectBaseAddress & ~(SIZE_2MB - 1));
        if (High4KBPageSize != 0) {
          //
          // Add not 2MB-aligned range to be mapped by 4KB-page.
          //
          mSplitMemRange[NumberOfSpliteRange].Base = ProtectEndAddress & ~(SIZE_2MB - 1);
          mSplitMemRange[NumberOfSpliteRange].Top = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
          NumberOfSpliteRange++;
        }
        if (Low4KBPageSize != 0) {
          //
          // Add not 2MB-aligned range to be mapped by 4KB-page.
          //
          mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1);
          mSplitMemRange[NumberOfSpliteRange].Top = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
          NumberOfSpliteRange++;
        }
      } else {
        //
        // The range could only be mapped by 4KB-page.
        //
        mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1);
        mSplitMemRange[NumberOfSpliteRange].Top = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
        NumberOfSpliteRange++;
      }
    }
  }

  mSplitMemRangeCount = NumberOfSpliteRange;

  //
  // Dump the final range lists for debugging.
  //
  DEBUG ((EFI_D_INFO, "SMM Profile Memory Ranges:\n"));
  for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
    DEBUG ((EFI_D_INFO, "mProtectionMemRange[%d].Base = %lx\n", Index, mProtectionMemRange[Index].Range.Base));
    DEBUG ((EFI_D_INFO, "mProtectionMemRange[%d].Top = %lx\n", Index, mProtectionMemRange[Index].Range.Top));
  }
  for (Index = 0; Index < mSplitMemRangeCount; Index++) {
    DEBUG ((EFI_D_INFO, "mSplitMemRange[%d].Base = %lx\n", Index, mSplitMemRange[Index].Base));
    DEBUG ((EFI_D_INFO, "mSplitMemRange[%d].Top = %lx\n", Index, mSplitMemRange[Index].Top));
  }
}
527
/**
  Update page table according to protected memory ranges and the 4KB-page mapped memory ranges.

  First pass: split every 2MB page that IsAddressSplit() reports into a new
  4KB page table.  Second pass: mark pages outside the protected ranges as
  not-present and set the NX bit on non-executable pages (when XD is
  supported).  Finally flushes the TLB and records that XD is enabled.

**/
VOID
InitPaging (
  VOID
  )
{
  UINT64   *Pml4;
  UINT64   *Pde;
  UINT64   *Pte;
  UINT64   *Pt;
  UINTN    Address;
  UINTN    Level1;
  UINTN    Level2;
  UINTN    Level3;
  UINTN    Level4;
  UINTN    NumberOfPdpEntries;
  UINTN    NumberOfPml4Entries;
  UINTN    SizeOfMemorySpace;
  BOOLEAN  Nx;

  if (sizeof (UINTN) == sizeof (UINT64)) {
    //
    // 64-bit: 4-level paging rooted at mSmmProfileCr3.
    //
    Pml4 = (UINT64*)(UINTN)mSmmProfileCr3;
    SizeOfMemorySpace = HighBitSet64 (gPhyMask) + 1;
    //
    // Calculate the table entries of PML4E and PDPTE.
    //
    if (SizeOfMemorySpace <= 39 ) {
      NumberOfPml4Entries = 1;
      NumberOfPdpEntries = (UINT32)LShiftU64 (1, (SizeOfMemorySpace - 30));
    } else {
      NumberOfPml4Entries = (UINT32)LShiftU64 (1, (SizeOfMemorySpace - 39));
      NumberOfPdpEntries = 512;
    }
  } else {
    //
    // 32-bit PAE: four PDPT entries, no PML4 level.
    //
    NumberOfPml4Entries = 1;
    NumberOfPdpEntries = 4;
  }

  //
  // Go through page table and change 2MB-page into 4KB-page.
  //
  for (Level1 = 0; Level1 < NumberOfPml4Entries; Level1++) {
    if (sizeof (UINTN) == sizeof (UINT64)) {
      if ((Pml4[Level1] & IA32_PG_P) == 0) {
        //
        // If Pml4 entry does not exist, skip it
        //
        continue;
      }
      Pde = (UINT64 *)(UINTN)(Pml4[Level1] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    } else {
      Pde = (UINT64*)(UINTN)mSmmProfileCr3;
    }
    for (Level2 = 0; Level2 < NumberOfPdpEntries; Level2++, Pde++) {
      if ((*Pde & IA32_PG_P) == 0) {
        //
        // If PDE entry does not exist, skip it
        //
        continue;
      }
      if ((*Pde & IA32_PG_PS) != 0) {
        //
        // This is 1G entry, skip it
        //
        continue;
      }
      Pte = (UINT64 *)(UINTN)(*Pde & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      if (Pte == 0) {
        continue;
      }
      for (Level3 = 0; Level3 < SIZE_4KB / sizeof (*Pte); Level3++, Pte++) {
        if ((*Pte & IA32_PG_P) == 0) {
          //
          // If PTE entry does not exist, skip it
          //
          continue;
        }
        //
        // Physical address covered by this PD-level entry (2MB granularity).
        //
        Address = (((Level2 << 9) + Level3) << 21);

        //
        // If it is 2M page, check IsAddressSplit()
        //
        if (((*Pte & IA32_PG_PS) != 0) && IsAddressSplit (Address)) {
          //
          // Based on current page table, create 4KB page table for split area.
          //
          ASSERT (Address == (*Pte & PHYSICAL_ADDRESS_MASK));

          Pt = AllocatePageTableMemory (1);
          ASSERT (Pt != NULL);

          // Split it
          for (Level4 = 0; Level4 < SIZE_4KB / sizeof(*Pt); Level4++) {
            Pt[Level4] = Address + ((Level4 << 12) | mAddressEncMask | PAGE_ATTRIBUTE_BITS);
          } // end for PT
          *Pte = (UINT64)(UINTN)Pt | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        } // end if IsAddressSplit
      } // end for PTE
    } // end for PDE
  }

  //
  // Go through page table and set several page table entries to absent or execute-disable.
  //
  DEBUG ((EFI_D_INFO, "Patch page table start ...\n"));
  for (Level1 = 0; Level1 < NumberOfPml4Entries; Level1++) {
    if (sizeof (UINTN) == sizeof (UINT64)) {
      if ((Pml4[Level1] & IA32_PG_P) == 0) {
        //
        // If Pml4 entry does not exist, skip it
        //
        continue;
      }
      Pde = (UINT64 *)(UINTN)(Pml4[Level1] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    } else {
      Pde = (UINT64*)(UINTN)mSmmProfileCr3;
    }
    for (Level2 = 0; Level2 < NumberOfPdpEntries; Level2++, Pde++) {
      if ((*Pde & IA32_PG_P) == 0) {
        //
        // If PDE entry does not exist, skip it
        //
        continue;
      }
      if ((*Pde & IA32_PG_PS) != 0) {
        //
        // This is 1G entry, set NX bit and skip it
        //
        if (mXdSupported) {
          *Pde = *Pde | IA32_PG_NX;
        }
        continue;
      }
      Pte = (UINT64 *)(UINTN)(*Pde & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      if (Pte == 0) {
        continue;
      }
      for (Level3 = 0; Level3 < SIZE_4KB / sizeof (*Pte); Level3++, Pte++) {
        if ((*Pte & IA32_PG_P) == 0) {
          //
          // If PTE entry does not exist, skip it
          //
          continue;
        }
        Address = (((Level2 << 9) + Level3) << 21);

        if ((*Pte & IA32_PG_PS) != 0) {
          // 2MB page

          if (!IsAddressValid (Address, &Nx)) {
            //
            // Patch to remove Present flag and RW flag
            //
            *Pte = *Pte & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
          }
          if (Nx && mXdSupported) {
            *Pte = *Pte | IA32_PG_NX;
          }
        } else {
          // 4KB page: patch each of the 512 entries in the page table.
          Pt = (UINT64 *)(UINTN)(*Pte & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
          if (Pt == 0) {
            continue;
          }
          for (Level4 = 0; Level4 < SIZE_4KB / sizeof(*Pt); Level4++, Pt++) {
            if (!IsAddressValid (Address, &Nx)) {
              *Pt = *Pt & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
            }
            if (Nx && mXdSupported) {
              *Pt = *Pt | IA32_PG_NX;
            }
            Address += SIZE_4KB;
          } // end for PT
        } // end if PS
      } // end for PTE
    } // end for PDE
  }

  //
  // Flush TLB
  //
  CpuFlushTlb ();
  DEBUG ((EFI_D_INFO, "Patch page table done!\n"));
  //
  // Set execute-disable flag
  //
  mXdEnabled = TRUE;

  return ;
}
721
722 /**
723 To get system port address of the SMI Command Port in FADT table.
724
725 **/
726 VOID
727 GetSmiCommandPort (
728 VOID
729 )
730 {
731 EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *Fadt;
732
733 Fadt = (EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *) EfiLocateFirstAcpiTable (
734 EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE
735 );
736 ASSERT (Fadt != NULL);
737
738 mSmiCommandPort = Fadt->SmiCmd;
739 DEBUG ((EFI_D_INFO, "mSmiCommandPort = %x\n", mSmiCommandPort));
740 }
741
/**
  Start SMM profile recording.

  Sets mSmmProfileStart so the handlers in this driver (e.g.
  DebugExceptionHandler, SmmProfileRecordSmiNum) begin recording data.
  NOTE(review): the original header described page table patching, but the
  visible body only sets this flag; page table updates are done in
  InitPaging().

**/
VOID
SmmProfileStart (
  VOID
  )
{
  //
  // The flag indicates SMM profile starts to work.
  //
  mSmmProfileStart = TRUE;
}
758
/**
  Initialize SMM profile in SmmReadyToLock protocol callback function.

  @param  Protocol   Points to the protocol's unique identifier.
  @param  Interface  Points to the interface instance.
  @param  Handle     The handle on which the interface was installed.

  @retval EFI_SUCCESS   SmmReadyToLock protocol callback runs successfully.
**/
EFI_STATUS
EFIAPI
InitSmmProfileCallBack (
  IN CONST EFI_GUID  *Protocol,
  IN VOID            *Interface,
  IN EFI_HANDLE      Handle
  )
{
  //
  // Save to variable so that SMM profile data can be found.
  // NOTE(review): the SetVariable return status is not checked here;
  // failure would leave the profile buffer undiscoverable by the OS agent.
  //
  gRT->SetVariable (
         SMM_PROFILE_NAME,
         &gEfiCallerIdGuid,
         EFI_VARIABLE_BOOTSERVICE_ACCESS | EFI_VARIABLE_RUNTIME_ACCESS,
         sizeof(mSmmProfileBase),
         &mSmmProfileBase
         );

  //
  // Get Software SMI from FADT
  //
  GetSmiCommandPort ();

  //
  // Initialize protected memory range for patching page table later.
  //
  InitProtectedMemRange ();

  return EFI_SUCCESS;
}
799
/**
  Initialize SMM profile data structures.

  Allocates the per-CPU page-fault bookkeeping arrays, the profile data
  buffer (below 4GB, reserved memory), and — when BTS is supported — the
  per-CPU Debug Store save areas with their BTS/PEBS record buffers.  Also
  fills the first two entries of the protection range template and registers
  the SmmReadyToLock callback that completes initialization.

**/
VOID
InitSmmProfileInternal (
  VOID
  )
{
  EFI_STATUS            Status;
  EFI_PHYSICAL_ADDRESS  Base;
  VOID                  *Registration;
  UINTN                 Index;
  UINTN                 MsrDsAreaSizePerCpu;
  UINTN                 TotalSize;

  //
  // Per-CPU page fault bookkeeping used by the #PF and #DB handlers.
  //
  mPFEntryCount = (UINTN *)AllocateZeroPool (sizeof (UINTN) * mMaxNumberOfCpus);
  ASSERT (mPFEntryCount != NULL);
  mLastPFEntryValue = (UINT64 (*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (
                        sizeof (mLastPFEntryValue[0]) * mMaxNumberOfCpus);
  ASSERT (mLastPFEntryValue != NULL);
  mLastPFEntryPointer = (UINT64 *(*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (
                          sizeof (mLastPFEntryPointer[0]) * mMaxNumberOfCpus);
  ASSERT (mLastPFEntryPointer != NULL);

  //
  // Allocate memory for SmmProfile below 4GB.
  // The base address
  //
  mSmmProfileSize = PcdGet32 (PcdCpuSmmProfileSize);
  ASSERT ((mSmmProfileSize & 0xFFF) == 0);

  if (mBtsSupported) {
    //
    // The DS save area is appended after the profile data buffer.
    //
    TotalSize = mSmmProfileSize + mMsrDsAreaSize;
  } else {
    TotalSize = mSmmProfileSize;
  }

  //
  // AllocateMaxAddress with a 4GB ceiling keeps the buffer 32-bit addressable.
  //
  Base = 0xFFFFFFFF;
  Status = gBS->AllocatePages (
                  AllocateMaxAddress,
                  EfiReservedMemoryType,
                  EFI_SIZE_TO_PAGES (TotalSize),
                  &Base
                  );
  ASSERT_EFI_ERROR (Status);
  ZeroMem ((VOID *)(UINTN)Base, TotalSize);
  mSmmProfileBase = (SMM_PROFILE_HEADER *)(UINTN)Base;

  //
  // Initialize SMM profile data header.
  //
  mSmmProfileBase->HeaderSize = sizeof (SMM_PROFILE_HEADER);
  mSmmProfileBase->MaxDataEntries = (UINT64)((mSmmProfileSize - sizeof(SMM_PROFILE_HEADER)) / sizeof (SMM_PROFILE_ENTRY));
  mSmmProfileBase->MaxDataSize = MultU64x64 (mSmmProfileBase->MaxDataEntries, sizeof(SMM_PROFILE_ENTRY));
  mSmmProfileBase->CurDataEntries = 0;
  mSmmProfileBase->CurDataSize = 0;
  mSmmProfileBase->TsegStart = mCpuHotPlugData.SmrrBase;
  mSmmProfileBase->TsegSize = mCpuHotPlugData.SmrrSize;
  mSmmProfileBase->NumSmis = 0;
  mSmmProfileBase->NumCpus = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;

  if (mBtsSupported) {
    //
    // Carve the DS save area into equal per-CPU slices; each slice holds
    // the MSR_DS_AREA_STRUCT header, the BTS record buffer, and the PEBS
    // record buffer (placed at the slice's tail).
    //
    mMsrDsArea = (MSR_DS_AREA_STRUCT **)AllocateZeroPool (sizeof (MSR_DS_AREA_STRUCT *) * mMaxNumberOfCpus);
    ASSERT (mMsrDsArea != NULL);
    mMsrBTSRecord = (BRANCH_TRACE_RECORD **)AllocateZeroPool (sizeof (BRANCH_TRACE_RECORD *) * mMaxNumberOfCpus);
    ASSERT (mMsrBTSRecord != NULL);
    mMsrPEBSRecord = (PEBS_RECORD **)AllocateZeroPool (sizeof (PEBS_RECORD *) * mMaxNumberOfCpus);
    ASSERT (mMsrPEBSRecord != NULL);

    mMsrDsAreaBase = (MSR_DS_AREA_STRUCT *)((UINTN)Base + mSmmProfileSize);
    MsrDsAreaSizePerCpu = mMsrDsAreaSize / mMaxNumberOfCpus;
    mBTSRecordNumber = (MsrDsAreaSizePerCpu - sizeof(PEBS_RECORD) * PEBS_RECORD_NUMBER - sizeof(MSR_DS_AREA_STRUCT)) / sizeof(BRANCH_TRACE_RECORD);
    for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
      mMsrDsArea[Index] = (MSR_DS_AREA_STRUCT *)((UINTN)mMsrDsAreaBase + MsrDsAreaSizePerCpu * Index);
      mMsrBTSRecord[Index] = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[Index] + sizeof(MSR_DS_AREA_STRUCT));
      mMsrPEBSRecord[Index] = (PEBS_RECORD *)((UINTN)mMsrDsArea[Index] + MsrDsAreaSizePerCpu - sizeof(PEBS_RECORD) * PEBS_RECORD_NUMBER);

      //
      // AbsoluteMaximum is one byte past each buffer; the interrupt
      // threshold is set beyond it so the buffer wraps without interrupting.
      //
      mMsrDsArea[Index]->BTSBufferBase = (UINTN)mMsrBTSRecord[Index];
      mMsrDsArea[Index]->BTSIndex = mMsrDsArea[Index]->BTSBufferBase;
      mMsrDsArea[Index]->BTSAbsoluteMaximum = mMsrDsArea[Index]->BTSBufferBase + mBTSRecordNumber * sizeof(BRANCH_TRACE_RECORD) + 1;
      mMsrDsArea[Index]->BTSInterruptThreshold = mMsrDsArea[Index]->BTSAbsoluteMaximum + 1;

      mMsrDsArea[Index]->PEBSBufferBase = (UINTN)mMsrPEBSRecord[Index];
      mMsrDsArea[Index]->PEBSIndex = mMsrDsArea[Index]->PEBSBufferBase;
      mMsrDsArea[Index]->PEBSAbsoluteMaximum = mMsrDsArea[Index]->PEBSBufferBase + PEBS_RECORD_NUMBER * sizeof(PEBS_RECORD) + 1;
      mMsrDsArea[Index]->PEBSInterruptThreshold = mMsrDsArea[Index]->PEBSAbsoluteMaximum + 1;
    }
  }

  mProtectionMemRange = mProtectionMemRangeTemplate;
  mProtectionMemRangeCount = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);

  //
  // Update TSeg entry.
  //
  mProtectionMemRange[0].Range.Base = mCpuHotPlugData.SmrrBase;
  mProtectionMemRange[0].Range.Top = mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize;

  //
  // Update SMM profile entry.
  //
  mProtectionMemRange[1].Range.Base = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase;
  mProtectionMemRange[1].Range.Top = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase + TotalSize;

  //
  // Allocate memory reserved for creating 4KB pages.
  //
  InitPagesForPFHandler ();

  //
  // Start SMM profile when SmmReadyToLock protocol is installed.
  //
  Status = gSmst->SmmRegisterProtocolNotify (
                    &gEfiSmmReadyToLockProtocolGuid,
                    InitSmmProfileCallBack,
                    &Registration
                    );
  ASSERT_EFI_ERROR (Status);

  return ;
}
922
/**
  Check if feature is supported by a processor.

  Probes CPUID and IA32_MISC_ENABLE to validate the optimistic defaults of
  mCetSupported, mXdSupported, and mBtsSupported, clearing each flag (and
  patching the corresponding assembly stub) when the feature is absent.

**/
VOID
CheckFeatureSupported (
  VOID
  )
{
  UINT32                         RegEax;
  UINT32                         RegEcx;
  UINT32                         RegEdx;
  MSR_IA32_MISC_ENABLE_REGISTER  MiscEnableMsr;

  if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
    //
    // CET requires the extended CPUID leaf and the CET-SS feature bit.
    //
    AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
    if (RegEax <= CPUID_EXTENDED_FUNCTION) {
      mCetSupported = FALSE;
      PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
    }
    AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, NULL);
    if ((RegEcx & CPUID_CET_SS) == 0) {
      mCetSupported = FALSE;
      PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
    }
  }

  if (mXdSupported) {
    AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
    if (RegEax <= CPUID_EXTENDED_FUNCTION) {
      //
      // Extended CPUID functions are not supported on this processor.
      //
      mXdSupported = FALSE;
      PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
    }

    AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & CPUID1_EDX_XD_SUPPORT) == 0) {
      //
      // Execute Disable Bit feature is not supported on this processor.
      //
      mXdSupported = FALSE;
      PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
    }
  }

  if (mBtsSupported) {
    AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & CPUID1_EDX_BTS_AVAILABLE) != 0) {
      //
      // Per IA32 manuals:
      // When CPUID.1:EDX[21] is set, the following BTS facilities are available:
      // 1. The BTS_UNAVAILABLE flag in the IA32_MISC_ENABLE MSR indicates the
      //    availability of the BTS facilities, including the ability to set the BTS and
      //    BTINT bits in the MSR_DEBUGCTLA MSR.
      // 2. The IA32_DS_AREA MSR can be programmed to point to the DS save area.
      //
      MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);
      if (MiscEnableMsr.Bits.BTS == 1) {
        //
        // BTS facilities is not supported if MSR_IA32_MISC_ENABLE.BTS bit is set.
        //
        mBtsSupported = FALSE;
      }
    }
  }
}
991
992 /**
993 Enable single step.
994
995 **/
996 VOID
997 ActivateSingleStepDB (
998 VOID
999 )
1000 {
1001 UINTN Dr6;
1002
1003 Dr6 = AsmReadDr6 ();
1004 if ((Dr6 & DR6_SINGLE_STEP) != 0) {
1005 return;
1006 }
1007 Dr6 |= DR6_SINGLE_STEP;
1008 AsmWriteDr6 (Dr6);
1009 }
1010
1011 /**
1012 Enable last branch.
1013
1014 **/
1015 VOID
1016 ActivateLBR (
1017 VOID
1018 )
1019 {
1020 UINT64 DebugCtl;
1021
1022 DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);
1023 if ((DebugCtl & MSR_DEBUG_CTL_LBR) != 0) {
1024 return ;
1025 }
1026 DebugCtl |= MSR_DEBUG_CTL_LBR;
1027 AsmWriteMsr64 (MSR_DEBUG_CTL, DebugCtl);
1028 }
1029
1030 /**
1031 Enable branch trace store.
1032
1033 @param CpuIndex The index of the processor.
1034
1035 **/
1036 VOID
1037 ActivateBTS (
1038 IN UINTN CpuIndex
1039 )
1040 {
1041 UINT64 DebugCtl;
1042
1043 DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);
1044 if ((DebugCtl & MSR_DEBUG_CTL_BTS) != 0) {
1045 return ;
1046 }
1047
1048 AsmWriteMsr64 (MSR_DS_AREA, (UINT64)(UINTN)mMsrDsArea[CpuIndex]);
1049 DebugCtl |= (UINT64)(MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR);
1050 DebugCtl &= ~((UINT64)MSR_DEBUG_CTL_BTINT);
1051 AsmWriteMsr64 (MSR_DEBUG_CTL, DebugCtl);
1052 }
1053
/**
  Increase SMI number in each SMI entry.

  Increments the NumSmis counter in the profile header; counting only
  happens once profiling has been started via SmmProfileStart().

**/
VOID
SmmProfileRecordSmiNum (
  VOID
  )
{
  if (mSmmProfileStart) {
    mSmmProfileBase->NumSmis++;
  }
}
1067
/**
  Initialize processor environment for SMM profile.

  Enables single-stepping on this processor and, when BTS is supported,
  turns on last-branch recording and branch trace store as well.

  @param CpuIndex The index of the processor.

**/
VOID
ActivateSmmProfile (
  IN UINTN CpuIndex
  )
{
  //
  // Enable Single Step DB#
  //
  ActivateSingleStepDB ();

  if (mBtsSupported) {
    //
    // We can not get useful information from LER, so we have to use BTS.
    //
    ActivateLBR ();

    //
    // Enable BTS
    //
    ActivateBTS (CpuIndex);
  }
}
1096
/**
  Initialize SMM profile in SMM CPU entry point.

  Saves the SMM page table CR3 for later use, then — unless the profile
  feature and both non-stop guard modes are disabled — allocates the profile
  data structures, installs the profile IDT, and arms the #PF handler to set
  up a subsequent #DB trap.

  @param[in] Cr3 The base address of the page tables to use in SMM.

**/
VOID
InitSmmProfile (
  UINT32 Cr3
  )
{
  //
  // Save Cr3
  //
  mSmmProfileCr3 = Cr3;

  //
  // Skip SMM profile initialization if feature is disabled
  //
  if (!FeaturePcdGet (PcdCpuSmmProfileEnable) &&
      !HEAP_GUARD_NONSTOP_MODE &&
      !NULL_DETECTION_NONSTOP_MODE) {
    return;
  }

  //
  // Initialize SmmProfile here
  //
  InitSmmProfileInternal ();

  //
  // Initialize profile IDT.
  //
  InitIdtr ();

  //
  // Tell #PF handler to prepare a #DB subsequently.
  //
  mSetupDebugTrap = TRUE;
}
1137
1138 /**
1139 Update page table to map the memory correctly in order to make the instruction
1140 which caused page fault execute successfully. And it also save the original page
1141 table to be restored in single-step exception.
1142
1143 @param PageTable PageTable Address.
1144 @param PFAddress The memory address which caused page fault exception.
1145 @param CpuIndex The index of the processor.
1146 @param ErrorCode The Error code of exception.
1147
1148 **/
1149 VOID
1150 RestorePageTableBelow4G (
1151 UINT64 *PageTable,
1152 UINT64 PFAddress,
1153 UINTN CpuIndex,
1154 UINTN ErrorCode
1155 )
1156 {
1157 UINTN PTIndex;
1158 UINTN PFIndex;
1159
1160 //
1161 // PML4
1162 //
1163 if (sizeof(UINT64) == sizeof(UINTN)) {
1164 PTIndex = (UINTN)BitFieldRead64 (PFAddress, 39, 47);
1165 ASSERT (PageTable[PTIndex] != 0);
1166 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
1167 }
1168
1169 //
1170 // PDPTE
1171 //
1172 PTIndex = (UINTN)BitFieldRead64 (PFAddress, 30, 38);
1173 ASSERT (PageTable[PTIndex] != 0);
1174 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
1175
1176 //
1177 // PD
1178 //
1179 PTIndex = (UINTN)BitFieldRead64 (PFAddress, 21, 29);
1180 if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
1181 //
1182 // Large page
1183 //
1184
1185 //
1186 // Record old entries with non-present status
1187 // Old entries include the memory which instruction is at and the memory which instruction access.
1188 //
1189 //
1190 ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
1191 if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
1192 PFIndex = mPFEntryCount[CpuIndex];
1193 mLastPFEntryValue[CpuIndex][PFIndex] = PageTable[PTIndex];
1194 mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
1195 mPFEntryCount[CpuIndex]++;
1196 }
1197
1198 //
1199 // Set new entry
1200 //
1201 PageTable[PTIndex] = (PFAddress & ~((1ull << 21) - 1));
1202 PageTable[PTIndex] |= (UINT64)IA32_PG_PS;
1203 PageTable[PTIndex] |= (UINT64)PAGE_ATTRIBUTE_BITS;
1204 if ((ErrorCode & IA32_PF_EC_ID) != 0) {
1205 PageTable[PTIndex] &= ~IA32_PG_NX;
1206 }
1207 } else {
1208 //
1209 // Small page
1210 //
1211 ASSERT (PageTable[PTIndex] != 0);
1212 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
1213
1214 //
1215 // 4K PTE
1216 //
1217 PTIndex = (UINTN)BitFieldRead64 (PFAddress, 12, 20);
1218
1219 //
1220 // Record old entries with non-present status
1221 // Old entries include the memory which instruction is at and the memory which instruction access.
1222 //
1223 //
1224 ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
1225 if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
1226 PFIndex = mPFEntryCount[CpuIndex];
1227 mLastPFEntryValue[CpuIndex][PFIndex] = PageTable[PTIndex];
1228 mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
1229 mPFEntryCount[CpuIndex]++;
1230 }
1231
1232 //
1233 // Set new entry
1234 //
1235 PageTable[PTIndex] = (PFAddress & ~((1ull << 12) - 1));
1236 PageTable[PTIndex] |= (UINT64)PAGE_ATTRIBUTE_BITS;
1237 if ((ErrorCode & IA32_PF_EC_ID) != 0) {
1238 PageTable[PTIndex] &= ~IA32_PG_NX;
1239 }
1240 }
1241 }
1242
1243 /**
1244 Handler for Page Fault triggered by Guard page.
1245
1246 @param ErrorCode The Error code of exception.
1247
1248 **/
1249 VOID
1250 GuardPagePFHandler (
1251 UINTN ErrorCode
1252 )
1253 {
1254 UINT64 *PageTable;
1255 UINT64 PFAddress;
1256 UINT64 RestoreAddress;
1257 UINTN RestorePageNumber;
1258 UINTN CpuIndex;
1259
1260 PageTable = (UINT64 *)AsmReadCr3 ();
1261 PFAddress = AsmReadCr2 ();
1262 CpuIndex = GetCpuIndex ();
1263
1264 //
1265 // Memory operation cross pages, like "rep mov" instruction, will cause
1266 // infinite loop between this and Debug Trap handler. We have to make sure
1267 // that current page and the page followed are both in PRESENT state.
1268 //
1269 RestorePageNumber = 2;
1270 RestoreAddress = PFAddress;
1271 while (RestorePageNumber > 0) {
1272 RestorePageTableBelow4G (PageTable, RestoreAddress, CpuIndex, ErrorCode);
1273 RestoreAddress += EFI_PAGE_SIZE;
1274 RestorePageNumber--;
1275 }
1276
1277 //
1278 // Flush TLB
1279 //
1280 CpuFlushTlb ();
1281 }
1282
/**
  The Page fault handler to save SMM profile data.

  Restores the faulting page(s) to PRESENT so the instruction can retry,
  determines the true faulting instruction address (via BTS when the fault
  is an instruction fetch), detects whether this SMI was software-triggered
  through the SMI command port, and logs a SMM_PROFILE_ENTRY record unless
  an identical record already exists.

  @param Rip        The RIP when exception happens.
  @param ErrorCode  The Error code of exception.

**/
VOID
SmmProfilePFHandler (
  UINTN Rip,
  UINTN ErrorCode
  )
{
  UINT64 *PageTable;
  UINT64 PFAddress;
  UINT64 RestoreAddress;
  UINTN RestorePageNumber;
  UINTN CpuIndex;
  UINTN Index;
  UINT64 InstructionAddress;
  UINTN MaxEntryNumber;
  UINTN CurrentEntryNumber;
  BOOLEAN IsValidPFAddress;
  SMM_PROFILE_ENTRY *SmmProfileEntry;
  UINT64 SmiCommand;
  EFI_STATUS Status;
  UINT8 SoftSmiValue;
  EFI_SMM_SAVE_STATE_IO_INFO IoInfo;

  if (!mSmmProfileStart) {
    //
    // If SMM profile does not start, call original page fault handler.
    //
    SmiDefaultPFHandler ();
    return;
  }

  //
  // Pause branch tracing so the handler's own branches are not recorded.
  //
  if (mBtsSupported) {
    DisableBTS ();
  }

  IsValidPFAddress = FALSE;
  PageTable = (UINT64 *)AsmReadCr3 ();
  PFAddress = AsmReadCr2 ();
  CpuIndex = GetCpuIndex ();

  //
  // Memory operation cross pages, like "rep mov" instruction, will cause
  // infinite loop between this and Debug Trap handler. We have to make sure
  // that current page and the page followed are both in PRESENT state.
  //
  RestorePageNumber = 2;
  RestoreAddress = PFAddress;
  while (RestorePageNumber > 0) {
    if (RestoreAddress <= 0xFFFFFFFF) {
      RestorePageTableBelow4G (PageTable, RestoreAddress, CpuIndex, ErrorCode);
    } else {
      // Above-4G path may mark the address valid (in a protected range),
      // in which case no profile entry is logged below.
      RestorePageTableAbove4G (PageTable, RestoreAddress, CpuIndex, ErrorCode, &IsValidPFAddress);
    }
    RestoreAddress += EFI_PAGE_SIZE;
    RestorePageNumber--;
  }

  if (!IsValidPFAddress) {
    InstructionAddress = Rip;
    if ((ErrorCode & IA32_PF_EC_ID) != 0 && (mBtsSupported)) {
      //
      // If it is instruction fetch failure, get the correct IP from BTS.
      //
      InstructionAddress = GetSourceFromDestinationOnBts (CpuIndex, Rip);
      if (InstructionAddress == 0) {
        //
        // It indicates the instruction which caused page fault is not a jump instruction,
        // set instruction address same as the page fault address.
        //
        InstructionAddress = PFAddress;
      }
    }

    //
    // Indicate it is not software SMI
    //
    SmiCommand = 0xFFFFFFFFFFFFFFFFULL;
    //
    // Scan every CPU's save state for an I/O access to the SMI command
    // port; if found, the SMI was software-triggered and the command
    // value is read back from the port.
    //
    for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
      Status = SmmReadSaveState(&mSmmCpu, sizeof(IoInfo), EFI_SMM_SAVE_STATE_REGISTER_IO, Index, &IoInfo);
      if (EFI_ERROR (Status)) {
        continue;
      }
      if (IoInfo.IoPort == mSmiCommandPort) {
        //
        // A software SMI triggered by SMI command port has been found, get SmiCommand from SMI command port.
        //
        SoftSmiValue = IoRead8 (mSmiCommandPort);
        SmiCommand = (UINT64)SoftSmiValue;
        break;
      }
    }

    // Profile entries start immediately after the header.
    SmmProfileEntry = (SMM_PROFILE_ENTRY *)(UINTN)(mSmmProfileBase + 1);
    //
    // Check if there is already a same entry in profile data.
    //
    for (Index = 0; Index < (UINTN) mSmmProfileBase->CurDataEntries; Index++) {
      if ((SmmProfileEntry[Index].ErrorCode == (UINT64)ErrorCode) &&
          (SmmProfileEntry[Index].Address == PFAddress) &&
          (SmmProfileEntry[Index].CpuNum == (UINT64)CpuIndex) &&
          (SmmProfileEntry[Index].Instruction == InstructionAddress) &&
          (SmmProfileEntry[Index].SmiCmd == SmiCommand)) {
        //
        // Same record exist, need not save again.
        //
        break;
      }
    }
    if (Index == mSmmProfileBase->CurDataEntries) {
      CurrentEntryNumber = (UINTN) mSmmProfileBase->CurDataEntries;
      MaxEntryNumber = (UINTN) mSmmProfileBase->MaxDataEntries;
      if (FeaturePcdGet (PcdCpuSmmProfileRingBuffer)) {
        // Ring-buffer mode: wrap and overwrite the oldest entry.
        CurrentEntryNumber = CurrentEntryNumber % MaxEntryNumber;
      }
      if (CurrentEntryNumber < MaxEntryNumber) {
        //
        // Log the new entry
        //
        SmmProfileEntry[CurrentEntryNumber].SmiNum = mSmmProfileBase->NumSmis;
        SmmProfileEntry[CurrentEntryNumber].ErrorCode = (UINT64)ErrorCode;
        SmmProfileEntry[CurrentEntryNumber].ApicId = (UINT64)GetApicId ();
        SmmProfileEntry[CurrentEntryNumber].CpuNum = (UINT64)CpuIndex;
        SmmProfileEntry[CurrentEntryNumber].Address = PFAddress;
        SmmProfileEntry[CurrentEntryNumber].Instruction = InstructionAddress;
        SmmProfileEntry[CurrentEntryNumber].SmiCmd = SmiCommand;
        //
        // Update current entry index and data size in the header.
        //
        // NOTE(review): in ring-buffer mode CurDataEntries keeps growing
        // past MaxDataEntries; consumers appear expected to apply the
        // modulo themselves — confirm against the profile data consumer.
        //
        mSmmProfileBase->CurDataEntries++;
        mSmmProfileBase->CurDataSize = MultU64x64 (mSmmProfileBase->CurDataEntries, sizeof (SMM_PROFILE_ENTRY));
      }
    }
  }
  //
  // Flush TLB
  //
  CpuFlushTlb ();

  //
  // Resume branch tracing now that logging is done.
  //
  if (mBtsSupported) {
    EnableBTS ();
  }
}
1431
1432 /**
1433 Replace INT1 exception handler to restore page table to absent/execute-disable state
1434 in order to trigger page fault again to save SMM profile data..
1435
1436 **/
1437 VOID
1438 InitIdtr (
1439 VOID
1440 )
1441 {
1442 EFI_STATUS Status;
1443
1444 Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_DEBUG, DebugExceptionHandler);
1445 ASSERT_EFI_ERROR (Status);
1446 }