]> git.proxmox.com Git - mirror_edk2.git/blob - UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
UefiCpuPkg: Move AsmRelocateApLoopStart from Mpfuncs.nasm to AmdSev.nasm
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / SmmProfile.c
1 /** @file
2 Enable SMM profile.
3
4 Copyright (c) 2012 - 2019, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017 - 2020, AMD Incorporated. All rights reserved.<BR>
6
7 SPDX-License-Identifier: BSD-2-Clause-Patent
8
9 **/
10
11 #include "PiSmmCpuDxeSmm.h"
12 #include "SmmProfileInternal.h"
13
//
// CR3 value of the page tables used in SMM (saved by InitSmmProfile()).
//
UINT32  mSmmProfileCr3;

//
// SMM profile data buffer (header + entries), allocated below 4GB.
//
SMM_PROFILE_HEADER  *mSmmProfileBase;
//
// Base of the per-CPU Debug Store (DS) save areas used for BTS.
//
MSR_DS_AREA_STRUCT  *mMsrDsAreaBase;
//
// The size of the buffer to store SMM profile data.
//
UINTN  mSmmProfileSize;

//
// The size of the buffer used to enable branch trace store.
//
UINTN  mMsrDsAreaSize = SMM_PROFILE_DTS_SIZE;

//
// The flag indicates if execute-disable is supported by processor.
//
BOOLEAN  mXdSupported = TRUE;

//
// The flag indicates if execute-disable is enabled on processor.
//
BOOLEAN  mXdEnabled = FALSE;

//
// The flag indicates if BTS is supported by processor.
//
BOOLEAN  mBtsSupported = TRUE;

//
// The flag indicates if SMM profile starts to record data.
//
BOOLEAN  mSmmProfileStart = FALSE;

//
// The flag indicates if #DB will be setup in #PF handler.
//
BOOLEAN  mSetupDebugTrap = FALSE;

//
// Record the page fault exception count for one instruction execution.
//
UINTN  *mPFEntryCount;

//
// Per-CPU arrays recording the original page table entry values and their
// addresses, so DebugExceptionHandler() can restore them after single-step.
//
UINT64  (*mLastPFEntryValue)[MAX_PF_ENTRY_COUNT];
UINT64  *(*mLastPFEntryPointer)[MAX_PF_ENTRY_COUNT];

//
// Per-CPU Debug Store area pointers and the BTS/PEBS record buffers inside
// them (laid out by InitSmmProfileInternal()).
//
MSR_DS_AREA_STRUCT   **mMsrDsArea;
BRANCH_TRACE_RECORD  **mMsrBTSRecord;
UINTN                mBTSRecordNumber;
PEBS_RECORD          **mMsrPEBSRecord;

//
// These memory ranges are always present, they do not generate the access type
// of page fault exception, but they possibly generate instruction fetch type
// of page fault exception.
//
MEMORY_PROTECTION_RANGE  *mProtectionMemRange     = NULL;
UINTN                    mProtectionMemRangeCount = 0;

//
// Some predefined memory ranges.
// Initializer order per entry: { { Base, Top }, Present, Nx }.
//
MEMORY_PROTECTION_RANGE  mProtectionMemRangeTemplate[] = {
  //
  // SMRAM range (to be fixed in runtime).
  // It is always present and instruction fetches are allowed.
  //
  {
    { 0x00000000, 0x00000000 }, TRUE, FALSE
  },

  //
  // SMM profile data range( to be fixed in runtime).
  // It is always present and instruction fetches are not allowed.
  //
  {
    { 0x00000000, 0x00000000 }, TRUE, TRUE
  },

  //
  // SMRAM ranges not covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSiz (to be fixed in runtime).
  // It is always present and instruction fetches are allowed.
  // {{0x00000000, 0x00000000},TRUE,FALSE},
  //

  //
  // Future extended range could be added here.
  //

  //
  // PCI MMIO ranges (to be added in runtime).
  // They are always present and instruction fetches are not allowed.
  //
};

//
// These memory ranges are mapped by 4KB-page instead of 2MB-page.
//
MEMORY_RANGE  *mSplitMemRange     = NULL;
UINTN         mSplitMemRangeCount = 0;

//
// SMI command port.
//
UINT32  mSmiCommandPort;
119
120 /**
121 Disable branch trace store.
122
123 **/
124 VOID
125 DisableBTS (
126 VOID
127 )
128 {
129 AsmMsrAnd64 (MSR_DEBUG_CTL, ~((UINT64)(MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR)));
130 }
131
132 /**
133 Enable branch trace store.
134
135 **/
136 VOID
137 EnableBTS (
138 VOID
139 )
140 {
141 AsmMsrOr64 (MSR_DEBUG_CTL, (MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR));
142 }
143
144 /**
145 Get CPU Index from APIC ID.
146
147 **/
148 UINTN
149 GetCpuIndex (
150 VOID
151 )
152 {
153 UINTN Index;
154 UINT32 ApicId;
155
156 ApicId = GetApicId ();
157
158 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
159 if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == ApicId) {
160 return Index;
161 }
162 }
163
164 ASSERT (FALSE);
165 return 0;
166 }
167
/**
  Get the source of IP after execute-disable exception is triggered.

  Walks this CPU's branch trace store (BTS) circular buffer backwards from
  the most recent record, looking for records whose LastBranchTo equals
  DestinationIP.  The first (newest) match is the branch into the DEBUG
  exception handler itself; the second match is the branch of interest.

  @param CpuIndex      The index of CPU.
  @param DestinationIP The destination address.

  @return The LastBranchFrom IP of the matching record, or 0 if no second
          match is found within mBTSRecordNumber records.

**/
UINT64
GetSourceFromDestinationOnBts (
  UINTN   CpuIndex,
  UINT64  DestinationIP
  )
{
  BRANCH_TRACE_RECORD  *CurrentBTSRecord;
  UINTN                Index;
  BOOLEAN              FirstMatch;

  FirstMatch = FALSE;

  //
  // BTSIndex holds the address the CPU would write next, so the records
  // immediately before it are the newest entries.
  //
  CurrentBTSRecord = (BRANCH_TRACE_RECORD *)mMsrDsArea[CpuIndex]->BTSIndex;
  for (Index = 0; Index < mBTSRecordNumber; Index++) {
    if ((UINTN)CurrentBTSRecord < (UINTN)mMsrBTSRecord[CpuIndex]) {
      //
      // Underflow: walked past the start of the buffer, wrap to the last
      // record.  BTSAbsoluteMaximum was set to one byte past the end of the
      // record area, so (BTSAbsoluteMaximum - 1) is the buffer end and the
      // following decrement lands on the last record.
      //
      CurrentBTSRecord = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[CpuIndex]->BTSAbsoluteMaximum - 1);
      CurrentBTSRecord--;
    }

    if (CurrentBTSRecord->LastBranchTo == DestinationIP) {
      //
      // Good! find 1st one, then find 2nd one.
      //
      if (!FirstMatch) {
        //
        // The first one is the branch into the DEBUG exception handler.
        //
        FirstMatch = TRUE;
      } else {
        //
        // Second match is the branch we are looking for.
        //
        return CurrentBTSRecord->LastBranchFrom;
      }
    }

    CurrentBTSRecord--;
  }

  return 0;
}
219
/**
  SMM profile specific INT 1 (single-step) exception handler.

  After the #PF handler temporarily modified page table entries so the
  faulting instruction could execute under single-step, this handler restores
  the saved original entry values, so the next access to those pages faults
  and is profiled again.

  @param InterruptType Defines the type of interrupt or exception that
                       occurred on the processor.This parameter is processor architecture specific.
  @param SystemContext A pointer to the processor context when
                       the interrupt occurred on the processor.
**/
VOID
EFIAPI
DebugExceptionHandler (
  IN EFI_EXCEPTION_TYPE  InterruptType,
  IN EFI_SYSTEM_CONTEXT  SystemContext
  )
{
  UINTN  CpuIndex;
  UINTN  PFEntry;

  //
  // Ignore the #DB unless SMM profile (or one of the non-stop guard modes)
  // is actively using the single-step trap.
  //
  if (!mSmmProfileStart &&
      !HEAP_GUARD_NONSTOP_MODE &&
      !NULL_DETECTION_NONSTOP_MODE)
  {
    return;
  }

  CpuIndex = GetCpuIndex ();

  //
  // Clear last PF entries: write the saved original values back to the
  // page table entry addresses recorded by the #PF handler.
  //
  for (PFEntry = 0; PFEntry < mPFEntryCount[CpuIndex]; PFEntry++) {
    *mLastPFEntryPointer[CpuIndex][PFEntry] = mLastPFEntryValue[CpuIndex][PFEntry];
  }

  //
  // Reset page fault exception count for next page fault.
  //
  mPFEntryCount[CpuIndex] = 0;

  //
  // Flush TLB so the restored (non-present / NX) entries take effect.
  //
  CpuFlushTlb ();

  //
  // Clear TF in EFLAGS to stop single-stepping.
  //
  ClearTrapFlag (SystemContext);
}
269
270 /**
271 Check if the input address is in SMM ranges.
272
273 @param[in] Address The input address.
274
275 @retval TRUE The input address is in SMM.
276 @retval FALSE The input address is not in SMM.
277 **/
278 BOOLEAN
279 IsInSmmRanges (
280 IN EFI_PHYSICAL_ADDRESS Address
281 )
282 {
283 UINTN Index;
284
285 if ((Address >= mCpuHotPlugData.SmrrBase) && (Address < mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
286 return TRUE;
287 }
288
289 for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
290 if ((Address >= mSmmCpuSmramRanges[Index].CpuStart) &&
291 (Address < mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize))
292 {
293 return TRUE;
294 }
295 }
296
297 return FALSE;
298 }
299
300 /**
301 Check if the memory address will be mapped by 4KB-page.
302
303 @param Address The address of Memory.
304 @param Nx The flag indicates if the memory is execute-disable.
305
306 **/
307 BOOLEAN
308 IsAddressValid (
309 IN EFI_PHYSICAL_ADDRESS Address,
310 IN BOOLEAN *Nx
311 )
312 {
313 UINTN Index;
314
315 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
316 //
317 // Check configuration
318 //
319 for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
320 if ((Address >= mProtectionMemRange[Index].Range.Base) && (Address < mProtectionMemRange[Index].Range.Top)) {
321 *Nx = mProtectionMemRange[Index].Nx;
322 return mProtectionMemRange[Index].Present;
323 }
324 }
325
326 *Nx = TRUE;
327 return FALSE;
328 } else {
329 *Nx = TRUE;
330 if (IsInSmmRanges (Address)) {
331 *Nx = FALSE;
332 }
333
334 return TRUE;
335 }
336 }
337
338 /**
339 Check if the memory address will be mapped by 4KB-page.
340
341 @param Address The address of Memory.
342
343 **/
344 BOOLEAN
345 IsAddressSplit (
346 IN EFI_PHYSICAL_ADDRESS Address
347 )
348 {
349 UINTN Index;
350
351 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
352 //
353 // Check configuration
354 //
355 for (Index = 0; Index < mSplitMemRangeCount; Index++) {
356 if ((Address >= mSplitMemRange[Index].Base) && (Address < mSplitMemRange[Index].Top)) {
357 return TRUE;
358 }
359 }
360 } else {
361 if (Address < mCpuHotPlugData.SmrrBase) {
362 if ((mCpuHotPlugData.SmrrBase - Address) < BASE_2MB) {
363 return TRUE;
364 }
365 } else if (Address > (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize - BASE_2MB)) {
366 if ((Address - (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize - BASE_2MB)) < BASE_2MB) {
367 return TRUE;
368 }
369 }
370 }
371
372 //
373 // Return default
374 //
375 return FALSE;
376 }
377
/**
  Initialize the protected memory ranges and the 4KB-page mapped memory ranges.

  Builds mProtectionMemRange from the template (SMRAM + SMM profile buffer)
  plus the extra SMRAM descriptors and the MMIO ranges reported by GCD, then
  derives mSplitMemRange: the 2MB-aligned regions that must be mapped with
  4KB pages because a protection boundary falls inside them.

**/
VOID
InitProtectedMemRange (
  VOID
  )
{
  UINTN                            Index;
  UINTN                            NumberOfDescriptors;
  UINTN                            NumberOfAddedDescriptors;
  UINTN                            NumberOfProtectRange;
  UINTN                            NumberOfSpliteRange;
  EFI_GCD_MEMORY_SPACE_DESCRIPTOR  *MemorySpaceMap;
  UINTN                            TotalSize;
  EFI_PHYSICAL_ADDRESS             ProtectBaseAddress;
  EFI_PHYSICAL_ADDRESS             ProtectEndAddress;
  EFI_PHYSICAL_ADDRESS             Top2MBAlignedAddress;
  EFI_PHYSICAL_ADDRESS             Base2MBAlignedAddress;
  UINT64                           High4KBPageSize;
  UINT64                           Low4KBPageSize;

  NumberOfDescriptors      = 0;
  NumberOfAddedDescriptors = mSmmCpuSmramRangeCount;
  NumberOfSpliteRange      = 0;
  MemorySpaceMap           = NULL;

  //
  // Get MMIO ranges from GCD and add them into protected memory ranges.
  // NOTE(review): the return status of GetMemorySpaceMap() is not checked;
  // on failure NumberOfDescriptors stays 0 and MMIO ranges are silently
  // skipped — confirm this is acceptable.
  //
  gDS->GetMemorySpaceMap (
         &NumberOfDescriptors,
         &MemorySpaceMap
         );
  for (Index = 0; Index < NumberOfDescriptors; Index++) {
    if (MemorySpaceMap[Index].GcdMemoryType == EfiGcdMemoryTypeMemoryMappedIo) {
      NumberOfAddedDescriptors++;
    }
  }

  if (NumberOfAddedDescriptors != 0) {
    //
    // Size the table for: template entries + SMRAM descriptors + MMIO ranges.
    //
    TotalSize           = NumberOfAddedDescriptors * sizeof (MEMORY_PROTECTION_RANGE) + sizeof (mProtectionMemRangeTemplate);
    mProtectionMemRange = (MEMORY_PROTECTION_RANGE *)AllocateZeroPool (TotalSize);
    ASSERT (mProtectionMemRange != NULL);
    mProtectionMemRangeCount = TotalSize / sizeof (MEMORY_PROTECTION_RANGE);

    //
    // Copy existing ranges.
    //
    CopyMem (mProtectionMemRange, mProtectionMemRangeTemplate, sizeof (mProtectionMemRangeTemplate));

    //
    // Create split ranges which come from protected ranges.
    //
    TotalSize      = (TotalSize / sizeof (MEMORY_PROTECTION_RANGE)) * sizeof (MEMORY_RANGE);
    mSplitMemRange = (MEMORY_RANGE *)AllocateZeroPool (TotalSize);
    ASSERT (mSplitMemRange != NULL);

    //
    // Create SMM ranges which are set to present and execution-enable.
    //
    NumberOfProtectRange = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);
    for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
      if ((mSmmCpuSmramRanges[Index].CpuStart >= mProtectionMemRange[0].Range.Base) &&
          (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize < mProtectionMemRange[0].Range.Top))
      {
        //
        // If the address has already been covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSiz
        //
        break;
      }

      mProtectionMemRange[NumberOfProtectRange].Range.Base = mSmmCpuSmramRanges[Index].CpuStart;
      mProtectionMemRange[NumberOfProtectRange].Range.Top  = mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize;
      mProtectionMemRange[NumberOfProtectRange].Present    = TRUE;
      mProtectionMemRange[NumberOfProtectRange].Nx         = FALSE;
      NumberOfProtectRange++;
    }

    //
    // Create MMIO ranges which are set to present and execution-disable.
    //
    for (Index = 0; Index < NumberOfDescriptors; Index++) {
      if (MemorySpaceMap[Index].GcdMemoryType != EfiGcdMemoryTypeMemoryMappedIo) {
        continue;
      }

      mProtectionMemRange[NumberOfProtectRange].Range.Base = MemorySpaceMap[Index].BaseAddress;
      mProtectionMemRange[NumberOfProtectRange].Range.Top  = MemorySpaceMap[Index].BaseAddress + MemorySpaceMap[Index].Length;
      mProtectionMemRange[NumberOfProtectRange].Present    = TRUE;
      mProtectionMemRange[NumberOfProtectRange].Nx         = TRUE;
      NumberOfProtectRange++;
    }

    //
    // Check and update actual protected memory ranges count
    //
    ASSERT (NumberOfProtectRange <= mProtectionMemRangeCount);
    mProtectionMemRangeCount = NumberOfProtectRange;
  }

  //
  // According to protected ranges, create the ranges which will be mapped by 4KB page.
  //
  NumberOfSpliteRange  = 0;
  NumberOfProtectRange = mProtectionMemRangeCount;
  for (Index = 0; Index < NumberOfProtectRange; Index++) {
    //
    // If MMIO base address is not 2MB alignment, make 2MB alignment for create 4KB page in page table.
    //
    ProtectBaseAddress = mProtectionMemRange[Index].Range.Base;
    ProtectEndAddress  = mProtectionMemRange[Index].Range.Top;
    if (((ProtectBaseAddress & (SIZE_2MB - 1)) != 0) || ((ProtectEndAddress & (SIZE_2MB - 1)) != 0)) {
      //
      // Check if it is possible to create 4KB-page for not 2MB-aligned range and to create 2MB-page for 2MB-aligned range.
      // A mix of 4KB and 2MB page could save SMRAM space.
      //
      Top2MBAlignedAddress  = ProtectEndAddress & ~(SIZE_2MB - 1);
      Base2MBAlignedAddress = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
      if ((Top2MBAlignedAddress > Base2MBAlignedAddress) &&
          ((Top2MBAlignedAddress - Base2MBAlignedAddress) >= SIZE_2MB))
      {
        //
        // There is a range which could be mapped by 2MB-page; only the
        // unaligned head and tail need 4KB pages.
        //
        High4KBPageSize = ((ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectEndAddress & ~(SIZE_2MB - 1));
        Low4KBPageSize  = ((ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectBaseAddress & ~(SIZE_2MB - 1));
        if (High4KBPageSize != 0) {
          //
          // Add not 2MB-aligned range to be mapped by 4KB-page.
          //
          mSplitMemRange[NumberOfSpliteRange].Base = ProtectEndAddress & ~(SIZE_2MB - 1);
          mSplitMemRange[NumberOfSpliteRange].Top  = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
          NumberOfSpliteRange++;
        }

        if (Low4KBPageSize != 0) {
          //
          // Add not 2MB-aligned range to be mapped by 4KB-page.
          //
          mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1);
          mSplitMemRange[NumberOfSpliteRange].Top  = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
          NumberOfSpliteRange++;
        }
      } else {
        //
        // The range could only be mapped by 4KB-page.
        //
        mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1);
        mSplitMemRange[NumberOfSpliteRange].Top  = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
        NumberOfSpliteRange++;
      }
    }
  }

  mSplitMemRangeCount = NumberOfSpliteRange;

  DEBUG ((DEBUG_INFO, "SMM Profile Memory Ranges:\n"));
  for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
    DEBUG ((DEBUG_INFO, "mProtectionMemRange[%d].Base = %lx\n", Index, mProtectionMemRange[Index].Range.Base));
    DEBUG ((DEBUG_INFO, "mProtectionMemRange[%d].Top  = %lx\n", Index, mProtectionMemRange[Index].Range.Top));
  }

  for (Index = 0; Index < mSplitMemRangeCount; Index++) {
    DEBUG ((DEBUG_INFO, "mSplitMemRange[%d].Base = %lx\n", Index, mSplitMemRange[Index].Base));
    DEBUG ((DEBUG_INFO, "mSplitMemRange[%d].Top  = %lx\n", Index, mSplitMemRange[Index].Top));
  }
}
547
/**
  Update page table according to protected memory ranges and the 4KB-page mapped memory ranges.

  First pass: split 2MB pages covering IsAddressSplit() regions into 4KB
  page tables.  Second pass: mark pages not covered by IsAddressValid() as
  not-present, and set the NX bit on execute-disabled regions.  Finally
  flushes the TLB and records that XD enforcement is active.

**/
VOID
InitPaging (
  VOID
  )
{
  UINT64    Pml5Entry;
  UINT64    Pml4Entry;
  UINT64    *Pml5;
  UINT64    *Pml4;
  UINT64    *Pdpt;
  UINT64    *Pd;
  UINT64    *Pt;
  UINTN     Address;
  UINTN     Pml5Index;
  UINTN     Pml4Index;
  UINTN     PdptIndex;
  UINTN     PdIndex;
  UINTN     PtIndex;
  UINTN     NumberOfPdptEntries;
  UINTN     NumberOfPml4Entries;
  UINTN     NumberOfPml5Entries;
  UINTN     SizeOfMemorySpace;
  BOOLEAN   Nx;
  IA32_CR4  Cr4;
  BOOLEAN   Enable5LevelPaging;

  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 == 1);

  if (sizeof (UINTN) == sizeof (UINT64)) {
    if (!Enable5LevelPaging) {
      //
      // Without 5-level paging, fabricate a one-entry PML5 pointing at the
      // real PML4 so the nested walk below is uniform.
      //
      Pml5Entry = (UINTN)mSmmProfileCr3 | IA32_PG_P;
      Pml5      = &Pml5Entry;
    } else {
      Pml5 = (UINT64 *)(UINTN)mSmmProfileCr3;
    }

    SizeOfMemorySpace = HighBitSet64 (gPhyMask) + 1;
    //
    // Calculate the table entries of PML4E and PDPTE.
    //
    NumberOfPml5Entries = 1;
    if (SizeOfMemorySpace > 48) {
      NumberOfPml5Entries = (UINTN)LShiftU64 (1, SizeOfMemorySpace - 48);
      SizeOfMemorySpace   = 48;
    }

    NumberOfPml4Entries = 1;
    if (SizeOfMemorySpace > 39) {
      NumberOfPml4Entries = (UINTN)LShiftU64 (1, SizeOfMemorySpace - 39);
      SizeOfMemorySpace   = 39;
    }

    NumberOfPdptEntries = 1;
    ASSERT (SizeOfMemorySpace > 30);
    NumberOfPdptEntries = (UINTN)LShiftU64 (1, SizeOfMemorySpace - 30);
  } else {
    //
    // IA-32 (PAE): CR3 points at a 4-entry PDPT; fabricate one-entry PML4
    // and PML5 levels so the same nested loops work.
    //
    Pml4Entry           = (UINTN)mSmmProfileCr3 | IA32_PG_P;
    Pml4                = &Pml4Entry;
    Pml5Entry           = (UINTN)Pml4 | IA32_PG_P;
    Pml5                = &Pml5Entry;
    NumberOfPml5Entries = 1;
    NumberOfPml4Entries = 1;
    NumberOfPdptEntries = 4;
  }

  //
  // Go through page table and change 2MB-page into 4KB-page.
  //
  for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries; Pml5Index++) {
    if ((Pml5[Pml5Index] & IA32_PG_P) == 0) {
      //
      // If PML5 entry does not exist, skip it
      //
      continue;
    }

    Pml4 = (UINT64 *)(UINTN)(Pml5[Pml5Index] & PHYSICAL_ADDRESS_MASK);
    for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) {
      if ((Pml4[Pml4Index] & IA32_PG_P) == 0) {
        //
        // If PML4 entry does not exist, skip it
        //
        continue;
      }

      Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++, Pdpt++) {
        if ((*Pdpt & IA32_PG_P) == 0) {
          //
          // If PDPT entry does not exist, skip it
          //
          continue;
        }

        if ((*Pdpt & IA32_PG_PS) != 0) {
          //
          // This is 1G entry, skip it
          //
          continue;
        }

        Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
        if (Pd == 0) {
          continue;
        }

        for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) {
          if ((*Pd & IA32_PG_P) == 0) {
            //
            // If PD entry does not exist, skip it
            //
            continue;
          }

          //
          // Reconstruct the 2MB-aligned physical address mapped by this PDE
          // from the walk indices.
          //
          Address = (UINTN)LShiftU64 (
                             LShiftU64 (
                               LShiftU64 ((Pml5Index << 9) + Pml4Index, 9) + PdptIndex,
                               9
                               ) + PdIndex,
                             21
                             );

          //
          // If it is 2M page, check IsAddressSplit()
          //
          if (((*Pd & IA32_PG_PS) != 0) && IsAddressSplit (Address)) {
            //
            // Based on current page table, create 4KB page table for split area.
            //
            ASSERT (Address == (*Pd & PHYSICAL_ADDRESS_MASK));

            Pt = AllocatePageTableMemory (1);
            ASSERT (Pt != NULL);

            // Split it
            for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof (*Pt); PtIndex++) {
              Pt[PtIndex] = Address + ((PtIndex << 12) | mAddressEncMask | PAGE_ATTRIBUTE_BITS);
            } // end for PT

            *Pd = (UINT64)(UINTN)Pt | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
          } // end if IsAddressSplit
        } // end for PD
      } // end for PDPT
    } // end for PML4
  } // end for PML5

  //
  // Go through page table and set several page table entries to absent or execute-disable.
  //
  DEBUG ((DEBUG_INFO, "Patch page table start ...\n"));
  for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries; Pml5Index++) {
    if ((Pml5[Pml5Index] & IA32_PG_P) == 0) {
      //
      // If PML5 entry does not exist, skip it
      //
      continue;
    }

    Pml4 = (UINT64 *)(UINTN)(Pml5[Pml5Index] & PHYSICAL_ADDRESS_MASK);
    for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) {
      if ((Pml4[Pml4Index] & IA32_PG_P) == 0) {
        //
        // If PML4 entry does not exist, skip it
        //
        continue;
      }

      Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++, Pdpt++) {
        if ((*Pdpt & IA32_PG_P) == 0) {
          //
          // If PDPT entry does not exist, skip it
          //
          continue;
        }

        if ((*Pdpt & IA32_PG_PS) != 0) {
          //
          // This is 1G entry, set NX bit and skip it
          //
          if (mXdSupported) {
            *Pdpt = *Pdpt | IA32_PG_NX;
          }

          continue;
        }

        Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
        if (Pd == 0) {
          continue;
        }

        for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) {
          if ((*Pd & IA32_PG_P) == 0) {
            //
            // If PD entry does not exist, skip it
            //
            continue;
          }

          Address = (UINTN)LShiftU64 (
                             LShiftU64 (
                               LShiftU64 ((Pml5Index << 9) + Pml4Index, 9) + PdptIndex,
                               9
                               ) + PdIndex,
                             21
                             );

          if ((*Pd & IA32_PG_PS) != 0) {
            // 2MB page

            if (!IsAddressValid (Address, &Nx)) {
              //
              // Patch to remove Present flag and RW flag
              //
              *Pd = *Pd & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
            }

            if (Nx && mXdSupported) {
              *Pd = *Pd | IA32_PG_NX;
            }
          } else {
            // 4KB page
            Pt = (UINT64 *)(UINTN)(*Pd & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
            if (Pt == 0) {
              continue;
            }

            for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof (*Pt); PtIndex++, Pt++) {
              if (!IsAddressValid (Address, &Nx)) {
                *Pt = *Pt & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
              }

              if (Nx && mXdSupported) {
                *Pt = *Pt | IA32_PG_NX;
              }

              Address += SIZE_4KB;
            } // end for PT
          } // end if PS
        } // end for PD
      } // end for PDPT
    } // end for PML4
  } // end for PML5

  //
  // Flush TLB
  //
  CpuFlushTlb ();
  DEBUG ((DEBUG_INFO, "Patch page table done!\n"));
  //
  // Set execute-disable flag
  //
  mXdEnabled = TRUE;

  return;
}
810
811 /**
812 To get system port address of the SMI Command Port in FADT table.
813
814 **/
815 VOID
816 GetSmiCommandPort (
817 VOID
818 )
819 {
820 EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *Fadt;
821
822 Fadt = (EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *)EfiLocateFirstAcpiTable (
823 EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE
824 );
825 ASSERT (Fadt != NULL);
826
827 mSmiCommandPort = Fadt->SmiCmd;
828 DEBUG ((DEBUG_INFO, "mSmiCommandPort = %x\n", mSmiCommandPort));
829 }
830
/**
  Updates page table to make some memory ranges (like system memory) absent
  and make some memory ranges (like MMIO) present and execute disable. It also
  update 2MB-page to 4KB-page for some memory ranges.

  NOTE: the page table patching itself is done elsewhere; this function only
  raises the flag that turns on profile recording in the exception handlers.

**/
VOID
SmmProfileStart (
  VOID
  )
{
  //
  // The flag indicates SMM profile starts to work.
  //
  mSmmProfileStart = TRUE;
}
847
/**
  Initialize SMM profile in SmmReadyToLock protocol callback function.

  Publishes the profile buffer address in a UEFI variable, caches the SMI
  command port from the FADT, and builds the protected memory ranges used
  later to patch the page tables.

  @param Protocol   Points to the protocol's unique identifier.
  @param Interface  Points to the interface instance.
  @param Handle     The handle on which the interface was installed.

  @retval EFI_SUCCESS SmmReadyToLock protocol callback runs successfully.
**/
EFI_STATUS
EFIAPI
InitSmmProfileCallBack (
  IN CONST EFI_GUID  *Protocol,
  IN VOID            *Interface,
  IN EFI_HANDLE      Handle
  )
{
  //
  // Save to variable so that SMM profile data can be found.
  // NOTE(review): the SetVariable() return status is ignored — on failure
  // the OS-side consumer simply cannot locate the buffer; confirm this
  // best-effort behavior is intended.
  //
  gRT->SetVariable (
         SMM_PROFILE_NAME,
         &gEfiCallerIdGuid,
         EFI_VARIABLE_BOOTSERVICE_ACCESS | EFI_VARIABLE_RUNTIME_ACCESS,
         sizeof (mSmmProfileBase),
         &mSmmProfileBase
         );

  //
  // Get Software SMI from FADT
  //
  GetSmiCommandPort ();

  //
  // Initialize protected memory range for patching page table later.
  //
  InitProtectedMemRange ();

  return EFI_SUCCESS;
}
888
/**
  Initialize SMM profile data structures.

  Allocates the per-CPU page-fault restore arrays, the SMM profile data
  buffer (reserved memory below 4GB) and, when BTS is supported, carves the
  per-CPU Debug Store areas out of the same allocation.  Registers a
  SmmReadyToLock notification (InitSmmProfileCallBack) to finish setup.

**/
VOID
InitSmmProfileInternal (
  VOID
  )
{
  EFI_STATUS            Status;
  EFI_PHYSICAL_ADDRESS  Base;
  VOID                  *Registration;
  UINTN                 Index;
  UINTN                 MsrDsAreaSizePerCpu;
  UINTN                 TotalSize;

  //
  // Per-CPU state used by the #PF/#DB handlers to restore patched page
  // table entries after single-stepping.
  //
  mPFEntryCount = (UINTN *)AllocateZeroPool (sizeof (UINTN) * mMaxNumberOfCpus);
  ASSERT (mPFEntryCount != NULL);
  mLastPFEntryValue = (UINT64 (*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (
                                                        sizeof (mLastPFEntryValue[0]) * mMaxNumberOfCpus
                                                        );
  ASSERT (mLastPFEntryValue != NULL);
  mLastPFEntryPointer = (UINT64 *(*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (
                                                           sizeof (mLastPFEntryPointer[0]) * mMaxNumberOfCpus
                                                           );
  ASSERT (mLastPFEntryPointer != NULL);

  //
  // Allocate memory for SmmProfile below 4GB.
  // The profile size must be 4KB aligned.
  //
  mSmmProfileSize = PcdGet32 (PcdCpuSmmProfileSize);
  ASSERT ((mSmmProfileSize & 0xFFF) == 0);

  if (mBtsSupported) {
    TotalSize = mSmmProfileSize + mMsrDsAreaSize;
  } else {
    TotalSize = mSmmProfileSize;
  }

  Base   = 0xFFFFFFFF;
  Status = gBS->AllocatePages (
                  AllocateMaxAddress,
                  EfiReservedMemoryType,
                  EFI_SIZE_TO_PAGES (TotalSize),
                  &Base
                  );
  ASSERT_EFI_ERROR (Status);
  ZeroMem ((VOID *)(UINTN)Base, TotalSize);
  mSmmProfileBase = (SMM_PROFILE_HEADER *)(UINTN)Base;

  //
  // Initialize SMM profile data header.
  //
  mSmmProfileBase->HeaderSize     = sizeof (SMM_PROFILE_HEADER);
  mSmmProfileBase->MaxDataEntries = (UINT64)((mSmmProfileSize - sizeof (SMM_PROFILE_HEADER)) / sizeof (SMM_PROFILE_ENTRY));
  mSmmProfileBase->MaxDataSize    = MultU64x64 (mSmmProfileBase->MaxDataEntries, sizeof (SMM_PROFILE_ENTRY));
  mSmmProfileBase->CurDataEntries = 0;
  mSmmProfileBase->CurDataSize    = 0;
  mSmmProfileBase->TsegStart      = mCpuHotPlugData.SmrrBase;
  mSmmProfileBase->TsegSize       = mCpuHotPlugData.SmrrSize;
  mSmmProfileBase->NumSmis        = 0;
  mSmmProfileBase->NumCpus        = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;

  if (mBtsSupported) {
    //
    // Lay out one DS area per CPU inside the tail of the allocation:
    // [MSR_DS_AREA_STRUCT][BTS records...][PEBS records] per CPU.
    //
    mMsrDsArea = (MSR_DS_AREA_STRUCT **)AllocateZeroPool (sizeof (MSR_DS_AREA_STRUCT *) * mMaxNumberOfCpus);
    ASSERT (mMsrDsArea != NULL);
    mMsrBTSRecord = (BRANCH_TRACE_RECORD **)AllocateZeroPool (sizeof (BRANCH_TRACE_RECORD *) * mMaxNumberOfCpus);
    ASSERT (mMsrBTSRecord != NULL);
    mMsrPEBSRecord = (PEBS_RECORD **)AllocateZeroPool (sizeof (PEBS_RECORD *) * mMaxNumberOfCpus);
    ASSERT (mMsrPEBSRecord != NULL);

    mMsrDsAreaBase      = (MSR_DS_AREA_STRUCT *)((UINTN)Base + mSmmProfileSize);
    MsrDsAreaSizePerCpu = mMsrDsAreaSize / mMaxNumberOfCpus;
    mBTSRecordNumber    = (MsrDsAreaSizePerCpu - sizeof (PEBS_RECORD) * PEBS_RECORD_NUMBER - sizeof (MSR_DS_AREA_STRUCT)) / sizeof (BRANCH_TRACE_RECORD);
    for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
      mMsrDsArea[Index]     = (MSR_DS_AREA_STRUCT *)((UINTN)mMsrDsAreaBase + MsrDsAreaSizePerCpu * Index);
      mMsrBTSRecord[Index]  = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[Index] + sizeof (MSR_DS_AREA_STRUCT));
      mMsrPEBSRecord[Index] = (PEBS_RECORD *)((UINTN)mMsrDsArea[Index] + MsrDsAreaSizePerCpu - sizeof (PEBS_RECORD) * PEBS_RECORD_NUMBER);

      //
      // AbsoluteMaximum is one byte past the record area; Threshold beyond
      // that, so no buffer-overflow interrupt is ever raised (circular use).
      //
      mMsrDsArea[Index]->BTSBufferBase          = (UINTN)mMsrBTSRecord[Index];
      mMsrDsArea[Index]->BTSIndex               = mMsrDsArea[Index]->BTSBufferBase;
      mMsrDsArea[Index]->BTSAbsoluteMaximum     = mMsrDsArea[Index]->BTSBufferBase + mBTSRecordNumber * sizeof (BRANCH_TRACE_RECORD) + 1;
      mMsrDsArea[Index]->BTSInterruptThreshold  = mMsrDsArea[Index]->BTSAbsoluteMaximum + 1;

      mMsrDsArea[Index]->PEBSBufferBase         = (UINTN)mMsrPEBSRecord[Index];
      mMsrDsArea[Index]->PEBSIndex              = mMsrDsArea[Index]->PEBSBufferBase;
      mMsrDsArea[Index]->PEBSAbsoluteMaximum    = mMsrDsArea[Index]->PEBSBufferBase + PEBS_RECORD_NUMBER * sizeof (PEBS_RECORD) + 1;
      mMsrDsArea[Index]->PEBSInterruptThreshold = mMsrDsArea[Index]->PEBSAbsoluteMaximum + 1;
    }
  }

  //
  // Start from the static template; InitProtectedMemRange() replaces this
  // with a dynamically sized table later.
  //
  mProtectionMemRange      = mProtectionMemRangeTemplate;
  mProtectionMemRangeCount = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);

  //
  // Update TSeg entry.
  //
  mProtectionMemRange[0].Range.Base = mCpuHotPlugData.SmrrBase;
  mProtectionMemRange[0].Range.Top  = mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize;

  //
  // Update SMM profile entry.
  //
  mProtectionMemRange[1].Range.Base = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase;
  mProtectionMemRange[1].Range.Top  = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase + TotalSize;

  //
  // Allocate memory reserved for creating 4KB pages.
  //
  InitPagesForPFHandler ();

  //
  // Start SMM profile when SmmReadyToLock protocol is installed.
  //
  Status = gSmst->SmmRegisterProtocolNotify (
                    &gEfiSmmReadyToLockProtocolGuid,
                    InitSmmProfileCallBack,
                    &Registration
                    );
  ASSERT_EFI_ERROR (Status);

  return;
}
1013
/**
  Check if feature is supported by a processor.

  Probes CPUID leaves and MSRs and clears the capability flags
  (mCetSupported, mXdSupported, mBtsSupported) for features the current
  processor lacks, patching the corresponding assembly stubs where needed.

**/
VOID
CheckFeatureSupported (
  VOID
  )
{
  UINT32                         RegEax;
  UINT32                         RegEcx;
  UINT32                         RegEdx;
  MSR_IA32_MISC_ENABLE_REGISTER  MiscEnableMsr;

  if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
    //
    // CET shadow stack requires CPUID.(EAX=7,ECX=0):ECX[CET_SS].
    //
    AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);
    if (RegEax >= CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS) {
      AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, NULL);
      if ((RegEcx & CPUID_CET_SS) == 0) {
        mCetSupported = FALSE;
        PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
      }
    } else {
      //
      // Structured extended feature leaf not available at all.
      //
      mCetSupported = FALSE;
      PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
    }
  }

  if (mXdSupported) {
    AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
    if (RegEax <= CPUID_EXTENDED_FUNCTION) {
      //
      // Extended CPUID functions are not supported on this processor.
      //
      mXdSupported = FALSE;
      PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
    }

    AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & CPUID1_EDX_XD_SUPPORT) == 0) {
      //
      // Execute Disable Bit feature is not supported on this processor.
      //
      mXdSupported = FALSE;
      PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
    }

    if (StandardSignatureIsAuthenticAMD ()) {
      //
      // AMD processors do not support MSR_IA32_MISC_ENABLE
      //
      PatchInstructionX86 (gPatchMsrIa32MiscEnableSupported, FALSE, 1);
    }
  }

  if (mBtsSupported) {
    //
    // NOTE(review): if CPUID.1:EDX[21] is clear, mBtsSupported is left TRUE
    // here — confirm whether that path should also clear the flag.
    //
    AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & CPUID1_EDX_BTS_AVAILABLE) != 0) {
      //
      // Per IA32 manuals:
      // When CPUID.1:EDX[21] is set, the following BTS facilities are available:
      // 1. The BTS_UNAVAILABLE flag in the IA32_MISC_ENABLE MSR indicates the
      //    availability of the BTS facilities, including the ability to set the BTS and
      //    BTINT bits in the MSR_DEBUGCTLA MSR.
      // 2. The IA32_DS_AREA MSR can be programmed to point to the DS save area.
      //
      MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);
      if (MiscEnableMsr.Bits.BTS == 1) {
        //
        // BTS facilities is not supported if MSR_IA32_MISC_ENABLE.BTS bit is set.
        //
        mBtsSupported = FALSE;
      }
    }
  }
}
1090
1091 /**
1092 Enable single step.
1093
1094 **/
1095 VOID
1096 ActivateSingleStepDB (
1097 VOID
1098 )
1099 {
1100 UINTN Dr6;
1101
1102 Dr6 = AsmReadDr6 ();
1103 if ((Dr6 & DR6_SINGLE_STEP) != 0) {
1104 return;
1105 }
1106
1107 Dr6 |= DR6_SINGLE_STEP;
1108 AsmWriteDr6 (Dr6);
1109 }
1110
1111 /**
1112 Enable last branch.
1113
1114 **/
1115 VOID
1116 ActivateLBR (
1117 VOID
1118 )
1119 {
1120 UINT64 DebugCtl;
1121
1122 DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);
1123 if ((DebugCtl & MSR_DEBUG_CTL_LBR) != 0) {
1124 return;
1125 }
1126
1127 DebugCtl |= MSR_DEBUG_CTL_LBR;
1128 AsmWriteMsr64 (MSR_DEBUG_CTL, DebugCtl);
1129 }
1130
1131 /**
1132 Enable branch trace store.
1133
1134 @param CpuIndex The index of the processor.
1135
1136 **/
1137 VOID
1138 ActivateBTS (
1139 IN UINTN CpuIndex
1140 )
1141 {
1142 UINT64 DebugCtl;
1143
1144 DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);
1145 if ((DebugCtl & MSR_DEBUG_CTL_BTS) != 0) {
1146 return;
1147 }
1148
1149 AsmWriteMsr64 (MSR_DS_AREA, (UINT64)(UINTN)mMsrDsArea[CpuIndex]);
1150 DebugCtl |= (UINT64)(MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR);
1151 DebugCtl &= ~((UINT64)MSR_DEBUG_CTL_BTINT);
1152 AsmWriteMsr64 (MSR_DEBUG_CTL, DebugCtl);
1153 }
1154
1155 /**
1156 Increase SMI number in each SMI entry.
1157
1158 **/
1159 VOID
1160 SmmProfileRecordSmiNum (
1161 VOID
1162 )
1163 {
1164 if (mSmmProfileStart) {
1165 mSmmProfileBase->NumSmis++;
1166 }
1167 }
1168
1169 /**
1170 Initialize processor environment for SMM profile.
1171
1172 @param CpuIndex The index of the processor.
1173
1174 **/
1175 VOID
1176 ActivateSmmProfile (
1177 IN UINTN CpuIndex
1178 )
1179 {
1180 //
1181 // Enable Single Step DB#
1182 //
1183 ActivateSingleStepDB ();
1184
1185 if (mBtsSupported) {
1186 //
1187 // We can not get useful information from LER, so we have to use BTS.
1188 //
1189 ActivateLBR ();
1190
1191 //
1192 // Enable BTS
1193 //
1194 ActivateBTS (CpuIndex);
1195 }
1196 }
1197
1198 /**
1199 Initialize SMM profile in SMM CPU entry point.
1200
1201 @param[in] Cr3 The base address of the page tables to use in SMM.
1202
1203 **/
1204 VOID
1205 InitSmmProfile (
1206 UINT32 Cr3
1207 )
1208 {
1209 //
1210 // Save Cr3
1211 //
1212 mSmmProfileCr3 = Cr3;
1213
1214 //
1215 // Skip SMM profile initialization if feature is disabled
1216 //
1217 if (!FeaturePcdGet (PcdCpuSmmProfileEnable) &&
1218 !HEAP_GUARD_NONSTOP_MODE &&
1219 !NULL_DETECTION_NONSTOP_MODE)
1220 {
1221 return;
1222 }
1223
1224 //
1225 // Initialize SmmProfile here
1226 //
1227 InitSmmProfileInternal ();
1228
1229 //
1230 // Initialize profile IDT.
1231 //
1232 InitIdtr ();
1233
1234 //
1235 // Tell #PF handler to prepare a #DB subsequently.
1236 //
1237 mSetupDebugTrap = TRUE;
1238 }
1239
1240 /**
1241 Update page table to map the memory correctly in order to make the instruction
1242 which caused page fault execute successfully. And it also save the original page
1243 table to be restored in single-step exception.
1244
1245 @param PageTable PageTable Address.
1246 @param PFAddress The memory address which caused page fault exception.
1247 @param CpuIndex The index of the processor.
1248 @param ErrorCode The Error code of exception.
1249
1250 **/
1251 VOID
1252 RestorePageTableBelow4G (
1253 UINT64 *PageTable,
1254 UINT64 PFAddress,
1255 UINTN CpuIndex,
1256 UINTN ErrorCode
1257 )
1258 {
1259 UINTN PTIndex;
1260 UINTN PFIndex;
1261 IA32_CR4 Cr4;
1262 BOOLEAN Enable5LevelPaging;
1263
1264 Cr4.UintN = AsmReadCr4 ();
1265 Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 == 1);
1266
1267 //
1268 // PML5
1269 //
1270 if (Enable5LevelPaging) {
1271 PTIndex = (UINTN)BitFieldRead64 (PFAddress, 48, 56);
1272 ASSERT (PageTable[PTIndex] != 0);
1273 PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
1274 }
1275
1276 //
1277 // PML4
1278 //
1279 if (sizeof (UINT64) == sizeof (UINTN)) {
1280 PTIndex = (UINTN)BitFieldRead64 (PFAddress, 39, 47);
1281 ASSERT (PageTable[PTIndex] != 0);
1282 PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
1283 }
1284
1285 //
1286 // PDPTE
1287 //
1288 PTIndex = (UINTN)BitFieldRead64 (PFAddress, 30, 38);
1289 ASSERT (PageTable[PTIndex] != 0);
1290 PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
1291
1292 //
1293 // PD
1294 //
1295 PTIndex = (UINTN)BitFieldRead64 (PFAddress, 21, 29);
1296 if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
1297 //
1298 // Large page
1299 //
1300
1301 //
1302 // Record old entries with non-present status
1303 // Old entries include the memory which instruction is at and the memory which instruction access.
1304 //
1305 //
1306 ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
1307 if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
1308 PFIndex = mPFEntryCount[CpuIndex];
1309 mLastPFEntryValue[CpuIndex][PFIndex] = PageTable[PTIndex];
1310 mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
1311 mPFEntryCount[CpuIndex]++;
1312 }
1313
1314 //
1315 // Set new entry
1316 //
1317 PageTable[PTIndex] = (PFAddress & ~((1ull << 21) - 1));
1318 PageTable[PTIndex] |= (UINT64)IA32_PG_PS;
1319 PageTable[PTIndex] |= (UINT64)PAGE_ATTRIBUTE_BITS;
1320 if ((ErrorCode & IA32_PF_EC_ID) != 0) {
1321 PageTable[PTIndex] &= ~IA32_PG_NX;
1322 }
1323 } else {
1324 //
1325 // Small page
1326 //
1327 ASSERT (PageTable[PTIndex] != 0);
1328 PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
1329
1330 //
1331 // 4K PTE
1332 //
1333 PTIndex = (UINTN)BitFieldRead64 (PFAddress, 12, 20);
1334
1335 //
1336 // Record old entries with non-present status
1337 // Old entries include the memory which instruction is at and the memory which instruction access.
1338 //
1339 //
1340 ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
1341 if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
1342 PFIndex = mPFEntryCount[CpuIndex];
1343 mLastPFEntryValue[CpuIndex][PFIndex] = PageTable[PTIndex];
1344 mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
1345 mPFEntryCount[CpuIndex]++;
1346 }
1347
1348 //
1349 // Set new entry
1350 //
1351 PageTable[PTIndex] = (PFAddress & ~((1ull << 12) - 1));
1352 PageTable[PTIndex] |= (UINT64)PAGE_ATTRIBUTE_BITS;
1353 if ((ErrorCode & IA32_PF_EC_ID) != 0) {
1354 PageTable[PTIndex] &= ~IA32_PG_NX;
1355 }
1356 }
1357 }
1358
1359 /**
1360 Handler for Page Fault triggered by Guard page.
1361
1362 @param ErrorCode The Error code of exception.
1363
1364 **/
1365 VOID
1366 GuardPagePFHandler (
1367 UINTN ErrorCode
1368 )
1369 {
1370 UINT64 *PageTable;
1371 UINT64 PFAddress;
1372 UINT64 RestoreAddress;
1373 UINTN RestorePageNumber;
1374 UINTN CpuIndex;
1375
1376 PageTable = (UINT64 *)AsmReadCr3 ();
1377 PFAddress = AsmReadCr2 ();
1378 CpuIndex = GetCpuIndex ();
1379
1380 //
1381 // Memory operation cross pages, like "rep mov" instruction, will cause
1382 // infinite loop between this and Debug Trap handler. We have to make sure
1383 // that current page and the page followed are both in PRESENT state.
1384 //
1385 RestorePageNumber = 2;
1386 RestoreAddress = PFAddress;
1387 while (RestorePageNumber > 0) {
1388 RestorePageTableBelow4G (PageTable, RestoreAddress, CpuIndex, ErrorCode);
1389 RestoreAddress += EFI_PAGE_SIZE;
1390 RestorePageNumber--;
1391 }
1392
1393 //
1394 // Flush TLB
1395 //
1396 CpuFlushTlb ();
1397 }
1398
1399 /**
1400 The Page fault handler to save SMM profile data.
1401
1402 @param Rip The RIP when exception happens.
1403 @param ErrorCode The Error code of exception.
1404
1405 **/
1406 VOID
1407 SmmProfilePFHandler (
1408 UINTN Rip,
1409 UINTN ErrorCode
1410 )
1411 {
1412 UINT64 *PageTable;
1413 UINT64 PFAddress;
1414 UINT64 RestoreAddress;
1415 UINTN RestorePageNumber;
1416 UINTN CpuIndex;
1417 UINTN Index;
1418 UINT64 InstructionAddress;
1419 UINTN MaxEntryNumber;
1420 UINTN CurrentEntryNumber;
1421 BOOLEAN IsValidPFAddress;
1422 SMM_PROFILE_ENTRY *SmmProfileEntry;
1423 UINT64 SmiCommand;
1424 EFI_STATUS Status;
1425 UINT8 SoftSmiValue;
1426 EFI_SMM_SAVE_STATE_IO_INFO IoInfo;
1427
1428 if (!mSmmProfileStart) {
1429 //
1430 // If SMM profile does not start, call original page fault handler.
1431 //
1432 SmiDefaultPFHandler ();
1433 return;
1434 }
1435
1436 if (mBtsSupported) {
1437 DisableBTS ();
1438 }
1439
1440 IsValidPFAddress = FALSE;
1441 PageTable = (UINT64 *)AsmReadCr3 ();
1442 PFAddress = AsmReadCr2 ();
1443 CpuIndex = GetCpuIndex ();
1444
1445 //
1446 // Memory operation cross pages, like "rep mov" instruction, will cause
1447 // infinite loop between this and Debug Trap handler. We have to make sure
1448 // that current page and the page followed are both in PRESENT state.
1449 //
1450 RestorePageNumber = 2;
1451 RestoreAddress = PFAddress;
1452 while (RestorePageNumber > 0) {
1453 if (RestoreAddress <= 0xFFFFFFFF) {
1454 RestorePageTableBelow4G (PageTable, RestoreAddress, CpuIndex, ErrorCode);
1455 } else {
1456 RestorePageTableAbove4G (PageTable, RestoreAddress, CpuIndex, ErrorCode, &IsValidPFAddress);
1457 }
1458
1459 RestoreAddress += EFI_PAGE_SIZE;
1460 RestorePageNumber--;
1461 }
1462
1463 if (!IsValidPFAddress) {
1464 InstructionAddress = Rip;
1465 if (((ErrorCode & IA32_PF_EC_ID) != 0) && (mBtsSupported)) {
1466 //
1467 // If it is instruction fetch failure, get the correct IP from BTS.
1468 //
1469 InstructionAddress = GetSourceFromDestinationOnBts (CpuIndex, Rip);
1470 if (InstructionAddress == 0) {
1471 //
1472 // It indicates the instruction which caused page fault is not a jump instruction,
1473 // set instruction address same as the page fault address.
1474 //
1475 InstructionAddress = PFAddress;
1476 }
1477 }
1478
1479 //
1480 // Indicate it is not software SMI
1481 //
1482 SmiCommand = 0xFFFFFFFFFFFFFFFFULL;
1483 for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
1484 Status = SmmReadSaveState (&mSmmCpu, sizeof (IoInfo), EFI_SMM_SAVE_STATE_REGISTER_IO, Index, &IoInfo);
1485 if (EFI_ERROR (Status)) {
1486 continue;
1487 }
1488
1489 if (IoInfo.IoPort == mSmiCommandPort) {
1490 //
1491 // A software SMI triggered by SMI command port has been found, get SmiCommand from SMI command port.
1492 //
1493 SoftSmiValue = IoRead8 (mSmiCommandPort);
1494 SmiCommand = (UINT64)SoftSmiValue;
1495 break;
1496 }
1497 }
1498
1499 SmmProfileEntry = (SMM_PROFILE_ENTRY *)(UINTN)(mSmmProfileBase + 1);
1500 //
1501 // Check if there is already a same entry in profile data.
1502 //
1503 for (Index = 0; Index < (UINTN)mSmmProfileBase->CurDataEntries; Index++) {
1504 if ((SmmProfileEntry[Index].ErrorCode == (UINT64)ErrorCode) &&
1505 (SmmProfileEntry[Index].Address == PFAddress) &&
1506 (SmmProfileEntry[Index].CpuNum == (UINT64)CpuIndex) &&
1507 (SmmProfileEntry[Index].Instruction == InstructionAddress) &&
1508 (SmmProfileEntry[Index].SmiCmd == SmiCommand))
1509 {
1510 //
1511 // Same record exist, need not save again.
1512 //
1513 break;
1514 }
1515 }
1516
1517 if (Index == mSmmProfileBase->CurDataEntries) {
1518 CurrentEntryNumber = (UINTN)mSmmProfileBase->CurDataEntries;
1519 MaxEntryNumber = (UINTN)mSmmProfileBase->MaxDataEntries;
1520 if (FeaturePcdGet (PcdCpuSmmProfileRingBuffer)) {
1521 CurrentEntryNumber = CurrentEntryNumber % MaxEntryNumber;
1522 }
1523
1524 if (CurrentEntryNumber < MaxEntryNumber) {
1525 //
1526 // Log the new entry
1527 //
1528 SmmProfileEntry[CurrentEntryNumber].SmiNum = mSmmProfileBase->NumSmis;
1529 SmmProfileEntry[CurrentEntryNumber].ErrorCode = (UINT64)ErrorCode;
1530 SmmProfileEntry[CurrentEntryNumber].ApicId = (UINT64)GetApicId ();
1531 SmmProfileEntry[CurrentEntryNumber].CpuNum = (UINT64)CpuIndex;
1532 SmmProfileEntry[CurrentEntryNumber].Address = PFAddress;
1533 SmmProfileEntry[CurrentEntryNumber].Instruction = InstructionAddress;
1534 SmmProfileEntry[CurrentEntryNumber].SmiCmd = SmiCommand;
1535 //
1536 // Update current entry index and data size in the header.
1537 //
1538 mSmmProfileBase->CurDataEntries++;
1539 mSmmProfileBase->CurDataSize = MultU64x64 (mSmmProfileBase->CurDataEntries, sizeof (SMM_PROFILE_ENTRY));
1540 }
1541 }
1542 }
1543
1544 //
1545 // Flush TLB
1546 //
1547 CpuFlushTlb ();
1548
1549 if (mBtsSupported) {
1550 EnableBTS ();
1551 }
1552 }
1553
1554 /**
1555 Replace INT1 exception handler to restore page table to absent/execute-disable state
1556 in order to trigger page fault again to save SMM profile data..
1557
1558 **/
1559 VOID
1560 InitIdtr (
1561 VOID
1562 )
1563 {
1564 EFI_STATUS Status;
1565
1566 Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_DEBUG, DebugExceptionHandler);
1567 ASSERT_EFI_ERROR (Status);
1568 }