]> git.proxmox.com Git - mirror_edk2.git/blob - UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
UefiCpuPkg: Move AsmRelocateApLoopStart from Mpfuncs.nasm to AmdSev.nasm
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / SmmProfile.c
1 /** @file
2 Enable SMM profile.
3
4 Copyright (c) 2012 - 2023, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017 - 2020, AMD Incorporated. All rights reserved.<BR>
6
7 SPDX-License-Identifier: BSD-2-Clause-Patent
8
9 **/
10
11 #include "PiSmmCpuDxeSmm.h"
12 #include "SmmProfileInternal.h"
13
//
// CR3 value (page table root) used while SMM profile is enabled.
// Saved by InitSmmProfile() and consumed by InitPaging().
//
UINT32  mSmmProfileCr3;

//
// The buffer to store SMM profile data (header followed by entries).
//
SMM_PROFILE_HEADER  *mSmmProfileBase;

//
// Base of the Debug Store (DS) save areas, placed immediately after the
// SMM profile buffer and carved into one slice per CPU.
//
MSR_DS_AREA_STRUCT  *mMsrDsAreaBase;

//
// The size of the buffer to store SMM profile data.
//
UINTN  mSmmProfileSize;

//
// The size of the buffer used to enable branch trace store.
//
UINTN  mMsrDsAreaSize = SMM_PROFILE_DTS_SIZE;

//
// The flag indicates if execute-disable is supported by processor.
// Assumed TRUE until CheckFeatureSupported() proves otherwise.
//
BOOLEAN  mXdSupported = TRUE;

//
// The flag indicates if execute-disable is enabled on processor.
// Set TRUE at the end of InitPaging().
//
BOOLEAN  mXdEnabled = FALSE;

//
// The flag indicates if BTS is supported by processor.
// Assumed TRUE until CheckFeatureSupported() proves otherwise.
//
BOOLEAN  mBtsSupported = TRUE;

//
// The flag indicates if SMM profile starts to record data.
//
BOOLEAN  mSmmProfileStart = FALSE;

//
// The flag indicates if #DB will be setup in #PF handler.
//
BOOLEAN  mSetupDebugTrap = FALSE;

//
// Record the page fault exception count for one instruction execution,
// one counter per CPU.
//
UINTN  *mPFEntryCount;

//
// Saved page table entry values and their locations, one array per CPU.
// Presumably filled by the #PF handler (not in this view) and restored by
// DebugExceptionHandler() — confirm against SmmProfilePFHandler.
//
UINT64  (*mLastPFEntryValue)[MAX_PF_ENTRY_COUNT];
UINT64  *(*mLastPFEntryPointer)[MAX_PF_ENTRY_COUNT];

//
// Per-CPU pointers into the DS save area: DS header, BTS record buffer,
// and PEBS record buffer (laid out by InitSmmProfileInternal()).
//
MSR_DS_AREA_STRUCT   **mMsrDsArea;
BRANCH_TRACE_RECORD  **mMsrBTSRecord;
UINTN                mBTSRecordNumber;
PEBS_RECORD          **mMsrPEBSRecord;

//
// These memory ranges are always present, they do not generate the access type of page fault exception,
// but they possibly generate instruction fetch type of page fault exception.
//
MEMORY_PROTECTION_RANGE  *mProtectionMemRange     = NULL;
UINTN                    mProtectionMemRangeCount = 0;

//
// Some predefined memory ranges.
//
MEMORY_PROTECTION_RANGE  mProtectionMemRangeTemplate[] = {
  //
  // SMRAM range (to be fixed in runtime).
  // It is always present and instruction fetches are allowed.
  //
  {
    { 0x00000000, 0x00000000 }, TRUE, FALSE
  },

  //
  // SMM profile data range( to be fixed in runtime).
  // It is always present and instruction fetches are not allowed.
  //
  {
    { 0x00000000, 0x00000000 }, TRUE, TRUE
  },

  //
  // SMRAM ranges not covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSiz (to be fixed in runtime).
  // It is always present and instruction fetches are allowed.
  // {{0x00000000, 0x00000000},TRUE,FALSE},
  //

  //
  // Future extended range could be added here.
  //

  //
  // PCI MMIO ranges (to be added in runtime).
  // They are always present and instruction fetches are not allowed.
  //
};

//
// These memory ranges are mapped by 4KB-page instead of 2MB-page.
//
MEMORY_RANGE  *mSplitMemRange     = NULL;
UINTN         mSplitMemRangeCount = 0;

//
// SMI command port, read from the FADT by GetSmiCommandPort().
//
UINT32  mSmiCommandPort;
119
120 /**
121 Disable branch trace store.
122
123 **/
124 VOID
125 DisableBTS (
126 VOID
127 )
128 {
129 AsmMsrAnd64 (MSR_DEBUG_CTL, ~((UINT64)(MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR)));
130 }
131
132 /**
133 Enable branch trace store.
134
135 **/
136 VOID
137 EnableBTS (
138 VOID
139 )
140 {
141 AsmMsrOr64 (MSR_DEBUG_CTL, (MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR));
142 }
143
144 /**
145 Get CPU Index from APIC ID.
146
147 **/
148 UINTN
149 GetCpuIndex (
150 VOID
151 )
152 {
153 UINTN Index;
154 UINT32 ApicId;
155
156 ApicId = GetApicId ();
157
158 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
159 if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == ApicId) {
160 return Index;
161 }
162 }
163
164 ASSERT (FALSE);
165 return 0;
166 }
167
/**
  Get the source of IP after execute-disable exception is triggered.

  Walks the per-CPU BTS record ring buffer backwards from the current write
  position looking for the second-newest branch whose destination matches
  DestinationIP (the newest match is the #DB exception delivery itself).

  @param CpuIndex       The index of CPU.
  @param DestinationIP  The destination address.

  @return The branch source address, or 0 if no matching record is found.

**/
UINT64
GetSourceFromDestinationOnBts (
  UINTN   CpuIndex,
  UINT64  DestinationIP
  )
{
  BRANCH_TRACE_RECORD  *CurrentBTSRecord;
  UINTN                Index;
  BOOLEAN              FirstMatch;

  FirstMatch = FALSE;

  //
  // BTSIndex points at the next record to be written; scan backwards from it.
  //
  CurrentBTSRecord = (BRANCH_TRACE_RECORD *)mMsrDsArea[CpuIndex]->BTSIndex;
  for (Index = 0; Index < mBTSRecordNumber; Index++) {
    if ((UINTN)CurrentBTSRecord < (UINTN)mMsrBTSRecord[CpuIndex]) {
      //
      // Underflow: wrap to the newest end of the ring buffer.
      // BTSAbsoluteMaximum is one byte past the last record (see
      // InitSmmProfileInternal), so "- 1" then "--" lands on the last record.
      //
      CurrentBTSRecord = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[CpuIndex]->BTSAbsoluteMaximum - 1);
      CurrentBTSRecord--;
    }

    if (CurrentBTSRecord->LastBranchTo == DestinationIP) {
      //
      // Good! find 1st one, then find 2nd one.
      //
      if (!FirstMatch) {
        //
        // The first one is DEBUG exception
        //
        FirstMatch = TRUE;
      } else {
        //
        // Good find proper one.
        //
        return CurrentBTSRecord->LastBranchFrom;
      }
    }

    CurrentBTSRecord--;
  }

  return 0;
}
219
/**
  SMM profile specific INT 1 (single-step) exception handler.

  Restores the page table entries that were temporarily modified (presumably
  by the SMM profile #PF handler — confirm against SmmProfilePFHandler) so
  the next access to a protected page faults and is logged again, then
  clears the trap flag to stop single-stepping.

  @param InterruptType Defines the type of interrupt or exception that
                       occurred on the processor.This parameter is processor architecture specific.
  @param SystemContext A pointer to the processor context when
                       the interrupt occurred on the processor.
**/
VOID
EFIAPI
DebugExceptionHandler (
  IN EFI_EXCEPTION_TYPE  InterruptType,
  IN EFI_SYSTEM_CONTEXT  SystemContext
  )
{
  UINTN  CpuIndex;
  UINTN  PFEntry;

  //
  // Nothing to do unless profiling (or a non-stop guard mode) is active.
  //
  if (!mSmmProfileStart &&
      !HEAP_GUARD_NONSTOP_MODE &&
      !NULL_DETECTION_NONSTOP_MODE)
  {
    return;
  }

  CpuIndex = GetCpuIndex ();

  //
  // Clear last PF entries: write each saved entry value back to its
  // recorded page table location.
  //
  for (PFEntry = 0; PFEntry < mPFEntryCount[CpuIndex]; PFEntry++) {
    *mLastPFEntryPointer[CpuIndex][PFEntry] = mLastPFEntryValue[CpuIndex][PFEntry];
  }

  //
  // Reset page fault exception count for next page fault.
  //
  mPFEntryCount[CpuIndex] = 0;

  //
  // Flush TLB so the restored page table entries take effect.
  //
  CpuFlushTlb ();

  //
  // Clear TF in EFLAGS
  //
  ClearTrapFlag (SystemContext);
}
269
270 /**
271 Check if the input address is in SMM ranges.
272
273 @param[in] Address The input address.
274
275 @retval TRUE The input address is in SMM.
276 @retval FALSE The input address is not in SMM.
277 **/
278 BOOLEAN
279 IsInSmmRanges (
280 IN EFI_PHYSICAL_ADDRESS Address
281 )
282 {
283 UINTN Index;
284
285 if ((Address >= mCpuHotPlugData.SmrrBase) && (Address < mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
286 return TRUE;
287 }
288
289 for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
290 if ((Address >= mSmmCpuSmramRanges[Index].CpuStart) &&
291 (Address < mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize))
292 {
293 return TRUE;
294 }
295 }
296
297 return FALSE;
298 }
299
300 /**
301 Check if the memory address will be mapped by 4KB-page.
302
303 @param Address The address of Memory.
304 @param Nx The flag indicates if the memory is execute-disable.
305
306 **/
307 BOOLEAN
308 IsAddressValid (
309 IN EFI_PHYSICAL_ADDRESS Address,
310 IN BOOLEAN *Nx
311 )
312 {
313 UINTN Index;
314
315 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
316 //
317 // Check configuration
318 //
319 for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
320 if ((Address >= mProtectionMemRange[Index].Range.Base) && (Address < mProtectionMemRange[Index].Range.Top)) {
321 *Nx = mProtectionMemRange[Index].Nx;
322 return mProtectionMemRange[Index].Present;
323 }
324 }
325
326 *Nx = TRUE;
327 return FALSE;
328 } else {
329 *Nx = TRUE;
330 if (IsInSmmRanges (Address)) {
331 *Nx = FALSE;
332 }
333
334 return TRUE;
335 }
336 }
337
338 /**
339 Check if the memory address will be mapped by 4KB-page.
340
341 @param Address The address of Memory.
342
343 **/
344 BOOLEAN
345 IsAddressSplit (
346 IN EFI_PHYSICAL_ADDRESS Address
347 )
348 {
349 UINTN Index;
350
351 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
352 //
353 // Check configuration
354 //
355 for (Index = 0; Index < mSplitMemRangeCount; Index++) {
356 if ((Address >= mSplitMemRange[Index].Base) && (Address < mSplitMemRange[Index].Top)) {
357 return TRUE;
358 }
359 }
360 } else {
361 if (Address < mCpuHotPlugData.SmrrBase) {
362 if ((mCpuHotPlugData.SmrrBase - Address) < BASE_2MB) {
363 return TRUE;
364 }
365 } else if (Address > (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize - BASE_2MB)) {
366 if ((Address - (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize - BASE_2MB)) < BASE_2MB) {
367 return TRUE;
368 }
369 }
370 }
371
372 //
373 // Return default
374 //
375 return FALSE;
376 }
377
/**
  Initialize the protected memory ranges and the 4KB-page mapped memory ranges.

  Merges the static template ranges with the SMRAM descriptors and the GCD
  MMIO ranges into mProtectionMemRange, then derives mSplitMemRange: the
  sub-ranges whose 2MB-unaligned edges must be mapped with 4KB pages.

**/
VOID
InitProtectedMemRange (
  VOID
  )
{
  UINTN                            Index;
  UINTN                            NumberOfDescriptors;
  UINTN                            NumberOfAddedDescriptors;
  UINTN                            NumberOfProtectRange;
  UINTN                            NumberOfSpliteRange;
  EFI_GCD_MEMORY_SPACE_DESCRIPTOR  *MemorySpaceMap;
  UINTN                            TotalSize;
  EFI_PHYSICAL_ADDRESS             ProtectBaseAddress;
  EFI_PHYSICAL_ADDRESS             ProtectEndAddress;
  EFI_PHYSICAL_ADDRESS             Top2MBAlignedAddress;
  EFI_PHYSICAL_ADDRESS             Base2MBAlignedAddress;
  UINT64                           High4KBPageSize;
  UINT64                           Low4KBPageSize;

  NumberOfDescriptors      = 0;
  NumberOfAddedDescriptors = mSmmCpuSmramRangeCount;
  NumberOfSpliteRange      = 0;
  MemorySpaceMap           = NULL;

  //
  // Get MMIO ranges from GCD and add them into protected memory ranges.
  // NOTE(review): the return status is ignored; on failure NumberOfDescriptors
  // stays 0 and only the SMRAM ranges are added below.
  //
  gDS->GetMemorySpaceMap (
         &NumberOfDescriptors,
         &MemorySpaceMap
         );
  for (Index = 0; Index < NumberOfDescriptors; Index++) {
    if (MemorySpaceMap[Index].GcdMemoryType == EfiGcdMemoryTypeMemoryMappedIo) {
      NumberOfAddedDescriptors++;
    }
  }

  if (NumberOfAddedDescriptors != 0) {
    //
    // Allocate one array large enough for the template plus all SMRAM and
    // MMIO ranges; unused tail entries are trimmed at the end.
    //
    TotalSize           = NumberOfAddedDescriptors * sizeof (MEMORY_PROTECTION_RANGE) + sizeof (mProtectionMemRangeTemplate);
    mProtectionMemRange = (MEMORY_PROTECTION_RANGE *)AllocateZeroPool (TotalSize);
    ASSERT (mProtectionMemRange != NULL);
    mProtectionMemRangeCount = TotalSize / sizeof (MEMORY_PROTECTION_RANGE);

    //
    // Copy existing ranges.
    //
    CopyMem (mProtectionMemRange, mProtectionMemRangeTemplate, sizeof (mProtectionMemRangeTemplate));

    //
    // Create split ranges which come from protected ranges.
    //
    TotalSize      = (TotalSize / sizeof (MEMORY_PROTECTION_RANGE)) * sizeof (MEMORY_RANGE);
    mSplitMemRange = (MEMORY_RANGE *)AllocateZeroPool (TotalSize);
    ASSERT (mSplitMemRange != NULL);

    //
    // Create SMM ranges which are set to present and execution-enable.
    //
    NumberOfProtectRange = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);
    for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
      if ((mSmmCpuSmramRanges[Index].CpuStart >= mProtectionMemRange[0].Range.Base) &&
          (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize < mProtectionMemRange[0].Range.Top))
      {
        //
        // If the address have been already covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSiz
        //
        break;
      }

      mProtectionMemRange[NumberOfProtectRange].Range.Base = mSmmCpuSmramRanges[Index].CpuStart;
      mProtectionMemRange[NumberOfProtectRange].Range.Top  = mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize;
      mProtectionMemRange[NumberOfProtectRange].Present    = TRUE;
      mProtectionMemRange[NumberOfProtectRange].Nx         = FALSE;
      NumberOfProtectRange++;
    }

    //
    // Create MMIO ranges which are set to present and execution-disable.
    //
    for (Index = 0; Index < NumberOfDescriptors; Index++) {
      if (MemorySpaceMap[Index].GcdMemoryType != EfiGcdMemoryTypeMemoryMappedIo) {
        continue;
      }

      mProtectionMemRange[NumberOfProtectRange].Range.Base = MemorySpaceMap[Index].BaseAddress;
      mProtectionMemRange[NumberOfProtectRange].Range.Top  = MemorySpaceMap[Index].BaseAddress + MemorySpaceMap[Index].Length;
      mProtectionMemRange[NumberOfProtectRange].Present    = TRUE;
      mProtectionMemRange[NumberOfProtectRange].Nx         = TRUE;
      NumberOfProtectRange++;
    }

    //
    // Check and update actual protected memory ranges count
    //
    ASSERT (NumberOfProtectRange <= mProtectionMemRangeCount);
    mProtectionMemRangeCount = NumberOfProtectRange;
  }

  //
  // According to protected ranges, create the ranges which will be mapped by 4KB-page.
  //
  NumberOfSpliteRange  = 0;
  NumberOfProtectRange = mProtectionMemRangeCount;
  for (Index = 0; Index < NumberOfProtectRange; Index++) {
    //
    // If MMIO base address is not 2MB alignment, make 2MB alignment for create 4KB page in page table.
    //
    ProtectBaseAddress = mProtectionMemRange[Index].Range.Base;
    ProtectEndAddress  = mProtectionMemRange[Index].Range.Top;
    if (((ProtectBaseAddress & (SIZE_2MB - 1)) != 0) || ((ProtectEndAddress & (SIZE_2MB - 1)) != 0)) {
      //
      // Check if it is possible to create 4KB-page for not 2MB-aligned range and to create 2MB-page for 2MB-aligned range.
      // A mix of 4KB and 2MB page could save SMRAM space.
      //
      Top2MBAlignedAddress  = ProtectEndAddress & ~(SIZE_2MB - 1);
      Base2MBAlignedAddress = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
      if ((Top2MBAlignedAddress > Base2MBAlignedAddress) &&
          ((Top2MBAlignedAddress - Base2MBAlignedAddress) >= SIZE_2MB))
      {
        //
        // There is a range which could be mapped by 2MB-page.
        //
        High4KBPageSize = ((ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectEndAddress & ~(SIZE_2MB - 1));
        Low4KBPageSize  = ((ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectBaseAddress & ~(SIZE_2MB - 1));
        if (High4KBPageSize != 0) {
          //
          // Add not 2MB-aligned range to be mapped by 4KB-page.
          //
          mSplitMemRange[NumberOfSpliteRange].Base = ProtectEndAddress & ~(SIZE_2MB - 1);
          mSplitMemRange[NumberOfSpliteRange].Top  = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
          NumberOfSpliteRange++;
        }

        if (Low4KBPageSize != 0) {
          //
          // Add not 2MB-aligned range to be mapped by 4KB-page.
          //
          mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1);
          mSplitMemRange[NumberOfSpliteRange].Top  = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
          NumberOfSpliteRange++;
        }
      } else {
        //
        // The range could only be mapped by 4KB-page.
        //
        mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1);
        mSplitMemRange[NumberOfSpliteRange].Top  = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
        NumberOfSpliteRange++;
      }
    }
  }

  mSplitMemRangeCount = NumberOfSpliteRange;

  DEBUG ((DEBUG_INFO, "SMM Profile Memory Ranges:\n"));
  for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
    DEBUG ((DEBUG_INFO, "mProtectionMemRange[%d].Base = %lx\n", Index, mProtectionMemRange[Index].Range.Base));
    DEBUG ((DEBUG_INFO, "mProtectionMemRange[%d].Top  = %lx\n", Index, mProtectionMemRange[Index].Range.Top));
  }

  for (Index = 0; Index < mSplitMemRangeCount; Index++) {
    DEBUG ((DEBUG_INFO, "mSplitMemRange[%d].Base = %lx\n", Index, mSplitMemRange[Index].Base));
    DEBUG ((DEBUG_INFO, "mSplitMemRange[%d].Top  = %lx\n", Index, mSplitMemRange[Index].Top));
  }
}
547
/**
  Update page table according to protected memory ranges and the 4KB-page mapped memory ranges.

  First pass splits 2MB pages covering IsAddressSplit() addresses into 4KB
  page tables; second pass marks entries not-present or execute-disable
  according to IsAddressValid().

**/
VOID
InitPaging (
  VOID
  )
{
  UINT64    Pml5Entry;
  UINT64    Pml4Entry;
  UINT64    *Pml5;
  UINT64    *Pml4;
  UINT64    *Pdpt;
  UINT64    *Pd;
  UINT64    *Pt;
  UINTN     Address;
  UINTN     Pml5Index;
  UINTN     Pml4Index;
  UINTN     PdptIndex;
  UINTN     PdIndex;
  UINTN     PtIndex;
  UINTN     NumberOfPdptEntries;
  UINTN     NumberOfPml4Entries;
  UINTN     NumberOfPml5Entries;
  UINTN     SizeOfMemorySpace;
  BOOLEAN   Nx;
  IA32_CR4  Cr4;
  BOOLEAN   Enable5LevelPaging;

  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 == 1);

  if (sizeof (UINTN) == sizeof (UINT64)) {
    //
    // X64: derive the populated PML5/PML4/PDPT entry counts from the
    // physical address width in gPhyMask.
    //
    if (!Enable5LevelPaging) {
      //
      // Without 5-level paging, synthesize a single PML5 entry pointing at
      // CR3 so the 5-level walk below can be shared.
      //
      Pml5Entry = (UINTN)mSmmProfileCr3 | IA32_PG_P;
      Pml5      = &Pml5Entry;
    } else {
      Pml5 = (UINT64 *)(UINTN)mSmmProfileCr3;
    }

    SizeOfMemorySpace = HighBitSet64 (gPhyMask) + 1;
    ASSERT (SizeOfMemorySpace <= 52);

    //
    // Calculate the table entries of PML5E, PML4E and PDPTE.
    //
    NumberOfPml5Entries = 1;
    if (SizeOfMemorySpace > 48) {
      if (Enable5LevelPaging) {
        NumberOfPml5Entries = (UINTN)LShiftU64 (1, SizeOfMemorySpace - 48);
      }

      SizeOfMemorySpace = 48;
    }

    NumberOfPml4Entries = 1;
    if (SizeOfMemorySpace > 39) {
      NumberOfPml4Entries = (UINTN)LShiftU64 (1, SizeOfMemorySpace - 39);
      SizeOfMemorySpace   = 39;
    }

    NumberOfPdptEntries = 1;
    ASSERT (SizeOfMemorySpace > 30);
    NumberOfPdptEntries = (UINTN)LShiftU64 (1, SizeOfMemorySpace - 30);
  } else {
    //
    // IA32 PAE: CR3 points at a 4-entry PDPT; synthesize one PML4 and one
    // PML5 entry above it so the same nested walk works.
    //
    Pml4Entry           = (UINTN)mSmmProfileCr3 | IA32_PG_P;
    Pml4                = &Pml4Entry;
    Pml5Entry           = (UINTN)Pml4 | IA32_PG_P;
    Pml5                = &Pml5Entry;
    NumberOfPml5Entries = 1;
    NumberOfPml4Entries = 1;
    NumberOfPdptEntries = 4;
  }

  //
  // Go through page table and change 2MB-page into 4KB-page.
  //
  for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries; Pml5Index++) {
    if ((Pml5[Pml5Index] & IA32_PG_P) == 0) {
      //
      // If PML5 entry does not exist, skip it
      //
      continue;
    }

    Pml4 = (UINT64 *)(UINTN)(Pml5[Pml5Index] & PHYSICAL_ADDRESS_MASK);
    for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) {
      if ((Pml4[Pml4Index] & IA32_PG_P) == 0) {
        //
        // If PML4 entry does not exist, skip it
        //
        continue;
      }

      Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++, Pdpt++) {
        if ((*Pdpt & IA32_PG_P) == 0) {
          //
          // If PDPT entry does not exist, skip it
          //
          continue;
        }

        if ((*Pdpt & IA32_PG_PS) != 0) {
          //
          // This is 1G entry, skip it
          //
          continue;
        }

        Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
        if (Pd == 0) {
          continue;
        }

        for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) {
          if ((*Pd & IA32_PG_P) == 0) {
            //
            // If PD entry does not exist, skip it
            //
            continue;
          }

          //
          // Reconstruct the 2MB-aligned physical address covered by this
          // PD entry from the four table indices.
          //
          Address = (UINTN)LShiftU64 (
                             LShiftU64 (
                               LShiftU64 ((Pml5Index << 9) + Pml4Index, 9) + PdptIndex,
                               9
                               ) + PdIndex,
                             21
                             );

          //
          // If it is 2M page, check IsAddressSplit()
          //
          if (((*Pd & IA32_PG_PS) != 0) && IsAddressSplit (Address)) {
            //
            // Based on current page table, create 4KB page table for split area.
            //
            ASSERT (Address == (*Pd & PHYSICAL_ADDRESS_MASK));

            Pt = AllocatePageTableMemory (1);
            ASSERT (Pt != NULL);

            // Split it
            for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof (*Pt); PtIndex++) {
              Pt[PtIndex] = Address + ((PtIndex << 12) | mAddressEncMask | PAGE_ATTRIBUTE_BITS);
            } // end for PT

            *Pd = (UINT64)(UINTN)Pt | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
          } // end if IsAddressSplit
        } // end for PD
      } // end for PDPT
    } // end for PML4
  } // end for PML5

  //
  // Go through page table and set several page table entries to absent or execute-disable.
  //
  DEBUG ((DEBUG_INFO, "Patch page table start ...\n"));
  for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries; Pml5Index++) {
    if ((Pml5[Pml5Index] & IA32_PG_P) == 0) {
      //
      // If PML5 entry does not exist, skip it
      //
      continue;
    }

    Pml4 = (UINT64 *)(UINTN)(Pml5[Pml5Index] & PHYSICAL_ADDRESS_MASK);
    for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) {
      if ((Pml4[Pml4Index] & IA32_PG_P) == 0) {
        //
        // If PML4 entry does not exist, skip it
        //
        continue;
      }

      Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++, Pdpt++) {
        if ((*Pdpt & IA32_PG_P) == 0) {
          //
          // If PDPT entry does not exist, skip it
          //
          continue;
        }

        if ((*Pdpt & IA32_PG_PS) != 0) {
          //
          // This is 1G entry, set NX bit and skip it
          //
          if (mXdSupported) {
            *Pdpt = *Pdpt | IA32_PG_NX;
          }

          continue;
        }

        Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
        if (Pd == 0) {
          continue;
        }

        for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) {
          if ((*Pd & IA32_PG_P) == 0) {
            //
            // If PD entry does not exist, skip it
            //
            continue;
          }

          //
          // Reconstruct the 2MB-aligned physical address covered by this
          // PD entry from the four table indices.
          //
          Address = (UINTN)LShiftU64 (
                             LShiftU64 (
                               LShiftU64 ((Pml5Index << 9) + Pml4Index, 9) + PdptIndex,
                               9
                               ) + PdIndex,
                             21
                             );

          if ((*Pd & IA32_PG_PS) != 0) {
            // 2MB page

            if (!IsAddressValid (Address, &Nx)) {
              //
              // Patch to remove Present flag and RW flag
              //
              *Pd = *Pd & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
            }

            if (Nx && mXdSupported) {
              *Pd = *Pd | IA32_PG_NX;
            }
          } else {
            // 4KB page
            Pt = (UINT64 *)(UINTN)(*Pd & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
            if (Pt == 0) {
              continue;
            }

            for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof (*Pt); PtIndex++, Pt++) {
              if (!IsAddressValid (Address, &Nx)) {
                *Pt = *Pt & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
              }

              if (Nx && mXdSupported) {
                *Pt = *Pt | IA32_PG_NX;
              }

              Address += SIZE_4KB;
            } // end for PT
          } // end if PS
        } // end for PD
      } // end for PDPT
    } // end for PML4
  } // end for PML5

  //
  // Flush TLB
  //
  CpuFlushTlb ();
  DEBUG ((DEBUG_INFO, "Patch page table done!\n"));
  //
  // Set execute-disable flag
  //
  mXdEnabled = TRUE;

  return;
}
815
/**
  To get system port address of the SMI Command Port in FADT table.

  Locates the ACPI 2.0 FADT and caches its SMI_CMD field in mSmiCommandPort.

**/
VOID
GetSmiCommandPort (
  VOID
  )
{
  EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE  *Fadt;

  Fadt = (EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *)EfiLocateFirstAcpiTable (
                                                        EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE
                                                        );
  // A platform without a FADT cannot provide an SMI command port.
  ASSERT (Fadt != NULL);

  mSmiCommandPort = Fadt->SmiCmd;
  DEBUG ((DEBUG_INFO, "mSmiCommandPort = %x\n", mSmiCommandPort));
}
835
/**
  Start SMM profile recording.

  Only raises the mSmmProfileStart flag; the page table patching described
  elsewhere is done by InitPaging(), not here.

**/
VOID
SmmProfileStart (
  VOID
  )
{
  //
  // The flag indicates SMM profile starts to work.
  //
  mSmmProfileStart = TRUE;
}
852
853 /**
854 Initialize SMM profile in SmmReadyToLock protocol callback function.
855
856 @param Protocol Points to the protocol's unique identifier.
857 @param Interface Points to the interface instance.
858 @param Handle The handle on which the interface was installed.
859
860 @retval EFI_SUCCESS SmmReadyToLock protocol callback runs successfully.
861 **/
862 EFI_STATUS
863 EFIAPI
864 InitSmmProfileCallBack (
865 IN CONST EFI_GUID *Protocol,
866 IN VOID *Interface,
867 IN EFI_HANDLE Handle
868 )
869 {
870 //
871 // Save to variable so that SMM profile data can be found.
872 //
873 gRT->SetVariable (
874 SMM_PROFILE_NAME,
875 &gEfiCallerIdGuid,
876 EFI_VARIABLE_BOOTSERVICE_ACCESS | EFI_VARIABLE_RUNTIME_ACCESS,
877 sizeof (mSmmProfileBase),
878 &mSmmProfileBase
879 );
880
881 //
882 // Get Software SMI from FADT
883 //
884 GetSmiCommandPort ();
885
886 //
887 // Initialize protected memory range for patching page table later.
888 //
889 InitProtectedMemRange ();
890
891 return EFI_SUCCESS;
892 }
893
/**
  Initialize SMM profile data structures.

  Allocates the per-CPU page fault bookkeeping arrays, the profile buffer
  (plus the BTS Debug Store area when supported) below 4GB, lays out the
  per-CPU DS/BTS/PEBS buffers, seeds the protection range template, and
  registers the SmmReadyToLock notification that finishes setup.

**/
VOID
InitSmmProfileInternal (
  VOID
  )
{
  EFI_STATUS            Status;
  EFI_PHYSICAL_ADDRESS  Base;
  VOID                  *Registration;
  UINTN                 Index;
  UINTN                 MsrDsAreaSizePerCpu;
  UINTN                 TotalSize;

  //
  // Per-CPU arrays used by the #PF/#DB handlers to save and restore
  // patched page table entries.
  //
  mPFEntryCount = (UINTN *)AllocateZeroPool (sizeof (UINTN) * mMaxNumberOfCpus);
  ASSERT (mPFEntryCount != NULL);
  mLastPFEntryValue = (UINT64 (*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (
                                                        sizeof (mLastPFEntryValue[0]) * mMaxNumberOfCpus
                                                        );
  ASSERT (mLastPFEntryValue != NULL);
  mLastPFEntryPointer = (UINT64 *(*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (
                                                           sizeof (mLastPFEntryPointer[0]) * mMaxNumberOfCpus
                                                           );
  ASSERT (mLastPFEntryPointer != NULL);

  //
  // Allocate memory for SmmProfile below 4GB.
  // The base address
  //
  mSmmProfileSize = PcdGet32 (PcdCpuSmmProfileSize);
  // Size must be 4KB aligned (whole pages).
  ASSERT ((mSmmProfileSize & 0xFFF) == 0);

  //
  // The DS save area is appended to the profile buffer when BTS is usable.
  //
  if (mBtsSupported) {
    TotalSize = mSmmProfileSize + mMsrDsAreaSize;
  } else {
    TotalSize = mSmmProfileSize;
  }

  // AllocateMaxAddress with 0xFFFFFFFF keeps the buffer below 4GB.
  Base   = 0xFFFFFFFF;
  Status = gBS->AllocatePages (
                  AllocateMaxAddress,
                  EfiReservedMemoryType,
                  EFI_SIZE_TO_PAGES (TotalSize),
                  &Base
                  );
  ASSERT_EFI_ERROR (Status);
  ZeroMem ((VOID *)(UINTN)Base, TotalSize);
  mSmmProfileBase = (SMM_PROFILE_HEADER *)(UINTN)Base;

  //
  // Initialize SMM profile data header.
  //
  mSmmProfileBase->HeaderSize     = sizeof (SMM_PROFILE_HEADER);
  mSmmProfileBase->MaxDataEntries = (UINT64)((mSmmProfileSize - sizeof (SMM_PROFILE_HEADER)) / sizeof (SMM_PROFILE_ENTRY));
  mSmmProfileBase->MaxDataSize    = MultU64x64 (mSmmProfileBase->MaxDataEntries, sizeof (SMM_PROFILE_ENTRY));
  mSmmProfileBase->CurDataEntries = 0;
  mSmmProfileBase->CurDataSize    = 0;
  mSmmProfileBase->TsegStart      = mCpuHotPlugData.SmrrBase;
  mSmmProfileBase->TsegSize       = mCpuHotPlugData.SmrrSize;
  mSmmProfileBase->NumSmis        = 0;
  mSmmProfileBase->NumCpus        = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;

  if (mBtsSupported) {
    mMsrDsArea = (MSR_DS_AREA_STRUCT **)AllocateZeroPool (sizeof (MSR_DS_AREA_STRUCT *) * mMaxNumberOfCpus);
    ASSERT (mMsrDsArea != NULL);
    mMsrBTSRecord = (BRANCH_TRACE_RECORD **)AllocateZeroPool (sizeof (BRANCH_TRACE_RECORD *) * mMaxNumberOfCpus);
    ASSERT (mMsrBTSRecord != NULL);
    mMsrPEBSRecord = (PEBS_RECORD **)AllocateZeroPool (sizeof (PEBS_RECORD *) * mMaxNumberOfCpus);
    ASSERT (mMsrPEBSRecord != NULL);

    //
    // Carve the DS area into equal per-CPU slices:
    // [DS header | BTS records ... | PEBS records].
    //
    mMsrDsAreaBase      = (MSR_DS_AREA_STRUCT *)((UINTN)Base + mSmmProfileSize);
    MsrDsAreaSizePerCpu = mMsrDsAreaSize / mMaxNumberOfCpus;
    mBTSRecordNumber    = (MsrDsAreaSizePerCpu - sizeof (PEBS_RECORD) * PEBS_RECORD_NUMBER - sizeof (MSR_DS_AREA_STRUCT)) / sizeof (BRANCH_TRACE_RECORD);
    for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
      mMsrDsArea[Index]     = (MSR_DS_AREA_STRUCT *)((UINTN)mMsrDsAreaBase + MsrDsAreaSizePerCpu * Index);
      mMsrBTSRecord[Index]  = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[Index] + sizeof (MSR_DS_AREA_STRUCT));
      mMsrPEBSRecord[Index] = (PEBS_RECORD *)((UINTN)mMsrDsArea[Index] + MsrDsAreaSizePerCpu - sizeof (PEBS_RECORD) * PEBS_RECORD_NUMBER);

      //
      // AbsoluteMaximum is one byte past the last record; setting the
      // interrupt threshold beyond it keeps BTS in circular mode.
      //
      mMsrDsArea[Index]->BTSBufferBase          = (UINTN)mMsrBTSRecord[Index];
      mMsrDsArea[Index]->BTSIndex               = mMsrDsArea[Index]->BTSBufferBase;
      mMsrDsArea[Index]->BTSAbsoluteMaximum     = mMsrDsArea[Index]->BTSBufferBase + mBTSRecordNumber * sizeof (BRANCH_TRACE_RECORD) + 1;
      mMsrDsArea[Index]->BTSInterruptThreshold  = mMsrDsArea[Index]->BTSAbsoluteMaximum + 1;

      mMsrDsArea[Index]->PEBSBufferBase         = (UINTN)mMsrPEBSRecord[Index];
      mMsrDsArea[Index]->PEBSIndex              = mMsrDsArea[Index]->PEBSBufferBase;
      mMsrDsArea[Index]->PEBSAbsoluteMaximum    = mMsrDsArea[Index]->PEBSBufferBase + PEBS_RECORD_NUMBER * sizeof (PEBS_RECORD) + 1;
      mMsrDsArea[Index]->PEBSInterruptThreshold = mMsrDsArea[Index]->PEBSAbsoluteMaximum + 1;
    }
  }

  //
  // Use the static template until InitProtectedMemRange() builds the full
  // list at ready-to-lock.
  //
  mProtectionMemRange      = mProtectionMemRangeTemplate;
  mProtectionMemRangeCount = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);

  //
  // Update TSeg entry.
  //
  mProtectionMemRange[0].Range.Base = mCpuHotPlugData.SmrrBase;
  mProtectionMemRange[0].Range.Top  = mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize;

  //
  // Update SMM profile entry.
  //
  mProtectionMemRange[1].Range.Base = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase;
  mProtectionMemRange[1].Range.Top  = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase + TotalSize;

  //
  // Allocate memory reserved for creating 4KB pages.
  //
  InitPagesForPFHandler ();

  //
  // Start SMM profile when SmmReadyToLock protocol is installed.
  //
  Status = gSmst->SmmRegisterProtocolNotify (
                    &gEfiSmmReadyToLockProtocolGuid,
                    InitSmmProfileCallBack,
                    &Registration
                    );
  ASSERT_EFI_ERROR (Status);

  return;
}
1018
/**
  Check if feature is supported by a processor.

  Probes CPUID for CET shadow stack, execute-disable (XD), and branch trace
  store (BTS) support, clearing the corresponding module flags and patching
  the assembly flow when a feature is absent.

**/
VOID
CheckFeatureSupported (
  VOID
  )
{
  UINT32                         RegEax;
  UINT32                         RegEcx;
  UINT32                         RegEdx;
  MSR_IA32_MISC_ENABLE_REGISTER  MiscEnableMsr;

  //
  // CET shadow stack: requires CPUID leaf 7 sub-leaf 0 ECX.CET_SS.
  //
  if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
    AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);
    if (RegEax >= CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS) {
      AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, NULL);
      if ((RegEcx & CPUID_CET_SS) == 0) {
        mCetSupported = FALSE;
        PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
      }
    } else {
      mCetSupported = FALSE;
      PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
    }
  }

  if (mXdSupported) {
    AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
    if (RegEax <= CPUID_EXTENDED_FUNCTION) {
      //
      // Extended CPUID functions are not supported on this processor.
      //
      mXdSupported = FALSE;
      PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
    }

    //
    // NOTE(review): the extended leaf is queried even when the check above
    // already cleared mXdSupported; the flag can only be cleared again, so
    // the end result is unchanged.
    //
    AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & CPUID1_EDX_XD_SUPPORT) == 0) {
      //
      // Execute Disable Bit feature is not supported on this processor.
      //
      mXdSupported = FALSE;
      PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
    }

    if (StandardSignatureIsAuthenticAMD ()) {
      //
      // AMD processors do not support MSR_IA32_MISC_ENABLE
      //
      PatchInstructionX86 (gPatchMsrIa32MiscEnableSupported, FALSE, 1);
    }
  }

  if (mBtsSupported) {
    AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & CPUID1_EDX_BTS_AVAILABLE) != 0) {
      //
      // Per IA32 manuals:
      // When CPUID.1:EDX[21] is set, the following BTS facilities are available:
      // 1. The BTS_UNAVAILABLE flag in the IA32_MISC_ENABLE MSR indicates the
      //    availability of the BTS facilities, including the ability to set the BTS and
      //    BTINT bits in the MSR_DEBUGCTLA MSR.
      // 2. The IA32_DS_AREA MSR can be programmed to point to the DS save area.
      //
      MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);
      if (MiscEnableMsr.Bits.BTS == 1) {
        //
        // BTS facilities is not supported if MSR_IA32_MISC_ENABLE.BTS bit is set.
        //
        mBtsSupported = FALSE;
      }
    }
  }
}
1095
1096 /**
1097 Enable single step.
1098
1099 **/
1100 VOID
1101 ActivateSingleStepDB (
1102 VOID
1103 )
1104 {
1105 UINTN Dr6;
1106
1107 Dr6 = AsmReadDr6 ();
1108 if ((Dr6 & DR6_SINGLE_STEP) != 0) {
1109 return;
1110 }
1111
1112 Dr6 |= DR6_SINGLE_STEP;
1113 AsmWriteDr6 (Dr6);
1114 }
1115
1116 /**
1117 Enable last branch.
1118
1119 **/
1120 VOID
1121 ActivateLBR (
1122 VOID
1123 )
1124 {
1125 UINT64 DebugCtl;
1126
1127 DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);
1128 if ((DebugCtl & MSR_DEBUG_CTL_LBR) != 0) {
1129 return;
1130 }
1131
1132 DebugCtl |= MSR_DEBUG_CTL_LBR;
1133 AsmWriteMsr64 (MSR_DEBUG_CTL, DebugCtl);
1134 }
1135
1136 /**
1137 Enable branch trace store.
1138
1139 @param CpuIndex The index of the processor.
1140
1141 **/
1142 VOID
1143 ActivateBTS (
1144 IN UINTN CpuIndex
1145 )
1146 {
1147 UINT64 DebugCtl;
1148
1149 DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);
1150 if ((DebugCtl & MSR_DEBUG_CTL_BTS) != 0) {
1151 return;
1152 }
1153
1154 AsmWriteMsr64 (MSR_DS_AREA, (UINT64)(UINTN)mMsrDsArea[CpuIndex]);
1155 DebugCtl |= (UINT64)(MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR);
1156 DebugCtl &= ~((UINT64)MSR_DEBUG_CTL_BTINT);
1157 AsmWriteMsr64 (MSR_DEBUG_CTL, DebugCtl);
1158 }
1159
1160 /**
1161 Increase SMI number in each SMI entry.
1162
1163 **/
1164 VOID
1165 SmmProfileRecordSmiNum (
1166 VOID
1167 )
1168 {
1169 if (mSmmProfileStart) {
1170 mSmmProfileBase->NumSmis++;
1171 }
1172 }
1173
1174 /**
1175 Initialize processor environment for SMM profile.
1176
1177 @param CpuIndex The index of the processor.
1178
1179 **/
1180 VOID
1181 ActivateSmmProfile (
1182 IN UINTN CpuIndex
1183 )
1184 {
1185 //
1186 // Enable Single Step DB#
1187 //
1188 ActivateSingleStepDB ();
1189
1190 if (mBtsSupported) {
1191 //
1192 // We can not get useful information from LER, so we have to use BTS.
1193 //
1194 ActivateLBR ();
1195
1196 //
1197 // Enable BTS
1198 //
1199 ActivateBTS (CpuIndex);
1200 }
1201 }
1202
1203 /**
1204 Initialize SMM profile in SMM CPU entry point.
1205
1206 @param[in] Cr3 The base address of the page tables to use in SMM.
1207
1208 **/
1209 VOID
1210 InitSmmProfile (
1211 UINT32 Cr3
1212 )
1213 {
1214 //
1215 // Save Cr3
1216 //
1217 mSmmProfileCr3 = Cr3;
1218
1219 //
1220 // Skip SMM profile initialization if feature is disabled
1221 //
1222 if (!FeaturePcdGet (PcdCpuSmmProfileEnable) &&
1223 !HEAP_GUARD_NONSTOP_MODE &&
1224 !NULL_DETECTION_NONSTOP_MODE)
1225 {
1226 return;
1227 }
1228
1229 //
1230 // Initialize SmmProfile here
1231 //
1232 InitSmmProfileInternal ();
1233
1234 //
1235 // Initialize profile IDT.
1236 //
1237 InitIdtr ();
1238
1239 //
1240 // Tell #PF handler to prepare a #DB subsequently.
1241 //
1242 mSetupDebugTrap = TRUE;
1243 }
1244
/**
  Update page table to map the memory correctly in order to make the instruction
  which caused page fault execute successfully. And it also save the original page
  table to be restored in single-step exception.

  The saved entry value/pointer pairs in mLastPFEntryValue/mLastPFEntryPointer
  are consumed later by the #DB handler to restore the not-present/NX state
  after the faulting instruction retires.

  @param PageTable PageTable Address (root of the paging hierarchy from CR3).
  @param PFAddress The memory address which caused page fault exception.
  @param CpuIndex The index of the processor.
  @param ErrorCode The Error code of exception.

**/
VOID
RestorePageTableBelow4G (
  UINT64  *PageTable,
  UINT64  PFAddress,
  UINTN   CpuIndex,
  UINTN   ErrorCode
  )
{
  UINTN     PTIndex;
  UINTN     PFIndex;
  IA32_CR4  Cr4;
  BOOLEAN   Enable5LevelPaging;

  // CR4.LA57 tells us whether an extra PML5 level sits above PML4.
  Cr4.UintN          = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 == 1);

  //
  // PML5 (linear address bits 56:48 index this level; only with 5-level paging)
  //
  if (Enable5LevelPaging) {
    PTIndex = (UINTN)BitFieldRead64 (PFAddress, 48, 56);
    ASSERT (PageTable[PTIndex] != 0);
    PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
  }

  //
  // PML4 (bits 47:39; this level only exists in 64-bit builds, hence the
  // sizeof check — on IA32 the walk starts at the PDPTE level below)
  //
  if (sizeof (UINT64) == sizeof (UINTN)) {
    PTIndex = (UINTN)BitFieldRead64 (PFAddress, 39, 47);
    ASSERT (PageTable[PTIndex] != 0);
    PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
  }

  //
  // PDPTE (bits 38:30)
  //
  PTIndex = (UINTN)BitFieldRead64 (PFAddress, 30, 38);
  ASSERT (PageTable[PTIndex] != 0);
  PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);

  //
  // PD (bits 29:21); the PS bit distinguishes a 2MB page from a page table.
  //
  PTIndex = (UINTN)BitFieldRead64 (PFAddress, 21, 29);
  if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
    //
    // Large page
    //

    //
    // Record old entries with non-present status
    // Old entries include the memory which instruction is at and the memory which instruction access.
    //
    //
    ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
    if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
      PFIndex                                = mPFEntryCount[CpuIndex];
      mLastPFEntryValue[CpuIndex][PFIndex]   = PageTable[PTIndex];
      mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
      mPFEntryCount[CpuIndex]++;
    }

    //
    // Set new entry: map the 2MB-aligned page present/writable; if the fault
    // was an instruction fetch (EC.ID set), also clear NX so it can execute.
    //
    PageTable[PTIndex]  = (PFAddress & ~((1ull << 21) - 1));
    PageTable[PTIndex] |= (UINT64)IA32_PG_PS;
    PageTable[PTIndex] |= (UINT64)PAGE_ATTRIBUTE_BITS;
    if ((ErrorCode & IA32_PF_EC_ID) != 0) {
      PageTable[PTIndex] &= ~IA32_PG_NX;
    }
  } else {
    //
    // Small page
    //
    ASSERT (PageTable[PTIndex] != 0);
    PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);

    //
    // 4K PTE (bits 20:12)
    //
    PTIndex = (UINTN)BitFieldRead64 (PFAddress, 12, 20);

    //
    // Record old entries with non-present status
    // Old entries include the memory which instruction is at and the memory which instruction access.
    //
    //
    ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
    if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
      PFIndex                                = mPFEntryCount[CpuIndex];
      mLastPFEntryValue[CpuIndex][PFIndex]   = PageTable[PTIndex];
      mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
      mPFEntryCount[CpuIndex]++;
    }

    //
    // Set new entry: map the 4KB-aligned page present/writable; clear NX on
    // instruction-fetch faults so the instruction can execute.
    //
    PageTable[PTIndex]  = (PFAddress & ~((1ull << 12) - 1));
    PageTable[PTIndex] |= (UINT64)PAGE_ATTRIBUTE_BITS;
    if ((ErrorCode & IA32_PF_EC_ID) != 0) {
      PageTable[PTIndex] &= ~IA32_PG_NX;
    }
  }
}
1363
1364 /**
1365 Handler for Page Fault triggered by Guard page.
1366
1367 @param ErrorCode The Error code of exception.
1368
1369 **/
1370 VOID
1371 GuardPagePFHandler (
1372 UINTN ErrorCode
1373 )
1374 {
1375 UINT64 *PageTable;
1376 UINT64 PFAddress;
1377 UINT64 RestoreAddress;
1378 UINTN RestorePageNumber;
1379 UINTN CpuIndex;
1380
1381 PageTable = (UINT64 *)AsmReadCr3 ();
1382 PFAddress = AsmReadCr2 ();
1383 CpuIndex = GetCpuIndex ();
1384
1385 //
1386 // Memory operation cross pages, like "rep mov" instruction, will cause
1387 // infinite loop between this and Debug Trap handler. We have to make sure
1388 // that current page and the page followed are both in PRESENT state.
1389 //
1390 RestorePageNumber = 2;
1391 RestoreAddress = PFAddress;
1392 while (RestorePageNumber > 0) {
1393 RestorePageTableBelow4G (PageTable, RestoreAddress, CpuIndex, ErrorCode);
1394 RestoreAddress += EFI_PAGE_SIZE;
1395 RestorePageNumber--;
1396 }
1397
1398 //
1399 // Flush TLB
1400 //
1401 CpuFlushTlb ();
1402 }
1403
/**
  The Page fault handler to save SMM profile data.

  Restores the faulting page(s) to a usable state so the instruction can be
  re-executed, determines the faulting instruction address (via BTS when the
  fault was an instruction fetch), and records a de-duplicated entry in the
  SMM profile buffer.

  @param Rip The RIP when exception happens.
  @param ErrorCode The Error code of exception.

**/
VOID
SmmProfilePFHandler (
  UINTN  Rip,
  UINTN  ErrorCode
  )
{
  UINT64                      *PageTable;
  UINT64                      PFAddress;
  UINT64                      RestoreAddress;
  UINTN                       RestorePageNumber;
  UINTN                       CpuIndex;
  UINTN                       Index;
  UINT64                      InstructionAddress;
  UINTN                       MaxEntryNumber;
  UINTN                       CurrentEntryNumber;
  BOOLEAN                     IsValidPFAddress;
  SMM_PROFILE_ENTRY           *SmmProfileEntry;
  UINT64                      SmiCommand;
  EFI_STATUS                  Status;
  UINT8                       SoftSmiValue;
  EFI_SMM_SAVE_STATE_IO_INFO  IoInfo;

  if (!mSmmProfileStart) {
    //
    // If SMM profile does not start, call original page fault handler.
    //
    SmiDefaultPFHandler ();
    return;
  }

  //
  // Suspend branch tracing while handling the fault so the handler's own
  // branches do not pollute the BTS buffer.
  //
  if (mBtsSupported) {
    DisableBTS ();
  }

  IsValidPFAddress = FALSE;
  PageTable        = (UINT64 *)AsmReadCr3 ();
  PFAddress        = AsmReadCr2 ();
  CpuIndex         = GetCpuIndex ();

  //
  // Memory operation cross pages, like "rep mov" instruction, will cause
  // infinite loop between this and Debug Trap handler. We have to make sure
  // that current page and the page followed are both in PRESENT state.
  //
  RestorePageNumber = 2;
  RestoreAddress    = PFAddress;
  while (RestorePageNumber > 0) {
    if (RestoreAddress <= 0xFFFFFFFF) {
      RestorePageTableBelow4G (PageTable, RestoreAddress, CpuIndex, ErrorCode);
    } else {
      RestorePageTableAbove4G (PageTable, RestoreAddress, CpuIndex, ErrorCode, &IsValidPFAddress);
    }

    RestoreAddress += EFI_PAGE_SIZE;
    RestorePageNumber--;
  }

  //
  // Only log the access when it is not an expected/valid address
  // (RestorePageTableAbove4G may mark it valid).
  //
  if (!IsValidPFAddress) {
    InstructionAddress = Rip;
    if (((ErrorCode & IA32_PF_EC_ID) != 0) && (mBtsSupported)) {
      //
      // If it is instruction fetch failure, get the correct IP from BTS.
      //
      InstructionAddress = GetSourceFromDestinationOnBts (CpuIndex, Rip);
      if (InstructionAddress == 0) {
        //
        // It indicates the instruction which caused page fault is not a jump instruction,
        // set instruction address same as the page fault address.
        //
        InstructionAddress = PFAddress;
      }
    }

    //
    // Indicate it is not software SMI
    //
    SmiCommand = 0xFFFFFFFFFFFFFFFFULL;
    //
    // Scan every CPU's save state: if any CPU's last I/O access targeted the
    // SMI command port, this SMI was software-triggered — capture the command.
    //
    for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
      Status = SmmReadSaveState (&mSmmCpu, sizeof (IoInfo), EFI_SMM_SAVE_STATE_REGISTER_IO, Index, &IoInfo);
      if (EFI_ERROR (Status)) {
        continue;
      }

      if (IoInfo.IoPort == mSmiCommandPort) {
        //
        // A software SMI triggered by SMI command port has been found, get SmiCommand from SMI command port.
        //
        SoftSmiValue = IoRead8 (mSmiCommandPort);
        SmiCommand   = (UINT64)SoftSmiValue;
        break;
      }
    }

    // Profile entries start immediately after the header.
    SmmProfileEntry = (SMM_PROFILE_ENTRY *)(UINTN)(mSmmProfileBase + 1);
    //
    // Check if there is already a same entry in profile data.
    //
    for (Index = 0; Index < (UINTN)mSmmProfileBase->CurDataEntries; Index++) {
      if ((SmmProfileEntry[Index].ErrorCode == (UINT64)ErrorCode) &&
          (SmmProfileEntry[Index].Address == PFAddress) &&
          (SmmProfileEntry[Index].CpuNum == (UINT64)CpuIndex) &&
          (SmmProfileEntry[Index].Instruction == InstructionAddress) &&
          (SmmProfileEntry[Index].SmiCmd == SmiCommand))
      {
        //
        // Same record exist, need not save again.
        //
        break;
      }
    }

    if (Index == mSmmProfileBase->CurDataEntries) {
      CurrentEntryNumber = (UINTN)mSmmProfileBase->CurDataEntries;
      MaxEntryNumber     = (UINTN)mSmmProfileBase->MaxDataEntries;
      if (FeaturePcdGet (PcdCpuSmmProfileRingBuffer)) {
        // Ring-buffer mode: wrap the write position; CurDataEntries itself
        // keeps counting total records.
        CurrentEntryNumber = CurrentEntryNumber % MaxEntryNumber;
      }

      if (CurrentEntryNumber < MaxEntryNumber) {
        //
        // Log the new entry
        //
        SmmProfileEntry[CurrentEntryNumber].SmiNum      = mSmmProfileBase->NumSmis;
        SmmProfileEntry[CurrentEntryNumber].ErrorCode   = (UINT64)ErrorCode;
        SmmProfileEntry[CurrentEntryNumber].ApicId      = (UINT64)GetApicId ();
        SmmProfileEntry[CurrentEntryNumber].CpuNum      = (UINT64)CpuIndex;
        SmmProfileEntry[CurrentEntryNumber].Address     = PFAddress;
        SmmProfileEntry[CurrentEntryNumber].Instruction = InstructionAddress;
        SmmProfileEntry[CurrentEntryNumber].SmiCmd      = SmiCommand;
        //
        // Update current entry index and data size in the header.
        //
        mSmmProfileBase->CurDataEntries++;
        mSmmProfileBase->CurDataSize = MultU64x64 (mSmmProfileBase->CurDataEntries, sizeof (SMM_PROFILE_ENTRY));
      }
    }
  }

  //
  // Flush TLB
  //
  CpuFlushTlb ();

  if (mBtsSupported) {
    EnableBTS ();
  }
}
1558
1559 /**
1560 Replace INT1 exception handler to restore page table to absent/execute-disable state
1561 in order to trigger page fault again to save SMM profile data..
1562
1563 **/
1564 VOID
1565 InitIdtr (
1566 VOID
1567 )
1568 {
1569 EFI_STATUS Status;
1570
1571 Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_DEBUG, DebugExceptionHandler);
1572 ASSERT_EFI_ERROR (Status);
1573 }