]> git.proxmox.com Git - mirror_edk2.git/blob - UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
c47b5573e366bf5e8b4f56e9496d0e739d605e41
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / SmmProfile.c
1 /** @file
2 Enable SMM profile.
3
4 Copyright (c) 2012 - 2019, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 SPDX-License-Identifier: BSD-2-Clause-Patent
8
9 **/
10
11 #include "PiSmmCpuDxeSmm.h"
12 #include "SmmProfileInternal.h"
13
//
// CR3 value of the page table used while SMM profile is active
// (saved by InitSmmProfile()).
//
UINT32                    mSmmProfileCr3;

//
// The buffer to store SMM profile data (header followed by entries),
// allocated below 4GB in InitSmmProfileInternal().
//
SMM_PROFILE_HEADER        *mSmmProfileBase;

//
// Base of the per-CPU Debug Store (DS) save areas used for branch trace store.
//
MSR_DS_AREA_STRUCT        *mMsrDsAreaBase;

//
// The size of the SMM profile data buffer (from PcdCpuSmmProfileSize).
//
UINTN                     mSmmProfileSize;

//
// The size of the buffer to enable branch trace store (shared by all CPUs).
//
UINTN                     mMsrDsAreaSize   = SMM_PROFILE_DTS_SIZE;

//
// The flag indicates if execute-disable is supported by processor.
// Assumed TRUE until CheckFeatureSupported() proves otherwise.
//
BOOLEAN                   mXdSupported     = TRUE;

//
// The flag indicates if execute-disable is enabled on processor
// (set once InitPaging() has patched the page tables).
//
BOOLEAN                   mXdEnabled       = FALSE;

//
// The flag indicates if BTS is supported by processor.
// Assumed TRUE until CheckFeatureSupported() proves otherwise.
//
BOOLEAN                   mBtsSupported    = TRUE;

//
// The flag indicates if SMM profile starts to record data.
//
BOOLEAN                   mSmmProfileStart = FALSE;

//
// The flag indicates if #DB will be setup in #PF handler.
//
BOOLEAN                   mSetupDebugTrap  = FALSE;

//
// Per-CPU count of page table entries temporarily patched by the #PF handler
// for one instruction execution; restored in DebugExceptionHandler().
//
UINTN                     *mPFEntryCount;

//
// Per-CPU saved original page table entry values and the addresses they
// belong at, used to undo the #PF handler's temporary patches.
//
UINT64                    (*mLastPFEntryValue)[MAX_PF_ENTRY_COUNT];
UINT64                    *(*mLastPFEntryPointer)[MAX_PF_ENTRY_COUNT];

//
// Per-CPU pointers into the DS save area and its BTS/PEBS record buffers.
//
MSR_DS_AREA_STRUCT        **mMsrDsArea;
BRANCH_TRACE_RECORD       **mMsrBTSRecord;
UINTN                     mBTSRecordNumber;
PEBS_RECORD               **mMsrPEBSRecord;

//
// These memory ranges are always mapped present; they do not generate the
// access type of page fault exception, but they possibly generate the
// instruction fetch type of page fault exception.
//
MEMORY_PROTECTION_RANGE   *mProtectionMemRange     = NULL;
UINTN                     mProtectionMemRangeCount = 0;

//
// Some predefined memory ranges.
//
MEMORY_PROTECTION_RANGE mProtectionMemRangeTemplate[] = {
  //
  // SMRAM range (to be fixed in runtime).
  // It is always present and instruction fetches are allowed.
  // Fields: {Base, Top}, Present, Nx
  //
  {{0x00000000, 0x00000000},TRUE,FALSE},

  //
  // SMM profile data range( to be fixed in runtime).
  // It is always present and instruction fetches are not allowed.
  //
  {{0x00000000, 0x00000000},TRUE,TRUE},

  //
  // SMRAM ranges not covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSiz (to be fixed in runtime).
  // It is always present and instruction fetches are allowed.
  // {{0x00000000, 0x00000000},TRUE,FALSE},
  //

  //
  // Future extended range could be added here.
  //

  //
  // PCI MMIO ranges (to be added in runtime).
  // They are always present and instruction fetches are not allowed.
  //
};

//
// These memory ranges are mapped by 4KB-page instead of 2MB-page.
//
MEMORY_RANGE              *mSplitMemRange     = NULL;
UINTN                     mSplitMemRangeCount = 0;

//
// SMI command port (read from the FADT in GetSmiCommandPort()).
//
UINT32                    mSmiCommandPort;
115
116 /**
117 Disable branch trace store.
118
119 **/
120 VOID
121 DisableBTS (
122 VOID
123 )
124 {
125 AsmMsrAnd64 (MSR_DEBUG_CTL, ~((UINT64)(MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR)));
126 }
127
128 /**
129 Enable branch trace store.
130
131 **/
132 VOID
133 EnableBTS (
134 VOID
135 )
136 {
137 AsmMsrOr64 (MSR_DEBUG_CTL, (MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR));
138 }
139
140 /**
141 Get CPU Index from APIC ID.
142
143 **/
144 UINTN
145 GetCpuIndex (
146 VOID
147 )
148 {
149 UINTN Index;
150 UINT32 ApicId;
151
152 ApicId = GetApicId ();
153
154 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
155 if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == ApicId) {
156 return Index;
157 }
158 }
159 ASSERT (FALSE);
160 return 0;
161 }
162
/**
  Get the source of IP after execute-disable exception is triggered.

  Walks the per-CPU BTS record ring buffer backwards from the current
  BTSIndex, looking for the second-most-recent branch whose destination
  matches DestinationIP: the most recent match is the branch into the
  DEBUG exception handler itself, so the one before it is the branch
  that actually reached DestinationIP.

  @param CpuIndex        The index of CPU.
  @param DestinationIP   The destination address.

  @return The source IP of the branch that reached DestinationIP,
          or 0 if no such record is found within mBTSRecordNumber records.

**/
UINT64
GetSourceFromDestinationOnBts (
  UINTN  CpuIndex,
  UINT64 DestinationIP
  )
{
  BRANCH_TRACE_RECORD  *CurrentBTSRecord;
  UINTN                Index;
  BOOLEAN              FirstMatch;

  FirstMatch = FALSE;

  //
  // BTSIndex holds the address where the *next* record would be written,
  // so scanning starts just past the newest record and moves backwards.
  //
  CurrentBTSRecord = (BRANCH_TRACE_RECORD *)mMsrDsArea[CpuIndex]->BTSIndex;
  for (Index = 0; Index < mBTSRecordNumber; Index++) {
    if ((UINTN)CurrentBTSRecord < (UINTN)mMsrBTSRecord[CpuIndex]) {
      //
      // Underflow: wrap to the last valid record in the ring buffer.
      // BTSAbsoluteMaximum points one byte past the buffer, so back up to
      // it minus one and then step back a full record.
      //
      CurrentBTSRecord = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[CpuIndex]->BTSAbsoluteMaximum - 1);
      CurrentBTSRecord --;
    }
    if (CurrentBTSRecord->LastBranchTo == DestinationIP) {
      //
      // Good! find 1st one, then find 2nd one.
      //
      if (!FirstMatch) {
        //
        // The first one is DEBUG exception
        //
        FirstMatch = TRUE;
      } else {
        //
        // Good find proper one.
        //
        return CurrentBTSRecord->LastBranchFrom;
      }
    }
    CurrentBTSRecord--;
  }

  //
  // No qualifying record found.
  //
  return 0;
}
212
213 /**
214 SMM profile specific INT 1 (single-step) exception handler.
215
216 @param InterruptType Defines the type of interrupt or exception that
217 occurred on the processor.This parameter is processor architecture specific.
218 @param SystemContext A pointer to the processor context when
219 the interrupt occurred on the processor.
220 **/
221 VOID
222 EFIAPI
223 DebugExceptionHandler (
224 IN EFI_EXCEPTION_TYPE InterruptType,
225 IN EFI_SYSTEM_CONTEXT SystemContext
226 )
227 {
228 UINTN CpuIndex;
229 UINTN PFEntry;
230
231 if (!mSmmProfileStart &&
232 !HEAP_GUARD_NONSTOP_MODE &&
233 !NULL_DETECTION_NONSTOP_MODE) {
234 return;
235 }
236 CpuIndex = GetCpuIndex ();
237
238 //
239 // Clear last PF entries
240 //
241 for (PFEntry = 0; PFEntry < mPFEntryCount[CpuIndex]; PFEntry++) {
242 *mLastPFEntryPointer[CpuIndex][PFEntry] = mLastPFEntryValue[CpuIndex][PFEntry];
243 }
244
245 //
246 // Reset page fault exception count for next page fault.
247 //
248 mPFEntryCount[CpuIndex] = 0;
249
250 //
251 // Flush TLB
252 //
253 CpuFlushTlb ();
254
255 //
256 // Clear TF in EFLAGS
257 //
258 ClearTrapFlag (SystemContext);
259 }
260
261 /**
262 Check if the input address is in SMM ranges.
263
264 @param[in] Address The input address.
265
266 @retval TRUE The input address is in SMM.
267 @retval FALSE The input address is not in SMM.
268 **/
269 BOOLEAN
270 IsInSmmRanges (
271 IN EFI_PHYSICAL_ADDRESS Address
272 )
273 {
274 UINTN Index;
275
276 if ((Address >= mCpuHotPlugData.SmrrBase) && (Address < mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
277 return TRUE;
278 }
279 for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
280 if (Address >= mSmmCpuSmramRanges[Index].CpuStart &&
281 Address < mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) {
282 return TRUE;
283 }
284 }
285 return FALSE;
286 }
287
288 /**
289 Check if the memory address will be mapped by 4KB-page.
290
291 @param Address The address of Memory.
292 @param Nx The flag indicates if the memory is execute-disable.
293
294 **/
295 BOOLEAN
296 IsAddressValid (
297 IN EFI_PHYSICAL_ADDRESS Address,
298 IN BOOLEAN *Nx
299 )
300 {
301 UINTN Index;
302
303 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
304 //
305 // Check configuration
306 //
307 for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
308 if ((Address >= mProtectionMemRange[Index].Range.Base) && (Address < mProtectionMemRange[Index].Range.Top)) {
309 *Nx = mProtectionMemRange[Index].Nx;
310 return mProtectionMemRange[Index].Present;
311 }
312 }
313 *Nx = TRUE;
314 return FALSE;
315
316 } else {
317 *Nx = TRUE;
318 if (IsInSmmRanges (Address)) {
319 *Nx = FALSE;
320 }
321 return TRUE;
322 }
323 }
324
325 /**
326 Check if the memory address will be mapped by 4KB-page.
327
328 @param Address The address of Memory.
329
330 **/
331 BOOLEAN
332 IsAddressSplit (
333 IN EFI_PHYSICAL_ADDRESS Address
334 )
335 {
336 UINTN Index;
337
338 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
339 //
340 // Check configuration
341 //
342 for (Index = 0; Index < mSplitMemRangeCount; Index++) {
343 if ((Address >= mSplitMemRange[Index].Base) && (Address < mSplitMemRange[Index].Top)) {
344 return TRUE;
345 }
346 }
347 } else {
348 if (Address < mCpuHotPlugData.SmrrBase) {
349 if ((mCpuHotPlugData.SmrrBase - Address) < BASE_2MB) {
350 return TRUE;
351 }
352 } else if (Address > (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize - BASE_2MB)) {
353 if ((Address - (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize - BASE_2MB)) < BASE_2MB) {
354 return TRUE;
355 }
356 }
357 }
358 //
359 // Return default
360 //
361 return FALSE;
362 }
363
/**
  Initialize the protected memory ranges and the 4KB-page mapped memory ranges.

  Builds mProtectionMemRange from the template, the platform SMRAM ranges
  and the GCD MMIO ranges, then derives mSplitMemRange: the set of ranges
  whose boundaries are not 2MB-aligned and therefore must be mapped with
  4KB pages.

**/
VOID
InitProtectedMemRange (
  VOID
  )
{
  UINTN                            Index;
  UINTN                            NumberOfDescriptors;
  UINTN                            NumberOfAddedDescriptors;
  UINTN                            NumberOfProtectRange;
  UINTN                            NumberOfSpliteRange;
  EFI_GCD_MEMORY_SPACE_DESCRIPTOR  *MemorySpaceMap;
  UINTN                            TotalSize;
  EFI_PHYSICAL_ADDRESS             ProtectBaseAddress;
  EFI_PHYSICAL_ADDRESS             ProtectEndAddress;
  EFI_PHYSICAL_ADDRESS             Top2MBAlignedAddress;
  EFI_PHYSICAL_ADDRESS             Base2MBAlignedAddress;
  UINT64                           High4KBPageSize;
  UINT64                           Low4KBPageSize;

  NumberOfDescriptors      = 0;
  NumberOfAddedDescriptors = mSmmCpuSmramRangeCount;
  NumberOfSpliteRange      = 0;
  MemorySpaceMap           = NULL;

  //
  // Get MMIO ranges from GCD and add them into protected memory ranges.
  // NOTE(review): the return status of GetMemorySpaceMap() is ignored; on
  // failure NumberOfDescriptors stays 0 and no MMIO ranges are added —
  // confirm this best-effort behavior is intended.
  //
  gDS->GetMemorySpaceMap (
       &NumberOfDescriptors,
       &MemorySpaceMap
       );
  for (Index = 0; Index < NumberOfDescriptors; Index++) {
    if (MemorySpaceMap[Index].GcdMemoryType == EfiGcdMemoryTypeMemoryMappedIo) {
      NumberOfAddedDescriptors++;
    }
  }

  if (NumberOfAddedDescriptors != 0) {
    //
    // Allocate space for the template entries plus every SMRAM and MMIO range.
    //
    TotalSize = NumberOfAddedDescriptors * sizeof (MEMORY_PROTECTION_RANGE) + sizeof (mProtectionMemRangeTemplate);
    mProtectionMemRange = (MEMORY_PROTECTION_RANGE *) AllocateZeroPool (TotalSize);
    ASSERT (mProtectionMemRange != NULL);
    mProtectionMemRangeCount = TotalSize / sizeof (MEMORY_PROTECTION_RANGE);

    //
    // Copy existing ranges.
    //
    CopyMem (mProtectionMemRange, mProtectionMemRangeTemplate, sizeof (mProtectionMemRangeTemplate));

    //
    // Create split ranges which come from protected ranges.
    // (Worst case: one split range per protection range.)
    //
    TotalSize = (TotalSize / sizeof (MEMORY_PROTECTION_RANGE)) * sizeof (MEMORY_RANGE);
    mSplitMemRange = (MEMORY_RANGE *) AllocateZeroPool (TotalSize);
    ASSERT (mSplitMemRange != NULL);

    //
    // Create SMM ranges which are set to present and execution-enable.
    //
    NumberOfProtectRange = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);
    for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
      if (mSmmCpuSmramRanges[Index].CpuStart >= mProtectionMemRange[0].Range.Base &&
          mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize < mProtectionMemRange[0].Range.Top) {
        //
        // If the address have been already covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSiz
        //
        break;
      }
      mProtectionMemRange[NumberOfProtectRange].Range.Base = mSmmCpuSmramRanges[Index].CpuStart;
      mProtectionMemRange[NumberOfProtectRange].Range.Top  = mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize;
      mProtectionMemRange[NumberOfProtectRange].Present    = TRUE;
      mProtectionMemRange[NumberOfProtectRange].Nx         = FALSE;
      NumberOfProtectRange++;
    }

    //
    // Create MMIO ranges which are set to present and execution-disable.
    //
    for (Index = 0; Index < NumberOfDescriptors; Index++) {
      if (MemorySpaceMap[Index].GcdMemoryType != EfiGcdMemoryTypeMemoryMappedIo) {
        continue;
      }
      mProtectionMemRange[NumberOfProtectRange].Range.Base = MemorySpaceMap[Index].BaseAddress;
      mProtectionMemRange[NumberOfProtectRange].Range.Top  = MemorySpaceMap[Index].BaseAddress + MemorySpaceMap[Index].Length;
      mProtectionMemRange[NumberOfProtectRange].Present    = TRUE;
      mProtectionMemRange[NumberOfProtectRange].Nx         = TRUE;
      NumberOfProtectRange++;
    }

    //
    // Check and updated actual protected memory ranges count
    //
    ASSERT (NumberOfProtectRange <= mProtectionMemRangeCount);
    mProtectionMemRangeCount = NumberOfProtectRange;
  }

  //
  // According to protected ranges, create the ranges which will be mapped by 4KB page.
  //
  NumberOfSpliteRange  = 0;
  NumberOfProtectRange = mProtectionMemRangeCount;
  for (Index = 0; Index < NumberOfProtectRange; Index++) {
    //
    // If MMIO base address is not 2MB alignment, make 2MB alignment for create 4KB page in page table.
    //
    ProtectBaseAddress = mProtectionMemRange[Index].Range.Base;
    ProtectEndAddress  = mProtectionMemRange[Index].Range.Top;
    if (((ProtectBaseAddress & (SIZE_2MB - 1)) != 0) || ((ProtectEndAddress & (SIZE_2MB - 1)) != 0)) {
      //
      // Check if it is possible to create 4KB-page for not 2MB-aligned range and to create 2MB-page for 2MB-aligned range.
      // A mix of 4KB and 2MB page could save SMRAM space.
      //
      Top2MBAlignedAddress  = ProtectEndAddress & ~(SIZE_2MB - 1);
      Base2MBAlignedAddress = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
      if ((Top2MBAlignedAddress > Base2MBAlignedAddress) &&
          ((Top2MBAlignedAddress - Base2MBAlignedAddress) >= SIZE_2MB)) {
        //
        // There is an range which could be mapped by 2MB-page.
        // Only the unaligned head and tail need 4KB pages.
        //
        High4KBPageSize = ((ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectEndAddress & ~(SIZE_2MB - 1));
        Low4KBPageSize  = ((ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectBaseAddress & ~(SIZE_2MB - 1));
        if (High4KBPageSize != 0) {
          //
          // Add not 2MB-aligned range to be mapped by 4KB-page.
          //
          mSplitMemRange[NumberOfSpliteRange].Base = ProtectEndAddress & ~(SIZE_2MB - 1);
          mSplitMemRange[NumberOfSpliteRange].Top  = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
          NumberOfSpliteRange++;
        }
        if (Low4KBPageSize != 0) {
          //
          // Add not 2MB-aligned range to be mapped by 4KB-page.
          //
          mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1);
          mSplitMemRange[NumberOfSpliteRange].Top  = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
          NumberOfSpliteRange++;
        }
      } else {
        //
        // The range could only be mapped by 4KB-page.
        //
        mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1);
        mSplitMemRange[NumberOfSpliteRange].Top  = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
        NumberOfSpliteRange++;
      }
    }
  }

  mSplitMemRangeCount = NumberOfSpliteRange;

  DEBUG ((EFI_D_INFO, "SMM Profile Memory Ranges:\n"));
  for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
    DEBUG ((EFI_D_INFO, "mProtectionMemRange[%d].Base = %lx\n", Index, mProtectionMemRange[Index].Range.Base));
    DEBUG ((EFI_D_INFO, "mProtectionMemRange[%d].Top  = %lx\n", Index, mProtectionMemRange[Index].Range.Top));
  }
  for (Index = 0; Index < mSplitMemRangeCount; Index++) {
    DEBUG ((EFI_D_INFO, "mSplitMemRange[%d].Base = %lx\n", Index, mSplitMemRange[Index].Base));
    DEBUG ((EFI_D_INFO, "mSplitMemRange[%d].Top  = %lx\n", Index, mSplitMemRange[Index].Top));
  }
}
527
/**
  Update page table according to protected memory ranges and the 4KB-page mapped memory ranges.

  Two passes over the SMM page table rooted at mSmmProfileCr3:
  1. Split every 2MB page that overlaps a split range (IsAddressSplit())
     into a freshly allocated 4KB page table.
  2. Mark pages outside valid ranges (IsAddressValid()) absent/read-only,
     and set the NX bit where execution must be disallowed.
  Finally flushes the TLB and records that XD is in effect.

**/
VOID
InitPaging (
  VOID
  )
{
  UINT64   Pml5Entry;
  UINT64   Pml4Entry;
  UINT64   *Pml5;
  UINT64   *Pml4;
  UINT64   *Pdpt;
  UINT64   *Pd;
  UINT64   *Pt;
  UINTN    Address;
  UINTN    Pml5Index;
  UINTN    Pml4Index;
  UINTN    PdptIndex;
  UINTN    PdIndex;
  UINTN    PtIndex;
  UINTN    NumberOfPdptEntries;
  UINTN    NumberOfPml4Entries;
  UINTN    NumberOfPml5Entries;
  UINTN    SizeOfMemorySpace;
  BOOLEAN  Nx;
  IA32_CR4 Cr4;
  BOOLEAN  Enable5LevelPaging;

  Cr4.UintN = AsmReadCr4 ();
  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);

  if (sizeof (UINTN) == sizeof (UINT64)) {
    //
    // X64: with 4-level paging, fabricate a single-entry "PML5" on the stack
    // pointing at the real PML4 so both cases share one walking loop.
    //
    if (!Enable5LevelPaging) {
      Pml5Entry = (UINTN) mSmmProfileCr3 | IA32_PG_P;
      Pml5 = &Pml5Entry;
    } else {
      Pml5 = (UINT64*) (UINTN) mSmmProfileCr3;
    }
    SizeOfMemorySpace = HighBitSet64 (gPhyMask) + 1;
    //
    // Calculate the table entries of PML4E and PDPTE.
    //
    NumberOfPml5Entries = 1;
    if (SizeOfMemorySpace > 48) {
      NumberOfPml5Entries = (UINTN) LShiftU64 (1, SizeOfMemorySpace - 48);
      SizeOfMemorySpace = 48;
    }

    NumberOfPml4Entries = 1;
    if (SizeOfMemorySpace > 39) {
      NumberOfPml4Entries = (UINTN) LShiftU64 (1, SizeOfMemorySpace - 39);
      SizeOfMemorySpace = 39;
    }

    NumberOfPdptEntries = 1;
    ASSERT (SizeOfMemorySpace > 30);
    NumberOfPdptEntries = (UINTN) LShiftU64 (1, SizeOfMemorySpace - 30);
  } else {
    //
    // IA32: CR3 points at a 4-entry PAE PDPT; fabricate stack-resident
    // PML4/PML5 levels so the same loops work.
    //
    Pml4Entry = (UINTN) mSmmProfileCr3 | IA32_PG_P;
    Pml4 = &Pml4Entry;
    Pml5Entry = (UINTN) Pml4 | IA32_PG_P;
    Pml5 = &Pml5Entry;
    NumberOfPml5Entries = 1;
    NumberOfPml4Entries = 1;
    NumberOfPdptEntries = 4;
  }

  //
  // Go through page table and change 2MB-page into 4KB-page.
  //
  for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries; Pml5Index++) {
    if ((Pml5[Pml5Index] & IA32_PG_P) == 0) {
      //
      // If PML5 entry does not exist, skip it
      //
      continue;
    }
    Pml4 = (UINT64 *) (UINTN) (Pml5[Pml5Index] & PHYSICAL_ADDRESS_MASK);
    for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) {
      if ((Pml4[Pml4Index] & IA32_PG_P) == 0) {
        //
        // If PML4 entry does not exist, skip it
        //
        continue;
      }
      Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++, Pdpt++) {
        if ((*Pdpt & IA32_PG_P) == 0) {
          //
          // If PDPT entry does not exist, skip it
          //
          continue;
        }
        if ((*Pdpt & IA32_PG_PS) != 0) {
          //
          // This is 1G entry, skip it
          //
          continue;
        }
        Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
        if (Pd == 0) {
          continue;
        }
        for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) {
          if ((*Pd & IA32_PG_P) == 0) {
            //
            // If PD entry does not exist, skip it
            //
            continue;
          }
          //
          // Reconstruct the 2MB-aligned linear address covered by this PD entry
          // from the four walk indices.
          //
          Address = (UINTN) LShiftU64 (
                              LShiftU64 (
                                LShiftU64 ((Pml5Index << 9) + Pml4Index, 9) + PdptIndex,
                                9
                              ) + PdIndex,
                              21
                            );

          //
          // If it is 2M page, check IsAddressSplit()
          //
          if (((*Pd & IA32_PG_PS) != 0) && IsAddressSplit (Address)) {
            //
            // Based on current page table, create 4KB page table for split area.
            //
            ASSERT (Address == (*Pd & PHYSICAL_ADDRESS_MASK));

            Pt = AllocatePageTableMemory (1);
            ASSERT (Pt != NULL);

            // Split it
            for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof(*Pt); PtIndex++) {
              Pt[PtIndex] = Address + ((PtIndex << 12) | mAddressEncMask | PAGE_ATTRIBUTE_BITS);
            } // end for PT
            *Pd = (UINT64)(UINTN)Pt | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
          } // end if IsAddressSplit
        } // end for PD
      } // end for PDPT
    } // end for PML4
  } // end for PML5

  //
  // Go through page table and set several page table entries to absent or execute-disable.
  //
  DEBUG ((EFI_D_INFO, "Patch page table start ...\n"));
  for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries; Pml5Index++) {
    if ((Pml5[Pml5Index] & IA32_PG_P) == 0) {
      //
      // If PML5 entry does not exist, skip it
      //
      continue;
    }
    Pml4 = (UINT64 *) (UINTN) (Pml5[Pml5Index] & PHYSICAL_ADDRESS_MASK);
    for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) {
      if ((Pml4[Pml4Index] & IA32_PG_P) == 0) {
        //
        // If PML4 entry does not exist, skip it
        //
        continue;
      }
      Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++, Pdpt++) {
        if ((*Pdpt & IA32_PG_P) == 0) {
          //
          // If PDPT entry does not exist, skip it
          //
          continue;
        }
        if ((*Pdpt & IA32_PG_PS) != 0) {
          //
          // This is 1G entry, set NX bit and skip it
          //
          if (mXdSupported) {
            *Pdpt = *Pdpt | IA32_PG_NX;
          }
          continue;
        }
        Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
        if (Pd == 0) {
          continue;
        }
        for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) {
          if ((*Pd & IA32_PG_P) == 0) {
            //
            // If PD entry does not exist, skip it
            //
            continue;
          }
          Address = (UINTN) LShiftU64 (
                              LShiftU64 (
                                LShiftU64 ((Pml5Index << 9) + Pml4Index, 9) + PdptIndex,
                                9
                              ) + PdIndex,
                              21
                            );

          if ((*Pd & IA32_PG_PS) != 0) {
            // 2MB page

            if (!IsAddressValid (Address, &Nx)) {
              //
              // Patch to remove Present flag and RW flag
              //
              *Pd = *Pd & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
            }
            if (Nx && mXdSupported) {
              *Pd = *Pd | IA32_PG_NX;
            }
          } else {
            // 4KB page
            Pt = (UINT64 *)(UINTN)(*Pd & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
            if (Pt == 0) {
              continue;
            }
            for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof(*Pt); PtIndex++, Pt++) {
              if (!IsAddressValid (Address, &Nx)) {
                *Pt = *Pt & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
              }
              if (Nx && mXdSupported) {
                *Pt = *Pt | IA32_PG_NX;
              }
              Address += SIZE_4KB;
            } // end for PT
          } // end if PS
        } // end for PD
      } // end for PDPT
    } // end for PML4
  } // end for PML5

  //
  // Flush TLB
  //
  CpuFlushTlb ();
  DEBUG ((EFI_D_INFO, "Patch page table done!\n"));
  //
  // Set execute-disable flag
  //
  mXdEnabled = TRUE;

  return ;
}
771
772 /**
773 To get system port address of the SMI Command Port in FADT table.
774
775 **/
776 VOID
777 GetSmiCommandPort (
778 VOID
779 )
780 {
781 EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *Fadt;
782
783 Fadt = (EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *) EfiLocateFirstAcpiTable (
784 EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE
785 );
786 ASSERT (Fadt != NULL);
787
788 mSmiCommandPort = Fadt->SmiCmd;
789 DEBUG ((EFI_D_INFO, "mSmiCommandPort = %x\n", mSmiCommandPort));
790 }
791
792 /**
793 Updates page table to make some memory ranges (like system memory) absent
794 and make some memory ranges (like MMIO) present and execute disable. It also
795 update 2MB-page to 4KB-page for some memory ranges.
796
797 **/
798 VOID
799 SmmProfileStart (
800 VOID
801 )
802 {
803 //
804 // The flag indicates SMM profile starts to work.
805 //
806 mSmmProfileStart = TRUE;
807 }
808
809 /**
810 Initialize SMM profile in SmmReadyToLock protocol callback function.
811
812 @param Protocol Points to the protocol's unique identifier.
813 @param Interface Points to the interface instance.
814 @param Handle The handle on which the interface was installed.
815
816 @retval EFI_SUCCESS SmmReadyToLock protocol callback runs successfully.
817 **/
818 EFI_STATUS
819 EFIAPI
820 InitSmmProfileCallBack (
821 IN CONST EFI_GUID *Protocol,
822 IN VOID *Interface,
823 IN EFI_HANDLE Handle
824 )
825 {
826 //
827 // Save to variable so that SMM profile data can be found.
828 //
829 gRT->SetVariable (
830 SMM_PROFILE_NAME,
831 &gEfiCallerIdGuid,
832 EFI_VARIABLE_BOOTSERVICE_ACCESS | EFI_VARIABLE_RUNTIME_ACCESS,
833 sizeof(mSmmProfileBase),
834 &mSmmProfileBase
835 );
836
837 //
838 // Get Software SMI from FADT
839 //
840 GetSmiCommandPort ();
841
842 //
843 // Initialize protected memory range for patching page table later.
844 //
845 InitProtectedMemRange ();
846
847 return EFI_SUCCESS;
848 }
849
/**
  Initialize SMM profile data structures.

  Allocates the per-CPU #PF bookkeeping arrays, the profile data buffer
  (plus the BTS Debug Store area when supported) below 4GB, initializes the
  profile header, fixes up the first two protection-range template entries,
  and registers the SmmReadyToLock callback that completes initialization.

**/
VOID
InitSmmProfileInternal (
  VOID
  )
{
  EFI_STATUS                 Status;
  EFI_PHYSICAL_ADDRESS       Base;
  VOID                       *Registration;
  UINTN                      Index;
  UINTN                      MsrDsAreaSizePerCpu;
  UINTN                      TotalSize;

  mPFEntryCount = (UINTN *)AllocateZeroPool (sizeof (UINTN) * mMaxNumberOfCpus);
  ASSERT (mPFEntryCount != NULL);
  mLastPFEntryValue = (UINT64  (*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (
                                                         sizeof (mLastPFEntryValue[0]) * mMaxNumberOfCpus);
  ASSERT (mLastPFEntryValue != NULL);
  mLastPFEntryPointer = (UINT64 *(*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (
                                                           sizeof (mLastPFEntryPointer[0]) * mMaxNumberOfCpus);
  ASSERT (mLastPFEntryPointer != NULL);

  //
  // Allocate memory for SmmProfile below 4GB.
  // The base address is published in a 32-bit-friendly UEFI variable later,
  // hence the AllocateMaxAddress cap at 0xFFFFFFFF.
  //
  mSmmProfileSize = PcdGet32 (PcdCpuSmmProfileSize);
  ASSERT ((mSmmProfileSize & 0xFFF) == 0);

  if (mBtsSupported) {
    TotalSize = mSmmProfileSize + mMsrDsAreaSize;
  } else {
    TotalSize = mSmmProfileSize;
  }

  Base = 0xFFFFFFFF;
  Status = gBS->AllocatePages (
                  AllocateMaxAddress,
                  EfiReservedMemoryType,
                  EFI_SIZE_TO_PAGES (TotalSize),
                  &Base
                  );
  ASSERT_EFI_ERROR (Status);
  ZeroMem ((VOID *)(UINTN)Base, TotalSize);
  mSmmProfileBase = (SMM_PROFILE_HEADER *)(UINTN)Base;

  //
  // Initialize SMM profile data header.
  //
  mSmmProfileBase->HeaderSize     = sizeof (SMM_PROFILE_HEADER);
  mSmmProfileBase->MaxDataEntries = (UINT64)((mSmmProfileSize - sizeof(SMM_PROFILE_HEADER)) / sizeof (SMM_PROFILE_ENTRY));
  mSmmProfileBase->MaxDataSize    = MultU64x64 (mSmmProfileBase->MaxDataEntries, sizeof(SMM_PROFILE_ENTRY));
  mSmmProfileBase->CurDataEntries = 0;
  mSmmProfileBase->CurDataSize    = 0;
  mSmmProfileBase->TsegStart      = mCpuHotPlugData.SmrrBase;
  mSmmProfileBase->TsegSize       = mCpuHotPlugData.SmrrSize;
  mSmmProfileBase->NumSmis        = 0;
  mSmmProfileBase->NumCpus        = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;

  if (mBtsSupported) {
    //
    // Carve the DS area (which follows the profile buffer) into equal
    // per-CPU slices; each slice holds the DS management struct, the BTS
    // record ring buffer, and PEBS_RECORD_NUMBER PEBS records at the end.
    //
    mMsrDsArea = (MSR_DS_AREA_STRUCT **)AllocateZeroPool (sizeof (MSR_DS_AREA_STRUCT *) * mMaxNumberOfCpus);
    ASSERT (mMsrDsArea != NULL);
    mMsrBTSRecord = (BRANCH_TRACE_RECORD **)AllocateZeroPool (sizeof (BRANCH_TRACE_RECORD *) * mMaxNumberOfCpus);
    ASSERT (mMsrBTSRecord != NULL);
    mMsrPEBSRecord = (PEBS_RECORD **)AllocateZeroPool (sizeof (PEBS_RECORD *) * mMaxNumberOfCpus);
    ASSERT (mMsrPEBSRecord != NULL);

    mMsrDsAreaBase  = (MSR_DS_AREA_STRUCT *)((UINTN)Base + mSmmProfileSize);
    MsrDsAreaSizePerCpu = mMsrDsAreaSize / mMaxNumberOfCpus;
    mBTSRecordNumber    = (MsrDsAreaSizePerCpu - sizeof(PEBS_RECORD) * PEBS_RECORD_NUMBER - sizeof(MSR_DS_AREA_STRUCT)) / sizeof(BRANCH_TRACE_RECORD);
    for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
      mMsrDsArea[Index]     = (MSR_DS_AREA_STRUCT *)((UINTN)mMsrDsAreaBase + MsrDsAreaSizePerCpu * Index);
      mMsrBTSRecord[Index]  = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[Index] + sizeof(MSR_DS_AREA_STRUCT));
      mMsrPEBSRecord[Index] = (PEBS_RECORD *)((UINTN)mMsrDsArea[Index] + MsrDsAreaSizePerCpu - sizeof(PEBS_RECORD) * PEBS_RECORD_NUMBER);

      //
      // Threshold is set past the absolute maximum so BTS wraps circularly
      // instead of raising an interrupt when the buffer fills.
      //
      mMsrDsArea[Index]->BTSBufferBase          = (UINTN)mMsrBTSRecord[Index];
      mMsrDsArea[Index]->BTSIndex               = mMsrDsArea[Index]->BTSBufferBase;
      mMsrDsArea[Index]->BTSAbsoluteMaximum     = mMsrDsArea[Index]->BTSBufferBase + mBTSRecordNumber * sizeof(BRANCH_TRACE_RECORD) + 1;
      mMsrDsArea[Index]->BTSInterruptThreshold  = mMsrDsArea[Index]->BTSAbsoluteMaximum + 1;

      mMsrDsArea[Index]->PEBSBufferBase         = (UINTN)mMsrPEBSRecord[Index];
      mMsrDsArea[Index]->PEBSIndex              = mMsrDsArea[Index]->PEBSBufferBase;
      mMsrDsArea[Index]->PEBSAbsoluteMaximum    = mMsrDsArea[Index]->PEBSBufferBase + PEBS_RECORD_NUMBER * sizeof(PEBS_RECORD) + 1;
      mMsrDsArea[Index]->PEBSInterruptThreshold = mMsrDsArea[Index]->PEBSAbsoluteMaximum + 1;
    }
  }

  mProtectionMemRange      = mProtectionMemRangeTemplate;
  mProtectionMemRangeCount = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);

  //
  // Update TSeg entry.
  //
  mProtectionMemRange[0].Range.Base = mCpuHotPlugData.SmrrBase;
  mProtectionMemRange[0].Range.Top  = mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize;

  //
  // Update SMM profile entry.
  //
  mProtectionMemRange[1].Range.Base = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase;
  mProtectionMemRange[1].Range.Top  = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase + TotalSize;

  //
  // Allocate memory reserved for creating 4KB pages.
  //
  InitPagesForPFHandler ();

  //
  // Start SMM profile when SmmReadyToLock protocol is installed.
  //
  Status = gSmst->SmmRegisterProtocolNotify (
                    &gEfiSmmReadyToLockProtocolGuid,
                    InitSmmProfileCallBack,
                    &Registration
                    );
  ASSERT_EFI_ERROR (Status);

  return ;
}
972
/**
  Check if feature is supported by a processor.

  Probes CPUID/MSRs and clears the corresponding capability flag
  (mCetSupported, mXdSupported, mBtsSupported) when a feature assumed
  available at build time turns out to be missing; also patches the
  assembly stubs that branch on CET/XD support.

**/
VOID
CheckFeatureSupported (
  VOID
  )
{
  UINT32                         RegEax;
  UINT32                         RegEcx;
  UINT32                         RegEdx;
  MSR_IA32_MISC_ENABLE_REGISTER  MiscEnableMsr;

  if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
    //
    // CET shadow stack requires both the extended CPUID range and the
    // CET_SS feature bit; clear the flag (and patch the asm stub) if either
    // check fails.
    //
    AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
    if (RegEax <= CPUID_EXTENDED_FUNCTION) {
      mCetSupported = FALSE;
      PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
    }
    AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, NULL);
    if ((RegEcx & CPUID_CET_SS) == 0) {
      mCetSupported = FALSE;
      PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
    }
  }

  if (mXdSupported) {
    AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
    if (RegEax <= CPUID_EXTENDED_FUNCTION) {
      //
      // Extended CPUID functions are not supported on this processor.
      //
      mXdSupported = FALSE;
      PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
    }

    AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & CPUID1_EDX_XD_SUPPORT) == 0) {
      //
      // Execute Disable Bit feature is not supported on this processor.
      //
      mXdSupported = FALSE;
      PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
    }
  }

  if (mBtsSupported) {
    AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx);
    if ((RegEdx & CPUID1_EDX_BTS_AVAILABLE) != 0) {
      //
      // Per IA32 manuals:
      // When CPUID.1:EDX[21] is set, the following BTS facilities are available:
      // 1. The BTS_UNAVAILABLE flag in the IA32_MISC_ENABLE MSR indicates the
      //    availability of the BTS facilities, including the ability to set the BTS and
      //    BTINT bits in the MSR_DEBUGCTLA MSR.
      // 2. The IA32_DS_AREA MSR can be programmed to point to the DS save area.
      //
      MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);
      if (MiscEnableMsr.Bits.BTS == 1) {
        //
        // BTS facilities is not supported if MSR_IA32_MISC_ENABLE.BTS bit is set.
        //
        mBtsSupported = FALSE;
      }
    }
  }
}
1041
1042 /**
1043 Enable single step.
1044
1045 **/
1046 VOID
1047 ActivateSingleStepDB (
1048 VOID
1049 )
1050 {
1051 UINTN Dr6;
1052
1053 Dr6 = AsmReadDr6 ();
1054 if ((Dr6 & DR6_SINGLE_STEP) != 0) {
1055 return;
1056 }
1057 Dr6 |= DR6_SINGLE_STEP;
1058 AsmWriteDr6 (Dr6);
1059 }
1060
1061 /**
1062 Enable last branch.
1063
1064 **/
1065 VOID
1066 ActivateLBR (
1067 VOID
1068 )
1069 {
1070 UINT64 DebugCtl;
1071
1072 DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);
1073 if ((DebugCtl & MSR_DEBUG_CTL_LBR) != 0) {
1074 return ;
1075 }
1076 DebugCtl |= MSR_DEBUG_CTL_LBR;
1077 AsmWriteMsr64 (MSR_DEBUG_CTL, DebugCtl);
1078 }
1079
1080 /**
1081 Enable branch trace store.
1082
1083 @param CpuIndex The index of the processor.
1084
1085 **/
1086 VOID
1087 ActivateBTS (
1088 IN UINTN CpuIndex
1089 )
1090 {
1091 UINT64 DebugCtl;
1092
1093 DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);
1094 if ((DebugCtl & MSR_DEBUG_CTL_BTS) != 0) {
1095 return ;
1096 }
1097
1098 AsmWriteMsr64 (MSR_DS_AREA, (UINT64)(UINTN)mMsrDsArea[CpuIndex]);
1099 DebugCtl |= (UINT64)(MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR);
1100 DebugCtl &= ~((UINT64)MSR_DEBUG_CTL_BTINT);
1101 AsmWriteMsr64 (MSR_DEBUG_CTL, DebugCtl);
1102 }
1103
1104 /**
1105 Increase SMI number in each SMI entry.
1106
1107 **/
1108 VOID
1109 SmmProfileRecordSmiNum (
1110 VOID
1111 )
1112 {
1113 if (mSmmProfileStart) {
1114 mSmmProfileBase->NumSmis++;
1115 }
1116 }
1117
1118 /**
1119 Initialize processor environment for SMM profile.
1120
1121 @param CpuIndex The index of the processor.
1122
1123 **/
1124 VOID
1125 ActivateSmmProfile (
1126 IN UINTN CpuIndex
1127 )
1128 {
1129 //
1130 // Enable Single Step DB#
1131 //
1132 ActivateSingleStepDB ();
1133
1134 if (mBtsSupported) {
1135 //
1136 // We can not get useful information from LER, so we have to use BTS.
1137 //
1138 ActivateLBR ();
1139
1140 //
1141 // Enable BTS
1142 //
1143 ActivateBTS (CpuIndex);
1144 }
1145 }
1146
1147 /**
1148 Initialize SMM profile in SMM CPU entry point.
1149
1150 @param[in] Cr3 The base address of the page tables to use in SMM.
1151
1152 **/
1153 VOID
1154 InitSmmProfile (
1155 UINT32 Cr3
1156 )
1157 {
1158 //
1159 // Save Cr3
1160 //
1161 mSmmProfileCr3 = Cr3;
1162
1163 //
1164 // Skip SMM profile initialization if feature is disabled
1165 //
1166 if (!FeaturePcdGet (PcdCpuSmmProfileEnable) &&
1167 !HEAP_GUARD_NONSTOP_MODE &&
1168 !NULL_DETECTION_NONSTOP_MODE) {
1169 return;
1170 }
1171
1172 //
1173 // Initialize SmmProfile here
1174 //
1175 InitSmmProfileInternal ();
1176
1177 //
1178 // Initialize profile IDT.
1179 //
1180 InitIdtr ();
1181
1182 //
1183 // Tell #PF handler to prepare a #DB subsequently.
1184 //
1185 mSetupDebugTrap = TRUE;
1186 }
1187
1188 /**
1189 Update page table to map the memory correctly in order to make the instruction
1190 which caused page fault execute successfully. And it also save the original page
1191 table to be restored in single-step exception.
1192
1193 @param PageTable PageTable Address.
1194 @param PFAddress The memory address which caused page fault exception.
1195 @param CpuIndex The index of the processor.
1196 @param ErrorCode The Error code of exception.
1197
1198 **/
1199 VOID
1200 RestorePageTableBelow4G (
1201 UINT64 *PageTable,
1202 UINT64 PFAddress,
1203 UINTN CpuIndex,
1204 UINTN ErrorCode
1205 )
1206 {
1207 UINTN PTIndex;
1208 UINTN PFIndex;
1209 IA32_CR4 Cr4;
1210 BOOLEAN Enable5LevelPaging;
1211
1212 Cr4.UintN = AsmReadCr4 ();
1213 Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
1214
1215 //
1216 // PML5
1217 //
1218 if (Enable5LevelPaging) {
1219 PTIndex = (UINTN)BitFieldRead64 (PFAddress, 48, 56);
1220 ASSERT (PageTable[PTIndex] != 0);
1221 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
1222 }
1223
1224 //
1225 // PML4
1226 //
1227 if (sizeof(UINT64) == sizeof(UINTN)) {
1228 PTIndex = (UINTN)BitFieldRead64 (PFAddress, 39, 47);
1229 ASSERT (PageTable[PTIndex] != 0);
1230 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
1231 }
1232
1233 //
1234 // PDPTE
1235 //
1236 PTIndex = (UINTN)BitFieldRead64 (PFAddress, 30, 38);
1237 ASSERT (PageTable[PTIndex] != 0);
1238 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
1239
1240 //
1241 // PD
1242 //
1243 PTIndex = (UINTN)BitFieldRead64 (PFAddress, 21, 29);
1244 if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
1245 //
1246 // Large page
1247 //
1248
1249 //
1250 // Record old entries with non-present status
1251 // Old entries include the memory which instruction is at and the memory which instruction access.
1252 //
1253 //
1254 ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
1255 if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
1256 PFIndex = mPFEntryCount[CpuIndex];
1257 mLastPFEntryValue[CpuIndex][PFIndex] = PageTable[PTIndex];
1258 mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
1259 mPFEntryCount[CpuIndex]++;
1260 }
1261
1262 //
1263 // Set new entry
1264 //
1265 PageTable[PTIndex] = (PFAddress & ~((1ull << 21) - 1));
1266 PageTable[PTIndex] |= (UINT64)IA32_PG_PS;
1267 PageTable[PTIndex] |= (UINT64)PAGE_ATTRIBUTE_BITS;
1268 if ((ErrorCode & IA32_PF_EC_ID) != 0) {
1269 PageTable[PTIndex] &= ~IA32_PG_NX;
1270 }
1271 } else {
1272 //
1273 // Small page
1274 //
1275 ASSERT (PageTable[PTIndex] != 0);
1276 PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
1277
1278 //
1279 // 4K PTE
1280 //
1281 PTIndex = (UINTN)BitFieldRead64 (PFAddress, 12, 20);
1282
1283 //
1284 // Record old entries with non-present status
1285 // Old entries include the memory which instruction is at and the memory which instruction access.
1286 //
1287 //
1288 ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
1289 if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
1290 PFIndex = mPFEntryCount[CpuIndex];
1291 mLastPFEntryValue[CpuIndex][PFIndex] = PageTable[PTIndex];
1292 mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
1293 mPFEntryCount[CpuIndex]++;
1294 }
1295
1296 //
1297 // Set new entry
1298 //
1299 PageTable[PTIndex] = (PFAddress & ~((1ull << 12) - 1));
1300 PageTable[PTIndex] |= (UINT64)PAGE_ATTRIBUTE_BITS;
1301 if ((ErrorCode & IA32_PF_EC_ID) != 0) {
1302 PageTable[PTIndex] &= ~IA32_PG_NX;
1303 }
1304 }
1305 }
1306
1307 /**
1308 Handler for Page Fault triggered by Guard page.
1309
1310 @param ErrorCode The Error code of exception.
1311
1312 **/
1313 VOID
1314 GuardPagePFHandler (
1315 UINTN ErrorCode
1316 )
1317 {
1318 UINT64 *PageTable;
1319 UINT64 PFAddress;
1320 UINT64 RestoreAddress;
1321 UINTN RestorePageNumber;
1322 UINTN CpuIndex;
1323
1324 PageTable = (UINT64 *)AsmReadCr3 ();
1325 PFAddress = AsmReadCr2 ();
1326 CpuIndex = GetCpuIndex ();
1327
1328 //
1329 // Memory operation cross pages, like "rep mov" instruction, will cause
1330 // infinite loop between this and Debug Trap handler. We have to make sure
1331 // that current page and the page followed are both in PRESENT state.
1332 //
1333 RestorePageNumber = 2;
1334 RestoreAddress = PFAddress;
1335 while (RestorePageNumber > 0) {
1336 RestorePageTableBelow4G (PageTable, RestoreAddress, CpuIndex, ErrorCode);
1337 RestoreAddress += EFI_PAGE_SIZE;
1338 RestorePageNumber--;
1339 }
1340
1341 //
1342 // Flush TLB
1343 //
1344 CpuFlushTlb ();
1345 }
1346
/**
  The Page fault handler to save SMM profile data.

  Restores the faulting page(s) so the instruction can complete, then logs
  a profile entry (CPU, address, instruction, error code, SMI command) into
  the SMM profile buffer unless an identical record already exists.

  @param Rip       The RIP when exception happens.
  @param ErrorCode The Error code of exception.

**/
VOID
SmmProfilePFHandler (
  UINTN Rip,
  UINTN ErrorCode
  )
{
  UINT64                      *PageTable;
  UINT64                      PFAddress;
  UINT64                      RestoreAddress;
  UINTN                       RestorePageNumber;
  UINTN                       CpuIndex;
  UINTN                       Index;
  UINT64                      InstructionAddress;
  UINTN                       MaxEntryNumber;
  UINTN                       CurrentEntryNumber;
  BOOLEAN                     IsValidPFAddress;
  SMM_PROFILE_ENTRY           *SmmProfileEntry;
  UINT64                      SmiCommand;
  EFI_STATUS                  Status;
  UINT8                       SoftSmiValue;
  EFI_SMM_SAVE_STATE_IO_INFO  IoInfo;

  if (!mSmmProfileStart) {
    //
    // If SMM profile does not start, call original page fault handler.
    //
    SmiDefaultPFHandler ();
    return;
  }

  if (mBtsSupported) {
    //
    // Pause branch tracing while this handler runs; it is re-enabled
    // just before returning.
    //
    DisableBTS ();
  }

  IsValidPFAddress = FALSE;
  PageTable = (UINT64 *)AsmReadCr3 ();
  PFAddress = AsmReadCr2 ();
  CpuIndex = GetCpuIndex ();

  //
  // Memory operation cross pages, like "rep mov" instruction, will cause
  // infinite loop between this and Debug Trap handler. We have to make sure
  // that current page and the page followed are both in PRESENT state.
  //
  RestorePageNumber = 2;
  RestoreAddress = PFAddress;
  while (RestorePageNumber > 0) {
    if (RestoreAddress <= 0xFFFFFFFF) {
      RestorePageTableBelow4G (PageTable, RestoreAddress, CpuIndex, ErrorCode);
    } else {
      RestorePageTableAbove4G (PageTable, RestoreAddress, CpuIndex, ErrorCode, &IsValidPFAddress);
    }
    RestoreAddress += EFI_PAGE_SIZE;
    RestorePageNumber--;
  }

  //
  // Only log the fault when the above-4G path did not classify the
  // address as a valid (expected) access.
  //
  if (!IsValidPFAddress) {
    InstructionAddress = Rip;
    if ((ErrorCode & IA32_PF_EC_ID) != 0 && (mBtsSupported)) {
      //
      // If it is instruction fetch failure, get the correct IP from BTS.
      //
      InstructionAddress = GetSourceFromDestinationOnBts (CpuIndex, Rip);
      if (InstructionAddress == 0) {
        //
        // It indicates the instruction which caused page fault is not a jump instruction,
        // set instruction address same as the page fault address.
        //
        InstructionAddress = PFAddress;
      }
    }

    //
    // Indicate it is not software SMI
    //
    SmiCommand = 0xFFFFFFFFFFFFFFFFULL;
    //
    // Scan each CPU's save state for an I/O access to the SMI command
    // port; a hit means this SMI was software-triggered.
    //
    for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
      Status = SmmReadSaveState(&mSmmCpu, sizeof(IoInfo), EFI_SMM_SAVE_STATE_REGISTER_IO, Index, &IoInfo);
      if (EFI_ERROR (Status)) {
        continue;
      }
      if (IoInfo.IoPort == mSmiCommandPort) {
        //
        // A software SMI triggered by SMI command port has been found, get SmiCommand from SMI command port.
        //
        SoftSmiValue = IoRead8 (mSmiCommandPort);
        SmiCommand = (UINT64)SoftSmiValue;
        break;
      }
    }

    SmmProfileEntry = (SMM_PROFILE_ENTRY *)(UINTN)(mSmmProfileBase + 1);
    //
    // Check if there is already a same entry in profile data.
    //
    for (Index = 0; Index < (UINTN) mSmmProfileBase->CurDataEntries; Index++) {
      if ((SmmProfileEntry[Index].ErrorCode == (UINT64)ErrorCode) &&
          (SmmProfileEntry[Index].Address == PFAddress) &&
          (SmmProfileEntry[Index].CpuNum == (UINT64)CpuIndex) &&
          (SmmProfileEntry[Index].Instruction == InstructionAddress) &&
          (SmmProfileEntry[Index].SmiCmd == SmiCommand)) {
        //
        // Same record exist, need not save again.
        //
        break;
      }
    }
    if (Index == mSmmProfileBase->CurDataEntries) {
      CurrentEntryNumber = (UINTN) mSmmProfileBase->CurDataEntries;
      MaxEntryNumber = (UINTN) mSmmProfileBase->MaxDataEntries;
      if (FeaturePcdGet (PcdCpuSmmProfileRingBuffer)) {
        //
        // Ring-buffer mode: wrap to the start and overwrite old entries.
        //
        CurrentEntryNumber = CurrentEntryNumber % MaxEntryNumber;
      }
      if (CurrentEntryNumber < MaxEntryNumber) {
        //
        // Log the new entry
        //
        SmmProfileEntry[CurrentEntryNumber].SmiNum = mSmmProfileBase->NumSmis;
        SmmProfileEntry[CurrentEntryNumber].ErrorCode = (UINT64)ErrorCode;
        SmmProfileEntry[CurrentEntryNumber].ApicId = (UINT64)GetApicId ();
        SmmProfileEntry[CurrentEntryNumber].CpuNum = (UINT64)CpuIndex;
        SmmProfileEntry[CurrentEntryNumber].Address = PFAddress;
        SmmProfileEntry[CurrentEntryNumber].Instruction = InstructionAddress;
        SmmProfileEntry[CurrentEntryNumber].SmiCmd = SmiCommand;
        //
        // Update current entry index and data size in the header.
        //
        mSmmProfileBase->CurDataEntries++;
        mSmmProfileBase->CurDataSize = MultU64x64 (mSmmProfileBase->CurDataEntries, sizeof (SMM_PROFILE_ENTRY));
      }
    }
  }
  //
  // Flush TLB
  //
  CpuFlushTlb ();

  if (mBtsSupported) {
    EnableBTS ();
  }
}
1495
1496 /**
1497 Replace INT1 exception handler to restore page table to absent/execute-disable state
1498 in order to trigger page fault again to save SMM profile data..
1499
1500 **/
1501 VOID
1502 InitIdtr (
1503 VOID
1504 )
1505 {
1506 EFI_STATUS Status;
1507
1508 Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_DEBUG, DebugExceptionHandler);
1509 ASSERT_EFI_ERROR (Status);
1510 }