]> git.proxmox.com Git - mirror_edk2.git/blob - UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
UefiCpuPkg PiSmmCpuDxeSmm: Use new EfiLocateFirstAcpiTable()
[mirror_edk2.git] / UefiCpuPkg / PiSmmCpuDxeSmm / SmmProfile.c
1 /** @file
2 Enable SMM profile.
3
4 Copyright (c) 2012 - 2018, Intel Corporation. All rights reserved.<BR>
5 Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
6
7 This program and the accompanying materials
8 are licensed and made available under the terms and conditions of the BSD License
9 which accompanies this distribution. The full text of the license may be found at
10 http://opensource.org/licenses/bsd-license.php
11
12 THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
13 WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
14
15 **/
16
17 #include "PiSmmCpuDxeSmm.h"
18 #include "SmmProfileInternal.h"
19
//
// CR3 value of the page tables used while SMM profile is active
// (captured in InitSmmProfile()).
//
UINT32 mSmmProfileCr3;

//
// Base of the SMM profile data buffer and of the per-CPU Debug Store (DS)
// save area placed immediately after it (both set up in InitSmmProfileInternal()).
//
SMM_PROFILE_HEADER *mSmmProfileBase;
MSR_DS_AREA_STRUCT *mMsrDsAreaBase;
//
// The size of the buffer that stores SMM profile data.
//
UINTN mSmmProfileSize;

//
// The size of the buffer used for the branch trace store (BTS).
//
UINTN mMsrDsAreaSize = SMM_PROFILE_DTS_SIZE;

//
// The flag indicates if execute-disable is supported by processor.
//
BOOLEAN mXdSupported = TRUE;

//
// The flag indicates if execute-disable is enabled on processor.
//
BOOLEAN mXdEnabled = FALSE;

//
// The flag indicates if BTS is supported by processor.
//
BOOLEAN mBtsSupported = TRUE;

//
// The flag indicates if SMM profile has started to record data.
//
BOOLEAN mSmmProfileStart = FALSE;

//
// The flag indicates if a #DB will be set up in the #PF handler.
//
BOOLEAN mSetupDebugTrap = FALSE;

//
// Per-CPU count of page table entries patched by the #PF handler for one
// instruction execution; the #DB handler restores and resets them.
//
UINTN *mPFEntryCount;

//
// Per-CPU saved original values and addresses of the patched page table
// entries (parallel arrays indexed by CPU, then by entry).
//
UINT64 (*mLastPFEntryValue)[MAX_PF_ENTRY_COUNT];
UINT64 *(*mLastPFEntryPointer)[MAX_PF_ENTRY_COUNT];

//
// Per-CPU pointers into the DS save area: the DS header, the BTS record
// ring, and the PEBS record area. mBTSRecordNumber is the BTS ring capacity.
//
MSR_DS_AREA_STRUCT **mMsrDsArea;
BRANCH_TRACE_RECORD **mMsrBTSRecord;
UINTN mBTSRecordNumber;
PEBS_RECORD **mMsrPEBSRecord;

//
// These memory ranges are always present; they do not generate the access type
// of page fault exception, but they may generate the instruction fetch type of
// page fault exception.
//
MEMORY_PROTECTION_RANGE *mProtectionMemRange = NULL;
UINTN mProtectionMemRangeCount = 0;

//
// Some predefined memory ranges.
//
MEMORY_PROTECTION_RANGE mProtectionMemRangeTemplate[] = {
  //
  // SMRAM range (to be fixed in runtime).
  // It is always present and instruction fetches are allowed.
  //
  {{0x00000000, 0x00000000},TRUE,FALSE},

  //
  // SMM profile data range (to be fixed in runtime).
  // It is always present and instruction fetches are not allowed.
  //
  {{0x00000000, 0x00000000},TRUE,TRUE},

  //
  // SMRAM ranges not covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSize
  // (to be fixed in runtime).
  // It is always present and instruction fetches are allowed.
  // {{0x00000000, 0x00000000},TRUE,FALSE},
  //

  //
  // Future extended range could be added here.
  //

  //
  // PCI MMIO ranges (to be added in runtime).
  // They are always present and instruction fetches are not allowed.
  //
};

//
// These memory ranges are mapped by 4KB-page instead of 2MB-page.
//
MEMORY_RANGE *mSplitMemRange = NULL;
UINTN mSplitMemRangeCount = 0;

//
// SMI command port read from the ACPI FADT (see GetSmiCommandPort()).
//
UINT32 mSmiCommandPort;
/**
  Disable branch trace store.

  Clears the BTS and TR bits in MSR_DEBUG_CTL so the processor stops
  capturing branch records into the DS save area.

**/
VOID
DisableBTS (
  VOID
  )
{
  AsmMsrAnd64 (MSR_DEBUG_CTL, ~((UINT64)(MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR)));
}
133
134 /**
135 Enable branch trace store.
136
137 **/
138 VOID
139 EnableBTS (
140 VOID
141 )
142 {
143 AsmMsrOr64 (MSR_DEBUG_CTL, (MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR));
144 }
145
146 /**
147 Get CPU Index from APIC ID.
148
149 **/
150 UINTN
151 GetCpuIndex (
152 VOID
153 )
154 {
155 UINTN Index;
156 UINT32 ApicId;
157
158 ApicId = GetApicId ();
159
160 for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
161 if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == ApicId) {
162 return Index;
163 }
164 }
165 ASSERT (FALSE);
166 return 0;
167 }
168
/**
  Get the source of IP after execute-disable exception is triggered.

  Walks this CPU's BTS record ring backwards starting from the current
  BTSIndex, looking for records whose LastBranchTo equals DestinationIP.
  The most recent match corresponds to the #DB exception delivery itself,
  so the SECOND match found (i.e. the earlier branch) is the real source
  of the execute-disable fault.

  @param CpuIndex        The index of CPU.
  @param DestinationIP   The destination address.

  @return The branch source address, or 0 if no suitable record was found.

**/
UINT64
GetSourceFromDestinationOnBts (
  UINTN  CpuIndex,
  UINT64 DestinationIP
  )
{
  BRANCH_TRACE_RECORD  *CurrentBTSRecord;
  UINTN                Index;
  BOOLEAN              FirstMatch;

  FirstMatch = FALSE;

  //
  // BTSIndex points at the next record the processor would write; walk
  // backwards from there, at most one full ring (mBTSRecordNumber records).
  //
  CurrentBTSRecord = (BRANCH_TRACE_RECORD *)mMsrDsArea[CpuIndex]->BTSIndex;
  for (Index = 0; Index < mBTSRecordNumber; Index++) {
    if ((UINTN)CurrentBTSRecord < (UINTN)mMsrBTSRecord[CpuIndex]) {
      //
      // Underflow: walked off the front of the ring — wrap to the last
      // record. BTSAbsoluteMaximum is one byte past the end of the ring,
      // so (BTSAbsoluteMaximum - 1) is the one-past-end record boundary
      // and the subsequent decrement lands on the final record.
      //
      CurrentBTSRecord = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[CpuIndex]->BTSAbsoluteMaximum - 1);
      CurrentBTSRecord --;
    }
    if (CurrentBTSRecord->LastBranchTo == DestinationIP) {
      //
      // Found a record targeting DestinationIP; need the second such match.
      //
      if (!FirstMatch) {
        //
        // The first (most recent) match is the #DB exception delivery.
        //
        FirstMatch = TRUE;
      } else {
        //
        // The second match is the branch that actually caused the fault.
        //
        return CurrentBTSRecord->LastBranchFrom;
      }
    }
    CurrentBTSRecord--;
  }

  return 0;
}
218
219 /**
220 SMM profile specific INT 1 (single-step) exception handler.
221
222 @param InterruptType Defines the type of interrupt or exception that
223 occurred on the processor.This parameter is processor architecture specific.
224 @param SystemContext A pointer to the processor context when
225 the interrupt occurred on the processor.
226 **/
227 VOID
228 EFIAPI
229 DebugExceptionHandler (
230 IN EFI_EXCEPTION_TYPE InterruptType,
231 IN EFI_SYSTEM_CONTEXT SystemContext
232 )
233 {
234 UINTN CpuIndex;
235 UINTN PFEntry;
236
237 if (!mSmmProfileStart &&
238 !HEAP_GUARD_NONSTOP_MODE &&
239 !NULL_DETECTION_NONSTOP_MODE) {
240 return;
241 }
242 CpuIndex = GetCpuIndex ();
243
244 //
245 // Clear last PF entries
246 //
247 for (PFEntry = 0; PFEntry < mPFEntryCount[CpuIndex]; PFEntry++) {
248 *mLastPFEntryPointer[CpuIndex][PFEntry] = mLastPFEntryValue[CpuIndex][PFEntry];
249 }
250
251 //
252 // Reset page fault exception count for next page fault.
253 //
254 mPFEntryCount[CpuIndex] = 0;
255
256 //
257 // Flush TLB
258 //
259 CpuFlushTlb ();
260
261 //
262 // Clear TF in EFLAGS
263 //
264 ClearTrapFlag (SystemContext);
265 }
266
267 /**
268 Check if the input address is in SMM ranges.
269
270 @param[in] Address The input address.
271
272 @retval TRUE The input address is in SMM.
273 @retval FALSE The input address is not in SMM.
274 **/
275 BOOLEAN
276 IsInSmmRanges (
277 IN EFI_PHYSICAL_ADDRESS Address
278 )
279 {
280 UINTN Index;
281
282 if ((Address >= mCpuHotPlugData.SmrrBase) && (Address < mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
283 return TRUE;
284 }
285 for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
286 if (Address >= mSmmCpuSmramRanges[Index].CpuStart &&
287 Address < mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) {
288 return TRUE;
289 }
290 }
291 return FALSE;
292 }
293
294 /**
295 Check if the memory address will be mapped by 4KB-page.
296
297 @param Address The address of Memory.
298 @param Nx The flag indicates if the memory is execute-disable.
299
300 **/
301 BOOLEAN
302 IsAddressValid (
303 IN EFI_PHYSICAL_ADDRESS Address,
304 IN BOOLEAN *Nx
305 )
306 {
307 UINTN Index;
308
309 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
310 //
311 // Check configuration
312 //
313 for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
314 if ((Address >= mProtectionMemRange[Index].Range.Base) && (Address < mProtectionMemRange[Index].Range.Top)) {
315 *Nx = mProtectionMemRange[Index].Nx;
316 return mProtectionMemRange[Index].Present;
317 }
318 }
319 *Nx = TRUE;
320 return FALSE;
321
322 } else {
323 *Nx = TRUE;
324 if (IsInSmmRanges (Address)) {
325 *Nx = FALSE;
326 }
327 return TRUE;
328 }
329 }
330
331 /**
332 Check if the memory address will be mapped by 4KB-page.
333
334 @param Address The address of Memory.
335
336 **/
337 BOOLEAN
338 IsAddressSplit (
339 IN EFI_PHYSICAL_ADDRESS Address
340 )
341 {
342 UINTN Index;
343
344 if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
345 //
346 // Check configuration
347 //
348 for (Index = 0; Index < mSplitMemRangeCount; Index++) {
349 if ((Address >= mSplitMemRange[Index].Base) && (Address < mSplitMemRange[Index].Top)) {
350 return TRUE;
351 }
352 }
353 } else {
354 if (Address < mCpuHotPlugData.SmrrBase) {
355 if ((mCpuHotPlugData.SmrrBase - Address) < BASE_2MB) {
356 return TRUE;
357 }
358 } else if (Address > (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize - BASE_2MB)) {
359 if ((Address - (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize - BASE_2MB)) < BASE_2MB) {
360 return TRUE;
361 }
362 }
363 }
364 //
365 // Return default
366 //
367 return FALSE;
368 }
369
/**
  Initialize the protected memory ranges and the 4KB-page mapped memory ranges.

  Builds mProtectionMemRange from the template plus SMRAM descriptors and
  GCD MMIO descriptors, then derives mSplitMemRange: the set of ranges whose
  boundaries are not 2MB-aligned and therefore must be mapped with 4KB pages.

**/
VOID
InitProtectedMemRange (
  VOID
  )
{
  UINTN                            Index;
  UINTN                            NumberOfDescriptors;
  UINTN                            NumberOfAddedDescriptors;
  UINTN                            NumberOfProtectRange;
  UINTN                            NumberOfSpliteRange;
  EFI_GCD_MEMORY_SPACE_DESCRIPTOR  *MemorySpaceMap;
  UINTN                            TotalSize;
  EFI_PHYSICAL_ADDRESS             ProtectBaseAddress;
  EFI_PHYSICAL_ADDRESS             ProtectEndAddress;
  EFI_PHYSICAL_ADDRESS             Top2MBAlignedAddress;
  EFI_PHYSICAL_ADDRESS             Base2MBAlignedAddress;
  UINT64                           High4KBPageSize;
  UINT64                           Low4KBPageSize;

  NumberOfDescriptors      = 0;
  NumberOfAddedDescriptors = mSmmCpuSmramRangeCount;
  NumberOfSpliteRange      = 0;
  MemorySpaceMap           = NULL;

  //
  // Get MMIO ranges from GCD and add them into protected memory ranges.
  //
  gDS->GetMemorySpaceMap (
         &NumberOfDescriptors,
         &MemorySpaceMap
         );
  for (Index = 0; Index < NumberOfDescriptors; Index++) {
    if (MemorySpaceMap[Index].GcdMemoryType == EfiGcdMemoryTypeMemoryMappedIo) {
      NumberOfAddedDescriptors++;
    }
  }

  if (NumberOfAddedDescriptors != 0) {
    //
    // Allocate a table big enough for the template entries plus all
    // SMRAM ranges and all MMIO ranges found above.
    //
    TotalSize = NumberOfAddedDescriptors * sizeof (MEMORY_PROTECTION_RANGE) + sizeof (mProtectionMemRangeTemplate);
    mProtectionMemRange = (MEMORY_PROTECTION_RANGE *) AllocateZeroPool (TotalSize);
    ASSERT (mProtectionMemRange != NULL);
    mProtectionMemRangeCount = TotalSize / sizeof (MEMORY_PROTECTION_RANGE);

    //
    // Copy existing (template) ranges first.
    //
    CopyMem (mProtectionMemRange, mProtectionMemRangeTemplate, sizeof (mProtectionMemRangeTemplate));

    //
    // Allocate the split-range table sized to match the protection table;
    // entries are filled in below.
    //
    TotalSize = (TotalSize / sizeof (MEMORY_PROTECTION_RANGE)) * sizeof (MEMORY_RANGE);
    mSplitMemRange = (MEMORY_RANGE *) AllocateZeroPool (TotalSize);
    ASSERT (mSplitMemRange != NULL);

    //
    // Create SMM ranges which are set to present and execution-enable.
    //
    NumberOfProtectRange = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);
    for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
      if (mSmmCpuSmramRanges[Index].CpuStart >= mProtectionMemRange[0].Range.Base &&
          mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize < mProtectionMemRange[0].Range.Top) {
        //
        // Skip: the range is already covered by entry 0
        // (mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSize).
        //
        break;
      }
      mProtectionMemRange[NumberOfProtectRange].Range.Base = mSmmCpuSmramRanges[Index].CpuStart;
      mProtectionMemRange[NumberOfProtectRange].Range.Top  = mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize;
      mProtectionMemRange[NumberOfProtectRange].Present    = TRUE;
      mProtectionMemRange[NumberOfProtectRange].Nx         = FALSE;
      NumberOfProtectRange++;
    }

    //
    // Create MMIO ranges which are set to present and execution-disable.
    //
    for (Index = 0; Index < NumberOfDescriptors; Index++) {
      if (MemorySpaceMap[Index].GcdMemoryType != EfiGcdMemoryTypeMemoryMappedIo) {
        continue;
      }
      mProtectionMemRange[NumberOfProtectRange].Range.Base = MemorySpaceMap[Index].BaseAddress;
      mProtectionMemRange[NumberOfProtectRange].Range.Top  = MemorySpaceMap[Index].BaseAddress + MemorySpaceMap[Index].Length;
      mProtectionMemRange[NumberOfProtectRange].Present    = TRUE;
      mProtectionMemRange[NumberOfProtectRange].Nx         = TRUE;
      NumberOfProtectRange++;
    }

    //
    // Check and update the actual protected memory ranges count.
    //
    ASSERT (NumberOfProtectRange <= mProtectionMemRangeCount);
    mProtectionMemRangeCount = NumberOfProtectRange;
  }

  //
  // According to protected ranges, create the ranges which will be mapped
  // by 4KB pages.
  //
  NumberOfSpliteRange  = 0;
  NumberOfProtectRange = mProtectionMemRangeCount;
  for (Index = 0; Index < NumberOfProtectRange; Index++) {
    //
    // If the range is not 2MB-aligned, record which parts need 4KB pages.
    //
    ProtectBaseAddress = mProtectionMemRange[Index].Range.Base;
    ProtectEndAddress  = mProtectionMemRange[Index].Range.Top;
    if (((ProtectBaseAddress & (SIZE_2MB - 1)) != 0) || ((ProtectEndAddress & (SIZE_2MB - 1)) != 0)) {
      //
      // Check if it is possible to create 4KB-pages only for the unaligned
      // edges and keep 2MB pages for the aligned middle.
      // A mix of 4KB and 2MB pages saves SMRAM space.
      //
      Top2MBAlignedAddress  = ProtectEndAddress & ~(SIZE_2MB - 1);
      Base2MBAlignedAddress = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
      if ((Top2MBAlignedAddress > Base2MBAlignedAddress) &&
          ((Top2MBAlignedAddress - Base2MBAlignedAddress) >= SIZE_2MB)) {
        //
        // There is a middle region which can stay mapped by 2MB pages.
        //
        High4KBPageSize = ((ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectEndAddress & ~(SIZE_2MB - 1));
        Low4KBPageSize  = ((ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectBaseAddress & ~(SIZE_2MB - 1));
        if (High4KBPageSize != 0) {
          //
          // Add the unaligned top edge to be mapped by 4KB pages.
          //
          mSplitMemRange[NumberOfSpliteRange].Base = ProtectEndAddress & ~(SIZE_2MB - 1);
          mSplitMemRange[NumberOfSpliteRange].Top  = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
          NumberOfSpliteRange++;
        }
        if (Low4KBPageSize != 0) {
          //
          // Add the unaligned bottom edge to be mapped by 4KB pages.
          //
          mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1);
          mSplitMemRange[NumberOfSpliteRange].Top  = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
          NumberOfSpliteRange++;
        }
      } else {
        //
        // The whole range can only be mapped by 4KB pages.
        //
        mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1);
        mSplitMemRange[NumberOfSpliteRange].Top  = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
        NumberOfSpliteRange++;
      }
    }
  }

  mSplitMemRangeCount = NumberOfSpliteRange;

  DEBUG ((EFI_D_INFO, "SMM Profile Memory Ranges:\n"));
  for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
    DEBUG ((EFI_D_INFO, "mProtectionMemRange[%d].Base = %lx\n", Index, mProtectionMemRange[Index].Range.Base));
    DEBUG ((EFI_D_INFO, "mProtectionMemRange[%d].Top  = %lx\n", Index, mProtectionMemRange[Index].Range.Top));
  }
  for (Index = 0; Index < mSplitMemRangeCount; Index++) {
    DEBUG ((EFI_D_INFO, "mSplitMemRange[%d].Base = %lx\n", Index, mSplitMemRange[Index].Base));
    DEBUG ((EFI_D_INFO, "mSplitMemRange[%d].Top  = %lx\n", Index, mSplitMemRange[Index].Top));
  }
}
533
/**
  Update page table according to protected memory ranges and the 4KB-page
  mapped memory ranges.

  Pass 1 splits 2MB pages into 4KB page tables for every range reported by
  IsAddressSplit(). Pass 2 marks entries absent or execute-disabled per
  IsAddressValid(). Naming note: on x64 "Pde" below walks PDPT entries and
  "Pte" walks PD entries (the IA32_PG_PS checks on *Pde/*Pte correspond to
  1GB and 2MB pages respectively).

**/
VOID
InitPaging (
  VOID
  )
{
  UINT64   *Pml4;
  UINT64   *Pde;
  UINT64   *Pte;
  UINT64   *Pt;
  UINTN    Address;
  UINTN    Level1;
  UINTN    Level2;
  UINTN    Level3;
  UINTN    Level4;
  UINTN    NumberOfPdpEntries;
  UINTN    NumberOfPml4Entries;
  UINTN    SizeOfMemorySpace;
  BOOLEAN  Nx;

  if (sizeof (UINTN) == sizeof (UINT64)) {
    //
    // x64: walk from the PML4 captured in mSmmProfileCr3.
    //
    Pml4 = (UINT64*)(UINTN)mSmmProfileCr3;
    SizeOfMemorySpace = HighBitSet64 (gPhyMask) + 1;
    //
    // Calculate the table entries of PML4E and PDPTE.
    //
    if (SizeOfMemorySpace <= 39 ) {
      NumberOfPml4Entries = 1;
      NumberOfPdpEntries = (UINT32)LShiftU64 (1, (SizeOfMemorySpace - 30));
    } else {
      NumberOfPml4Entries = (UINT32)LShiftU64 (1, (SizeOfMemorySpace - 39));
      NumberOfPdpEntries = 512;
    }
  } else {
    //
    // IA32 PAE: 4 PDPT entries, no PML4.
    //
    NumberOfPml4Entries = 1;
    NumberOfPdpEntries = 4;
  }

  //
  // Go through page table and change 2MB-page into 4KB-page.
  //
  for (Level1 = 0; Level1 < NumberOfPml4Entries; Level1++) {
    if (sizeof (UINTN) == sizeof (UINT64)) {
      if ((Pml4[Level1] & IA32_PG_P) == 0) {
        //
        // If Pml4 entry does not exist, skip it
        //
        continue;
      }
      Pde = (UINT64 *)(UINTN)(Pml4[Level1] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    } else {
      Pde = (UINT64*)(UINTN)mSmmProfileCr3;
    }
    for (Level2 = 0; Level2 < NumberOfPdpEntries; Level2++, Pde++) {
      if ((*Pde & IA32_PG_P) == 0) {
        //
        // If PDE entry does not exist, skip it
        //
        continue;
      }
      if ((*Pde & IA32_PG_PS) != 0) {
        //
        // This is 1G entry, skip it
        //
        continue;
      }
      Pte = (UINT64 *)(UINTN)(*Pde & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      if (Pte == 0) {
        continue;
      }
      for (Level3 = 0; Level3 < SIZE_4KB / sizeof (*Pte); Level3++, Pte++) {
        if ((*Pte & IA32_PG_P) == 0) {
          //
          // If PTE entry does not exist, skip it
          //
          continue;
        }
        //
        // NOTE: the address omits Level1, so this computation assumes
        // entries of interest live under the first PML4 entry (< 512GB).
        //
        Address = (((Level2 << 9) + Level3) << 21);

        //
        // If it is 2M page, check IsAddressSplit()
        //
        if (((*Pte & IA32_PG_PS) != 0) && IsAddressSplit (Address)) {
          //
          // Based on current page table, create 4KB page table for split area.
          //
          ASSERT (Address == (*Pte & PHYSICAL_ADDRESS_MASK));

          Pt = AllocatePageTableMemory (1);
          ASSERT (Pt != NULL);

          //
          // Fill the new page table with identity-mapped 4KB entries,
          // then point the former 2MB entry at it.
          //
          for (Level4 = 0; Level4 < SIZE_4KB / sizeof(*Pt); Level4++) {
            Pt[Level4] = Address + ((Level4 << 12) | mAddressEncMask | PAGE_ATTRIBUTE_BITS);
          } // end for PT
          *Pte = (UINT64)(UINTN)Pt | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
        } // end if IsAddressSplit
      } // end for PTE
    } // end for PDE
  }

  //
  // Go through page table and set several page table entries to absent or
  // execute-disable.
  //
  DEBUG ((EFI_D_INFO, "Patch page table start ...\n"));
  for (Level1 = 0; Level1 < NumberOfPml4Entries; Level1++) {
    if (sizeof (UINTN) == sizeof (UINT64)) {
      if ((Pml4[Level1] & IA32_PG_P) == 0) {
        //
        // If Pml4 entry does not exist, skip it
        //
        continue;
      }
      Pde = (UINT64 *)(UINTN)(Pml4[Level1] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
    } else {
      Pde = (UINT64*)(UINTN)mSmmProfileCr3;
    }
    for (Level2 = 0; Level2 < NumberOfPdpEntries; Level2++, Pde++) {
      if ((*Pde & IA32_PG_P) == 0) {
        //
        // If PDE entry does not exist, skip it
        //
        continue;
      }
      if ((*Pde & IA32_PG_PS) != 0) {
        //
        // This is 1G entry, set NX bit and skip it
        //
        if (mXdSupported) {
          *Pde = *Pde | IA32_PG_NX;
        }
        continue;
      }
      Pte = (UINT64 *)(UINTN)(*Pde & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
      if (Pte == 0) {
        continue;
      }
      for (Level3 = 0; Level3 < SIZE_4KB / sizeof (*Pte); Level3++, Pte++) {
        if ((*Pte & IA32_PG_P) == 0) {
          //
          // If PTE entry does not exist, skip it
          //
          continue;
        }
        Address = (((Level2 << 9) + Level3) << 21);

        if ((*Pte & IA32_PG_PS) != 0) {
          //
          // 2MB page
          //
          if (!IsAddressValid (Address, &Nx)) {
            //
            // Patch to remove Present flag and RW flag
            //
            *Pte = *Pte & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
          }
          if (Nx && mXdSupported) {
            *Pte = *Pte | IA32_PG_NX;
          }
        } else {
          //
          // 4KB page: patch each entry in the leaf page table.
          //
          Pt = (UINT64 *)(UINTN)(*Pte & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
          if (Pt == 0) {
            continue;
          }
          for (Level4 = 0; Level4 < SIZE_4KB / sizeof(*Pt); Level4++, Pt++) {
            if (!IsAddressValid (Address, &Nx)) {
              *Pt = *Pt & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
            }
            if (Nx && mXdSupported) {
              *Pt = *Pt | IA32_PG_NX;
            }
            Address += SIZE_4KB;
          } // end for PT
        } // end if PS
      } // end for PTE
    } // end for PDE
  }

  //
  // Flush TLB
  //
  CpuFlushTlb ();
  DEBUG ((EFI_D_INFO, "Patch page table done!\n"));
  //
  // Set execute-disable flag
  //
  mXdEnabled = TRUE;

  return ;
}
727
/**
  To get system port address of the SMI Command Port in FADT table.

  Locates the first FADT among the installed ACPI tables and caches its
  SmiCmd field in mSmiCommandPort.

**/
VOID
GetSmiCommandPort (
  VOID
  )
{
  EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *Fadt;

  Fadt = (EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *) EfiLocateFirstAcpiTable (
           EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE
           );
  //
  // A platform that enables SMM profile is expected to publish a FADT.
  //
  ASSERT (Fadt != NULL);

  mSmiCommandPort = Fadt->SmiCmd;
  DEBUG ((EFI_D_INFO, "mSmiCommandPort = %x\n", mSmiCommandPort));
}
747
/**
  Updates page table to make some memory ranges (like system memory) absent
  and make some memory ranges (like MMIO) present and execute disable. It also
  update 2MB-page to 4KB-page for some memory ranges.

  NOTE: despite the description above, this function only raises the
  mSmmProfileStart flag; the page table work is done elsewhere (InitPaging).

**/
VOID
SmmProfileStart (
  VOID
  )
{
  //
  // The flag indicates SMM profile starts to work.
  //
  mSmmProfileStart = TRUE;
}
764
/**
  Initialize SMM profile in SmmReadyToLock protocol callback function.

  @param  Protocol   Points to the protocol's unique identifier.
  @param  Interface  Points to the interface instance.
  @param  Handle     The handle on which the interface was installed.

  @retval EFI_SUCCESS   SmmReadyToLock protocol callback runs successfully.
**/
EFI_STATUS
EFIAPI
InitSmmProfileCallBack (
  IN CONST EFI_GUID  *Protocol,
  IN VOID            *Interface,
  IN EFI_HANDLE      Handle
  )
{
  //
  // Save the SMM profile base address to a UEFI variable so the profile
  // data can be found after boot. The return status is deliberately
  // ignored: failing to publish the variable is best-effort and must not
  // block SMM initialization.
  //
  gRT->SetVariable (
         SMM_PROFILE_NAME,
         &gEfiCallerIdGuid,
         EFI_VARIABLE_BOOTSERVICE_ACCESS | EFI_VARIABLE_RUNTIME_ACCESS,
         sizeof(mSmmProfileBase),
         &mSmmProfileBase
         );

  //
  // Get Software SMI command port from FADT.
  //
  GetSmiCommandPort ();

  //
  // Initialize protected memory ranges for patching the page table later.
  //
  InitProtectedMemRange ();

  return EFI_SUCCESS;
}
805
/**
  Initialize SMM profile data structures.

  Allocates the per-CPU #PF bookkeeping arrays, the SMM profile data buffer
  (below 4GB, reserved memory) with the BTS/PEBS Debug Store area appended
  when BTS is supported, initializes the profile header and per-CPU DS
  pointers, and registers an SmmReadyToLock notification to finish setup.

**/
VOID
InitSmmProfileInternal (
  VOID
  )
{
  EFI_STATUS            Status;
  EFI_PHYSICAL_ADDRESS  Base;
  VOID                  *Registration;
  UINTN                 Index;
  UINTN                 MsrDsAreaSizePerCpu;
  UINTN                 TotalSize;

  //
  // Per-CPU arrays used by the #PF/#DB handlers to record and restore
  // temporarily patched page table entries.
  //
  mPFEntryCount = (UINTN *)AllocateZeroPool (sizeof (UINTN) * mMaxNumberOfCpus);
  ASSERT (mPFEntryCount != NULL);
  mLastPFEntryValue = (UINT64 (*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (
                        sizeof (mLastPFEntryValue[0]) * mMaxNumberOfCpus);
  ASSERT (mLastPFEntryValue != NULL);
  mLastPFEntryPointer = (UINT64 *(*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (
                          sizeof (mLastPFEntryPointer[0]) * mMaxNumberOfCpus);
  ASSERT (mLastPFEntryPointer != NULL);

  //
  // Allocate memory for SmmProfile below 4GB (AllocateMaxAddress with a
  // 0xFFFFFFFF ceiling). The profile size must be 4KB-aligned.
  //
  mSmmProfileSize = PcdGet32 (PcdCpuSmmProfileSize);
  ASSERT ((mSmmProfileSize & 0xFFF) == 0);

  if (mBtsSupported) {
    //
    // Reserve extra space after the profile buffer for the DS save area.
    //
    TotalSize = mSmmProfileSize + mMsrDsAreaSize;
  } else {
    TotalSize = mSmmProfileSize;
  }

  Base = 0xFFFFFFFF;
  Status = gBS->AllocatePages (
                  AllocateMaxAddress,
                  EfiReservedMemoryType,
                  EFI_SIZE_TO_PAGES (TotalSize),
                  &Base
                  );
  ASSERT_EFI_ERROR (Status);
  ZeroMem ((VOID *)(UINTN)Base, TotalSize);
  mSmmProfileBase = (SMM_PROFILE_HEADER *)(UINTN)Base;

  //
  // Initialize SMM profile data header.
  //
  mSmmProfileBase->HeaderSize     = sizeof (SMM_PROFILE_HEADER);
  mSmmProfileBase->MaxDataEntries = (UINT64)((mSmmProfileSize - sizeof(SMM_PROFILE_HEADER)) / sizeof (SMM_PROFILE_ENTRY));
  mSmmProfileBase->MaxDataSize    = MultU64x64 (mSmmProfileBase->MaxDataEntries, sizeof(SMM_PROFILE_ENTRY));
  mSmmProfileBase->CurDataEntries = 0;
  mSmmProfileBase->CurDataSize    = 0;
  mSmmProfileBase->TsegStart      = mCpuHotPlugData.SmrrBase;
  mSmmProfileBase->TsegSize       = mCpuHotPlugData.SmrrSize;
  mSmmProfileBase->NumSmis        = 0;
  mSmmProfileBase->NumCpus        = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;

  if (mBtsSupported) {
    //
    // Carve the DS save area into per-CPU slices. Each slice holds, in
    // order: the MSR_DS_AREA_STRUCT header, the BTS record ring, and a
    // fixed number of PEBS records at the top of the slice.
    //
    mMsrDsArea = (MSR_DS_AREA_STRUCT **)AllocateZeroPool (sizeof (MSR_DS_AREA_STRUCT *) * mMaxNumberOfCpus);
    ASSERT (mMsrDsArea != NULL);
    mMsrBTSRecord = (BRANCH_TRACE_RECORD **)AllocateZeroPool (sizeof (BRANCH_TRACE_RECORD *) * mMaxNumberOfCpus);
    ASSERT (mMsrBTSRecord != NULL);
    mMsrPEBSRecord = (PEBS_RECORD **)AllocateZeroPool (sizeof (PEBS_RECORD *) * mMaxNumberOfCpus);
    ASSERT (mMsrPEBSRecord != NULL);

    mMsrDsAreaBase = (MSR_DS_AREA_STRUCT *)((UINTN)Base + mSmmProfileSize);
    MsrDsAreaSizePerCpu = mMsrDsAreaSize / mMaxNumberOfCpus;
    mBTSRecordNumber = (MsrDsAreaSizePerCpu - sizeof(PEBS_RECORD) * PEBS_RECORD_NUMBER - sizeof(MSR_DS_AREA_STRUCT)) / sizeof(BRANCH_TRACE_RECORD);
    for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
      mMsrDsArea[Index] = (MSR_DS_AREA_STRUCT *)((UINTN)mMsrDsAreaBase + MsrDsAreaSizePerCpu * Index);
      mMsrBTSRecord[Index] = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[Index] + sizeof(MSR_DS_AREA_STRUCT));
      mMsrPEBSRecord[Index] = (PEBS_RECORD *)((UINTN)mMsrDsArea[Index] + MsrDsAreaSizePerCpu - sizeof(PEBS_RECORD) * PEBS_RECORD_NUMBER);

      mMsrDsArea[Index]->BTSBufferBase          = (UINTN)mMsrBTSRecord[Index];
      mMsrDsArea[Index]->BTSIndex               = mMsrDsArea[Index]->BTSBufferBase;
      mMsrDsArea[Index]->BTSAbsoluteMaximum     = mMsrDsArea[Index]->BTSBufferBase + mBTSRecordNumber * sizeof(BRANCH_TRACE_RECORD) + 1;
      mMsrDsArea[Index]->BTSInterruptThreshold  = mMsrDsArea[Index]->BTSAbsoluteMaximum + 1;

      mMsrDsArea[Index]->PEBSBufferBase         = (UINTN)mMsrPEBSRecord[Index];
      mMsrDsArea[Index]->PEBSIndex              = mMsrDsArea[Index]->PEBSBufferBase;
      mMsrDsArea[Index]->PEBSAbsoluteMaximum    = mMsrDsArea[Index]->PEBSBufferBase + PEBS_RECORD_NUMBER * sizeof(PEBS_RECORD) + 1;
      mMsrDsArea[Index]->PEBSInterruptThreshold = mMsrDsArea[Index]->PEBSAbsoluteMaximum + 1;
    }
  }

  //
  // Until InitProtectedMemRange() builds the full list, point at the
  // static template and fix up its first two entries below.
  //
  mProtectionMemRange      = mProtectionMemRangeTemplate;
  mProtectionMemRangeCount = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);

  //
  // Update TSeg entry.
  //
  mProtectionMemRange[0].Range.Base = mCpuHotPlugData.SmrrBase;
  mProtectionMemRange[0].Range.Top  = mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize;

  //
  // Update SMM profile entry.
  //
  mProtectionMemRange[1].Range.Base = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase;
  mProtectionMemRange[1].Range.Top  = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase + TotalSize;

  //
  // Allocate memory reserved for creating 4KB pages.
  //
  InitPagesForPFHandler ();

  //
  // Start SMM profile when SmmReadyToLock protocol is installed.
  //
  Status = gSmst->SmmRegisterProtocolNotify (
                    &gEfiSmmReadyToLockProtocolGuid,
                    InitSmmProfileCallBack,
                    &Registration
                    );
  ASSERT_EFI_ERROR (Status);

  return ;
}
928
929 /**
930 Check if XD feature is supported by a processor.
931
932 **/
933 VOID
934 CheckFeatureSupported (
935 VOID
936 )
937 {
938 UINT32 RegEax;
939 UINT32 RegEdx;
940 MSR_IA32_MISC_ENABLE_REGISTER MiscEnableMsr;
941
942 if (mXdSupported) {
943 AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
944 if (RegEax <= CPUID_EXTENDED_FUNCTION) {
945 //
946 // Extended CPUID functions are not supported on this processor.
947 //
948 mXdSupported = FALSE;
949 PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
950 }
951
952 AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
953 if ((RegEdx & CPUID1_EDX_XD_SUPPORT) == 0) {
954 //
955 // Execute Disable Bit feature is not supported on this processor.
956 //
957 mXdSupported = FALSE;
958 PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
959 }
960 }
961
962 if (mBtsSupported) {
963 AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx);
964 if ((RegEdx & CPUID1_EDX_BTS_AVAILABLE) != 0) {
965 //
966 // Per IA32 manuals:
967 // When CPUID.1:EDX[21] is set, the following BTS facilities are available:
968 // 1. The BTS_UNAVAILABLE flag in the IA32_MISC_ENABLE MSR indicates the
969 // availability of the BTS facilities, including the ability to set the BTS and
970 // BTINT bits in the MSR_DEBUGCTLA MSR.
971 // 2. The IA32_DS_AREA MSR can be programmed to point to the DS save area.
972 //
973 MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);
974 if (MiscEnableMsr.Bits.BTS == 1) {
975 //
976 // BTS facilities is not supported if MSR_IA32_MISC_ENABLE.BTS bit is set.
977 //
978 mBtsSupported = FALSE;
979 }
980 }
981 }
982 }
983
984 /**
985 Enable single step.
986
987 **/
988 VOID
989 ActivateSingleStepDB (
990 VOID
991 )
992 {
993 UINTN Dr6;
994
995 Dr6 = AsmReadDr6 ();
996 if ((Dr6 & DR6_SINGLE_STEP) != 0) {
997 return;
998 }
999 Dr6 |= DR6_SINGLE_STEP;
1000 AsmWriteDr6 (Dr6);
1001 }
1002
1003 /**
1004 Enable last branch.
1005
1006 **/
1007 VOID
1008 ActivateLBR (
1009 VOID
1010 )
1011 {
1012 UINT64 DebugCtl;
1013
1014 DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);
1015 if ((DebugCtl & MSR_DEBUG_CTL_LBR) != 0) {
1016 return ;
1017 }
1018 DebugCtl |= MSR_DEBUG_CTL_LBR;
1019 AsmWriteMsr64 (MSR_DEBUG_CTL, DebugCtl);
1020 }
1021
1022 /**
1023 Enable branch trace store.
1024
1025 @param CpuIndex The index of the processor.
1026
1027 **/
1028 VOID
1029 ActivateBTS (
1030 IN UINTN CpuIndex
1031 )
1032 {
1033 UINT64 DebugCtl;
1034
1035 DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);
1036 if ((DebugCtl & MSR_DEBUG_CTL_BTS) != 0) {
1037 return ;
1038 }
1039
1040 AsmWriteMsr64 (MSR_DS_AREA, (UINT64)(UINTN)mMsrDsArea[CpuIndex]);
1041 DebugCtl |= (UINT64)(MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR);
1042 DebugCtl &= ~((UINT64)MSR_DEBUG_CTL_BTINT);
1043 AsmWriteMsr64 (MSR_DEBUG_CTL, DebugCtl);
1044 }
1045
1046 /**
1047 Increase SMI number in each SMI entry.
1048
1049 **/
1050 VOID
1051 SmmProfileRecordSmiNum (
1052 VOID
1053 )
1054 {
1055 if (mSmmProfileStart) {
1056 mSmmProfileBase->NumSmis++;
1057 }
1058 }
1059
1060 /**
1061 Initialize processor environment for SMM profile.
1062
1063 @param CpuIndex The index of the processor.
1064
1065 **/
1066 VOID
1067 ActivateSmmProfile (
1068 IN UINTN CpuIndex
1069 )
1070 {
1071 //
1072 // Enable Single Step DB#
1073 //
1074 ActivateSingleStepDB ();
1075
1076 if (mBtsSupported) {
1077 //
1078 // We can not get useful information from LER, so we have to use BTS.
1079 //
1080 ActivateLBR ();
1081
1082 //
1083 // Enable BTS
1084 //
1085 ActivateBTS (CpuIndex);
1086 }
1087 }
1088
1089 /**
1090 Initialize SMM profile in SMM CPU entry point.
1091
1092 @param[in] Cr3 The base address of the page tables to use in SMM.
1093
1094 **/
1095 VOID
1096 InitSmmProfile (
1097 UINT32 Cr3
1098 )
1099 {
1100 //
1101 // Save Cr3
1102 //
1103 mSmmProfileCr3 = Cr3;
1104
1105 //
1106 // Skip SMM profile initialization if feature is disabled
1107 //
1108 if (!FeaturePcdGet (PcdCpuSmmProfileEnable) &&
1109 !HEAP_GUARD_NONSTOP_MODE &&
1110 !NULL_DETECTION_NONSTOP_MODE) {
1111 return;
1112 }
1113
1114 //
1115 // Initialize SmmProfile here
1116 //
1117 InitSmmProfileInternal ();
1118
1119 //
1120 // Initialize profile IDT.
1121 //
1122 InitIdtr ();
1123
1124 //
1125 // Tell #PF handler to prepare a #DB subsequently.
1126 //
1127 mSetupDebugTrap = TRUE;
1128 }
1129
/**
  Update page table to map the memory correctly in order to make the instruction
  which caused page fault execute successfully. And it also save the original page
  table to be restored in single-step exception.

  @param PageTable PageTable Address.
  @param PFAddress The memory address which caused page fault exception.
  @param CpuIndex  The index of the processor.
  @param ErrorCode The Error code of exception.

**/
VOID
RestorePageTableBelow4G (
  UINT64 *PageTable,
  UINT64 PFAddress,
  UINTN CpuIndex,
  UINTN ErrorCode
  )
{
  UINTN PTIndex;
  UINTN PFIndex;

  //
  // PML4 - only walked when UINTN is 64 bits wide (X64 build); on IA32 the
  // incoming PageTable already points at the PDPT level.
  //
  if (sizeof(UINT64) == sizeof(UINTN)) {
    PTIndex = (UINTN)BitFieldRead64 (PFAddress, 39, 47);
    ASSERT (PageTable[PTIndex] != 0);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
  }

  //
  // PDPTE - bits 30..38 of the faulting address select the entry.
  //
  PTIndex = (UINTN)BitFieldRead64 (PFAddress, 30, 38);
  ASSERT (PageTable[PTIndex] != 0);
  PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);

  //
  // PD - bits 21..29; the PS bit here distinguishes a 2MB large page
  // from a pointer to a 4KB page table.
  //
  PTIndex = (UINTN)BitFieldRead64 (PFAddress, 21, 29);
  if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
    //
    // Large page
    //

    //
    // Record old entries with non-present status.
    // Old entries include the memory which instruction is at and the memory which instruction access.
    // The saved value/pointer pair is used to restore the entry later
    // in the single-step (#DB) exception, per this function's contract.
    //
    ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
    if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
      PFIndex = mPFEntryCount[CpuIndex];
      mLastPFEntryValue[CpuIndex][PFIndex] = PageTable[PTIndex];
      mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
      mPFEntryCount[CpuIndex]++;
    }

    //
    // Set new entry: map the 2MB frame containing PFAddress as present,
    // and clear NX when the fault was an instruction fetch (IA32_PF_EC_ID).
    //
    PageTable[PTIndex] = (PFAddress & ~((1ull << 21) - 1));
    PageTable[PTIndex] |= (UINT64)IA32_PG_PS;
    PageTable[PTIndex] |= (UINT64)PAGE_ATTRIBUTE_BITS;
    if ((ErrorCode & IA32_PF_EC_ID) != 0) {
      PageTable[PTIndex] &= ~IA32_PG_NX;
    }
  } else {
    //
    // Small page
    //
    ASSERT (PageTable[PTIndex] != 0);
    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);

    //
    // 4K PTE - bits 12..20 select the final entry.
    //
    PTIndex = (UINTN)BitFieldRead64 (PFAddress, 12, 20);

    //
    // Record old entries with non-present status.
    // Old entries include the memory which instruction is at and the memory which instruction access.
    //
    ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
    if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
      PFIndex = mPFEntryCount[CpuIndex];
      mLastPFEntryValue[CpuIndex][PFIndex] = PageTable[PTIndex];
      mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
      mPFEntryCount[CpuIndex]++;
    }

    //
    // Set new entry: map the 4KB frame containing PFAddress as present,
    // clearing NX for instruction-fetch faults.
    //
    PageTable[PTIndex] = (PFAddress & ~((1ull << 12) - 1));
    PageTable[PTIndex] |= (UINT64)PAGE_ATTRIBUTE_BITS;
    if ((ErrorCode & IA32_PF_EC_ID) != 0) {
      PageTable[PTIndex] &= ~IA32_PG_NX;
    }
  }
}
1234
1235 /**
1236 Handler for Page Fault triggered by Guard page.
1237
1238 @param ErrorCode The Error code of exception.
1239
1240 **/
1241 VOID
1242 GuardPagePFHandler (
1243 UINTN ErrorCode
1244 )
1245 {
1246 UINT64 *PageTable;
1247 UINT64 PFAddress;
1248 UINT64 RestoreAddress;
1249 UINTN RestorePageNumber;
1250 UINTN CpuIndex;
1251
1252 PageTable = (UINT64 *)AsmReadCr3 ();
1253 PFAddress = AsmReadCr2 ();
1254 CpuIndex = GetCpuIndex ();
1255
1256 //
1257 // Memory operation cross pages, like "rep mov" instruction, will cause
1258 // infinite loop between this and Debug Trap handler. We have to make sure
1259 // that current page and the page followed are both in PRESENT state.
1260 //
1261 RestorePageNumber = 2;
1262 RestoreAddress = PFAddress;
1263 while (RestorePageNumber > 0) {
1264 RestorePageTableBelow4G (PageTable, RestoreAddress, CpuIndex, ErrorCode);
1265 RestoreAddress += EFI_PAGE_SIZE;
1266 RestorePageNumber--;
1267 }
1268
1269 //
1270 // Flush TLB
1271 //
1272 CpuFlushTlb ();
1273 }
1274
/**
  The Page fault handler to save SMM profile data.

  @param Rip       The RIP when exception happens.
  @param ErrorCode The Error code of exception.

**/
VOID
SmmProfilePFHandler (
  UINTN Rip,
  UINTN ErrorCode
  )
{
  UINT64 *PageTable;
  UINT64 PFAddress;
  UINT64 RestoreAddress;
  UINTN RestorePageNumber;
  UINTN CpuIndex;
  UINTN Index;
  UINT64 InstructionAddress;
  UINTN MaxEntryNumber;
  UINTN CurrentEntryNumber;
  BOOLEAN IsValidPFAddress;
  SMM_PROFILE_ENTRY *SmmProfileEntry;
  UINT64 SmiCommand;
  EFI_STATUS Status;
  UINT8 SoftSmiValue;
  EFI_SMM_SAVE_STATE_IO_INFO IoInfo;

  if (!mSmmProfileStart) {
    //
    // If SMM profile does not start, call original page fault handler.
    //
    SmiDefaultPFHandler ();
    return;
  }

  //
  // Pause branch tracing while this handler runs so its own branches do
  // not pollute the BTS buffer; re-enabled at the end of the function.
  //
  if (mBtsSupported) {
    DisableBTS ();
  }

  IsValidPFAddress = FALSE;
  PageTable = (UINT64 *)AsmReadCr3 ();
  PFAddress = AsmReadCr2 ();
  CpuIndex = GetCpuIndex ();

  //
  // Memory operation cross pages, like "rep mov" instruction, will cause
  // infinite loop between this and Debug Trap handler. We have to make sure
  // that current page and the page followed are both in PRESENT state.
  //
  RestorePageNumber = 2;
  RestoreAddress = PFAddress;
  while (RestorePageNumber > 0) {
    if (RestoreAddress <= 0xFFFFFFFF) {
      RestorePageTableBelow4G (PageTable, RestoreAddress, CpuIndex, ErrorCode);
    } else {
      RestorePageTableAbove4G (PageTable, RestoreAddress, CpuIndex, ErrorCode, &IsValidPFAddress);
    }
    RestoreAddress += EFI_PAGE_SIZE;
    RestorePageNumber--;
  }

  //
  // Only log the access when the above-4G restore path did not flag the
  // faulting address as valid.
  //
  if (!IsValidPFAddress) {
    InstructionAddress = Rip;
    if ((ErrorCode & IA32_PF_EC_ID) != 0 && (mBtsSupported)) {
      //
      // If it is instruction fetch failure, get the correct IP from BTS.
      //
      InstructionAddress = GetSourceFromDestinationOnBts (CpuIndex, Rip);
      if (InstructionAddress == 0) {
        //
        // It indicates the instruction which caused page fault is not a jump instruction,
        // set instruction address same as the page fault address.
        //
        InstructionAddress = PFAddress;
      }
    }

    //
    // Indicate it is not software SMI; all-ones is the "no SMI command"
    // sentinel, overwritten below if a command-port write is found.
    //
    SmiCommand = 0xFFFFFFFFFFFFFFFFULL;
    for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
      //
      // Scan every CPU's save state for an I/O access to the SMI command
      // port; a CPU whose save state has no I/O info simply returns an
      // error and is skipped.
      //
      Status = SmmReadSaveState(&mSmmCpu, sizeof(IoInfo), EFI_SMM_SAVE_STATE_REGISTER_IO, Index, &IoInfo);
      if (EFI_ERROR (Status)) {
        continue;
      }
      if (IoInfo.IoPort == mSmiCommandPort) {
        //
        // A software SMI triggered by SMI command port has been found, get SmiCommand from SMI command port.
        //
        SoftSmiValue = IoRead8 (mSmiCommandPort);
        SmiCommand = (UINT64)SoftSmiValue;
        break;
      }
    }

    SmmProfileEntry = (SMM_PROFILE_ENTRY *)(UINTN)(mSmmProfileBase + 1);
    //
    // Check if there is already a same entry in profile data.
    //
    for (Index = 0; Index < (UINTN) mSmmProfileBase->CurDataEntries; Index++) {
      if ((SmmProfileEntry[Index].ErrorCode == (UINT64)ErrorCode) &&
          (SmmProfileEntry[Index].Address == PFAddress) &&
          (SmmProfileEntry[Index].CpuNum == (UINT64)CpuIndex) &&
          (SmmProfileEntry[Index].Instruction == InstructionAddress) &&
          (SmmProfileEntry[Index].SmiCmd == SmiCommand)) {
        //
        // Same record exist, need not save again.
        //
        break;
      }
    }
    if (Index == mSmmProfileBase->CurDataEntries) {
      CurrentEntryNumber = (UINTN) mSmmProfileBase->CurDataEntries;
      MaxEntryNumber = (UINTN) mSmmProfileBase->MaxDataEntries;
      if (FeaturePcdGet (PcdCpuSmmProfileRingBuffer)) {
        //
        // Ring-buffer mode: wrap the write index instead of stopping
        // when the buffer is full.
        //
        CurrentEntryNumber = CurrentEntryNumber % MaxEntryNumber;
      }
      if (CurrentEntryNumber < MaxEntryNumber) {
        //
        // Log the new entry
        //
        SmmProfileEntry[CurrentEntryNumber].SmiNum = mSmmProfileBase->NumSmis;
        SmmProfileEntry[CurrentEntryNumber].ErrorCode = (UINT64)ErrorCode;
        SmmProfileEntry[CurrentEntryNumber].ApicId = (UINT64)GetApicId ();
        SmmProfileEntry[CurrentEntryNumber].CpuNum = (UINT64)CpuIndex;
        SmmProfileEntry[CurrentEntryNumber].Address = PFAddress;
        SmmProfileEntry[CurrentEntryNumber].Instruction = InstructionAddress;
        SmmProfileEntry[CurrentEntryNumber].SmiCmd = SmiCommand;
        //
        // Update current entry index and data size in the header.
        //
        mSmmProfileBase->CurDataEntries++;
        mSmmProfileBase->CurDataSize = MultU64x64 (mSmmProfileBase->CurDataEntries, sizeof (SMM_PROFILE_ENTRY));
      }
    }
  }
  //
  // Flush TLB
  //
  CpuFlushTlb ();

  if (mBtsSupported) {
    EnableBTS ();
  }
}
1423
1424 /**
1425 Replace INT1 exception handler to restore page table to absent/execute-disable state
1426 in order to trigger page fault again to save SMM profile data..
1427
1428 **/
1429 VOID
1430 InitIdtr (
1431 VOID
1432 )
1433 {
1434 EFI_STATUS Status;
1435
1436 Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_DEBUG, DebugExceptionHandler);
1437 ASSERT_EFI_ERROR (Status);
1438 }