--- /dev/null
+/** @file\r
+X64 processor specific functions to enable SMM profile.\r
+\r
+Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>\r
+This program and the accompanying materials\r
+are licensed and made available under the terms and conditions of the BSD License\r
+which accompanies this distribution. The full text of the license may be found at\r
+http://opensource.org/licenses/bsd-license.php\r
+\r
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include "PiSmmCpuDxeSmm.h"\r
+#include "SmmProfileInternal.h"\r
+\r
+//
+// Current page index.
+// Index of the next page in mPFPageBuffer to hand out; advances round-robin
+// and wraps modulo MAX_PF_PAGE_COUNT, so the oldest page is recycled first.
+//
+UINTN mPFPageIndex;
+
+//
+// Pool for dynamically creating page table in page fault handler.
+// Base address of a pre-allocated pool of MAX_PF_PAGE_COUNT pages, so the
+// page fault handler never has to call allocation services at fault time.
+//
+UINT64 mPFPageBuffer;
+
+//
+// Store the uplink information for each page being used.
+// mPFPageUplink[i] records the page-directory entry that currently references
+// page i of the pool; used to sever the stale mapping when page i is recycled.
+//
+UINT64 *mPFPageUplink[MAX_PF_PAGE_COUNT];
+\r
+/**
+  Create SMM page table for S3 path.
+
+  Builds a PAE identity mapping of the first 4GB and publishes the PML4
+  base address in mSmmS3ResumeState->SmmS3Cr3 for the S3 resume code to
+  load into CR3.
+
+**/
+VOID
+InitSmmS3Cr3 (
+  VOID
+  )
+{
+  EFI_PHYSICAL_ADDRESS PageTable;
+  UINT64 *Pml4Entry;
+
+  //
+  // Generate the PAE page table covering the first 4GB of memory.
+  //
+  PageTable = Gen4GPageTable (1);
+
+  //
+  // The PML4 page sits one page below the address returned above. Point its
+  // first entry at the PDPT (present) and clear every remaining entry.
+  //
+  Pml4Entry  = (UINT64*)(UINTN)(PageTable - EFI_PAGES_TO_SIZE (1));
+  *Pml4Entry = PageTable + IA32_PG_P;
+  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));
+
+  //
+  // Publish the PML4 base so the S3 resume path can set CR3.
+  //
+  mSmmS3ResumeState->SmmS3Cr3 = (UINT32)(UINTN)Pml4Entry;
+}
+\r
+/**
+  Allocate pages for creating 4KB-page based on 2MB-page when page fault happens.
+
+  Pre-allocates MAX_PF_PAGE_COUNT pages so AcquirePage() never has to call
+  allocation services inside the page fault handler, and resets the
+  round-robin recycling state (mPFPageIndex, mPFPageUplink).
+
+**/
+VOID
+InitPagesForPFHandler (
+  VOID
+  )
+{
+  VOID *Address;
+
+  //
+  // Pre-Allocate memory for page fault handler
+  //
+  Address = AllocatePages (MAX_PF_PAGE_COUNT);
+  //
+  // Use ASSERT, not ASSERT_EFI_ERROR: AllocatePages returns a pointer, and
+  // ASSERT_EFI_ERROR expects an EFI_STATUS, so applying it to a BOOLEAN
+  // expression checks the wrong thing.
+  //
+  ASSERT (Address != NULL);
+
+  mPFPageBuffer = (UINT64)(UINTN) Address;
+  mPFPageIndex = 0;
+  ZeroMem ((VOID *) (UINTN) mPFPageBuffer, EFI_PAGE_SIZE * MAX_PF_PAGE_COUNT);
+  ZeroMem (mPFPageUplink, sizeof (mPFPageUplink));
+
+  return;
+}
+\r
+/**
+  Allocate one page for creating 4KB-page based on 2MB-page.
+
+  Hands out pages from the pre-allocated pool (mPFPageBuffer) in round-robin
+  order. When the pool wraps, the oldest page is recycled: the page-directory
+  entry that still references it is cleared before the page is re-linked.
+
+  @param Uplink The address of Page-Directory entry.
+
+**/
+VOID
+AcquirePage (
+  UINT64 *Uplink
+  )
+{
+  UINT64 Address;
+
+  //
+  // Get the buffer: the pool page selected by the current round-robin index.
+  //
+  Address = mPFPageBuffer + EFI_PAGES_TO_SIZE (mPFPageIndex);
+  ZeroMem ((VOID *) (UINTN) Address, EFI_PAGE_SIZE);
+
+  //
+  // Cut the previous uplink if it exists and wasn't overwritten.
+  // (The recorded PD entry may have been re-pointed elsewhere since this page
+  // was last handed out; only clear it if it still maps this page.)
+  //
+  if ((mPFPageUplink[mPFPageIndex] != NULL) && ((*mPFPageUplink[mPFPageIndex] & PHYSICAL_ADDRESS_MASK) == Address)) {
+    *mPFPageUplink[mPFPageIndex] = 0;
+  }
+
+  //
+  // Link & Record the current uplink
+  //
+  *Uplink = Address | IA32_PG_P | IA32_PG_RW;
+  mPFPageUplink[mPFPageIndex] = Uplink;
+
+  //
+  // Advance the index; wrapping means the oldest page is recycled next.
+  //
+  mPFPageIndex = (mPFPageIndex + 1) % MAX_PF_PAGE_COUNT;
+}
+\r
+/**
+  Update page table to map the memory correctly in order to make the instruction
+  which caused page fault execute successfully. It also saves the original page
+  table entry so it can be restored in the single-step exception handler.
+
+  @param PageTable PageTable Address.
+  @param PFAddress The memory address which caused page fault exception.
+  @param CpuIndex The index of the processor.
+  @param ErrorCode The Error code of exception.
+  @param IsValidPFAddress The flag indicates if SMM profile data need be added.
+
+**/
+VOID
+RestorePageTableAbove4G (
+  UINT64 *PageTable,
+  UINT64 PFAddress,
+  UINTN CpuIndex,
+  UINTN ErrorCode,
+  BOOLEAN *IsValidPFAddress
+  )
+{
+  UINTN PTIndex;
+  UINT64 Address;
+  BOOLEAN Nx;
+  BOOLEAN Existed;
+  UINTN Index;
+  UINTN PFIndex;
+
+  ASSERT ((PageTable != NULL) && (IsValidPFAddress != NULL));
+
+  //
+  // This path handles page fault addresses above 4GB.
+  //
+
+  //
+  // Check if page fault address has existed in page table.
+  // If it exists in page table but page fault is generated,
+  // there are 2 possible reasons: 1. present flag is set to 0; 2. instruction fetch in protected memory range.
+  //
+  Existed = FALSE;
+  PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
+  PTIndex = BitFieldRead64 (PFAddress, 39, 47);
+  if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
+    // PML4E
+    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+    PTIndex = BitFieldRead64 (PFAddress, 30, 38);
+    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
+      // PDPTE
+      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+      PTIndex = BitFieldRead64 (PFAddress, 21, 29);
+      // PD
+      if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
+        //
+        // 2MB page: compare the 2MB-aligned frames of the entry and PFAddress.
+        //
+        Address = (UINT64)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+        if ((Address & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)) == ((PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)))) {
+          Existed = TRUE;
+        }
+      } else {
+        //
+        // 4KB page
+        //
+        PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+        // NOTE(review): this compares the page-table pointer (the PD entry's
+        // masked address) against 0, not the entry's present bit - confirm intent.
+        if (PageTable != 0) {
+          //
+          // When there is a valid entry to map to 4KB page, need not create a new entry to map 2MB.
+          //
+          PTIndex = BitFieldRead64 (PFAddress, 12, 20);
+          Address = (UINT64)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+          if ((Address & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1)) == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
+            Existed = TRUE;
+          }
+        }
+      }
+    }
+  }
+
+  //
+  // If page entry does not exist in page table at all, create a new entry.
+  //
+  if (!Existed) {
+
+    if (IsAddressValid (PFAddress, &Nx)) {
+      //
+      // If page fault address above 4GB is in protected range but it causes a page fault exception,
+      // Will create a page entry for this page fault address, make page table entry as present/rw and execution-disable.
+      // this access is not saved into SMM profile data.
+      //
+      *IsValidPFAddress = TRUE;
+    }
+
+    //
+    // Create one entry in page table for page fault address.
+    //
+    SmiDefaultPFHandler ();
+    //
+    // Find the page table entry created just now by re-walking from CR3 with
+    // the faulting address re-read from CR2.
+    //
+    PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
+    PFAddress = AsmReadCr2 ();
+    // PML4E
+    PTIndex = BitFieldRead64 (PFAddress, 39, 47);
+    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+    // PDPTE
+    PTIndex = BitFieldRead64 (PFAddress, 30, 38);
+    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+    // PD
+    PTIndex = BitFieldRead64 (PFAddress, 21, 29);
+    Address = PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK;
+    //
+    // Check if 2MB-page entry need be changed to 4KB-page entry.
+    //
+    if (IsAddressSplit (Address)) {
+      AcquirePage (&PageTable[PTIndex]);
+
+      // PTE
+      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+      //
+      // Populate all 512 4KB entries covering the original 2MB range.
+      //
+      for (Index = 0; Index < 512; Index++) {
+        PageTable[Index] = Address | IA32_PG_RW | IA32_PG_P;
+        if (!IsAddressValid (Address, &Nx)) {
+          //
+          // Protected range: strip present/rw so any access faults.
+          //
+          PageTable[Index] = PageTable[Index] & (INTN)(INT32)(~(IA32_PG_RW | IA32_PG_P));
+        }
+        if (Nx && mXdSupported) {
+          PageTable[Index] = PageTable[Index] | IA32_PG_NX;
+        }
+        if (Address == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
+          //
+          // Remember the PTE index of the faulting page for the recording below.
+          //
+          PTIndex = Index;
+        }
+        Address += SIZE_4KB;
+      } // end for PT
+    } else {
+      //
+      // Update 2MB page entry.
+      //
+      if (!IsAddressValid (Address, &Nx)) {
+        //
+        // Patch to remove present flag and rw flag.
+        //
+        PageTable[PTIndex] = PageTable[PTIndex] & (INTN)(INT32)(~(IA32_PG_RW | IA32_PG_P));
+      }
+      //
+      // Set XD bit to 1
+      //
+      if (Nx && mXdSupported) {
+        PageTable[PTIndex] = PageTable[PTIndex] | IA32_PG_NX;
+      }
+    }
+  }
+
+  //
+  // Record old entries with non-present status so the single-step handler can
+  // restore them. Old entries include the memory which instruction is at and
+  // the memory which instruction accesses.
+  //
+  ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
+  if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
+    PFIndex = mPFEntryCount[CpuIndex];
+    mLastPFEntryValue[CpuIndex][PFIndex] = PageTable[PTIndex];
+    mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
+    mPFEntryCount[CpuIndex]++;
+  }
+
+  //
+  // Add present flag or clear XD flag to make page fault handler succeed.
+  //
+  PageTable[PTIndex] |= (UINT64)(IA32_PG_RW | IA32_PG_P);
+  if ((ErrorCode & IA32_PF_EC_ID) != 0) {
+    //
+    // If page fault is caused by instruction fetch, clear XD bit in the entry.
+    //
+    PageTable[PTIndex] &= ~IA32_PG_NX;
+  }
+
+  return;
+}
+\r
+/**
+  Clear TF in FLAGS.
+
+  Clears the Trap Flag (RFLAGS.TF, bit 8) in the saved processor context so
+  the CPU stops single-stepping when the interrupted code resumes.
+
+  @param SystemContext A pointer to the processor context when
+                       the interrupt occurred on the processor.
+
+**/
+VOID
+ClearTrapFlag (
+  IN OUT EFI_SYSTEM_CONTEXT SystemContext
+  )
+{
+  SystemContext.SystemContextX64->Rflags &= (UINTN) ~BIT8;
+}