--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2006 - 2015, Intel Corporation. All rights reserved.<BR>\r
+# This program and the accompanying materials\r
+# are licensed and made available under the terms and conditions of the BSD License\r
+# which accompanies this distribution. The full text of the license may be found at\r
+# http://opensource.org/licenses/bsd-license.php.\r
+#\r
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+#\r
+# Module Name:\r
+#\r
+# MpFuncs.S\r
+#\r
+# Abstract:\r
+#\r
+# This is the assembly code for Multi-processor S3 support\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+.equ VacantFlag, 0x0\r
+.equ NotVacantFlag, 0xff\r
+\r
+# Offsets of the data block that the wakeup buffer places immediately after\r
+# the relocated code (hence relative to RendezvousFunnelProcEnd).\r
+# NOTE(review): the gaps (+0x20 GDTR, +0x2A IDTR, +0x34 buffer start) imply\r
+# 10-byte descriptor pseudo-registers; confirm against the C-side structure\r
+# that fills this buffer.\r
+.equ LockLocation, RendezvousFunnelProcEnd - RendezvousFunnelProcStart\r
+.equ StackStartAddressLocation, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x08\r
+.equ StackSizeLocation, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x10\r
+.equ CProcedureLocation, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x18\r
+.equ GdtrLocation, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x20\r
+.equ IdtrLocation, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x2A\r
+.equ BufferStartLocation, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x34\r
+.equ Cr3OffsetLocation, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x38\r
+\r
+#-------------------------------------------------------------------------------------\r
+#RendezvousFunnelProc procedure follows. All APs execute their procedure. This\r
+#procedure serializes all the AP processors through an Init sequence. It must be\r
+#noted that APs arrive here very raw...ie: real mode, no stack.\r
+#ALSO THIS PROCEDURE IS EXECUTED BY APs ONLY ON 16 BIT MODE. HENCE THIS PROC\r
+#IS IN MACHINE CODE.\r
+#-------------------------------------------------------------------------------------\r
+#RendezvousFunnelProc (&WakeUpBuffer,MemAddress);\r
+\r
+# NOTE(review): in GAS, ".code:" defines a label named ".code" -- it is not a\r
+# section directive.  Kept as-is to match the existing build; confirm intended.\r
+.code:\r
+\r
+ASM_GLOBAL ASM_PFX(RendezvousFunnelProc)\r
+ASM_PFX(RendezvousFunnelProc):\r
+RendezvousFunnelProcStart:\r
+\r
+# At this point CS = 0x(vv00) and ip= 0x0.\r
+# Stages: real mode (hand-encoded bytes below) -> protected mode ->\r
+# long mode -> take lock -> set up stack -> FPU init -> AP C procedure.\r
+# The code runs from a relocated copy, so all data is addressed via the\r
+# offsets defined relative to RendezvousFunnelProcEnd.\r
+\r
+ .byte 0x8c,0xc8 # mov ax, cs\r
+ .byte 0x8e,0xd8 # mov ds, ax\r
+ .byte 0x8e,0xc0 # mov es, ax\r
+ .byte 0x8e,0xd0 # mov ss, ax\r
+ .byte 0x33,0xc0 # xor ax, ax\r
+ .byte 0x8e,0xe0 # mov fs, ax\r
+ .byte 0x8e,0xe8 # mov gs, ax\r
+\r
+flat32Start:\r
+\r
+ .byte 0xBE\r
+ .word BufferStartLocation\r
+ .byte 0x66,0x8B,0x14 # mov edx,dword ptr [si] ; EDX is keeping the start address of wakeup buffer\r
+\r
+ .byte 0xBE\r
+ .word Cr3OffsetLocation\r
+ .byte 0x66,0x8B,0xC # mov ecx,dword ptr [si] ; ECX is keeping the value of CR3\r
+\r
+ .byte 0xBE\r
+ .word GdtrLocation\r
+ .byte 0x66 # db 66h\r
+ .byte 0x2E,0xF,0x1,0x14 # lgdt fword ptr cs:[si]\r
+\r
+ .byte 0xBE\r
+ .word IdtrLocation\r
+ .byte 0x66 # db 66h\r
+ .byte 0x2E,0xF,0x1,0x1C # lidt fword ptr cs:[si]\r
+\r
+ .byte 0x33,0xC0 # xor ax, ax\r
+ .byte 0x8E,0xD8 # mov ds, ax\r
+\r
+ .byte 0xF,0x20,0xC0 # mov eax, cr0 ; Get control register 0\r
+ .byte 0x66,0x83,0xC8,0x1 # or eax, 000000001h ; Set PE bit (bit #0)\r
+ .byte 0xF,0x22,0xC0 # mov cr0, eax\r
+\r
+FLAT32_JUMP:\r
+\r
+# Far-jump offset/selector are patched at runtime into the relocated copy.\r
+ .byte 0x66,0x67,0xEA # far jump\r
+ .long 0x0 # 32-bit offset\r
+ .word 0x20 # 16-bit selector\r
+\r
+PMODE_ENTRY: # protected mode entry point\r
+\r
+ .byte 0x66,0xB8,0x18,0x0 # mov ax, 18h\r
+ .byte 0x66,0x8E,0xD8 # mov ds, ax\r
+ .byte 0x66,0x8E,0xC0 # mov es, ax\r
+ .byte 0x66,0x8E,0xE0 # mov fs, ax\r
+ .byte 0x66,0x8E,0xE8 # mov gs, ax\r
+ .byte 0x66,0x8E,0xD0 # mov ss, ax ; Flat mode setup.\r
+\r
+ .byte 0xF,0x20,0xE0 # mov eax, cr4\r
+ .byte 0xF,0xBA,0xE8,0x5 # bts eax, 5 ; Set PAE (CR4 bit 5)\r
+ .byte 0xF,0x22,0xE0 # mov cr4, eax\r
+\r
+ .byte 0xF,0x22,0xD9 # mov cr3, ecx\r
+\r
+ .byte 0x8B,0xF2 # mov esi, edx ; Save wakeup buffer address\r
+\r
+ .byte 0xB9\r
+ .long 0xC0000080 # mov ecx, 0c0000080h ; EFER MSR number.\r
+ .byte 0xF,0x32 # rdmsr ; Read EFER.\r
+ .byte 0xF,0xBA,0xE8,0x8 # bts eax, 8 ; Set LME=1.\r
+ .byte 0xF,0x30 # wrmsr ; Write EFER.\r
+\r
+ .byte 0xF,0x20,0xC0 # mov eax, cr0 ; Read CR0.\r
+ .byte 0xF,0xBA,0xE8,0x1F # bts eax, 31 ; Set PG=1.\r
+ .byte 0xF,0x22,0xC0 # mov cr0, eax ; Write CR0.\r
+\r
+LONG_JUMP:\r
+\r
+# Far-jump offset/selector are patched at runtime into the relocated copy.\r
+ .byte 0x67,0xEA # far jump\r
+ .long 0x0 # 32-bit offset\r
+ .word 0x38 # 16-bit selector\r
+\r
+LongModeStart:\r
+\r
+ movw $0x30,%ax\r
+ .byte 0x66\r
+ movw %ax,%ds\r
+ .byte 0x66\r
+ movw %ax,%es\r
+ .byte 0x66\r
+ movw %ax,%ss\r
+\r
+# Spin until the byte lock at [buffer + LockLocation] is vacant (esi still\r
+# holds the wakeup buffer address saved above).\r
+ movl %esi,%edi\r
+ addl $LockLocation, %edi\r
+ movb $NotVacantFlag, %al\r
+TestLock:\r
+ xchgb (%edi), %al\r
+ cmpb $NotVacantFlag, %al\r
+ jz TestLock\r
+\r
+ProgramStack:\r
+\r
+# rsp = StackStartAddress + StackSize; the sum is written back so the next\r
+# AP through this funnel gets the next stack slice.\r
+ movl %esi,%edi\r
+ addl $StackSizeLocation, %edi\r
+ movq (%edi), %rax\r
+ movl %esi,%edi\r
+ addl $StackStartAddressLocation, %edi\r
+ addq (%edi), %rax\r
+ movq %rax, %rsp\r
+ movq %rax, (%edi)\r
+\r
+Releaselock:\r
+\r
+ movb $VacantFlag, %al\r
+ movl %esi,%edi\r
+ addl $LockLocation, %edi\r
+ xchgb (%edi), %al\r
+\r
+ #\r
+ # Call assembly function to initialize FPU.\r
+ #\r
+ movabsq $ASM_PFX(InitializeFloatingPointUnits), %rax\r
+ subq $0x20, %rsp\r
+ call *%rax\r
+ addq $0x20, %rsp\r
+ #\r
+ # Call C Function\r
+ #\r
+ movl %esi,%edi\r
+ addl $CProcedureLocation, %edi\r
+ movq (%edi), %rax\r
+\r
+ testq %rax, %rax\r
+ jz GoToSleep\r
+\r
+ subq $0x20, %rsp\r
+ call *%rax\r
+ addq $0x20, %rsp\r
+\r
+GoToSleep:\r
+ cli\r
+ hlt\r
+ jmp .-2 # loop back to the hlt if woken by an NMI/SMI\r
+\r
+RendezvousFunnelProcEnd:\r
+\r
+\r
+#-------------------------------------------------------------------------------------\r
+# AsmGetAddressMap (&AddressMap);\r
+#-------------------------------------------------------------------------------------\r
+# comments here for definition of address map\r
+ASM_GLOBAL ASM_PFX(AsmGetAddressMap)\r
+ASM_PFX(AsmGetAddressMap):\r
+# Fills the caller's address-map structure (pointer in %rcx; EDK2 uses the\r
+# Microsoft x64 EFIAPI convention) with the funnel's absolute start address\r
+# and the offsets of its mode-transition labels.\r
+ movabsq $RendezvousFunnelProcStart, %rax\r
+ movq %rax, (%rcx) # +0x00: absolute start of funnel code\r
+ movq $(PMODE_ENTRY - RendezvousFunnelProcStart), 0x08(%rcx)\r
+ movq $(FLAT32_JUMP - RendezvousFunnelProcStart), 0x10(%rcx)\r
+ movq $(RendezvousFunnelProcEnd - RendezvousFunnelProcStart), 0x18(%rcx) # code size\r
+ movq $(LongModeStart - RendezvousFunnelProcStart), 0x20(%rcx)\r
+ movq $(LONG_JUMP - RendezvousFunnelProcStart), 0x28(%rcx)\r
+ ret\r
+\r
--- /dev/null
+;------------------------------------------------------------------------------ ;\r
+; Copyright (c) 2006 - 2015, Intel Corporation. All rights reserved.<BR>\r
+; This program and the accompanying materials\r
+; are licensed and made available under the terms and conditions of the BSD License\r
+; which accompanies this distribution. The full text of the license may be found at\r
+; http://opensource.org/licenses/bsd-license.php.\r
+;\r
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+;\r
+; Module Name:\r
+;\r
+; MpFuncs.asm\r
+;\r
+; Abstract:\r
+;\r
+; This is the assembly code for Multi-processor S3 support\r
+;\r
+;-------------------------------------------------------------------------------\r
+\r
+EXTERN InitializeFloatingPointUnits:PROC\r
+\r
+VacantFlag Equ 00h\r
+NotVacantFlag Equ 0ffh\r
+\r
+; Offsets of the data block placed immediately after the relocated funnel\r
+; code in the wakeup buffer.  Must stay in sync with the GAS (MpFuncs.S)\r
+; equates and the C-side structure that fills this buffer.\r
+LockLocation equ RendezvousFunnelProcEnd - RendezvousFunnelProcStart\r
+StackStartAddressLocation equ LockLocation + 08h\r
+StackSizeLocation equ LockLocation + 10h\r
+CProcedureLocation equ LockLocation + 18h\r
+GdtrLocation equ LockLocation + 20h\r
+IdtrLocation equ LockLocation + 2Ah\r
+BufferStartLocation equ LockLocation + 34h\r
+Cr3OffsetLocation equ LockLocation + 38h\r
+\r
+;-------------------------------------------------------------------------------------\r
+;RendezvousFunnelProc procedure follows. All APs execute their procedure. This\r
+;procedure serializes all the AP processors through an Init sequence. It must be\r
+;noted that APs arrive here very raw...ie: real mode, no stack.\r
+;ALSO THIS PROCEDURE IS EXECUTED BY APs ONLY ON 16 BIT MODE. HENCE THIS PROC\r
+;IS IN MACHINE CODE.\r
+;-------------------------------------------------------------------------------------\r
+;RendezvousFunnelProc (&WakeUpBuffer,MemAddress);\r
+\r
+;text SEGMENT\r
+.code\r
+\r
+RendezvousFunnelProc PROC\r
+RendezvousFunnelProcStart::\r
+\r
+; At this point CS = 0x(vv00) and ip= 0x0.\r
+; Stages: real mode (hand-encoded bytes below) -> protected mode ->\r
+; long mode -> take lock -> set up stack -> FPU init -> AP C procedure.\r
+; The code runs from a relocated copy, so all data is addressed via the\r
+; offsets defined relative to RendezvousFunnelProcEnd.\r
+\r
+ db 8ch, 0c8h ; mov ax, cs\r
+ db 8eh, 0d8h ; mov ds, ax\r
+ db 8eh, 0c0h ; mov es, ax\r
+ db 8eh, 0d0h ; mov ss, ax\r
+ db 33h, 0c0h ; xor ax, ax\r
+ db 8eh, 0e0h ; mov fs, ax\r
+ db 8eh, 0e8h ; mov gs, ax\r
+\r
+flat32Start::\r
+\r
+ db 0BEh\r
+ dw BufferStartLocation ; mov si, BufferStartLocation\r
+ db 66h, 8Bh, 14h ; mov edx,dword ptr [si] ; EDX is keeping the start address of wakeup buffer\r
+\r
+ db 0BEh\r
+ dw Cr3OffsetLocation ; mov si, Cr3Location\r
+ db 66h, 8Bh, 0Ch ; mov ecx,dword ptr [si] ; ECX is keeping the value of CR3\r
+\r
+ db 0BEh\r
+ dw GdtrLocation ; mov si, GdtrProfile\r
+ db 66h ; db 66h\r
+ db 2Eh, 0Fh, 01h, 14h ; lgdt fword ptr cs:[si]\r
+\r
+ db 0BEh\r
+ dw IdtrLocation ; mov si, IdtrProfile\r
+ db 66h ; db 66h\r
+ db 2Eh, 0Fh, 01h, 1Ch ; lidt fword ptr cs:[si]\r
+\r
+ db 33h, 0C0h ; xor ax, ax\r
+ db 8Eh, 0D8h ; mov ds, ax\r
+\r
+ db 0Fh, 20h, 0C0h ; mov eax, cr0 ; Get control register 0\r
+ db 66h, 83h, 0C8h, 01h ; or eax, 000000001h ; Set PE bit (bit #0)\r
+ db 0Fh, 22h, 0C0h ; mov cr0, eax\r
+\r
+FLAT32_JUMP::\r
+\r
+; Far-jump offset/selector are patched at runtime into the relocated copy.\r
+ db 66h, 67h, 0EAh ; far jump\r
+ dd 0h ; 32-bit offset\r
+ dw 20h ; 16-bit selector\r
+\r
+PMODE_ENTRY:: ; protected mode entry point\r
+\r
+ db 66h, 0B8h, 18h, 00h ; mov ax, 18h\r
+ db 66h, 8Eh, 0D8h ; mov ds, ax\r
+ db 66h, 8Eh, 0C0h ; mov es, ax\r
+ db 66h, 8Eh, 0E0h ; mov fs, ax\r
+ db 66h, 8Eh, 0E8h ; mov gs, ax\r
+ db 66h, 8Eh, 0D0h ; mov ss, ax ; Flat mode setup.\r
+\r
+ db 0Fh, 20h, 0E0h ; mov eax, cr4\r
+ db 0Fh, 0BAh, 0E8h, 05h ; bts eax, 5 ; Set PAE (CR4 bit 5)\r
+ db 0Fh, 22h, 0E0h ; mov cr4, eax\r
+\r
+ db 0Fh, 22h, 0D9h ; mov cr3, ecx\r
+\r
+ db 8Bh, 0F2h ; mov esi, edx ; Save wakeup buffer address\r
+\r
+ db 0B9h\r
+ dd 0C0000080h ; mov ecx, 0c0000080h ; EFER MSR number.\r
+ db 0Fh, 32h ; rdmsr ; Read EFER.\r
+ db 0Fh, 0BAh, 0E8h, 08h ; bts eax, 8 ; Set LME=1.\r
+ db 0Fh, 30h ; wrmsr ; Write EFER.\r
+\r
+ db 0Fh, 20h, 0C0h ; mov eax, cr0 ; Read CR0.\r
+ db 0Fh, 0BAh, 0E8h, 1Fh ; bts eax, 31 ; Set PG=1.\r
+ db 0Fh, 22h, 0C0h ; mov cr0, eax ; Write CR0.\r
+\r
+LONG_JUMP::\r
+\r
+; Far-jump offset/selector are patched at runtime into the relocated copy.\r
+ db 67h, 0EAh ; far jump\r
+ dd 0h ; 32-bit offset\r
+ dw 38h ; 16-bit selector\r
+\r
+LongModeStart::\r
+\r
+ mov ax, 30h\r
+ mov ds, ax\r
+ mov es, ax\r
+ mov ss, ax\r
+\r
+; Spin until the byte lock at [buffer + LockLocation] is vacant (esi still\r
+; holds the wakeup buffer address saved above).\r
+ mov edi, esi\r
+ add edi, LockLocation\r
+ mov al, NotVacantFlag\r
+TestLock::\r
+ xchg byte ptr [edi], al\r
+ cmp al, NotVacantFlag\r
+ jz TestLock\r
+\r
+ProgramStack::\r
+\r
+; rsp = StackStartAddress + StackSize; the sum is written back so the next\r
+; AP through this funnel gets the next stack slice.\r
+ mov edi, esi\r
+ add edi, StackSizeLocation\r
+ mov rax, qword ptr [edi]\r
+ mov edi, esi\r
+ add edi, StackStartAddressLocation\r
+ add rax, qword ptr [edi]\r
+ mov rsp, rax\r
+ mov qword ptr [edi], rax\r
+\r
+Releaselock::\r
+\r
+ mov al, VacantFlag\r
+ mov edi, esi\r
+ add edi, LockLocation\r
+ xchg byte ptr [edi], al\r
+\r
+ ;\r
+ ; Call assembly function to initialize FPU.\r
+ ;\r
+ mov rax, InitializeFloatingPointUnits\r
+ sub rsp, 20h\r
+ call rax\r
+ add rsp, 20h\r
+\r
+ ;\r
+ ; Call C Function\r
+ ;\r
+ mov edi, esi\r
+ add edi, CProcedureLocation\r
+ mov rax, qword ptr [edi]\r
+\r
+ test rax, rax\r
+ jz GoToSleep\r
+\r
+ sub rsp, 20h\r
+ call rax\r
+ add rsp, 20h\r
+\r
+GoToSleep::\r
+ cli\r
+ hlt\r
+ jmp $-2 ; loop back to the hlt if woken by an NMI/SMI\r
+\r
+RendezvousFunnelProcEnd::\r
+RendezvousFunnelProc ENDP\r
+\r
+\r
+;-------------------------------------------------------------------------------------\r
+; AsmGetAddressMap (&AddressMap);\r
+;-------------------------------------------------------------------------------------\r
+; comments here for definition of address map\r
+AsmGetAddressMap PROC\r
+; Fills the caller's address-map structure (pointer in rcx, Microsoft x64\r
+; convention) with the funnel's absolute start address and the offsets of\r
+; its mode-transition labels.\r
+ mov rax, offset RendezvousFunnelProcStart\r
+ mov qword ptr [rcx], rax ; +00h: absolute start of funnel code\r
+ mov qword ptr [rcx+8h], PMODE_ENTRY - RendezvousFunnelProcStart\r
+ mov qword ptr [rcx+10h], FLAT32_JUMP - RendezvousFunnelProcStart\r
+ mov qword ptr [rcx+18h], RendezvousFunnelProcEnd - RendezvousFunnelProcStart ; code size\r
+ mov qword ptr [rcx+20h], LongModeStart - RendezvousFunnelProcStart\r
+ mov qword ptr [rcx+28h], LONG_JUMP - RendezvousFunnelProcStart\r
+ ret\r
+\r
+AsmGetAddressMap ENDP\r
+\r
+END\r
--- /dev/null
+/** @file\r
+Page Fault (#PF) handler for X64 processors\r
+\r
+Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
+This program and the accompanying materials\r
+are licensed and made available under the terms and conditions of the BSD License\r
+which accompanies this distribution. The full text of the license may be found at\r
+http://opensource.org/licenses/bsd-license.php\r
+\r
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include "PiSmmCpuDxeSmm.h"\r
+\r
+#define PAGE_TABLE_PAGES 8\r
+#define ACC_MAX_BIT BIT3\r
+// Pool of free pages used by the page-fault handler to build page tables.\r
+LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);\r
+// Serializes SmiPFHandler across processors.\r
+SPIN_LOCK mPFLock;\r
+// Set once in SmmInitPageTable from CPUID (see Is1GPageSupport).\r
+BOOLEAN m1GPageTableSupport = FALSE;\r
+\r
+/**\r
+ Check if 1-GByte pages is supported by processor or not.\r
+\r
+ @retval TRUE 1-GByte pages is supported.\r
+ @retval FALSE 1-GByte pages is not supported.\r
+\r
+**/\r
+BOOLEAN\r
+Is1GPageSupport (\r
+ VOID\r
+ )\r
+{\r
+ UINT32 RegEax;\r
+ UINT32 RegEdx;\r
+\r
+ // Extended leaf 0x80000000 returns the highest supported extended leaf in EAX.\r
+ AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);\r
+ if (RegEax >= 0x80000001) {\r
+ // Leaf 0x80000001: EDX bit 26 (Page1GB) indicates 1-GByte page support.\r
+ AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);\r
+ if ((RegEdx & BIT26) != 0) {\r
+ return TRUE;\r
+ }\r
+ }\r
+ return FALSE;\r
+}\r
+\r
+/**\r
+ Set sub-entries number in entry.\r
+\r
+ @param[in, out] Entry Pointer to entry\r
+ @param[in] SubEntryNum Sub-entries number based on 0:\r
+ 0 means there is 1 sub-entry under this entry\r
+ 0x1ff means there is 512 sub-entries under this entry\r
+\r
+**/\r
+VOID\r
+SetSubEntriesNum (\r
+ IN OUT UINT64 *Entry,\r
+ IN UINT64 SubEntryNum\r
+ )\r
+{\r
+ //\r
+ // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry;\r
+ // the CPU ignores these bits, so they are free for bookkeeping.\r
+ //\r
+ *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);\r
+}\r
+\r
+/**\r
+ Return sub-entries number in entry.\r
+\r
+ @param[in] Entry Pointer to entry\r
+\r
+ @return Sub-entries number based on 0:\r
+ 0 means there is 1 sub-entry under this entry\r
+ 0x1ff means there is 512 sub-entries under this entry\r
+**/\r
+UINT64\r
+GetSubEntriesNum (\r
+ IN UINT64 *Entry\r
+ )\r
+{\r
+ //\r
+ // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry;\r
+ // see SetSubEntriesNum for the writer side.\r
+ //\r
+ return BitFieldRead64 (*Entry, 52, 60);\r
+}\r
+\r
+/**\r
+ Create PageTable for SMM use.\r
+\r
+ @return The address of PML4 (to set CR3).\r
+\r
+**/\r
+UINT32\r
+SmmInitPageTable (\r
+ VOID\r
+ )\r
+{\r
+ EFI_PHYSICAL_ADDRESS Pages;\r
+ UINT64 *PTEntry;\r
+ LIST_ENTRY *FreePage;\r
+ UINTN Index;\r
+ UINTN PageFaultHandlerHookAddress;\r
+ IA32_IDT_GATE_DESCRIPTOR *IdtEntry;\r
+\r
+ //\r
+ // Initialize spin lock\r
+ //\r
+ InitializeSpinLock (&mPFLock);\r
+\r
+ m1GPageTableSupport = Is1GPageSupport ();\r
+ //\r
+ // Generate PAE page table for the first 4GB memory space\r
+ //\r
+ Pages = Gen4GPageTable (PAGE_TABLE_PAGES + 1);\r
+\r
+ //\r
+ // Set IA32_PG_PMNT bit to mask this entry.  PMNT-masked entries are\r
+ // skipped by ReclaimPages(), so the low-4GB mapping is never reclaimed.\r
+ //\r
+ PTEntry = (UINT64*)(UINTN)Pages;\r
+ for (Index = 0; Index < 4; Index++) {\r
+ PTEntry[Index] |= IA32_PG_PMNT;\r
+ }\r
+\r
+ //\r
+ // Fill Page-Table-Level4 (PML4) entry.\r
+ // NOTE(review): assumes Gen4GPageTable() placed PAGE_TABLE_PAGES + 1 extra\r
+ // pages (PML4 + page pool) immediately below the returned address -- confirm\r
+ // against its implementation.\r
+ //\r
+ PTEntry = (UINT64*)(UINTN)(Pages - EFI_PAGES_TO_SIZE (PAGE_TABLE_PAGES + 1));\r
+ *PTEntry = Pages + IA32_PG_P;\r
+ ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));\r
+ //\r
+ // Set sub-entries number\r
+ //\r
+ SetSubEntriesNum (PTEntry, 3);\r
+\r
+ //\r
+ // Add remaining pages (between the PML4 page and Pages) to the page pool\r
+ //\r
+ FreePage = (LIST_ENTRY*)(PTEntry + EFI_PAGE_SIZE / sizeof (*PTEntry));\r
+ while ((UINTN)FreePage < Pages) {\r
+ InsertTailList (&mPagePool, FreePage);\r
+ FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);\r
+ }\r
+\r
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
+ //\r
+ // Set own Page Fault entry instead of the default one, because SMM Profile\r
+ // feature depends on IRET instruction to do Single Step\r
+ //\r
+ PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;\r
+ IdtEntry = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;\r
+ IdtEntry += EXCEPT_IA32_PAGE_FAULT;\r
+ IdtEntry->Bits.OffsetLow = (UINT16)PageFaultHandlerHookAddress;\r
+ IdtEntry->Bits.Reserved_0 = 0;\r
+ IdtEntry->Bits.GateType = IA32_IDT_GATE_TYPE_INTERRUPT_32;\r
+ IdtEntry->Bits.OffsetHigh = (UINT16)(PageFaultHandlerHookAddress >> 16);\r
+ IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);\r
+ IdtEntry->Bits.Reserved_1 = 0;\r
+ } else {\r
+ //\r
+ // Register Smm Page Fault Handler\r
+ //\r
+ SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);\r
+ }\r
+\r
+ //\r
+ // Additional SMM IDT initialization for SMM stack guard\r
+ //\r
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
+ InitializeIDTSmmStackGuard ();\r
+ }\r
+\r
+ //\r
+ // Return the address of PML4 (to set CR3).\r
+ // NOTE(review): the UINT32 return presumes the PML4 page resides below\r
+ // 4GB -- confirm the allocation source guarantees this.\r
+ //\r
+ return (UINT32)(UINTN)PTEntry;\r
+}\r
+\r
+/**\r
+ Set access record in entry.\r
+\r
+ @param[in, out] Entry Pointer to entry\r
+ @param[in] Acc Access record value\r
+\r
+**/\r
+VOID\r
+SetAccNum (\r
+ IN OUT UINT64 *Entry,\r
+ IN UINT64 Acc\r
+ )\r
+{\r
+ //\r
+ // Access record is saved in BIT9 to BIT11 (ignored/available field) in\r
+ // Entry; used by ReclaimPages() as an approximate LRU counter.\r
+ //\r
+ *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);\r
+}\r
+\r
+/**\r
+ Return access record in entry.\r
+\r
+ @param[in] Entry Pointer to entry\r
+\r
+ @return Access record value.\r
+\r
+**/\r
+UINT64\r
+GetAccNum (\r
+ IN UINT64 *Entry\r
+ )\r
+{\r
+ //\r
+ // Access record is saved in BIT9 to BIT11 (ignored/available field) in\r
+ // Entry; see SetAccNum for the writer side.\r
+ //\r
+ return BitFieldRead64 (*Entry, 9, 11);\r
+}\r
+\r
+/**\r
+ Return and update the access record in entry.\r
+\r
+ @param[in, out] Entry Pointer to entry\r
+\r
+ @return Access record value.\r
+\r
+**/\r
+UINT64\r
+GetAndUpdateAccNum (\r
+ IN OUT UINT64 *Entry\r
+ )\r
+{\r
+ UINT64 Acc;\r
+\r
+ Acc = GetAccNum (Entry);\r
+ if ((*Entry & IA32_PG_A) != 0) {\r
+ //\r
+ // If this entry has been accessed, clear access flag in Entry and update access record\r
+ // to the initial value 7, adding ACC_MAX_BIT is to make it larger than others\r
+ // (recently-accessed entries are the last candidates for reclaim)\r
+ //\r
+ *Entry &= ~(UINT64)(UINTN)IA32_PG_A;\r
+ SetAccNum (Entry, 0x7);\r
+ return (0x7 + ACC_MAX_BIT);\r
+ } else {\r
+ if (Acc != 0) {\r
+ //\r
+ // If the access record is not the smallest value 0, minus 1 and update the access record field\r
+ //\r
+ SetAccNum (Entry, Acc - 1);\r
+ }\r
+ }\r
+ return Acc;\r
+}\r
+\r
+/**\r
+ Reclaim free pages for PageFault handler.\r
+\r
+ Search the whole entries tree to find the leaf entry that has the smallest\r
+ access record value. Insert the page pointed by this leaf entry into the\r
+ page pool. And check its upper entries if need to be inserted into the page\r
+ pool or not.\r
+\r
+**/\r
+VOID\r
+ReclaimPages (\r
+ VOID\r
+ )\r
+{\r
+ UINT64 *Pml4;\r
+ UINT64 *Pdpt;\r
+ UINT64 *Pdt;\r
+ UINTN Pml4Index;\r
+ UINTN PdptIndex;\r
+ UINTN PdtIndex;\r
+ UINTN MinPml4;\r
+ UINTN MinPdpt;\r
+ UINTN MinPdt;\r
+ UINT64 MinAcc;\r
+ UINT64 Acc;\r
+ UINT64 SubEntriesNum;\r
+ BOOLEAN PML4EIgnore;\r
+ BOOLEAN PDPTEIgnore;\r
+ UINT64 *ReleasePageAddress;\r
+\r
+ Pml4 = NULL;\r
+ Pdpt = NULL;\r
+ Pdt = NULL;\r
+ MinAcc = (UINT64)-1;\r
+ MinPml4 = (UINTN)-1;\r
+ MinPdpt = (UINTN)-1;\r
+ MinPdt = (UINTN)-1;\r
+ Acc = 0;\r
+ ReleasePageAddress = 0;\r
+\r
+ //\r
+ // First, find the leaf entry that has the smallest access record value\r
+ //\r
+ Pml4 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);\r
+ for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {\r
+ if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {\r
+ //\r
+ // If the PML4 entry is not present or is masked, skip it\r
+ //\r
+ continue;\r
+ }\r
+ Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & gPhyMask);\r
+ PML4EIgnore = FALSE;\r
+ for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {\r
+ if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {\r
+ //\r
+ // If the PDPT entry is not present or is masked, skip it\r
+ //\r
+ if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {\r
+ //\r
+ // If the PDPT entry is masked, we will ignore checking the PML4 entry\r
+ //\r
+ PML4EIgnore = TRUE;\r
+ }\r
+ continue;\r
+ }\r
+ if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {\r
+ //\r
+ // It's not 1-GByte pages entry, it should be a PDPT entry,\r
+ // we will not check PML4 entry more\r
+ //\r
+ PML4EIgnore = TRUE;\r
+ Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & gPhyMask);\r
+ PDPTEIgnore = FALSE;\r
+ for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {\r
+ if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {\r
+ //\r
+ // If the PD entry is not present or is masked, skip it\r
+ //\r
+ if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {\r
+ //\r
+ // If the PD entry is masked, we will not check the PDPT entry any more\r
+ //\r
+ PDPTEIgnore = TRUE;\r
+ }\r
+ continue;\r
+ }\r
+ if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {\r
+ //\r
+ // It's not 2 MByte page table entry, it should be PD entry\r
+ // we will find the entry has the smallest access record value\r
+ //\r
+ PDPTEIgnore = TRUE;\r
+ Acc = GetAndUpdateAccNum (Pdt + PdtIndex);\r
+ if (Acc < MinAcc) {\r
+ //\r
+ // If the PD entry has the smallest access record value,\r
+ // save the Page address to be released\r
+ //\r
+ MinAcc = Acc;\r
+ MinPml4 = Pml4Index;\r
+ MinPdpt = PdptIndex;\r
+ MinPdt = PdtIndex;\r
+ ReleasePageAddress = Pdt + PdtIndex;\r
+ }\r
+ }\r
+ }\r
+ if (!PDPTEIgnore) {\r
+ //\r
+ // If this PDPT entry has no PDT entry pointing to 4 KByte pages,\r
+ // it should only have entries pointing to 2 MByte pages\r
+ //\r
+ Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);\r
+ if (Acc < MinAcc) {\r
+ //\r
+ // If the PDPT entry has the smallest access record value,\r
+ // save the Page address to be released\r
+ //\r
+ MinAcc = Acc;\r
+ MinPml4 = Pml4Index;\r
+ MinPdpt = PdptIndex;\r
+ MinPdt = (UINTN)-1;\r
+ ReleasePageAddress = Pdpt + PdptIndex;\r
+ }\r
+ }\r
+ }\r
+ }\r
+ if (!PML4EIgnore) {\r
+ //\r
+ // If this PML4 entry has no PDPT entry pointing to 2 MByte pages,\r
+ // it should only have entries pointing to 1 GByte pages\r
+ //\r
+ Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);\r
+ if (Acc < MinAcc) {\r
+ //\r
+ // If the PML4 entry has the smallest access record value,\r
+ // save the Page address to be released\r
+ //\r
+ MinAcc = Acc;\r
+ MinPml4 = Pml4Index;\r
+ MinPdpt = (UINTN)-1;\r
+ MinPdt = (UINTN)-1;\r
+ ReleasePageAddress = Pml4 + Pml4Index;\r
+ }\r
+ }\r
+ }\r
+ //\r
+ // Make sure one PML4/PDPT/PD entry is selected\r
+ //\r
+ ASSERT (MinAcc != (UINT64)-1);\r
+\r
+ //\r
+ // Secondly, insert the page pointed by this entry into page pool and clear this entry\r
+ //\r
+ InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & gPhyMask));\r
+ *ReleasePageAddress = 0;\r
+\r
+ //\r
+ // Lastly, check this entry's upper entries if need to be inserted into page pool\r
+ // or not\r
+ //\r
+ while (TRUE) {\r
+ if (MinPdt != (UINTN)-1) {\r
+ //\r
+ // If 4 KByte Page Table is released, check the PDPT entry\r
+ //\r
+ Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & gPhyMask);\r
+ SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);\r
+ if (SubEntriesNum == 0) {\r
+ //\r
+ // Release the empty Page Directory table if there was no more 4 KByte Page Table entry,\r
+ // then clear the PDPT entry\r
+ //\r
+ InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & gPhyMask));\r
+ Pdpt[MinPdpt] = 0;\r
+ //\r
+ // Go on checking the PML4 table\r
+ //\r
+ MinPdt = (UINTN)-1;\r
+ continue;\r
+ }\r
+ //\r
+ // Update the sub-entries field in PDPT entry and exit\r
+ //\r
+ SetSubEntriesNum (Pdpt + MinPdpt, SubEntriesNum - 1);\r
+ break;\r
+ }\r
+ if (MinPdpt != (UINTN)-1) {\r
+ //\r
+ // One 2MB Page Table is released or Page Directory table is released, check the PML4 entry\r
+ //\r
+ SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);\r
+ if (SubEntriesNum == 0) {\r
+ //\r
+ // Release the empty PDPT if there was no more 1-GByte page entry,\r
+ // then clear the PML4 entry\r
+ //\r
+ InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & gPhyMask));\r
+ Pml4[MinPml4] = 0;\r
+ MinPdpt = (UINTN)-1;\r
+ continue;\r
+ }\r
+ //\r
+ // Update the sub-entries field in PML4 entry and exit\r
+ //\r
+ SetSubEntriesNum (Pml4 + MinPml4, SubEntriesNum - 1);\r
+ break;\r
+ }\r
+ //\r
+ // PML4 table has been released before, exit it\r
+ //\r
+ break;\r
+ }\r
+}\r
+\r
+/**\r
+ Allocate free Page for PageFault handler use.\r
+\r
+ @return Page address.\r
+\r
+**/\r
+UINT64\r
+AllocPage (\r
+ VOID\r
+ )\r
+{\r
+ UINT64 RetVal;\r
+\r
+ if (IsListEmpty (&mPagePool)) {\r
+ //\r
+ // If page pool is empty, reclaim the used pages and insert one into page pool\r
+ //\r
+ ReclaimPages ();\r
+ }\r
+\r
+ //\r
+ // Get one free page and remove it from page pool.  The LIST_ENTRY node\r
+ // sits at the start of the free page, so its address is the page address.\r
+ //\r
+ RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;\r
+ RemoveEntryList (mPagePool.ForwardLink);\r
+ //\r
+ // Clean this page and return\r
+ //\r
+ ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);\r
+ return RetVal;\r
+}\r
+\r
+/**\r
+ Page Fault handler for SMM use.\r
+\r
+**/\r
+VOID\r
+SmiDefaultPFHandler (\r
+ VOID\r
+ )\r
+{\r
+ UINT64 *PageTable;\r
+ UINT64 *Pml4;\r
+ UINT64 PFAddress;\r
+ UINTN StartBit;\r
+ UINTN EndBit;\r
+ UINT64 PTIndex;\r
+ UINTN Index;\r
+ SMM_PAGE_SIZE_TYPE PageSize;\r
+ UINTN NumOfPages;\r
+ UINTN PageAttribute;\r
+ EFI_STATUS Status;\r
+ UINT64 *UpperEntry;\r
+\r
+ //\r
+ // Set default SMM page attribute.\r
+ // NOTE(review): these defaults are re-applied below when the platform hook\r
+ // fails, so this first assignment is redundant but harmless.\r
+ //\r
+ PageSize = SmmPageSize2M;\r
+ NumOfPages = 1;\r
+ PageAttribute = 0;\r
+\r
+ EndBit = 0;\r
+ Pml4 = (UINT64*)(AsmReadCr3 () & gPhyMask);\r
+ PFAddress = AsmReadCr2 ();\r
+\r
+ Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);\r
+ //\r
+ // If platform not support page table attribute, set default SMM page attribute\r
+ //\r
+ if (Status != EFI_SUCCESS) {\r
+ PageSize = SmmPageSize2M;\r
+ NumOfPages = 1;\r
+ PageAttribute = 0;\r
+ }\r
+ // Clamp out-of-range platform answers to safe values.\r
+ if (PageSize >= MaxSmmPageSizeType) {\r
+ PageSize = SmmPageSize2M;\r
+ }\r
+ if (NumOfPages > 512) {\r
+ NumOfPages = 512;\r
+ }\r
+\r
+ switch (PageSize) {\r
+ case SmmPageSize4K:\r
+ //\r
+ // BIT12 to BIT20 is Page Table index\r
+ //\r
+ EndBit = 12;\r
+ break;\r
+ case SmmPageSize2M:\r
+ //\r
+ // BIT21 to BIT29 is Page Directory index\r
+ //\r
+ EndBit = 21;\r
+ PageAttribute |= (UINTN)IA32_PG_PS;\r
+ break;\r
+ case SmmPageSize1G:\r
+ if (!m1GPageTableSupport) {\r
+ DEBUG ((EFI_D_ERROR, "1-GByte pages is not supported!"));\r
+ ASSERT (FALSE);\r
+ }\r
+ //\r
+ // BIT30 to BIT38 is Page Directory Pointer Table index\r
+ //\r
+ EndBit = 30;\r
+ PageAttribute |= (UINTN)IA32_PG_PS;\r
+ break;\r
+ default:\r
+ ASSERT (FALSE);\r
+ }\r
+\r
+ //\r
+ // If execute-disable is enabled, set NX bit\r
+ //\r
+ if (mXdEnabled) {\r
+ PageAttribute |= IA32_PG_NX;\r
+ }\r
+\r
+ for (Index = 0; Index < NumOfPages; Index++) {\r
+ PageTable = Pml4;\r
+ UpperEntry = NULL;\r
+ //\r
+ // Walk down from the PML4 level (bits 39..47) to the level selected by\r
+ // EndBit, creating intermediate tables on demand.\r
+ //\r
+ for (StartBit = 39; StartBit > EndBit; StartBit -= 9) {\r
+ PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);\r
+ if ((PageTable[PTIndex] & IA32_PG_P) == 0) {\r
+ //\r
+ // If the entry is not present, allocate one page from page pool for it\r
+ //\r
+ PageTable[PTIndex] = AllocPage () | IA32_PG_RW | IA32_PG_P;\r
+ } else {\r
+ //\r
+ // Save the upper entry address\r
+ //\r
+ UpperEntry = PageTable + PTIndex;\r
+ }\r
+ //\r
+ // BIT9 to BIT11 of entry is used to save access record,\r
+ // initialize value is 7\r
+ //\r
+ PageTable[PTIndex] |= (UINT64)IA32_PG_A;\r
+ SetAccNum (PageTable + PTIndex, 7);\r
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);\r
+ }\r
+\r
+ // Here StartBit == EndBit: index into the leaf-level table.\r
+ PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);\r
+ if ((PageTable[PTIndex] & IA32_PG_P) != 0) {\r
+ //\r
+ // Check if the entry has already existed, this issue may occur when the different\r
+ // size page entries created under the same entry\r
+ //\r
+ DEBUG ((EFI_D_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));\r
+ DEBUG ((EFI_D_ERROR, "New page table overlapped with old page table!\n"));\r
+ ASSERT (FALSE);\r
+ }\r
+ //\r
+ // Fill the new entry: physical address aligned down to the page size,\r
+ // plus the attributes selected above\r
+ //\r
+ PageTable[PTIndex] = (PFAddress & gPhyMask & ~((1ull << EndBit) - 1)) |\r
+ PageAttribute | IA32_PG_A | IA32_PG_RW | IA32_PG_P;\r
+ if (UpperEntry != NULL) {\r
+ SetSubEntriesNum (UpperEntry, GetSubEntriesNum (UpperEntry) + 1);\r
+ }\r
+ //\r
+ // Get the next page address if we need to create more page tables\r
+ //\r
+ PFAddress += (1ull << EndBit);\r
+ }\r
+}\r
+\r
+/**\r
+ ThePage Fault handler wrapper for SMM use.\r
+\r
+ @param InterruptType Defines the type of interrupt or exception that\r
+ occurred on the processor.This parameter is processor architecture specific.\r
+ @param SystemContext A pointer to the processor context when\r
+ the interrupt occurred on the processor.\r
+**/\r
+VOID\r
+EFIAPI\r
+SmiPFHandler (\r
+ IN EFI_EXCEPTION_TYPE InterruptType,\r
+ IN EFI_SYSTEM_CONTEXT SystemContext\r
+ )\r
+{\r
+ UINTN PFAddress;\r
+\r
+ ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);\r
+\r
+ // Only one processor at a time may service a page fault.\r
+ AcquireSpinLock (&mPFLock);\r
+\r
+ PFAddress = AsmReadCr2 ();\r
+\r
+ //\r
+ // If a page fault occurs in SMRAM range, it should be in a SMM stack guard page;\r
+ // that is an unrecoverable stack overflow, so dead-loop with the lock held.\r
+ //\r
+ if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&\r
+ (PFAddress >= mCpuHotPlugData.SmrrBase) &&\r
+ (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {\r
+ DEBUG ((EFI_D_ERROR, "SMM stack overflow!\n"));\r
+ CpuDeadLoop ();\r
+ }\r
+\r
+ //\r
+ // If a page fault occurs in SMM range\r
+ //\r
+ if ((PFAddress < mCpuHotPlugData.SmrrBase) ||\r
+ (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {\r
+ // IA32_PF_EC_ID set means the fault was an instruction fetch outside SMRAM.\r
+ if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {\r
+ DEBUG ((EFI_D_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));\r
+ DEBUG_CODE (\r
+ DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);\r
+ );\r
+ CpuDeadLoop ();\r
+ }\r
+ }\r
+\r
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
+ SmmProfilePFHandler (\r
+ SystemContext.SystemContextX64->Rip,\r
+ SystemContext.SystemContextX64->ExceptionData\r
+ );\r
+ } else {\r
+ SmiDefaultPFHandler ();\r
+ }\r
+\r
+ ReleaseSpinLock (&mPFLock);\r
+}\r
--- /dev/null
+/** @file\r
+Semaphore mechanism to indicate to the BSP that an AP has exited SMM\r
+after SMBASE relocation.\r
+\r
+Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
+This program and the accompanying materials\r
+are licensed and made available under the terms and conditions of the BSD License\r
+which accompanies this distribution. The full text of the license may be found at\r
+http://opensource.org/licenses/bsd-license.php\r
+\r
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include "PiSmmCpuDxeSmm.h"\r
+\r
+// 32-bit views of the variables below, consumed by the 32-bit AP stub\r
+// (defined in the corresponding assembly file).\r
+extern UINT32 mSmmRelocationOriginalAddressPtr32;\r
+extern UINT32 mRebasedFlagAddr32;\r
+\r
+// Original SMM return address hooked by SemaphoreHook.\r
+UINTN mSmmRelocationOriginalAddress;\r
+// Set to TRUE by the semaphore stub once an AP has exited SMM.\r
+volatile BOOLEAN *mRebasedFlag;\r
+\r
+/**\r
+AP Semaphore operation in 32-bit mode while BSP runs in 64-bit mode.\r
+**/\r
+VOID\r
+SmmRelocationSemaphoreComplete32 (\r
+ VOID\r
+ );\r
+\r
+/**\r
+ Hook return address of SMM Save State so that semaphore code\r
+ can be executed immediately after AP exits SMM to indicate to\r
+ the BSP that an AP has exited SMM after SMBASE relocation.\r
+\r
+ @param[in] CpuIndex The processor index.\r
+ @param[in] RebasedFlag A pointer to a flag that is set to TRUE\r
+ immediately after AP exits SMM.\r
+\r
+**/\r
+VOID\r
+SemaphoreHook (\r
+ IN UINTN CpuIndex,\r
+ IN volatile BOOLEAN *RebasedFlag\r
+ )\r
+{\r
+ SMRAM_SAVE_STATE_MAP *CpuState;\r
+ UINTN TempValue;\r
+\r
+ mRebasedFlag = RebasedFlag;\r
+ // NOTE(review): the 32-bit copy is consumed by the 32-bit semaphore stub,\r
+ // so the flag must reside below 4GB for the truncation to be safe -- confirm.\r
+ mRebasedFlagAddr32 = (UINT32)(UINTN)mRebasedFlag;\r
+\r
+ // Save state of the default SMBASE; valid because SMBASE relocation has\r
+ // not happened yet for this AP.\r
+ CpuState = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);\r
+ mSmmRelocationOriginalAddress = HookReturnFromSmm (\r
+ CpuIndex,\r
+ CpuState,\r
+ (UINT64)(UINTN)&SmmRelocationSemaphoreComplete32,\r
+ (UINT64)(UINTN)&SmmRelocationSemaphoreComplete\r
+ );\r
+\r
+ //\r
+ // Use temp value to fix ICC compiler warning\r
+ //\r
+ TempValue = (UINTN)&mSmmRelocationOriginalAddress;\r
+ mSmmRelocationOriginalAddressPtr32 = (UINT32)TempValue;\r
+}\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
+# This program and the accompanying materials\r
+# are licensed and made available under the terms and conditions of the BSD License\r
+# which accompanies this distribution. The full text of the license may be found at\r
+# http://opensource.org/licenses/bsd-license.php.\r
+#\r
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+#\r
+# Module Name:\r
+#\r
+# SmiEntry.S\r
+#\r
+# Abstract:\r
+#\r
+# Code template of the SMI handler for a particular processor\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+ASM_GLOBAL ASM_PFX(gcSmiHandlerTemplate)\r
+ASM_GLOBAL ASM_PFX(gcSmiHandlerSize)\r
+ASM_GLOBAL ASM_PFX(gSmiCr3)\r
+ASM_GLOBAL ASM_PFX(gSmiStack)\r
+ASM_GLOBAL ASM_PFX(gSmbase)\r
+ASM_GLOBAL ASM_PFX(FeaturePcdGet (PcdCpuSmmDebug))\r
+ASM_GLOBAL ASM_PFX(gSmiHandlerIdtr)\r
+\r
+#\r
+# Constants relating to PROCESSOR_SMM_DESCRIPTOR\r
+#\r
+.equ DSC_OFFSET, 0xfb00\r
+.equ DSC_GDTPTR, 0x30\r
+.equ DSC_GDTSIZ, 0x38\r
+.equ DSC_CS, 14\r
+.equ DSC_DS, 16\r
+.equ DSC_SS, 18\r
+.equ DSC_OTHERSEG, 20\r
+#\r
+# Constants relating to CPU State Save Area\r
+#\r
+.equ SSM_DR6, 0xffd0\r
+.equ SSM_DR7, 0xffc8\r
+\r
+.equ PROTECT_MODE_CS, 0x08\r
+.equ PROTECT_MODE_DS, 0x20\r
+.equ LONG_MODE_CS, 0x38\r
+.equ TSS_SEGMENT, 0x40\r
+.equ GDT_SIZE, 0x50\r
+\r
+ .text\r
+\r
#------------------------------------------------------------------------------
# gcSmiHandlerTemplate - SMI handler code template (GAS/AT&T syntax).
#
# This template is copied, per CPU, to SMBASE + 0x8000 (the architectural SMI
# entry point). Execution starts in 16-bit real(big) mode with no stack, which
# is why the first part is hand-emitted machine code via .byte/.word.
# gSmbase, gSmiStack and gSmiCr3 are placeholders patched by C code before the
# template is copied; gcSmiHandlerSize reports the template length to the
# copier. Do NOT change instruction encodings or label offsets: the patch
# offsets (-2, -6, +2 relative to _GdtDesc) and the SMBASE-relative .long
# expressions depend on the exact byte layout.
#------------------------------------------------------------------------------
ASM_PFX(gcSmiHandlerTemplate):

_SmiEntryPoint:
    #
    # The encoding of BX in 16-bit addressing mode is the same as of RDI in 64-
    # bit addressing mode. And that coincidence has been used in the following
    # "64-bit like" 16-bit code. Be aware that once RDI is referenced as a
    # base address register, it is actually BX that is referenced.
    #
    .byte 0xbb                          # mov bx, imm16
    .word _GdtDesc - _SmiEntryPoint + 0x8000
    #
    # Fix the GDT descriptor: fetch limit and base from the per-CPU
    # PROCESSOR_SMM_DESCRIPTOR at cs:DSC_OFFSET and store them at _GdtDesc.
    #
    .byte 0x2e,0xa1                     # mov ax, cs:[offset16]
    .word DSC_OFFSET + DSC_GDTSIZ
    .byte 0x48                          # dec ax      (limit = size - 1)
    .byte 0x2e                          # cs: prefix for next instruction
    movl %eax, (%rdi)                   # mov cs:[bx], ax
    .byte 0x66,0x2e,0xa1                # mov eax, cs:[offset16]
    .word DSC_OFFSET + DSC_GDTPTR
    .byte 0x2e                          # cs: prefix
    movw %ax, 2(%rdi)                   # decodes as mov cs:[bx + 2], eax
    .byte 0x66,0x2e                     # operand-size + cs: prefixes
    lgdt (%rdi)                         # lgdt fword ptr cs:[bx]
    #
    # Patch ProtectedMode Segment (selector word of the far jump below,
    # located 2 bytes before _GdtDesc)
    #
    .byte 0xb8                          # mov ax, imm16
    .word PROTECT_MODE_CS
    .byte 0x2e                          # cs: prefix
    movl %eax, -2(%rdi)                 # mov cs:[bx - 2], ax
    #
    # Patch ProtectedMode entry (offset dword of the far jump, 6 bytes
    # before _GdtDesc); gSmbase is patched with this CPU's SMBASE.
    #
    .byte 0x66, 0xbf                    # mov edi, SMBASE
ASM_PFX(gSmbase): .space 4
    lea ((ProtectedMode - _SmiEntryPoint) + 0x8000)(%edi), %ax
    .byte 0x2e                          # cs: prefix
    movw %ax, -6(%rdi)                  # mov cs:[bx - 6], eax
    #
    # Switch into ProtectedMode: clear PG/CD/NW etc., set PE/MP/TS-related bits
    #
    movq %cr0, %rbx
    .byte 0x66                          # operand-size override (16-bit mode)
    andl $0x9ffafff3, %ebx
    .byte 0x66
    orl $0x00000023, %ebx

    movq %rbx, %cr0
    .byte 0x66, 0xea                    # far jmp ptr16:32 (patched above)
    .space 6

_GdtDesc: .space 6                      # GDT pseudo-descriptor filled in above

ProtectedMode:
    # Now executing 32-bit protected-mode code; load flat data segments
    # and the patched per-CPU SMI stack.
    movw $PROTECT_MODE_DS, %ax
    movl %eax, %ds
    movl %eax, %es
    movl %eax, %fs
    movl %eax, %gs
    movl %eax, %ss
    .byte 0xbc                          # mov esp, imm32
ASM_PFX(gSmiStack): .space 4
    jmp ProtFlatMode

ProtFlatMode:
    .byte 0xb8                          # mov eax, imm32 (patched with CR3)
ASM_PFX(gSmiCr3): .space 4
    movq %rax, %cr3
    movl $0x668,%eax                    # as cr4.PGE is not set here, refresh cr3
    movq %rax, %cr4                     # in PreModifyMtrrs() to flush TLB.
# Load TSS: patch the TSS descriptor base (GDT base + GDT_SIZE) into the
# GDT entry at TSS_SEGMENT, clear its busy flag, then load the task register.
    subl $8, %esp                       # reserve room in stack
    sgdt (%rsp)
    movl 2(%rsp), %eax                  # eax = GDT base
    addl $8, %esp
    movl %eax, %edx
    addl $GDT_SIZE, %edx                # edx = TSS base (just after GDT)
    movb %dl, (TSS_SEGMENT + 2)(%rax)   # base 7:0
    movb %dh, (TSS_SEGMENT + 3)(%rax)   # base 15:8
    .byte 0xc1, 0xea, 0x10              # shr edx, 16
    movb %dl, (TSS_SEGMENT + 4)(%rax)   # base 23:16
    movb %dh, (TSS_SEGMENT + 7)(%rax)   # base 31:24
    movl %eax, %edx
    movb $0x89, %dl                     # type = available 64-bit TSS, present
    movb %dl, (TSS_SEGMENT + 5)(%rax)   # clear busy flag
    movl $TSS_SEGMENT, %eax
    ltr %ax

    #
    # Switch to LongMode: enable EFER.LME, set CR0.PG, then far-return into
    # the 64-bit code segment.
    #
    pushq $LONG_MODE_CS                 # push cs hard-coded here
    call Base                           # push return address for retf later
Base:
    addl $(LongMode - Base), (%rsp)     # offset for far retf, seg is the 1st arg
    movl $0xc0000080, %ecx              # IA32_EFER MSR
    rdmsr
    orb $1,%ah                          # set LME (bit 8)
    wrmsr
    movq %cr0, %rbx
    btsl $31, %ebx                      # set CR0.PG
    movq %rbx, %cr0
    retf
LongMode:                               # long mode (64-bit code) starts here
    movabsq $ASM_PFX(gSmiHandlerIdtr), %rax
    lidt (%rax)
    # Load data segments from the PROCESSOR_SMM_DESCRIPTOR (rdi = SMBASE here)
    lea (DSC_OFFSET)(%rdi), %ebx
    movw DSC_DS(%rbx), %ax
    movl %eax,%ds
    movw DSC_OTHERSEG(%rbx), %ax
    movl %eax,%es
    movl %eax,%fs
    movl %eax,%gs
    movw DSC_SS(%rbx), %ax
    movl %eax,%ss
# jmp _SmiHandler ; instruction is not needed (falls through)

_SmiHandler:
    # If PcdCpuSmmDebug is set, restore DR6/DR7 from the CPU save state so
    # hardware breakpoints survive into SMM. movabs is used deliberately to
    # avoid RIP-relative addressing in relocated code.
    movabsq $ASM_PFX(FeaturePcdGet (PcdCpuSmmDebug)), %rax
    cmpb $0, (%rax)
    jz L1

    # The displacements below compute the SMBASE-relative address of the
    # save-state DR6/DR7 fields from the current RIP.
    .byte 0x48, 0x8b, 0x0d              # mov rcx, [rip + disp32]
    .long SSM_DR6 - (. + 4 - _SmiEntryPoint + 0x8000)
    .byte 0x48, 0x8b, 0x15              # mov rdx, [rip + disp32]
    .long SSM_DR7 - (. + 4 - _SmiEntryPoint + 0x8000)
    movq %rcx, %dr6
    movq %rdx, %dr7
L1:

    movabsq $ASM_PFX(SmiRendezvous), %rax
    movq (%rsp), %rcx                   # rcx = CpuIndex (stored at stack top)
    # Save FP registers

    subq $0x208, %rsp                   # 512-byte FXSAVE area + alignment
    .byte 0x48                          # FXSAVE64
    fxsave (%rsp)

    addq $-0x20, %rsp                   # 32-byte shadow space for the call
    call *%rax                          # SmiRendezvous (CpuIndex)
    addq $0x20, %rsp

    #
    # Restore FP registers
    #
    .byte 0x48                          # FXRSTOR64
    fxrstor (%rsp)

    # Mirror of the entry-side debug-register handling: write DR6/DR7 back
    # into the CPU save state before RSM.
    movabsq $ASM_PFX(FeaturePcdGet (PcdCpuSmmDebug)), %rax
    cmpb $0, (%rax)
    jz L2

    movq %dr7, %rdx
    movq %dr6, %rcx
    .byte 0x48, 0x89, 0x15              # mov [rip + disp32], rdx
    .long SSM_DR7 - (. + 4 - _SmiEntryPoint + 0x8000)
    .byte 0x48, 0x89, 0x0d              # mov [rip + disp32], rcx
    .long SSM_DR6 - (. + 4 - _SmiEntryPoint + 0x8000)
L2:
    rsm                                 # resume interrupted context

ASM_PFX(gcSmiHandlerSize): .word . - _SmiEntryPoint
--- /dev/null
+;------------------------------------------------------------------------------ ;\r
+; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
+; This program and the accompanying materials\r
+; are licensed and made available under the terms and conditions of the BSD License\r
+; which accompanies this distribution. The full text of the license may be found at\r
+; http://opensource.org/licenses/bsd-license.php.\r
+;\r
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+;\r
+; Module Name:\r
+;\r
+; SmiEntry.asm\r
+;\r
+; Abstract:\r
+;\r
+; Code template of the SMI handler for a particular processor\r
+;\r
+;-------------------------------------------------------------------------------\r
+\r
+;\r
+; Variables referenced by C code\r
+;\r
+EXTERNDEF SmiRendezvous:PROC\r
+EXTERNDEF gcSmiHandlerTemplate:BYTE\r
+EXTERNDEF gcSmiHandlerSize:WORD\r
+EXTERNDEF gSmiCr3:DWORD\r
+EXTERNDEF gSmiStack:DWORD\r
+EXTERNDEF gSmbase:DWORD\r
+EXTERNDEF FeaturePcdGet (PcdCpuSmmDebug):BYTE\r
+EXTERNDEF gSmiHandlerIdtr:FWORD\r
+\r
+\r
+;\r
+; Constants relating to PROCESSOR_SMM_DESCRIPTOR\r
+;\r
+DSC_OFFSET EQU 0fb00h\r
+DSC_GDTPTR EQU 30h\r
+DSC_GDTSIZ EQU 38h\r
+DSC_CS EQU 14\r
+DSC_DS EQU 16\r
+DSC_SS EQU 18\r
+DSC_OTHERSEG EQU 20\r
+;\r
+; Constants relating to CPU State Save Area\r
+;\r
+SSM_DR6 EQU 0ffd0h\r
+SSM_DR7 EQU 0ffc8h\r
+\r
+PROTECT_MODE_CS EQU 08h\r
+PROTECT_MODE_DS EQU 20h\r
+LONG_MODE_CS EQU 38h\r
+TSS_SEGMENT EQU 40h\r
+GDT_SIZE EQU 50h\r
+\r
+ .code\r
+\r
;------------------------------------------------------------------------------
; gcSmiHandlerTemplate - SMI handler code template (MASM), twin of SmiEntry.S.
;
; Copied per CPU to SMBASE + 8000h (the architectural SMI entry point) and
; entered in 16-bit real(big) mode with no stack, hence the hand-emitted
; DB/DW machine code. gSmbase, gSmiStack and gSmiCr3 are patched by C code
; before the copy; gcSmiHandlerSize reports the template length. Do NOT alter
; encodings or offsets: the patch locations ([rdi-2]/[rdi-6]/[rdi+2] relative
; to _GdtDesc) and the $-relative DD expressions depend on exact byte layout.
;------------------------------------------------------------------------------
gcSmiHandlerTemplate LABEL BYTE

_SmiEntryPoint:
    ;
    ; The encoding of BX in 16-bit addressing mode is the same as of RDI in 64-
    ; bit addressing mode. And that coincidence has been used in the following
    ; "64-bit like" 16-bit code. Be aware that once RDI is referenced as a
    ; base address register, it is actually BX that is referenced.
    ;
    DB 0bbh                             ; mov bx, imm16
    DW offset _GdtDesc - _SmiEntryPoint + 8000h ; bx = GdtDesc offset
; fix GDT descriptor: copy limit/base from the PROCESSOR_SMM_DESCRIPTOR
; at cs:DSC_OFFSET into _GdtDesc
    DB 2eh, 0a1h                        ; mov ax, cs:[offset16]
    DW DSC_OFFSET + DSC_GDTSIZ
    DB 48h                              ; dec ax (limit = size - 1)
    DB 2eh                              ; cs: prefix
    mov [rdi], eax                      ; mov cs:[bx], ax
    DB 66h, 2eh, 0a1h                   ; mov eax, cs:[offset16]
    DW DSC_OFFSET + DSC_GDTPTR
    DB 2eh                              ; cs: prefix
    mov [rdi + 2], ax                   ; mov cs:[bx + 2], eax
    DB 66h, 2eh                         ; operand-size + cs: prefixes
    lgdt fword ptr [rdi]                ; lgdt fword ptr cs:[bx]
; Patch ProtectedMode Segment (selector word of the far jump, 2 bytes
; before _GdtDesc)
    DB 0b8h                             ; mov ax, imm16
    DW PROTECT_MODE_CS                  ; set AX for segment directly
    DB 2eh                              ; cs: prefix
    mov [rdi - 2], eax                  ; mov cs:[bx - 2], ax
; Patch ProtectedMode entry (offset dword of the far jump, 6 bytes before
; _GdtDesc); gSmbase is patched with this CPU's SMBASE
    DB 66h, 0bfh                        ; mov edi, SMBASE
gSmbase DD ?
    lea ax, [edi + (@ProtectedMode - _SmiEntryPoint) + 8000h]
    DB 2eh                              ; cs: prefix
    mov [rdi - 6], ax                   ; mov cs:[bx - 6], eax
; Switch into @ProtectedMode: clear PG/CD/NW, set PE and friends
    mov rbx, cr0
    DB 66h                              ; operand-size override (16-bit mode)
    and ebx, 9ffafff3h
    DB 66h
    or ebx, 00000023h

    mov cr0, rbx
    DB 66h, 0eah                        ; far jmp ptr16:32 (patched above)
    DD ?
    DW ?

_GdtDesc FWORD ?                        ; GDT pseudo-descriptor filled in above
@ProtectedMode:
; 32-bit protected mode: load flat data segments and the patched SMI stack
    mov ax, PROTECT_MODE_DS
    mov ds, ax
    mov es, ax
    mov fs, ax
    mov gs, ax
    mov ss, ax
    DB 0bch                             ; mov esp, imm32
gSmiStack DD ?
    jmp ProtFlatMode

ProtFlatMode:
    DB 0b8h                             ; mov eax, offset gSmiCr3
gSmiCr3 DD ?
    mov cr3, rax
    mov eax, 668h                       ; as cr4.PGE is not set here, refresh cr3
    mov cr4, rax                        ; in PreModifyMtrrs() to flush TLB.
; Load TSS: patch the TSS descriptor base (GDT base + GDT_SIZE) into the GDT
; entry at TSS_SEGMENT, clear its busy flag, then load the task register
    sub esp, 8                          ; reserve room in stack
    sgdt fword ptr [rsp]
    mov eax, [rsp + 2]                  ; eax = GDT base
    add esp, 8
    mov edx, eax
    add edx, GDT_SIZE                   ; edx = TSS base (just after GDT)
    mov [rax + TSS_SEGMENT + 2], dl     ; base 7:0
    mov [rax + TSS_SEGMENT + 3], dh     ; base 15:8
    DB 0c1h, 0eah, 10h                  ; shr edx, 16
    mov [rax + TSS_SEGMENT + 4], dl     ; base 23:16
    mov [rax + TSS_SEGMENT + 7], dh     ; base 31:24
    mov edx, eax
    mov dl, 89h                         ; type = available 64-bit TSS, present
    mov [rax + TSS_SEGMENT + 5], dl     ; clear busy flag
    mov eax, TSS_SEGMENT
    ltr ax

; Switch into @LongMode: enable EFER.LME, set CR0.PG, far-return into the
; 64-bit code segment
    push LONG_MODE_CS                   ; push cs hard-coded here
    call Base                           ; push return address for retf later
Base:
    add dword ptr [rsp], @LongMode - Base; offset for far retf, seg is the 1st arg
    mov ecx, 0c0000080h                 ; IA32_EFER MSR
    rdmsr
    or ah, 1                            ; set LME (bit 8)
    wrmsr
    mov rbx, cr0
    bts ebx, 31                         ; set CR0.PG
    mov cr0, rbx
    retf
@LongMode:                              ; long mode (64-bit code) starts here
    mov rax, offset gSmiHandlerIdtr
    lidt fword ptr [rax]
; load data segments from the PROCESSOR_SMM_DESCRIPTOR (rdi = SMBASE here)
    lea ebx, [rdi + DSC_OFFSET]
    mov ax, [rbx + DSC_DS]
    mov ds, eax
    mov ax, [rbx + DSC_OTHERSEG]
    mov es, eax
    mov fs, eax
    mov gs, eax
    mov ax, [rbx + DSC_SS]
    mov ss, eax
; jmp _SmiHandler ; instruction is not needed (falls through)

_SmiHandler:
;
; The following lines restore DR6 & DR7 before running C code. They are useful
; when you want to enable hardware breakpoints in SMM.
;
; NOTE: These lines might not be appreciated in runtime since they might
; conflict with OS debugging facilities. Turn them off in RELEASE.
;
    mov rax, offset FeaturePcdGet (PcdCpuSmmDebug) ;Get absolute address. Avoid RIP relative addressing
    cmp byte ptr [rax], 0
    jz @1

; The DD displacements compute the SMBASE-relative address of the save-state
; DR6/DR7 fields from the current RIP
    DB 48h, 8bh, 0dh                    ; mov rcx, [rip + disp32]
    DD SSM_DR6 - ($ + 4 - _SmiEntryPoint + 8000h)
    DB 48h, 8bh, 15h                    ; mov rdx, [rip + disp32]
    DD SSM_DR7 - ($ + 4 - _SmiEntryPoint + 8000h)
    mov dr6, rcx
    mov dr7, rdx
@1:
    mov rcx, [rsp]                      ; rcx <- CpuIndex
    mov rax, SmiRendezvous              ; rax <- absolute addr of SmiRendezvous

    ;
    ; Save FP registers
    ;
    sub rsp, 208h                       ; 512-byte FXSAVE area + alignment
    DB 48h                              ; FXSAVE64
    fxsave [rsp]

    add rsp, -20h                       ; 32-byte shadow space for the call
    call rax                            ; SmiRendezvous (CpuIndex)
    add rsp, 20h

    ;
    ; Restore FP registers
    ;
    DB 48h                              ; FXRSTOR64
    fxrstor [rsp]

; Mirror of the entry-side debug-register handling: write DR6/DR7 back into
; the CPU save state before RSM
    mov rax, offset FeaturePcdGet (PcdCpuSmmDebug) ;Get absolute address. Avoid RIP relative addressing
    cmp byte ptr [rax], 0
    jz @2

    mov rdx, dr7
    mov rcx, dr6
    DB 48h, 89h, 15h                    ; mov [rip + disp32], rdx
    DD SSM_DR7 - ($ + 4 - _SmiEntryPoint + 8000h)
    DB 48h, 89h, 0dh                    ; mov [rip + disp32], rcx
    DD SSM_DR6 - ($ + 4 - _SmiEntryPoint + 8000h)
@2:
    rsm                                 ; resume interrupted context

gcSmiHandlerSize DW $ - _SmiEntryPoint
+\r
+ END\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
+# This program and the accompanying materials\r
+# are licensed and made available under the terms and conditions of the BSD License\r
+# which accompanies this distribution. The full text of the license may be found at\r
+# http://opensource.org/licenses/bsd-license.php.\r
+#\r
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+#\r
+# Module Name:\r
+#\r
+# SmiException.S\r
+#\r
+# Abstract:\r
+#\r
+# Exception handlers used in SM mode\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+ASM_GLOBAL ASM_PFX(SmiPFHandler)\r
+ASM_GLOBAL ASM_PFX(gSmiMtrrs)\r
+ASM_GLOBAL ASM_PFX(gcSmiIdtr)\r
+ASM_GLOBAL ASM_PFX(gcSmiGdtr)\r
+ASM_GLOBAL ASM_PFX(gcPsd)\r
+\r
+ .data\r
+\r
#
# SMM runtime GDT, TSS and PROCESSOR_SMM_DESCRIPTOR template.
# Descriptor layout per Intel SDM: limit 15:0, base 15:0, base 23:16,
# access byte, flags/limit 19:16, base 31:24.
#
NullSeg: .quad 0                        # reserved by architecture
CodeSeg32:
    .word -1                            # LimitLow
    .word 0                             # BaseLow
    .byte 0                             # BaseMid
    .byte 0x9b                          # present, code, exec/read
    .byte 0xcf                          # LimitHigh (4KB gran, 32-bit)
    .byte 0                             # BaseHigh
ProtModeCodeSeg32:
    .word -1                            # LimitLow
    .word 0                             # BaseLow
    .byte 0                             # BaseMid
    .byte 0x9b                          # present, code, exec/read
    .byte 0xcf                          # LimitHigh
    .byte 0                             # BaseHigh
ProtModeSsSeg32:
    .word -1                            # LimitLow
    .word 0                             # BaseLow
    .byte 0                             # BaseMid
    .byte 0x93                          # present, data, read/write
    .byte 0xcf                          # LimitHigh
    .byte 0                             # BaseHigh
DataSeg32:
    .word -1                            # LimitLow
    .word 0                             # BaseLow
    .byte 0                             # BaseMid
    .byte 0x93                          # present, data, read/write
    .byte 0xcf                          # LimitHigh
    .byte 0                             # BaseHigh
CodeSeg16:
    .word -1                            # 16-bit code segment
    .word 0
    .byte 0
    .byte 0x9b
    .byte 0x8f                          # byte granularity flags for 16-bit
    .byte 0
DataSeg16:
    .word -1                            # 16-bit data segment
    .word 0
    .byte 0
    .byte 0x93
    .byte 0x8f
    .byte 0
CodeSeg64:
    .word -1                            # LimitLow
    .word 0                             # BaseLow
    .byte 0                             # BaseMid
    .byte 0x9b                          # present, code
    .byte 0xaf                          # LimitHigh (L=1: 64-bit code)
    .byte 0                             # BaseHigh
# TSS Segment for X64 specially
TssSeg:
    .word TSS_DESC_SIZE                 # LimitLow
    .word 0                             # BaseLow (patched at runtime)
    .byte 0                             # BaseMid
    .byte 0x89                          # present, available 64-bit TSS
    .byte 0xDB                          # LimitHigh
                                        # NOTE(review): the equivalent MASM
                                        # file (SmiEntry/SmiException.asm) uses
                                        # 080h here - confirm which flags/limit
                                        # nibble is intended.
    .byte 0                             # BaseHigh
    .long 0                             # BaseUpper
    .long 0                             # Reserved
.equ GDT_SIZE, .- NullSeg

# 104-byte 64-bit TSS body (RSP0-2, IST1-7, IOMap base), zero-initialized.
TssDescriptor:
    .space 104, 0
.equ TSS_DESC_SIZE, .- TssDescriptor

#
# This structure serves as a template for all processors.
# (PROCESSOR_SMM_DESCRIPTOR; copied/fixed up per CPU.)
#
ASM_PFX(gcPsd):
    .ascii "PSDSIG "
                                        # NOTE(review): signature field is
                                        # fixed-width - verify trailing-space
                                        # count survived copy/paste (expected
                                        # "PSDSIG" padded to 8 bytes).
    .word PSD_SIZE
    .word 2                             # descriptor version
    .word 1 << 2
    .word CODE_SEL
    .word DATA_SEL
    .word DATA_SEL
    .word DATA_SEL
    .word 0
    .quad 0
    .quad 0
    .quad 0                             # fixed in InitializeMpServiceData()
    .quad NullSeg                       # GDT base
    .long GDT_SIZE
    .long 0
    .space 24, 0
    .quad ASM_PFX(gSmiMtrrs)
.equ PSD_SIZE, . - ASM_PFX(gcPsd)

#
# CODE & DATA segments for SMM runtime
#
.equ CODE_SEL, CodeSeg64 - NullSeg
.equ DATA_SEL, DataSeg32 - NullSeg
.equ CODE32_SEL, CodeSeg32 - NullSeg

# GDTR/IDTR images handed to LGDT/LIDT (limit = size - 1).
ASM_PFX(gcSmiGdtr):
    .word GDT_SIZE - 1
    .quad NullSeg

ASM_PFX(gcSmiIdtr):
    .word IDT_SIZE - 1
    .quad _SmiIDT
+\r
+\r
+#\r
+# Here is the IDT. There are 32 (not 255) entries in it since only processor\r
+# generated exceptions will be handled.\r
+#\r
+_SmiIDT:\r
+# The following segment repeats 32 times:\r
+# No. 1\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 2\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 3\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 4\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 5\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 6\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 7\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 8\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 9\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 10\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 11\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 12\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 13\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 14\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 15\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 16\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 17\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 18\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 19\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 20\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 21\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 22\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 23\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 24\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 25\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 26\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 27\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 28\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 29\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 30\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 31\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+# No. 32\r
+ .word 0 # Offset 0:15\r
+ .word CODE_SEL\r
+ .byte 0 # Unused\r
+ .byte 0x8e # Interrupt Gate, Present\r
+ .word 0 # Offset 16:31\r
+ .quad 0 # Offset 32:63\r
+\r
+_SmiIDTEnd:\r
+\r
+.equ IDT_SIZE, (_SmiIDTEnd - _SmiIDT)\r
+\r
+ .text\r
+\r
+#------------------------------------------------------------------------------\r
+# _SmiExceptionEntryPoints is the collection of exception entry points followed\r
+# by a common exception handler.\r
+#\r
+# Stack frame would be as follows as specified in IA32 manuals:\r
+# +---------------------+ <-- 16-byte aligned ensured by processor\r
+# + Old SS +\r
+# +---------------------+\r
+# + Old RSP +\r
+# +---------------------+\r
+# + RFlags +\r
+# +---------------------+\r
+# + CS +\r
+# +---------------------+\r
+# + RIP +\r
+# +---------------------+\r
+# + Error Code +\r
+# +---------------------+\r
+# + Vector Number +\r
+# +---------------------+\r
+# + RBP +\r
+# +---------------------+ <-- RBP, 16-byte aligned\r
+#\r
+# RSP set to odd multiple of 8 at @CommonEntryPoint means ErrCode PRESENT\r
+#------------------------------------------------------------------------------\r
#------------------------------------------------------------------------------
# PageFaultIdtHandlerSmmProfile - #PF entry point used when SmmProfile is
# enabled. Builds a full EFI_SYSTEM_CONTEXT_X64 on the stack, calls
# SmiPFHandler(Vector, Context), restores state, sets RFLAGS.TF so execution
# resumes single-stepped, and IRETs. Every push/pop below is position-locked
# to the EFI_SYSTEM_CONTEXT_X64 field order - do not reorder.
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(PageFaultIdtHandlerSmmProfile)
ASM_PFX(PageFaultIdtHandlerSmmProfile):
    pushq $0x0e                         # Page Fault vector number
    # RSP being an odd multiple of 8 here means the CPU pushed an error code
    # (see frame diagram above); if not, synthesize a zero one.
    .byte 0x40, 0xf6, 0xc4, 0x08        # test spl, 8
    jnz L1
    pushq (%rsp)                        # duplicate vector down one slot
    movq $0, 8(%rsp)                    # and use the old slot as ErrCode = 0
L1:
    pushq %rbp
    movq %rsp, %rbp                     # rbp = frame base for fixed offsets

    #
    # Since here the stack pointer is 16-byte aligned, so
    # EFI_FX_SAVE_STATE_X64 of EFI_SYSTEM_CONTEXT_x64
    # is 16-byte aligned
    #

## UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax;
## UINT64 R8, R9, R10, R11, R12, R13, R14, R15;
    pushq %r15
    pushq %r14
    pushq %r13
    pushq %r12
    pushq %r11
    pushq %r10
    pushq %r9
    pushq %r8
    pushq %rax
    pushq %rcx
    pushq %rdx
    pushq %rbx
    pushq 48(%rbp)                      # RSP at time of fault
    pushq (%rbp)                        # RBP at time of fault
    pushq %rsi
    pushq %rdi

## UINT64 Gs, Fs, Es, Ds, Cs, Ss; insure high 16 bits of each is zero
    movzwq 56(%rbp), %rax
    pushq %rax                          # for ss
    movzwq 32(%rbp), %rax
    pushq %rax                          # for cs
    movq %ds, %rax
    pushq %rax
    movq %es, %rax
    pushq %rax
    movq %fs, %rax
    pushq %rax
    movq %gs, %rax
    pushq %rax

## UINT64 Rip;
    pushq 24(%rbp)

## UINT64 Gdtr[2], Idtr[2];
    subq $16, %rsp
    sidt (%rsp)
    subq $16, %rsp
    sgdt (%rsp)

## UINT64 Ldtr, Tr;
    xorq %rax, %rax
    strw %ax                            # task register
    pushq %rax
    sldtw %ax                           # LDT register
    pushq %rax

## UINT64 RFlags;
    pushq 40(%rbp)

## UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8;
    movq %cr8, %rax
    pushq %rax
    movq %cr4, %rax
    orq $0x208, %rax                    # set CR4.DE and CR4.OSFXSR
    movq %rax, %cr4                     # (OSFXSR needed for fxsave below)
    pushq %rax
    movq %cr3, %rax
    pushq %rax
    movq %cr2, %rax                     # faulting linear address
    pushq %rax
    xorq %rax, %rax                     # CR1 does not exist; store 0
    pushq %rax
    movq %cr0, %rax
    pushq %rax

## UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
    movq %dr7, %rax
    pushq %rax
    movq %dr6, %rax
    pushq %rax
    movq %dr3, %rax
    pushq %rax
    movq %dr2, %rax
    pushq %rax
    movq %dr1, %rax
    pushq %rax
    movq %dr0, %rax
    pushq %rax

## FX_SAVE_STATE_X64 FxSaveState;

    subq $512, %rsp                     # FXSAVE area (16-byte aligned here)
    movq %rsp, %rdi
    .byte 0xf, 0xae, 0x7                # fxsave [rdi]

# UEFI calling convention for x64 requires that Direction flag in EFLAGs is clear
    cld

## UINT32 ExceptionData;
    pushq 16(%rbp)                      # error code pushed/synthesized above

## call into exception handler
    movq 8(%rbp), %rcx                  # rcx = vector number (arg 1)
    movabsq $ASM_PFX(SmiPFHandler), %rax # absolute address, not RIP-relative

## Prepare parameter and call
    movq %rsp, %rdx                     # rdx = SystemContext (arg 2)
    #
    # Per X64 calling convention, allocate maximum parameter stack space
    # and make sure RSP is 16-byte aligned
    #
    subq $4 * 8 + 8, %rsp
    call *%rax
    addq $4 * 8 + 8, %rsp
    jmp L5                              # fall-through; kept for layout parity

L5:
## UINT64 ExceptionData;
    addq $8, %rsp

## FX_SAVE_STATE_X64 FxSaveState;

    movq %rsp, %rsi
    .byte 0xf, 0xae, 0xe                # fxrstor [rsi]
    addq $512, %rsp

## UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
## Skip restoration of DRx registers to support debuggers
## that set breakpoints in interrupt/exception context
    addq $8 * 6, %rsp

## UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8;
    popq %rax
    movq %rax, %cr0
    addq $8, %rsp                       # not for Cr1
    popq %rax
    movq %rax, %cr2
    popq %rax
    movq %rax, %cr3
    popq %rax
    movq %rax, %cr4
    popq %rax
    movq %rax, %cr8

## UINT64 RFlags;
    popq 40(%rbp)                       # write back into the IRET frame

## UINT64 Ldtr, Tr;
## UINT64 Gdtr[2], Idtr[2];
## Best not let anyone mess with these particular registers...
    addq $48, %rsp

## UINT64 Rip;
    popq 24(%rbp)                       # write back into the IRET frame

## UINT64 Gs, Fs, Es, Ds, Cs, Ss;
    popq %rax
    # mov gs, rax ; not for gs
    popq %rax
    # mov fs, rax ; not for fs
    # (X64 will not use fs and gs, so we do not restore it)
    popq %rax
    movq %rax, %es
    popq %rax
    movq %rax, %ds
    popq 32(%rbp)                       # for cs
    popq 56(%rbp)                       # for ss

## UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax;
## UINT64 R8, R9, R10, R11, R12, R13, R14, R15;
    popq %rdi
    popq %rsi
    addq $8, %rsp                       # not for rbp
    popq 48(%rbp)                       # for rsp
    popq %rbx
    popq %rdx
    popq %rcx
    popq %rax
    popq %r8
    popq %r9
    popq %r10
    popq %r11
    popq %r12
    popq %r13
    popq %r14
    popq %r15

    movq %rbp, %rsp                     # discard the context frame

# Enable TF bit after page fault handler runs so SmmProfile can single-step
# the faulting instruction once it is re-executed
    btsl $8, 40(%rsp)                   # RFLAGS.TF
    popq %rbp
    addq $16, %rsp                      # skip INT# & ErrCode
    iretq
+\r
#------------------------------------------------------------------------------
# InitializeIDTSmmStackGuard (VOID)
# If SMM Stack Guard feature is enabled, set the IST field of
# the interrupt gate for Page Fault Exception to be 1
# so #PF switches to the known-good stack from TSS.IST1.
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(InitializeIDTSmmStackGuard)
ASM_PFX(InitializeIDTSmmStackGuard):
    # _SmiIDT + 14 * 16: gate for vector 14 (#PF); each gate is 16 bytes.
    # Offset 4 within the gate is the byte holding the 3-bit IST index.
    movabsq $_SmiIDT + 14 * 16, %rax
    movb $1, 4(%rax)
    ret
--- /dev/null
+;------------------------------------------------------------------------------ ;\r
+; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
+; This program and the accompanying materials\r
+; are licensed and made available under the terms and conditions of the BSD License\r
+; which accompanies this distribution. The full text of the license may be found at\r
+; http://opensource.org/licenses/bsd-license.php.\r
+;\r
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+;\r
+; Module Name:\r
+;\r
+; SmiException.asm\r
+;\r
+; Abstract:\r
+;\r
+; Exception handlers used in SM mode\r
+;\r
+;-------------------------------------------------------------------------------\r
+\r
+EXTERNDEF SmiPFHandler:PROC\r
+EXTERNDEF gSmiMtrrs:QWORD\r
+EXTERNDEF gcSmiIdtr:FWORD\r
+EXTERNDEF gcSmiGdtr:FWORD\r
+EXTERNDEF gcPsd:BYTE\r
+\r
+ .const\r
+\r
;
; SMM runtime GDT, TSS and PROCESSOR_SMM_DESCRIPTOR template (MASM twin of
; SmiException.S). Descriptor layout per Intel SDM: limit 15:0, base 15:0,
; base 23:16, access byte, flags/limit 19:16, base 31:24.
;
NullSeg DQ 0                            ; reserved by architecture
CodeSeg32 LABEL QWORD
    DW -1                               ; LimitLow
    DW 0                                ; BaseLow
    DB 0                                ; BaseMid
    DB 9bh                              ; present, code, exec/read
    DB 0cfh                             ; LimitHigh (4KB gran, 32-bit)
    DB 0                                ; BaseHigh
ProtModeCodeSeg32 LABEL QWORD
    DW -1                               ; LimitLow
    DW 0                                ; BaseLow
    DB 0                                ; BaseMid
    DB 9bh                              ; present, code, exec/read
    DB 0cfh                             ; LimitHigh
    DB 0                                ; BaseHigh
ProtModeSsSeg32 LABEL QWORD
    DW -1                               ; LimitLow
    DW 0                                ; BaseLow
    DB 0                                ; BaseMid
    DB 93h                              ; present, data, read/write
    DB 0cfh                             ; LimitHigh
    DB 0                                ; BaseHigh
DataSeg32 LABEL QWORD
    DW -1                               ; LimitLow
    DW 0                                ; BaseLow
    DB 0                                ; BaseMid
    DB 93h                              ; present, data, read/write
    DB 0cfh                             ; LimitHigh
    DB 0                                ; BaseHigh
CodeSeg16 LABEL QWORD
    DW -1                               ; 16-bit code segment
    DW 0
    DB 0
    DB 9bh
    DB 8fh                              ; byte granularity flags for 16-bit
    DB 0
DataSeg16 LABEL QWORD
    DW -1                               ; 16-bit data segment
    DW 0
    DB 0
    DB 93h
    DB 8fh
    DB 0
CodeSeg64 LABEL QWORD
    DW -1                               ; LimitLow
    DW 0                                ; BaseLow
    DB 0                                ; BaseMid
    DB 9bh                              ; present, code
    DB 0afh                             ; LimitHigh (L=1: 64-bit code)
    DB 0                                ; BaseHigh
; TSS Segment for X64 specially
TssSeg LABEL QWORD
    DW TSS_DESC_SIZE                    ; LimitLow
    DW 0                                ; BaseLow (patched at runtime)
    DB 0                                ; BaseMid
    DB 89h                              ; present, available 64-bit TSS
    DB 080h                             ; LimitHigh
                                        ; NOTE(review): the GAS twin
                                        ; (SmiException.S) uses 0DBh here -
                                        ; confirm which value is intended.
    DB 0                                ; BaseHigh
    DD 0                                ; BaseUpper
    DD 0                                ; Reserved
GDT_SIZE = $ - offset NullSeg

; Create TSS Descriptor just after GDT (104-byte 64-bit TSS body)
TssDescriptor LABEL BYTE
    DD 0                                ; Reserved
    DQ 0                                ; RSP0
    DQ 0                                ; RSP1
    DQ 0                                ; RSP2
    DD 0                                ; Reserved
    DD 0                                ; Reserved
    DQ 0                                ; IST1
    DQ 0                                ; IST2
    DQ 0                                ; IST3
    DQ 0                                ; IST4
    DQ 0                                ; IST5
    DQ 0                                ; IST6
    DQ 0                                ; IST7
    DD 0                                ; Reserved
    DD 0                                ; Reserved
    DW 0                                ; Reserved
    DW 0                                ; I/O Map Base Address
TSS_DESC_SIZE = $ - offset TssDescriptor

;
; This structure serves as a template for all processors.
; (PROCESSOR_SMM_DESCRIPTOR; copied/fixed up per CPU.)
;
gcPsd LABEL BYTE
    DB 'PSDSIG '
                                        ; NOTE(review): signature field is
                                        ; fixed-width - verify trailing-space
                                        ; count survived copy/paste (expected
                                        ; 'PSDSIG' padded to 8 bytes).
    DW PSD_SIZE
    DW 2                                ; descriptor version
    DW 1 SHL 2
    DW CODE_SEL
    DW DATA_SEL
    DW DATA_SEL
    DW DATA_SEL
    DW 0
    DQ 0
    DQ 0
    DQ 0                                ; fixed in InitializeMpServiceData()
    DQ offset NullSeg                   ; GDT base
    DD GDT_SIZE
    DD 0
    DB 24 dup (0)
    DQ offset gSmiMtrrs
PSD_SIZE = $ - offset gcPsd

;
; CODE & DATA segments for SMM runtime
;
CODE_SEL = offset CodeSeg64 - offset NullSeg
DATA_SEL = offset DataSeg32 - offset NullSeg
CODE32_SEL = offset CodeSeg32 - offset NullSeg

; GDTR/IDTR images handed to LGDT/LIDT (limit = size - 1).
gcSmiGdtr LABEL FWORD
    DW GDT_SIZE - 1
    DQ offset NullSeg

gcSmiIdtr LABEL FWORD
    DW IDT_SIZE - 1
    DQ offset _SmiIDT

    .data

;
; Here is the IDT. There are 32 (not 255) entries in it since only processor
; generated exceptions will be handled.
; Each entry is a 16-byte interrupt gate; handler offsets are filled in at
; runtime (initially zero).
;
_SmiIDT:
REPEAT 32
    DW 0                                ; Offset 0:15
    DW CODE_SEL                         ; Segment selector
    DB 0                                ; Unused
    DB 8eh                              ; Interrupt Gate, Present
    DW 0                                ; Offset 16:31
    DQ 0                                ; Offset 32:63
    ENDM
_SmiIDTEnd:

IDT_SIZE = (offset _SmiIDTEnd - offset _SmiIDT)
+\r
+ .code\r
+\r
+;------------------------------------------------------------------------------
+; PageFaultIdtHandlerSmmProfile is the page-fault (#PF) entry point used when
+; the SMM profile feature is enabled.  It builds an EFI_SYSTEM_CONTEXT_X64
+; image on the stack, calls the C handler SmiPFHandler, restores the context,
+; and sets TF in the saved RFLAGS so execution single-steps after IRETQ.
+;
+; Stack frame would be as follows as specified in IA32 manuals:
+;
+; +---------------------+ <-- 16-byte aligned ensured by processor
+; + Old SS +
+; +---------------------+
+; + Old RSP +
+; +---------------------+
+; + RFlags +
+; +---------------------+
+; + CS +
+; +---------------------+
+; + RIP +
+; +---------------------+
+; + Error Code +
+; +---------------------+
+; + Vector Number +
+; +---------------------+
+; + RBP +
+; +---------------------+ <-- RBP, 16-byte aligned
+;
+; RSP set to odd multiple of 8 at @CommonEntryPoint means ErrCode PRESENT
+;------------------------------------------------------------------------------
+PageFaultIdtHandlerSmmProfile PROC
+ push 0eh ; push vector number: 0Eh = Page Fault
+ test spl, 8 ; odd multiple of 8 => ErrCode present
+ jnz @F
+ push [rsp] ; duplicate INT# if no ErrCode
+ mov qword ptr [rsp + 8], 0 ; and store a dummy (0) error code
+@@:
+ push rbp
+ mov rbp, rsp ; [rbp+8]=vector, [rbp+16]=ErrCode, [rbp+24]=RIP ...
+
+ ;
+ ; Since here the stack pointer is 16-byte aligned, so
+ ; EFI_FX_SAVE_STATE_X64 of EFI_SYSTEM_CONTEXT_x64
+ ; is 16-byte aligned
+ ;
+
+;; UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax;
+;; UINT64 R8, R9, R10, R11, R12, R13, R14, R15;
+ push r15
+ push r14
+ push r13
+ push r12
+ push r11
+ push r10
+ push r9
+ push r8
+ push rax
+ push rcx
+ push rdx
+ push rbx
+ push qword ptr [rbp + 48] ; RSP at time of fault (from HW frame)
+ push qword ptr [rbp] ; RBP saved in prologue
+ push rsi
+ push rdi
+
+;; UINT64 Gs, Fs, Es, Ds, Cs, Ss; ensure high 16 bits of each is zero
+ movzx rax, word ptr [rbp + 56]
+ push rax ; for ss
+ movzx rax, word ptr [rbp + 32]
+ push rax ; for cs
+ mov rax, ds
+ push rax
+ mov rax, es
+ push rax
+ mov rax, fs
+ push rax
+ mov rax, gs
+ push rax
+
+;; UINT64 Rip;
+ push qword ptr [rbp + 24]
+
+;; UINT64 Gdtr[2], Idtr[2];
+;; SIDT/SGDT store 10 bytes; a 16-byte slot is reserved for each
+ sub rsp, 16
+ sidt fword ptr [rsp]
+ sub rsp, 16
+ sgdt fword ptr [rsp]
+
+;; UINT64 Ldtr, Tr;
+ xor rax, rax
+ str ax
+ push rax
+ sldt ax
+ push rax
+
+;; UINT64 RFlags;
+ push qword ptr [rbp + 40]
+
+;; UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8;
+ mov rax, cr8
+ push rax
+ mov rax, cr4
+ or rax, 208h ; set CR4.DE (bit 3) and CR4.OSFXSR (bit 9) for FXSAVE below
+ mov cr4, rax
+ push rax
+ mov rax, cr3
+ push rax
+ mov rax, cr2
+ push rax
+ xor rax, rax ; CR1 does not exist; store 0
+ push rax
+ mov rax, cr0
+ push rax
+
+;; UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+ mov rax, dr7
+ push rax
+ mov rax, dr6
+ push rax
+ mov rax, dr3
+ push rax
+ mov rax, dr2
+ push rax
+ mov rax, dr1
+ push rax
+ mov rax, dr0
+ push rax
+
+;; FX_SAVE_STATE_X64 FxSaveState;
+
+ sub rsp, 512 ; FXSAVE area is 512 bytes, 16-byte aligned here
+ mov rdi, rsp
+ db 0fh, 0aeh, 00000111y ;fxsave [rdi] (hand-encoded; assembler lacks the form)
+
+; UEFI calling convention for x64 requires that Direction flag in EFLAGs is clear
+ cld
+
+;; UINT32 ExceptionData;
+ push qword ptr [rbp + 16]
+
+;; call into exception handler
+ mov rcx, [rbp + 8] ; arg1 = vector number
+ mov rax, SmiPFHandler
+
+;; Prepare parameter and call
+ mov rdx, rsp ; arg2 = pointer to the context just built
+ ;
+ ; Per X64 calling convention, allocate maximum parameter stack space
+ ; and make sure RSP is 16-byte aligned
+ ;
+ sub rsp, 4 * 8 + 8
+ call rax
+ add rsp, 4 * 8 + 8
+ jmp @F ; NOTE(review): jumps to the next line; kept as a no-op marker
+
+@@:
+;; UINT64 ExceptionData;
+ add rsp, 8
+
+;; FX_SAVE_STATE_X64 FxSaveState;
+
+ mov rsi, rsp
+ db 0fh, 0aeh, 00001110y ; fxrstor [rsi] (hand-encoded)
+ add rsp, 512
+
+;; UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+;; Skip restoration of DRx registers to support debuggers
+;; that set breakpoints in interrupt/exception context
+ add rsp, 8 * 6
+
+;; UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8;
+ pop rax
+ mov cr0, rax
+ add rsp, 8 ; not for Cr1
+ pop rax
+ mov cr2, rax
+ pop rax
+ mov cr3, rax
+ pop rax
+ mov cr4, rax
+ pop rax
+ mov cr8, rax
+
+;; UINT64 RFlags;
+ pop qword ptr [rbp + 40] ; write back into the hardware frame for IRETQ
+
+;; UINT64 Ldtr, Tr;
+;; UINT64 Gdtr[2], Idtr[2];
+;; Best not let anyone mess with these particular registers...
+ add rsp, 48
+
+;; UINT64 Rip;
+ pop qword ptr [rbp + 24] ; write back into the hardware frame for IRETQ
+
+;; UINT64 Gs, Fs, Es, Ds, Cs, Ss;
+ pop rax
+ ; mov gs, rax ; not for gs
+ pop rax
+ ; mov fs, rax ; not for fs
+ ; (X64 will not use fs and gs, so we do not restore it)
+ pop rax
+ mov es, rax
+ pop rax
+ mov ds, rax
+ pop qword ptr [rbp + 32] ; for cs
+ pop qword ptr [rbp + 56] ; for ss
+
+;; UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax;
+;; UINT64 R8, R9, R10, R11, R12, R13, R14, R15;
+ pop rdi
+ pop rsi
+ add rsp, 8 ; not for rbp
+ pop qword ptr [rbp + 48] ; for rsp
+ pop rbx
+ pop rdx
+ pop rcx
+ pop rax
+ pop r8
+ pop r9
+ pop r10
+ pop r11
+ pop r12
+ pop r13
+ pop r14
+ pop r15
+
+ mov rsp, rbp
+
+; Enable TF bit after page fault handler runs
+ bts dword ptr [rsp + 40], 8 ;RFLAGS.TF: single-step traps into the debug handler
+
+ pop rbp
+ add rsp, 16 ; skip INT# & ErrCode
+ iretq
+PageFaultIdtHandlerSmmProfile ENDP
+\r
+InitializeIDTSmmStackGuard PROC
+;
+; If SMM Stack Guard feature is enabled, set the IST field of
+; the interrupt gate for Page Fault Exception to be 1, so the #PF handler
+; always runs on a known-good dedicated stack (IST1).
+;
+ lea rax, _SmiIDT + 14 * 16 ; rax -> IDT entry 0Eh (#PF); entries are 16 bytes
+ mov byte ptr [rax + 4], 1 ; byte 4, bits 0:2 of the gate = IST index; select IST1
+ ret
+InitializeIDTSmmStackGuard ENDP
+\r
+ END\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
+# This program and the accompanying materials\r
+# are licensed and made available under the terms and conditions of the BSD License\r
+# which accompanies this distribution. The full text of the license may be found at\r
+# http://opensource.org/licenses/bsd-license.php.\r
+#\r
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+#\r
+# Module Name:\r
+#\r
+# SmmInit.S\r
+#\r
+# Abstract:\r
+#\r
+# Functions for relocating SMBASE's for all processors\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+# Symbols exported to C; the gSmmXxx data labels below are patch points
+# written by C code before the SMI is triggered.
+ASM_GLOBAL ASM_PFX(gSmmCr0)
+ASM_GLOBAL ASM_PFX(gSmmCr3)
+ASM_GLOBAL ASM_PFX(gSmmCr4)
+ASM_GLOBAL ASM_PFX(gSmmJmpAddr)
+ASM_GLOBAL ASM_PFX(gcSmmInitTemplate)
+ASM_GLOBAL ASM_PFX(gcSmmInitSize)
+ASM_GLOBAL ASM_PFX(mRebasedFlagAddr32)
+ASM_GLOBAL ASM_PFX(SmmRelocationSemaphoreComplete)
+ASM_GLOBAL ASM_PFX(SmmRelocationSemaphoreComplete32)
+ASM_GLOBAL ASM_PFX(mSmmRelocationOriginalAddressPtr32)
+ASM_GLOBAL ASM_PFX(gSmmInitStack)
+ASM_GLOBAL ASM_PFX(gcSmiInitGdtr)
+
+
+ .text
+
+# Pseudo-descriptor (limit, base) loaded with LGDT during SMM init;
+# filled in by C code at runtime.
+ASM_PFX(gcSmiInitGdtr):
+ .word 0 # 16-bit GDT limit
+ .quad 0 # 64-bit GDT base
+\r
+# SmmStartup: executed on SMI entry while the CPU is still in 16-bit mode,
+# hence the hand-encoded 0x66 operand-size prefixes.  The gSmmXxx dword/qword
+# slots are immediates patched by C code.  Switches the CPU into long mode
+# and calls SmmInitHandler, then exits SMM via RSM.
+SmmStartup:
+ .byte 0x66,0xb8 # mov eax, imm32
+ASM_PFX(gSmmCr3): .space 4 # imm32 patched with the CR3 (page-table) value
+ movq %rax, %cr3
+ .byte 0x66,0x2e
+ lgdt (ASM_PFX(gcSmiInitGdtr) - SmmStartup)(%ebp) # ebp = base of SmmStartup (set by the template)
+ .byte 0x66,0xb8 # mov eax, imm32
+ASM_PFX(gSmmCr4): .space 4 # imm32 patched with the CR4 value
+ orb $2, %ah # set CR4 bit 9 (OSFXSR) to enable XMM registers access
+ movq %rax, %cr4
+ .byte 0x66
+ movl $0xc0000080,%ecx # IA32_EFER MSR
+ rdmsr
+ orb $1,%ah # set LME bit
+ wrmsr
+ .byte 0x66,0xb8 # mov eax, imm32
+ASM_PFX(gSmmCr0): .space 4 # imm32 patched with the CR0 value (PE+PG set)
+ movq %rax, %cr0 # paging + protection on: long mode becomes active
+ .byte 0x66,0xea # far jmp to long mode
+ASM_PFX(gSmmJmpAddr): .quad LongMode # patched: 32-bit offset + 16-bit long-mode CS selector
+LongMode: # long-mode starts here
+ .byte 0x48,0xbc # mov rsp, imm64
+ASM_PFX(gSmmInitStack): .space 8 # imm64 patched with the SMM init stack top
+ andw $0xfff0, %sp # make sure RSP is 16-byte aligned
+ #
+ # According to X64 calling convention, XMM0~5 are volatile, we need to save
+ # them before calling C-function.
+ #
+ subq $0x60, %rsp
+ movdqa %xmm0, 0x0(%rsp)
+ movdqa %xmm1, 0x10(%rsp)
+ movdqa %xmm2, 0x20(%rsp)
+ movdqa %xmm3, 0x30(%rsp)
+ movdqa %xmm4, 0x40(%rsp)
+ movdqa %xmm5, 0x50(%rsp)
+
+
+ addq $-0x20, %rsp # 32-byte shadow space required by the MS x64 ABI
+ call ASM_PFX(SmmInitHandler)
+ addq $0x20, %rsp
+ #
+ # Restore XMM0~5 after calling C-function.
+ #
+ movdqa 0x0(%rsp), %xmm0
+ movdqa 0x10(%rsp), %xmm1
+ movdqa 0x20(%rsp), %xmm2
+ movdqa 0x30(%rsp), %xmm3
+ movdqa 0x40(%rsp), %xmm4
+ movdqa 0x50(%rsp), %xmm5
+
+ rsm # resume from SMM
+\r
+# gcSmmInitTemplate: 16-bit stub copied to each CPU's SMI entry point
+# (SMBASE + 0x8000).  It loads the linear address of SmmStartup into ebp
+# and jumps there; gcSmmInitSize tells the C code how many bytes to copy.
+ASM_PFX(gcSmmInitTemplate):
+
+_SmmInitTemplate:
+ .byte 0x66,0x2e,0x8b,0x2e # mov ebp, cs:[@F]
+ .word L1 - _SmmInitTemplate + 0x8000 # cs-relative displacement of L1
+ .byte 0x66, 0x81, 0xed, 0, 0, 3, 0 # sub ebp, 0x30000
+ jmp *%bp # jmp ebp actually
+L1:
+ .quad SmmStartup # fixed up to the runtime address of SmmStartup
+
+ASM_PFX(gcSmmInitSize): .word . - ASM_PFX(gcSmmInitTemplate) # template size in bytes
+\r
+# Signals that this CPU's SMBASE relocation finished, then returns to the
+# original RSM instruction.  Runs in 64-bit mode.
+ASM_PFX(SmmRelocationSemaphoreComplete):
+ # Create a simple stack frame to store RAX and the original RSM location
+ pushq %rax # this slot becomes the return address (overwritten below)
+ pushq %rax # preserve caller's RAX
+
+ # Load the original RSM location onto stack
+ movabsq $ASM_PFX(mSmmRelocationOriginalAddress), %rax
+ movq (%rax), %rax
+ movq %rax, 0x08(%rsp) # overwrite first slot so RET jumps to the RSM
+
+ # Update rebase flag: *mRebasedFlag = 1
+ movabsq $ASM_PFX(mRebasedFlag), %rax
+ movq (%rax), %rax
+ movb $1, (%rax)
+
+ # restore RAX and return to original RSM location
+ popq %rax
+ retq
+\r
+#
+# Semaphore code running in 32-bit mode
+#
+# Hand-assembled because the rest of this file is 64-bit code:
+#   mov byte ptr [imm32], 1   ; set rebased flag
+#   jmp dword ptr [imm32]     ; jump back to the original RSM
+# Both imm32 slots are patched at runtime through the labels below.
+#
+ASM_PFX(SmmRelocationSemaphoreComplete32):
+ #
+ # movb $1, ()
+ #
+ .byte 0xc6, 0x05
+ASM_PFX(mRebasedFlagAddr32):
+ .long 0 # patched: 32-bit address of the rebased flag
+ .byte 1 # immediate written to the flag
+ #
+ # jmpd ()
+ #
+ .byte 0xff, 0x25
+ASM_PFX(mSmmRelocationOriginalAddressPtr32):
+ .long 0 # patched: 32-bit address holding the original RSM location
--- /dev/null
+;------------------------------------------------------------------------------ ;\r
+; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
+; This program and the accompanying materials\r
+; are licensed and made available under the terms and conditions of the BSD License\r
+; which accompanies this distribution. The full text of the license may be found at\r
+; http://opensource.org/licenses/bsd-license.php.\r
+;\r
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+;\r
+; Module Name:\r
+;\r
+; SmmInit.Asm\r
+;\r
+; Abstract:\r
+;\r
+; Functions for relocating SMBASE's for all processors\r
+;\r
+;-------------------------------------------------------------------------------\r
+\r
+; Symbols shared with C; the gSmmXxx data labels below are patch points
+; written by C code before the SMI is triggered.
+EXTERNDEF SmmInitHandler:PROC
+EXTERNDEF gSmmCr0:DWORD
+EXTERNDEF gSmmCr3:DWORD
+EXTERNDEF gSmmCr4:DWORD
+EXTERNDEF gSmmJmpAddr:QWORD
+EXTERNDEF gcSmmInitTemplate:BYTE
+EXTERNDEF gcSmmInitSize:WORD
+EXTERNDEF mRebasedFlag:PTR BYTE
+EXTERNDEF mSmmRelocationOriginalAddress:QWORD
+EXTERNDEF mRebasedFlagAddr32:DWORD
+EXTERNDEF mSmmRelocationOriginalAddressPtr32:DWORD
+EXTERNDEF gSmmInitStack:QWORD
+EXTERNDEF gcSmiInitGdtr:FWORD
+
+ .code
+
+; Pseudo-descriptor (limit, base) loaded with LGDT; filled in by C at runtime
+gcSmiInitGdtr LABEL FWORD
+ DW 0 ; 16-bit GDT limit
+ DQ 0 ; 64-bit GDT base
+\r
+; SmmStartup: executed on SMI entry while the CPU is still in 16-bit mode,
+; hence the hand-encoded 66h operand-size prefixes.  The gSmmXxx data labels
+; are immediates patched by C code.  Switches the CPU into long mode and
+; calls SmmInitHandler, then exits SMM via RSM.
+SmmStartup PROC
+ DB 66h, 0b8h ; mov eax, imm32
+gSmmCr3 DD ? ; imm32 patched with the CR3 (page-table) value
+ mov cr3, rax
+ DB 66h, 2eh
+ lgdt fword ptr [ebp + (offset gcSmiInitGdtr - SmmStartup)] ; ebp = base of SmmStartup
+ DB 66h, 0b8h ; mov eax, imm32
+gSmmCr4 DD ? ; imm32 patched with the CR4 value
+ or ah, 2 ; set CR4 bit 9 (OSFXSR) to enable XMM registers access
+ mov cr4, rax
+ DB 66h
+ mov ecx, 0c0000080h ; IA32_EFER MSR
+ rdmsr
+ or ah, 1 ; set LME bit
+ wrmsr
+ DB 66h, 0b8h ; mov eax, imm32
+gSmmCr0 DD ? ; imm32 patched with the CR0 value
+ mov cr0, rax ; enable protected mode & paging
+ DB 66h, 0eah ; far jmp to long mode
+gSmmJmpAddr DQ @LongMode ; patched: 32-bit offset + 16-bit long-mode CS selector
+@LongMode: ; long-mode starts here
+ DB 48h, 0bch ; mov rsp, imm64
+gSmmInitStack DQ ? ; imm64 patched with the SMM init stack top
+ and sp, 0fff0h ; make sure RSP is 16-byte aligned
+ ;
+ ; According to X64 calling convention, XMM0~5 are volatile, we need to save
+ ; them before calling C-function.
+ ;
+ sub rsp, 60h
+ movdqa [rsp], xmm0
+ movdqa [rsp + 10h], xmm1
+ movdqa [rsp + 20h], xmm2
+ movdqa [rsp + 30h], xmm3
+ movdqa [rsp + 40h], xmm4
+ movdqa [rsp + 50h], xmm5
+
+ add rsp, -20h ; 32-byte shadow space required by the MS x64 ABI
+ call SmmInitHandler
+ add rsp, 20h
+
+ ;
+ ; Restore XMM0~5 after calling C-function.
+ ;
+ movdqa xmm0, [rsp]
+ movdqa xmm1, [rsp + 10h]
+ movdqa xmm2, [rsp + 20h]
+ movdqa xmm3, [rsp + 30h]
+ movdqa xmm4, [rsp + 40h]
+ movdqa xmm5, [rsp + 50h]
+
+ rsm ; resume from SMM
+SmmStartup ENDP
+\r
+; gcSmmInitTemplate: 16-bit stub copied to each CPU's SMI entry point
+; (SMBASE + 8000h).  It loads the linear address of SmmStartup into ebp
+; and jumps there; gcSmmInitSize tells the C code how many bytes to copy.
+gcSmmInitTemplate LABEL BYTE
+
+_SmmInitTemplate PROC
+ DB 66h, 2eh, 8bh, 2eh ; mov ebp, cs:[@F]
+ DW @L1 - _SmmInitTemplate + 8000h ; cs-relative displacement of @L1
+ DB 66h, 81h, 0edh, 00h, 00h, 03h, 00 ; sub ebp, 30000h
+ jmp bp ; jmp ebp actually
+@L1:
+ DQ SmmStartup ; fixed up to the runtime address of SmmStartup
+_SmmInitTemplate ENDP
+
+gcSmmInitSize DW $ - gcSmmInitTemplate ; template size in bytes
+\r
+; Signals that this CPU's SMBASE relocation finished, then jumps back to the
+; original RSM instruction.  Runs in 64-bit mode.
+SmmRelocationSemaphoreComplete PROC
+ push rax ; preserve RAX
+ mov rax, mRebasedFlag ; rax = address of the rebased flag
+ mov byte ptr [rax], 1 ; *mRebasedFlag = 1
+ pop rax
+ jmp [mSmmRelocationOriginalAddress] ; resume at the original RSM
+SmmRelocationSemaphoreComplete ENDP
+\r
+;
+; Semaphore code running in 32-bit mode
+;
+; Hand-assembled because the rest of this file is 64-bit code:
+;   mov byte ptr [imm32], 1   ; set rebased flag
+;   jmp dword ptr [imm32]     ; jump back to the original RSM
+; Both imm32 slots are patched at runtime through the labels below.
+;
+SmmRelocationSemaphoreComplete32 PROC
+ ;
+ ; mov byte ptr [], 1
+ ;
+ db 0c6h, 05h
+mRebasedFlagAddr32 dd 0 ; patched: 32-bit address of the rebased flag
+ db 1 ; immediate written to the flag
+ ;
+ ; jmp dword ptr []
+ ;
+ db 0ffh, 25h
+mSmmRelocationOriginalAddressPtr32 dd 0 ; patched: 32-bit address holding the original RSM location
+SmmRelocationSemaphoreComplete32 ENDP
+\r
+ END\r
--- /dev/null
+/** @file\r
+X64 processor specific functions to enable SMM profile.\r
+\r
+Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>\r
+This program and the accompanying materials\r
+are licensed and made available under the terms and conditions of the BSD License\r
+which accompanies this distribution. The full text of the license may be found at\r
+http://opensource.org/licenses/bsd-license.php\r
+\r
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include "PiSmmCpuDxeSmm.h"\r
+#include "SmmProfileInternal.h"\r
+\r
+//
+// Current page index: round-robin cursor into the pre-allocated page pool.
+//
+UINTN mPFPageIndex;
+
+//
+// Pool for dynamically creating page table in page fault handler.
+// Base address of MAX_PF_PAGE_COUNT pre-allocated pages (see
+// InitPagesForPFHandler / AcquirePage).
+//
+UINT64 mPFPageBuffer;
+
+//
+// Store the uplink information for each page being used.
+// mPFPageUplink[i] points at the paging-structure entry that currently maps
+// pool page i (NULL if the slot has never been used).
+//
+UINT64 *mPFPageUplink[MAX_PF_PAGE_COUNT];
+\r
+/**
+  Create SMM page table for S3 path.
+
+  Builds a page table covering the first 4GB of memory and records the
+  resulting PML4 address in mSmmS3ResumeState->SmmS3Cr3 for use as CR3
+  on the S3 resume path.
+
+**/
+VOID
+InitSmmS3Cr3 (
+  VOID
+  )
+{
+  EFI_PHYSICAL_ADDRESS  PageTables;
+  UINT64                *Pml4Entry;
+
+  //
+  // Generate page table mapping the first 4GB memory space
+  //
+  PageTables = Gen4GPageTable (1);
+
+  //
+  // The page immediately below the returned tables is reserved for the PML4;
+  // point its first entry at the page-directory-pointer table just built and
+  // clear the remaining entries.
+  //
+  Pml4Entry  = (UINT64*)(UINTN)(PageTables - EFI_PAGES_TO_SIZE (1));
+  *Pml4Entry = PageTables + IA32_PG_P;
+  ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));
+
+  //
+  // Publish the PML4 address so it can be loaded into CR3 at S3 resume
+  //
+  mSmmS3ResumeState->SmmS3Cr3 = (UINT32)(UINTN)Pml4Entry;
+}
+\r
+/**
+  Allocate pages for creating 4KB-page based on 2MB-page when page fault happens.
+
+  Pre-allocates MAX_PF_PAGE_COUNT pages, zeroes them, and resets the
+  round-robin cursor and uplink bookkeeping used by AcquirePage().
+
+**/
+VOID
+InitPagesForPFHandler (
+  VOID
+  )
+{
+  VOID *Address;
+
+  //
+  // Pre-Allocate memory for page fault handler
+  //
+  Address = AllocatePages (MAX_PF_PAGE_COUNT);
+  //
+  // Use ASSERT, not ASSERT_EFI_ERROR: the expression is a BOOLEAN, not an
+  // EFI_STATUS.  ASSERT_EFI_ERROR (Address != NULL) would never fire on
+  // failure, because FALSE (0) equals EFI_SUCCESS.
+  //
+  ASSERT (Address != NULL);
+
+  mPFPageBuffer =  (UINT64)(UINTN) Address;
+  mPFPageIndex = 0;
+  ZeroMem ((VOID *) (UINTN) mPFPageBuffer, EFI_PAGE_SIZE * MAX_PF_PAGE_COUNT);
+  ZeroMem (mPFPageUplink, sizeof (mPFPageUplink));
+
+  return;
+}
+\r
+/**
+  Allocate one page for creating 4KB-page based on 2MB-page.
+
+  Takes the next page from the round-robin pool, zeroes it, unlinks any
+  stale mapping that still points at it, and installs it under *Uplink.
+
+  @param Uplink The address of Page-Directory entry.
+
+**/
+VOID
+AcquirePage (
+  UINT64 *Uplink
+  )
+{
+  UINT64 PageAddress;
+
+  //
+  // Take the current pool page and wipe it before reuse
+  //
+  PageAddress = mPFPageBuffer + EFI_PAGES_TO_SIZE (mPFPageIndex);
+  ZeroMem ((VOID *) (UINTN) PageAddress, EFI_PAGE_SIZE);
+
+  //
+  // If a previous owner of this slot still maps this page, sever that link
+  // so no paging-structure entry is left pointing at the recycled page.
+  //
+  if ((mPFPageUplink[mPFPageIndex] != NULL) && ((*mPFPageUplink[mPFPageIndex] & PHYSICAL_ADDRESS_MASK) == PageAddress)) {
+    *mPFPageUplink[mPFPageIndex] = 0;
+  }
+
+  //
+  // Install the page under the new entry and record the new owner
+  //
+  *Uplink = PageAddress | IA32_PG_P | IA32_PG_RW;
+  mPFPageUplink[mPFPageIndex] = Uplink;
+
+  //
+  // Advance the round-robin cursor
+  //
+  mPFPageIndex = (mPFPageIndex + 1) % MAX_PF_PAGE_COUNT;
+}
+\r
+/**
+  Update page table to map the memory correctly in order to make the instruction
+  which caused page fault execute successfully. And it also save the original page
+  table to be restored in single-step exception.
+
+  Only invoked for page-fault addresses at or above 4GB.
+
+  @param PageTable PageTable Address.
+  @param PFAddress The memory address which caused page fault exception.
+  @param CpuIndex The index of the processor.
+  @param ErrorCode The Error code of exception.
+  @param IsValidPFAddress The flag indicates if SMM profile data need be added.
+
+**/
+VOID
+RestorePageTableAbove4G (
+  UINT64 *PageTable,
+  UINT64 PFAddress,
+  UINTN CpuIndex,
+  UINTN ErrorCode,
+  BOOLEAN *IsValidPFAddress
+  )
+{
+  UINTN PTIndex;
+  UINT64 Address;
+  BOOLEAN Nx;
+  BOOLEAN Existed;
+  UINTN Index;
+  UINTN PFIndex;
+
+  ASSERT ((PageTable != NULL) && (IsValidPFAddress != NULL));
+
+  //
+  // If page fault address is 4GB above.
+  //
+
+  //
+  // Check if page fault address has existed in page table.
+  // If it exists in page table but page fault is generated,
+  // there are 2 possible reasons: 1. present flag is set to 0; 2. instruction fetch in protected memory range.
+  //
+  Existed = FALSE;
+  PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
+  PTIndex = BitFieldRead64 (PFAddress, 39, 47);
+  if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
+    // PML4E
+    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+    PTIndex = BitFieldRead64 (PFAddress, 30, 38);
+    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
+      // PDPTE
+      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+      PTIndex = BitFieldRead64 (PFAddress, 21, 29);
+      // PD
+      if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
+        //
+        // 2MB page: compare the mapped frame with the faulting address
+        //
+        Address = (UINT64)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+        if ((Address & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)) == ((PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)))) {
+          Existed = TRUE;
+        }
+      } else {
+        //
+        // 4KB page
+        //
+        PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+        // NOTE(review): this is a NULL-pointer test on the masked entry, not a
+        // check of the Present bit — confirm invalidated entries are zeroed
+        // (AcquirePage does zero recycled pages).
+        if (PageTable != 0) {
+          //
+          // When there is a valid entry to map to 4KB page, need not create a new entry to map 2MB.
+          //
+          PTIndex = BitFieldRead64 (PFAddress, 12, 20);
+          Address = (UINT64)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+          if ((Address & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1)) == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
+            Existed = TRUE;
+          }
+        }
+      }
+    }
+  }
+
+  //
+  // If the page entry does not exist in the page table at all, create a new entry.
+  //
+  if (!Existed) {
+
+    if (IsAddressValid (PFAddress, &Nx)) {
+      //
+      // If page fault address above 4GB is in protected range but it causes a page fault exception,
+      // Will create a page entry for this page fault address, make page table entry as present/rw and execution-disable.
+      // this access is not saved into SMM profile data.
+      //
+      *IsValidPFAddress = TRUE;
+    }
+
+    //
+    // Create one entry in page table for page fault address.
+    //
+    SmiDefaultPFHandler ();
+    //
+    // Find the page table entry created just now (walk PML4E -> PDPTE -> PD).
+    //
+    PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
+    PFAddress = AsmReadCr2 ();
+    // PML4E
+    PTIndex = BitFieldRead64 (PFAddress, 39, 47);
+    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+    // PDPTE
+    PTIndex = BitFieldRead64 (PFAddress, 30, 38);
+    PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+    // PD
+    PTIndex = BitFieldRead64 (PFAddress, 21, 29);
+    Address = PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK;
+    //
+    // Check if 2MB-page entry need be changed to 4KB-page entry.
+    //
+    if (IsAddressSplit (Address)) {
+      AcquirePage (&PageTable[PTIndex]);
+
+      // PTE: populate all 512 4KB entries covering the former 2MB page
+      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+      for (Index = 0; Index < 512; Index++) {
+        PageTable[Index] = Address | IA32_PG_RW | IA32_PG_P;
+        if (!IsAddressValid (Address, &Nx)) {
+          PageTable[Index] = PageTable[Index] & (INTN)(INT32)(~(IA32_PG_RW | IA32_PG_P));
+        }
+        if (Nx && mXdSupported) {
+          PageTable[Index] = PageTable[Index] | IA32_PG_NX;
+        }
+        if (Address == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
+          // Remember the PTE covering the faulting address for recording below
+          PTIndex = Index;
+        }
+        Address += SIZE_4KB;
+      } // end for PT
+    } else {
+      //
+      // Update 2MB page entry.
+      //
+      if (!IsAddressValid (Address, &Nx)) {
+        //
+        // Patch to remove present flag and rw flag.
+        //
+        PageTable[PTIndex] = PageTable[PTIndex] & (INTN)(INT32)(~(IA32_PG_RW | IA32_PG_P));
+      }
+      //
+      // Set XD bit to 1
+      //
+      if (Nx && mXdSupported) {
+        PageTable[PTIndex] = PageTable[PTIndex] | IA32_PG_NX;
+      }
+    }
+  }
+
+  //
+  // Record old entries with non-present status
+  // Old entries include the memory which instruction is at and the memory which instruction access.
+  // They are restored in the single-step (debug) exception handler.
+  //
+  ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
+  if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
+    PFIndex = mPFEntryCount[CpuIndex];
+    mLastPFEntryValue[CpuIndex][PFIndex] = PageTable[PTIndex];
+    mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
+    mPFEntryCount[CpuIndex]++;
+  }
+
+  //
+  // Add present flag or clear XD flag to make page fault handler succeed.
+  //
+  PageTable[PTIndex] |= (UINT64)(IA32_PG_RW | IA32_PG_P);
+  if ((ErrorCode & IA32_PF_EC_ID) != 0) {
+    //
+    // If page fault is caused by instruction fetch, clear XD bit in the entry.
+    //
+    PageTable[PTIndex] &= ~IA32_PG_NX;
+  }
+
+  return;
+}
+\r
+/**
+  Clear TF in FLAGS.
+
+  @param SystemContext A pointer to the processor context when
+                       the interrupt occurred on the processor.
+
+**/
+VOID
+ClearTrapFlag (
+  IN OUT EFI_SYSTEM_CONTEXT SystemContext
+  )
+{
+  //
+  // BIT8 of RFLAGS is the Trap Flag (TF); clearing it stops single-stepping.
+  //
+  SystemContext.SystemContextX64->Rflags &= ~(UINTN)BIT8;
+}
--- /dev/null
+/** @file\r
+X64 processor specific header file to enable SMM profile.\r
+\r
+Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>\r
+This program and the accompanying materials\r
+are licensed and made available under the terms and conditions of the BSD License\r
+which accompanies this distribution. The full text of the license may be found at\r
+http://opensource.org/licenses/bsd-license.php\r
+\r
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#ifndef _SMM_PROFILE_ARCH_H_\r
+#define _SMM_PROFILE_ARCH_H_\r
+\r
+#pragma pack (1)
+
+//
+// Memory layout of the Debug Store (DS) save area holding the BTS and PEBS
+// buffer-management fields.  NOTE(review): field order presumably follows the
+// Intel SDM DS-area layout — confirm against the SDM for the target CPUs.
+//
+typedef struct _MSR_DS_AREA_STRUCT {
+  UINT64 BTSBufferBase;          // base of the Branch Trace Store buffer
+  UINT64 BTSIndex;               // next write position in the BTS buffer
+  UINT64 BTSAbsoluteMaximum;     // end of the BTS buffer
+  UINT64 BTSInterruptThreshold;  // threshold that triggers a BTS interrupt
+  UINT64 PEBSBufferBase;         // base of the PEBS buffer
+  UINT64 PEBSIndex;              // next write position in the PEBS buffer
+  UINT64 PEBSAbsoluteMaximum;    // end of the PEBS buffer
+  UINT64 PEBSInterruptThreshold; // threshold that triggers a PEBS interrupt
+  UINT64 PEBSCounterReset[2];    // counter reload values after PEBS events
+  UINT64 Reserved;
+} MSR_DS_AREA_STRUCT;
+
+//
+// One Branch Trace Store record: a taken-branch (from, to) pair.
+//
+typedef struct _BRANCH_TRACE_RECORD {
+  UINT64 LastBranchFrom;         // linear address of the branch instruction
+  UINT64 LastBranchTo;           // linear address of the branch target
+  UINT64 Rsvd0 : 4;
+  UINT64 BranchPredicted : 1;    // 1 = branch was predicted correctly
+  UINT64 Rsvd1 : 59;
+} BRANCH_TRACE_RECORD;
+
+//
+// One Precise Event Based Sampling record: a register snapshot at the event.
+//
+typedef struct _PEBS_RECORD {
+  UINT64 Rflags;
+  UINT64 LinearIP;
+  UINT64 Rax;
+  UINT64 Rbx;
+  UINT64 Rcx;
+  UINT64 Rdx;
+  UINT64 Rsi;
+  UINT64 Rdi;
+  UINT64 Rbp;
+  UINT64 Rsp;
+  UINT64 R8;
+  UINT64 R9;
+  UINT64 R10;
+  UINT64 R11;
+  UINT64 R12;
+  UINT64 R13;
+  UINT64 R14;
+  UINT64 R15;
+} PEBS_RECORD;
+
+#pragma pack ()
+
+//
+// Mask of the physical-address bits (12..51) in a paging-structure entry.
+//
+#define PHYSICAL_ADDRESS_MASK ((1ull << 52) - SIZE_4KB)
+\r
+//
+// X64-specific SMM profile services; implemented in SmmProfileArch.c.
+//
+
+/**
+  Update page table to map the memory correctly in order to make the instruction
+  which caused page fault execute successfully. And it also save the original page
+  table to be restored in single-step exception.
+
+  @param PageTable PageTable Address.
+  @param PFAddress The memory address which caused page fault exception.
+  @param CpuIndex The index of the processor.
+  @param ErrorCode The Error code of exception.
+  @param IsValidPFAddress The flag indicates if SMM profile data need be added.
+
+**/
+VOID
+RestorePageTableAbove4G (
+  UINT64 *PageTable,
+  UINT64 PFAddress,
+  UINTN CpuIndex,
+  UINTN ErrorCode,
+  BOOLEAN *IsValidPFAddress
+  );
+
+/**
+  Create SMM page table for S3 path.
+
+**/
+VOID
+InitSmmS3Cr3 (
+  VOID
+  );
+
+/**
+  Allocate pages for creating 4KB-page based on 2MB-page when page fault happens.
+
+**/
+VOID
+InitPagesForPFHandler (
+  VOID
+  );
+
+#endif // _SMM_PROFILE_ARCH_H_