--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2006 - 2015, Intel Corporation. All rights reserved.<BR>\r
+# This program and the accompanying materials\r
+# are licensed and made available under the terms and conditions of the BSD License\r
+# which accompanies this distribution. The full text of the license may be found at\r
+# http://opensource.org/licenses/bsd-license.php.\r
+#\r
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+#\r
+# Module Name:\r
+#\r
+# MpFuncs.S\r
+#\r
+# Abstract:\r
+#\r
+# This is the assembly code for Multi-processor S3 support\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+# Spin-lock values used to serialize the APs through the funnel code.\r
+.equ VacantFlag, 0x0\r
+.equ NotVacantFlag, 0xff\r
+\r
+# Byte offsets, relative to the start of the wakeup buffer, of the fields in\r
+# the MP exchange-info structure stored immediately after the relocated\r
+# funnel code (the code spans RendezvousFunnelProcStart..End).\r
+# GdtrProfile/IdtrProfile are 6-byte pseudo-descriptor images (2-byte limit\r
+# plus 4-byte base), which is why IdtrProfile sits at +0x16 (= +0x10 + 6).\r
+# NOTE(review): these must match the C-side structure layout - confirm\r
+# against the code that fills the wakeup buffer.\r
+.equ LockLocation, RendezvousFunnelProcEnd - RendezvousFunnelProcStart\r
+.equ StackStart, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x04\r
+.equ StackSize, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x08\r
+.equ RendezvousProc, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x0C\r
+.equ GdtrProfile, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x10\r
+.equ IdtrProfile, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x16\r
+.equ BufferStart, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x1C\r
+\r
+#-------------------------------------------------------------------------------------\r
+#RendezvousFunnelProc procedure follows. All APs execute their procedure. This\r
+#procedure serializes all the AP processors through an Init sequence. It must be\r
+#noted that APs arrive here very raw...ie: real mode, no stack.\r
+#ALSO THIS PROCEDURE IS EXECUTED BY APs ONLY ON 16 BIT MODE. HENCE THIS PROC\r
+#IS IN MACHINE CODE.\r
+#-------------------------------------------------------------------------------------\r
+#RendezvousFunnelProc (&WakeUpBuffer,MemAddress);\r
+\r
+ASM_GLOBAL ASM_PFX(RendezvousFunnelProc)\r
+ASM_PFX(RendezvousFunnelProc):\r
+RendezvousFunnelProcStart:\r
+\r
+# At this point CS = 0x(vv00) and ip= 0x0.\r
+# The APs arrive here in 16-bit real mode with no usable stack, so the\r
+# 16-bit startup sequence is hand-encoded as raw bytes.\r
+\r
+ .byte 0x8c,0xc8 # mov ax, cs\r
+ .byte 0x8e,0xd8 # mov ds, ax\r
+ .byte 0x8e,0xc0 # mov es, ax\r
+ .byte 0x8e,0xd0 # mov ss, ax\r
+ .byte 0x33,0xc0 # xor ax, ax\r
+ .byte 0x8e,0xe0 # mov fs, ax\r
+ .byte 0x8e,0xe8 # mov gs, ax\r
+\r
+flat32Start:\r
+\r
+ # 0xBE followed by a 16-bit immediate encodes "mov si, imm16".\r
+ .byte 0xBE\r
+ .word BufferStart\r
+ .byte 0x66,0x8B,0x14 # mov edx,dword ptr [si] ; EDX is keeping the start address of wakeup buffer\r
+\r
+ .byte 0xBE\r
+ .word GdtrProfile\r
+ .byte 0x66 # db 66h\r
+ .byte 0x2E,0xF,0x1,0x14 # lgdt fword ptr cs:[si]\r
+\r
+ .byte 0xBE\r
+ .word IdtrProfile\r
+ .byte 0x66 # db 66h\r
+ .byte 0x2E,0xF,0x1,0x1C # lidt fword ptr cs:[si]\r
+\r
+ .byte 0x33,0xC0 # xor ax, ax\r
+ .byte 0x8E,0xD8 # mov ds, ax\r
+\r
+ .byte 0xF,0x20,0xC0 # mov eax, cr0 ; Get control register 0\r
+ .byte 0x66,0x83,0xC8,0x1 # or eax, 000000001h ; Set PE bit (bit #0)\r
+ .byte 0xF,0x22,0xC0 # mov cr0, eax\r
+\r
+FLAT32_JUMP:\r
+\r
+ # Far jump into protected mode.  The 32-bit offset below is a zero\r
+ # placeholder; presumably it is patched at runtime with the flat address\r
+ # of PMODE_ENTRY before the APs are started - confirm against the code\r
+ # that prepares the wakeup buffer (see AsmGetAddressMap below).\r
+ .byte 0x66,0x67,0xEA # far jump\r
+ .long 0x0 # 32-bit offset\r
+ .word 0x20 # 16-bit selector\r
+\r
+PMODE_ENTRY: # protected mode entry point\r
+\r
+ movw $0x8,%ax\r
+ .byte 0x66\r
+ movw %ax,%ds\r
+ .byte 0x66\r
+ movw %ax,%es\r
+ .byte 0x66\r
+ movw %ax,%fs\r
+ .byte 0x66\r
+ movw %ax,%gs\r
+ .byte 0x66\r
+ movw %ax,%ss # Flat mode setup.\r
+\r
+ # EDX still holds the wakeup-buffer start; ESI is the base register for\r
+ # all exchange-info field accesses below.\r
+ movl %edx,%esi\r
+\r
+ # Spin until the rendezvous lock is vacant, then take it atomically.\r
+ movl %esi,%edi\r
+ addl $LockLocation, %edi\r
+ movb $NotVacantFlag, %al\r
+TestLock:\r
+ xchgb (%edi), %al\r
+ cmpb $NotVacantFlag, %al\r
+ jz TestLock\r
+\r
+ProgramStack:\r
+\r
+ # With the lock held, carve this AP's stack out of the shared region:\r
+ # new stack top = [StackStart] + [StackSize]; [StackStart] is then\r
+ # advanced so the next AP gets a disjoint stack.\r
+ movl %esi,%edi\r
+ addl $StackSize, %edi\r
+ movl (%edi),%eax\r
+ movl %esi,%edi\r
+ addl $StackStart, %edi\r
+ addl (%edi),%eax\r
+ movl %eax,%esp\r
+ movl %eax,(%edi)\r
+\r
+Releaselock:\r
+\r
+ movb $VacantFlag, %al\r
+ movl %esi,%edi\r
+ addl $LockLocation, %edi\r
+ xchgb (%edi), %al\r
+\r
+ #\r
+ # Call assembly function to initialize FPU.\r
+ #\r
+ lea ASM_PFX(InitializeFloatingPointUnits), %ebx\r
+ call *%ebx\r
+ #\r
+ # Call C Function\r
+ #\r
+ movl %esi,%edi\r
+ addl $RendezvousProc, %edi\r
+ movl (%edi),%eax\r
+\r
+ # A null RendezvousProc means there is nothing to run; park the AP.\r
+ testl %eax,%eax\r
+ jz GoToSleep\r
+ call *%eax # Call C function\r
+\r
+GoToSleep:\r
+ cli\r
+ hlt\r
+ jmp GoToSleep # hlt may be broken by SMI/NMI; loop forever\r
+\r
+RendezvousFunnelProcEnd:\r
+#-------------------------------------------------------------------------------------\r
+# AsmGetAddressMap (&AddressMap);\r
+#-------------------------------------------------------------------------------------\r
+ASM_GLOBAL ASM_PFX(AsmGetAddressMap)\r
+ASM_PFX(AsmGetAddressMap):\r
+\r
+ # Fills the caller-supplied structure with the funnel code's layout:\r
+ # start address, offsets of PMODE_ENTRY and FLAT32_JUMP, and total size.\r
+ pushal\r
+ movl %esp,%ebp\r
+\r
+ # pushal stored 8 x 4 bytes, so the first C argument (&AddressMap) is\r
+ # at [ebp + 0x24] (32 bytes of registers + 4-byte return address).\r
+ movl 0x24(%ebp), %ebx\r
+ movl $RendezvousFunnelProcStart, (%ebx)\r
+ movl $(PMODE_ENTRY - RendezvousFunnelProcStart), 0x4(%ebx)\r
+ movl $(FLAT32_JUMP - RendezvousFunnelProcStart), 0x8(%ebx)\r
+ movl $(RendezvousFunnelProcEnd - RendezvousFunnelProcStart), 0x0c(%ebx)\r
+\r
+ popal\r
+ ret\r
--- /dev/null
+;------------------------------------------------------------------------------ ;\r
+; Copyright (c) 2006 - 2015, Intel Corporation. All rights reserved.<BR>\r
+; This program and the accompanying materials\r
+; are licensed and made available under the terms and conditions of the BSD License\r
+; which accompanies this distribution. The full text of the license may be found at\r
+; http://opensource.org/licenses/bsd-license.php.\r
+;\r
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+;\r
+; Module Name:\r
+;\r
+; MpFuncs.asm\r
+;\r
+; Abstract:\r
+;\r
+; This is the assembly code for Multi-processor S3 support\r
+;\r
+;-------------------------------------------------------------------------------\r
+\r
+.686p\r
+.model flat,C\r
+.code\r
+\r
+EXTERN InitializeFloatingPointUnits:PROC\r
+\r
+; Spin-lock values used to serialize the APs through the funnel code.\r
+VacantFlag Equ 00h\r
+NotVacantFlag Equ 0ffh\r
+\r
+; Byte offsets, relative to the start of the wakeup buffer, of the fields in\r
+; the MP exchange-info structure stored right after the relocated funnel\r
+; code.  GdtrProfile/IdtrProfile are 6-byte pseudo-descriptor images (2-byte\r
+; limit plus 4-byte base), hence IdtrProfile at +16h.\r
+; NOTE(review): must match the C-side structure layout - confirm.\r
+LockLocation equ RendezvousFunnelProcEnd - RendezvousFunnelProcStart\r
+StackStart equ LockLocation + 4h\r
+StackSize equ LockLocation + 8h\r
+RendezvousProc equ LockLocation + 0Ch\r
+GdtrProfile equ LockLocation + 10h\r
+IdtrProfile equ LockLocation + 16h\r
+BufferStart equ LockLocation + 1Ch\r
+\r
+;-------------------------------------------------------------------------------------\r
+;RendezvousFunnelProc procedure follows. All APs execute their procedure. This\r
+;procedure serializes all the AP processors through an Init sequence. It must be\r
+;noted that APs arrive here very raw...ie: real mode, no stack.\r
+;ALSO THIS PROCEDURE IS EXECUTED BY APs ONLY ON 16 BIT MODE. HENCE THIS PROC\r
+;IS IN MACHINE CODE.\r
+;-------------------------------------------------------------------------------------\r
+;RendezvousFunnelProc (&WakeUpBuffer,MemAddress);\r
+\r
+RendezvousFunnelProc PROC near C PUBLIC\r
+RendezvousFunnelProcStart::\r
+\r
+; At this point CS = 0x(vv00) and ip= 0x0.\r
+; The APs arrive here in 16-bit real mode with no usable stack, so the\r
+; 16-bit startup sequence is hand-encoded as raw bytes.\r
+\r
+ db 8ch, 0c8h ; mov ax, cs\r
+ db 8eh, 0d8h ; mov ds, ax\r
+ db 8eh, 0c0h ; mov es, ax\r
+ db 8eh, 0d0h ; mov ss, ax\r
+ db 33h, 0c0h ; xor ax, ax\r
+ db 8eh, 0e0h ; mov fs, ax\r
+ db 8eh, 0e8h ; mov gs, ax\r
+\r
+flat32Start::\r
+\r
+ db 0BEh\r
+ dw BufferStart ; mov si, BufferStart\r
+ db 66h, 8Bh, 14h ; mov edx,dword ptr [si] ; EDX is keeping the start address of wakeup buffer\r
+\r
+ db 0BEh\r
+ dw GdtrProfile ; mov si, GdtrProfile\r
+ db 66h ; db 66h\r
+ db 2Eh, 0Fh, 01h, 14h ; lgdt fword ptr cs:[si]\r
+\r
+ db 0BEh\r
+ dw IdtrProfile ; mov si, IdtrProfile\r
+ db 66h ; db 66h\r
+ db 2Eh, 0Fh, 01h, 1Ch ; lidt fword ptr cs:[si]\r
+\r
+ db 33h, 0C0h ; xor ax, ax\r
+ db 8Eh, 0D8h ; mov ds, ax\r
+\r
+ db 0Fh, 20h, 0C0h ; mov eax, cr0 ; Get control register 0\r
+ db 66h, 83h, 0C8h, 01h ; or eax, 000000001h ; Set PE bit (bit #0)\r
+ db 0Fh, 22h, 0C0h ; mov cr0, eax\r
+\r
+FLAT32_JUMP::\r
+\r
+; Far jump into protected mode.  The 32-bit offset below is a placeholder;\r
+; presumably patched at runtime with the flat address of PMODE_ENTRY before\r
+; the APs are started - confirm against the wakeup-buffer setup code.\r
+ db 66h, 67h, 0EAh ; far jump\r
+ dd 0h ; 32-bit offset\r
+ dw 20h ; 16-bit selector\r
+\r
+PMODE_ENTRY:: ; protected mode entry point\r
+\r
+ mov ax, 8h\r
+ mov ds, ax\r
+ mov es, ax\r
+ mov fs, ax\r
+ mov gs, ax\r
+ mov ss, ax ; Flat mode setup.\r
+\r
+; EDX still holds the wakeup-buffer start; ESI is the base register for all\r
+; exchange-info field accesses below.\r
+ mov esi, edx\r
+\r
+; Spin until the rendezvous lock is vacant, then take it atomically.\r
+ mov edi, esi\r
+ add edi, LockLocation\r
+ mov al, NotVacantFlag\r
+TestLock::\r
+ xchg byte ptr [edi], al\r
+ cmp al, NotVacantFlag\r
+ jz TestLock\r
+\r
+ProgramStack::\r
+\r
+; With the lock held, carve this AP's stack out of the shared region:\r
+; new stack top = [StackStart] + [StackSize]; [StackStart] is then advanced\r
+; so the next AP gets a disjoint stack.\r
+ mov edi, esi\r
+ add edi, StackSize\r
+ mov eax, dword ptr [edi]\r
+ mov edi, esi\r
+ add edi, StackStart\r
+ add eax, dword ptr [edi]\r
+ mov esp, eax\r
+ mov dword ptr [edi], eax\r
+\r
+Releaselock::\r
+\r
+ mov al, VacantFlag\r
+ mov edi, esi\r
+ add edi, LockLocation\r
+ xchg byte ptr [edi], al\r
+\r
+ ;\r
+ ; Call assembly function to initialize FPU.\r
+ ;\r
+ mov ebx, InitializeFloatingPointUnits\r
+ call ebx\r
+ ;\r
+ ; Call C Function\r
+ ;\r
+ mov edi, esi\r
+ add edi, RendezvousProc\r
+ mov eax, dword ptr [edi]\r
+\r
+; A null RendezvousProc means there is nothing to run; park the AP.\r
+ test eax, eax\r
+ jz GoToSleep\r
+ call eax ; Call C function\r
+\r
+GoToSleep::\r
+ cli\r
+ hlt\r
+ jmp $-2 ; jump back to hlt; hlt may be broken by SMI/NMI\r
+\r
+RendezvousFunnelProc ENDP\r
+RendezvousFunnelProcEnd::\r
+;-------------------------------------------------------------------------------------\r
+; AsmGetAddressMap (&AddressMap);\r
+;-------------------------------------------------------------------------------------\r
+AsmGetAddressMap PROC near C PUBLIC\r
+\r
+; Fills the caller-supplied structure with the funnel code's layout:\r
+; start address, offsets of PMODE_ENTRY and FLAT32_JUMP, and total size.\r
+ pushad\r
+ mov ebp,esp\r
+\r
+; pushad stored 8 x 4 bytes, so the first C argument (&AddressMap) is at\r
+; [ebp + 24h] (32 bytes of registers + 4-byte return address).\r
+ mov ebx, dword ptr [ebp+24h]\r
+ mov dword ptr [ebx], RendezvousFunnelProcStart\r
+ mov dword ptr [ebx+4h], PMODE_ENTRY - RendezvousFunnelProcStart\r
+ mov dword ptr [ebx+8h], FLAT32_JUMP - RendezvousFunnelProcStart\r
+ mov dword ptr [ebx+0ch], RendezvousFunnelProcEnd - RendezvousFunnelProcStart\r
+\r
+ popad\r
+ ret\r
+\r
+AsmGetAddressMap ENDP\r
+\r
+END\r
--- /dev/null
+/** @file\r
+Page table manipulation functions for IA-32 processors\r
+\r
+Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
+This program and the accompanying materials\r
+are licensed and made available under the terms and conditions of the BSD License\r
+which accompanies this distribution. The full text of the license may be found at\r
+http://opensource.org/licenses/bsd-license.php\r
+\r
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include "PiSmmCpuDxeSmm.h"\r
+\r
+// Spin lock serializing page-fault handling in SmiPFHandler across CPUs.\r
+SPIN_LOCK mPFLock;\r
+\r
+/**\r
+ Create PageTable for SMM use.\r
+\r
+ Also initializes the page-fault spin lock, installs the page-fault\r
+ handler (SMM Profile variant or the default SmiPFHandler), and performs\r
+ the extra IDT setup needed for SMM stack guard when that feature is on.\r
+\r
+ @return PageTable Address (as produced by Gen4GPageTable (0))\r
+\r
+**/\r
+UINT32\r
+SmmInitPageTable (\r
+ VOID\r
+ )\r
+{\r
+ UINTN PageFaultHandlerHookAddress;\r
+ IA32_IDT_GATE_DESCRIPTOR *IdtEntry;\r
+\r
+ //\r
+ // Initialize spin lock\r
+ //\r
+ InitializeSpinLock (&mPFLock);\r
+\r
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
+ //\r
+ // Set own Page Fault entry instead of the default one, because SMM Profile\r
+ // feature depends on IRET instruction to do Single Step\r
+ //\r
+ PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;\r
+ IdtEntry = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;\r
+ IdtEntry += EXCEPT_IA32_PAGE_FAULT;\r
+ // Only the offset halves and gate type are rewritten; the selector\r
+ // field of the existing gate is left as-is.\r
+ IdtEntry->Bits.OffsetLow = (UINT16)PageFaultHandlerHookAddress;\r
+ IdtEntry->Bits.Reserved_0 = 0;\r
+ IdtEntry->Bits.GateType = IA32_IDT_GATE_TYPE_INTERRUPT_32;\r
+ IdtEntry->Bits.OffsetHigh = (UINT16)(PageFaultHandlerHookAddress >> 16);\r
+ } else {\r
+ //\r
+ // Register SMM Page Fault Handler\r
+ //\r
+ SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);\r
+ }\r
+\r
+ //\r
+ // Additional SMM IDT initialization for SMM stack guard\r
+ //\r
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
+ InitializeIDTSmmStackGuard ();\r
+ }\r
+ return Gen4GPageTable (0);\r
+}\r
+\r
+/**\r
+ Page Fault handler for SMM use.\r
+\r
+ Default behavior: no recovery is attempted; the processor is parked in\r
+ a dead loop.\r
+\r
+**/\r
+VOID\r
+SmiDefaultPFHandler (\r
+ VOID\r
+ )\r
+{\r
+ CpuDeadLoop ();\r
+}\r
+\r
+/**\r
+ The Page Fault handler wrapper for SMM use.\r
+\r
+ Serialized across CPUs by mPFLock.  A fault inside the SMRR range with\r
+ stack guard enabled is treated as SMM stack overflow (fatal); an\r
+ instruction-fetch fault outside SMRAM is fatal; anything else is passed\r
+ to the SMM Profile handler or the default handler.\r
+\r
+ @param InterruptType Defines the type of interrupt or exception that\r
+ occurred on the processor.This parameter is processor architecture specific.\r
+ @param SystemContext A pointer to the processor context when\r
+ the interrupt occurred on the processor.\r
+**/\r
+VOID\r
+EFIAPI\r
+SmiPFHandler (\r
+ IN EFI_EXCEPTION_TYPE InterruptType,\r
+ IN EFI_SYSTEM_CONTEXT SystemContext\r
+ )\r
+{\r
+ UINTN PFAddress;\r
+\r
+ ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);\r
+\r
+ AcquireSpinLock (&mPFLock);\r
+\r
+ PFAddress = AsmReadCr2 ();\r
+\r
+ //\r
+ // With stack guard on, a fault inside the SMRR range means the SMM stack\r
+ // ran into its guard page; this is unrecoverable.\r
+ //\r
+ if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&\r
+ (PFAddress >= mCpuHotPlugData.SmrrBase) &&\r
+ (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {\r
+ DEBUG ((EFI_D_ERROR, "SMM stack overflow!\n"));\r
+ CpuDeadLoop ();\r
+ }\r
+\r
+ //\r
+ // If a page fault occurs in SMM range\r
+ //\r
+ if ((PFAddress < mCpuHotPlugData.SmrrBase) ||\r
+ (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {\r
+ //\r
+ // IA32_PF_EC_ID set means the fault was an instruction fetch, i.e.\r
+ // code is being executed outside SMRAM - never legitimate here.\r
+ //\r
+ if ((SystemContext.SystemContextIa32->ExceptionData & IA32_PF_EC_ID) != 0) {\r
+ DEBUG ((EFI_D_ERROR, "Code executed on IP(0x%x) out of SMM range after SMM is locked!\n", PFAddress));\r
+ DEBUG_CODE (\r
+ DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextIa32->Esp);\r
+ );\r
+ CpuDeadLoop ();\r
+ }\r
+ }\r
+\r
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {\r
+ SmmProfilePFHandler (\r
+ SystemContext.SystemContextIa32->Eip,\r
+ SystemContext.SystemContextIa32->ExceptionData\r
+ );\r
+ } else {\r
+ SmiDefaultPFHandler ();\r
+ }\r
+\r
+ ReleaseSpinLock (&mPFLock);\r
+}\r
--- /dev/null
+/** @file\r
+Semaphore mechanism to indicate to the BSP that an AP has exited SMM\r
+after SMBASE relocation.\r
+\r
+Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
+This program and the accompanying materials\r
+are licensed and made available under the terms and conditions of the BSD License\r
+which accompanies this distribution. The full text of the license may be found at\r
+http://opensource.org/licenses/bsd-license.php\r
+\r
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include "PiSmmCpuDxeSmm.h"\r
+\r
+// Original SMM return address saved so execution can resume there after the\r
+// semaphore code runs.  NOTE(review): presumably consumed by\r
+// SmmRelocationSemaphoreComplete, which is not visible here - confirm.\r
+UINTN mSmmRelocationOriginalAddress;\r
+// Points at the per-CPU flag that is set TRUE once the AP has exited SMM.\r
+volatile BOOLEAN *mRebasedFlag;\r
+\r
+/**\r
+ Hook return address of SMM Save State so that semaphore code\r
+ can be executed immediately after AP exits SMM to indicate to\r
+ the BSP that an AP has exited SMM after SMBASE relocation.\r
+\r
+ @param[in] CpuIndex The processor index.\r
+ @param[in] RebasedFlag A pointer to a flag that is set to TRUE\r
+ immediately after AP exits SMM.\r
+\r
+**/\r
+VOID\r
+SemaphoreHook (\r
+ IN UINTN CpuIndex,\r
+ IN volatile BOOLEAN *RebasedFlag\r
+ )\r
+{\r
+ SMRAM_SAVE_STATE_MAP *CpuState;\r
+\r
+ mRebasedFlag = RebasedFlag;\r
+\r
+ //\r
+ // The save state is read from the default SMBASE; the AP is presumably\r
+ // still executing from SMM_DEFAULT_SMBASE at this point - confirm\r
+ // against the relocation flow.\r
+ //\r
+ CpuState = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);\r
+ //\r
+ // HookReturnFromSmm takes two hook addresses; both are pointed at the\r
+ // same semaphore-completion stub.\r
+ //\r
+ mSmmRelocationOriginalAddress = (UINTN)HookReturnFromSmm (\r
+ CpuIndex,\r
+ CpuState,\r
+ (UINT64)(UINTN)&SmmRelocationSemaphoreComplete,\r
+ (UINT64)(UINTN)&SmmRelocationSemaphoreComplete\r
+ );\r
+}\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
+# This program and the accompanying materials\r
+# are licensed and made available under the terms and conditions of the BSD License\r
+# which accompanies this distribution. The full text of the license may be found at\r
+# http://opensource.org/licenses/bsd-license.php.\r
+#\r
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+#\r
+# Module Name:\r
+#\r
+# SmiEntry.S\r
+#\r
+# Abstract:\r
+#\r
+# Code template of the SMI handler for a particular processor\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+ASM_GLOBAL ASM_PFX(gcSmiHandlerTemplate)\r
+ASM_GLOBAL ASM_PFX(gcSmiHandlerSize)\r
+ASM_GLOBAL ASM_PFX(gSmiCr3)\r
+ASM_GLOBAL ASM_PFX(gSmiStack)\r
+ASM_GLOBAL ASM_PFX(gSmbase)\r
+ASM_GLOBAL ASM_PFX(FeaturePcdGet (PcdCpuSmmDebug))\r
+ASM_GLOBAL ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))\r
+ASM_GLOBAL ASM_PFX(gSmiHandlerIdtr)\r
+\r
+# Byte offsets of fields within the per-CPU SMM descriptor located at\r
+# SMBASE + DSC_OFFSET.  NOTE(review): must match the\r
+# PROCESSOR_SMM_DESCRIPTOR layout - confirm against the C headers.\r
+.equ DSC_OFFSET, 0xfb00\r
+.equ DSC_GDTPTR, 0x30\r
+.equ DSC_GDTSIZ, 0x38\r
+.equ DSC_CS, 14\r
+.equ DSC_DS, 16\r
+.equ DSC_SS, 18\r
+.equ DSC_OTHERSEG, 20\r
+\r
+# GDT selectors used by the SMI entry template.\r
+.equ PROTECT_MODE_CS, 0x08\r
+.equ PROTECT_MODE_DS, 0x20\r
+.equ TSS_SEGMENT, 0x40\r
+\r
+ .text\r
+\r
+ASM_PFX(gcSmiHandlerTemplate):\r
+\r
+# Per-CPU SMI entry template: copied to each CPU's SMBASE + 0x8000, with\r
+# the gSmbase/gSmiStack/gSmiCr3 dwords below patched per CPU - presumably\r
+# by the C setup code; confirm.  Execution begins in 16-bit mode, so the\r
+# 32-bit mnemonics before Start32bit are reinterpreted by the CPU as the\r
+# 16-bit forms shown in the trailing comments.\r
+_SmiEntryPoint:\r
+ .byte 0xbb # mov bx, imm16\r
+ .word _GdtDesc - _SmiEntryPoint + 0x8000\r
+ .byte 0x2e,0xa1 # mov ax, cs:[offset16]\r
+ .word DSC_OFFSET + DSC_GDTSIZ\r
+ decl %eax # GDT limit = size - 1\r
+ movl %eax, %cs:(%edi) # mov cs:[bx], ax\r
+ .byte 0x66,0x2e,0xa1 # mov eax, cs:[offset16]\r
+ .word DSC_OFFSET + DSC_GDTPTR\r
+ movw %ax, %cs:2(%edi) # mov cs:[bx + 2], eax\r
+ movw %ax, %bp # ebp = GDT base\r
+ .byte 0x66\r
+ lgdt %cs:(%edi) # lgdt fword ptr cs:[bx]\r
+# Patch ProtectedMode Segment\r
+ .byte 0xb8 # mov ax, imm16\r
+ .word PROTECT_MODE_CS # set AX for segment directly\r
+ movl %eax, %cs:-2(%edi) # mov cs:[bx - 2], ax\r
+# Patch ProtectedMode entry\r
+ .byte 0x66, 0xbf # mov edi, SMBASE\r
+ASM_PFX(gSmbase): .space 4\r
+ .byte 0x67\r
+ lea ((Start32bit - _SmiEntryPoint) + 0x8000)(%edi), %ax\r
+ movw %ax, %cs:-6(%edi) # mov cs:[bx - 6], eax\r
+ movl %cr0, %ebx\r
+ .byte 0x66\r
+ andl $0x9ffafff3, %ebx # clear CD, NW, WP, AM, TS, EM\r
+ .byte 0x66\r
+ orl $0x23, %ebx # set PE, MP, NE\r
+ movl %ebx, %cr0\r
+ .byte 0x66,0xea # far jmp to Start32bit (patched above)\r
+ .space 4\r
+ .space 2\r
+_GdtDesc: .space 4\r
+ .space 2\r
+\r
+Start32bit:\r
+ movw $PROTECT_MODE_DS, %ax\r
+ movl %eax,%ds\r
+ movl %eax,%es\r
+ movl %eax,%fs\r
+ movl %eax,%gs\r
+ movl %eax,%ss\r
+ .byte 0xbc # mov esp, imm32\r
+ASM_PFX(gSmiStack): .space 4\r
+ movl $ASM_PFX(gSmiHandlerIdtr), %eax\r
+ lidt (%eax)\r
+ jmp ProtFlatMode\r
+\r
+ProtFlatMode:\r
+ .byte 0xb8 # mov eax, imm32\r
+ASM_PFX(gSmiCr3): .space 4\r
+ movl %eax, %cr3\r
+#\r
+# Need to test for CR4 specific bit support\r
+#\r
+ movl $1, %eax\r
+ cpuid # use CPUID to determine if specific CR4 bits are supported\r
+ xorl %eax, %eax # Clear EAX\r
+ testl $BIT2, %edx # Check for DE capabilities\r
+ jz L8\r
+ orl $BIT3, %eax\r
+L8:\r
+ testl $BIT6, %edx # Check for PAE capabilities\r
+ jz L9\r
+ orl $BIT5, %eax\r
+L9:\r
+ testl $BIT7, %edx # Check for MCE capabilities\r
+ jz L10\r
+ orl $BIT6, %eax\r
+L10:\r
+ testl $BIT24, %edx # Check for FXSR capabilities\r
+ jz L11\r
+ orl $BIT9, %eax\r
+L11:\r
+ testl $BIT25, %edx # Check for SSE capabilities\r
+ jz L12\r
+ orl $BIT10, %eax\r
+L12: # as cr4.PGE is not set here, refresh cr3\r
+ movl %eax, %cr4 # in PreModifyMtrrs() to flush TLB.\r
+ movl %cr0, %ebx\r
+ orl $0x080000000, %ebx # enable paging\r
+ movl %ebx, %cr0\r
+# Load flat data/stack selectors from the per-CPU SMM descriptor.\r
+ leal DSC_OFFSET(%edi),%ebx\r
+ movw DSC_DS(%ebx),%ax\r
+ movl %eax, %ds\r
+ movw DSC_OTHERSEG(%ebx),%ax\r
+ movl %eax, %es\r
+ movl %eax, %fs\r
+ movl %eax, %gs\r
+ movw DSC_SS(%ebx),%ax\r
+ movl %eax, %ss\r
+\r
+ cmpb $0, ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))\r
+ jz L5\r
+\r
+# Load TSS\r
+ movb $0x89, (TSS_SEGMENT + 5)(%ebp) # clear busy flag\r
+ movl $TSS_SEGMENT, %eax\r
+ ltrw %ax\r
+L5:\r
+\r
+# jmp _SmiHandler # instruction is not needed\r
+\r
+_SmiHandler:\r
+ cmpb $0, ASM_PFX(FeaturePcdGet (PcdCpuSmmDebug))\r
+ jz L3\r
+\r
+# Compute our runtime address position-independently (ebp = address of L1)\r
+# to locate what are presumably the save-state DR6/DR7 images near\r
+# SMBASE + 0x7fc8 - confirm against the SMRAM save state map.\r
+L6:\r
+ call L1\r
+L1:\r
+ popl %ebp\r
+ movl $0x80000001, %eax\r
+ cpuid\r
+ btl $29, %edx # check cpuid to identify X64 or IA32\r
+ leal (0x7fc8 - (L1 - _SmiEntryPoint))(%ebp), %edi\r
+ leal 4(%edi), %esi\r
+ jnc L2\r
+ addl $4, %esi\r
+L2:\r
+ movl (%esi), %ecx\r
+ movl (%edi), %edx\r
+L7:\r
+ movl %ecx, %dr6\r
+ movl %edx, %dr7 # restore DR6 & DR7 before running C code\r
+L3:\r
+\r
+ # Re-push the value at the top of stack as SmiRendezvous's argument.\r
+ pushl (%esp)\r
+\r
+ movl $ASM_PFX(SmiRendezvous), %eax\r
+ call *%eax\r
+ popl %ecx\r
+\r
+\r
+ cmpb $0, ASM_PFX(FeaturePcdGet (PcdCpuSmmDebug))\r
+ jz L4\r
+\r
+# Write possibly-modified DR6/DR7 back to the save-state images.\r
+ movl %dr6, %ecx\r
+ movl %dr7, %edx\r
+ movl %ecx, (%esi)\r
+ movl %edx, (%edi)\r
+L4:\r
+\r
+ rsm\r
+\r
+ASM_PFX(gcSmiHandlerSize): .word . - _SmiEntryPoint\r
--- /dev/null
+;------------------------------------------------------------------------------ ;\r
+; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
+; This program and the accompanying materials\r
+; are licensed and made available under the terms and conditions of the BSD License\r
+; which accompanies this distribution. The full text of the license may be found at\r
+; http://opensource.org/licenses/bsd-license.php.\r
+;\r
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+;\r
+; Module Name:\r
+;\r
+; SmiEntry.asm\r
+;\r
+; Abstract:\r
+;\r
+; Code template of the SMI handler for a particular processor\r
+;\r
+;-------------------------------------------------------------------------------\r
+\r
+ .686p\r
+ .model flat,C\r
+ .xmm\r
+\r
+; Byte offsets of fields within the per-CPU SMM descriptor located at\r
+; SMBASE + DSC_OFFSET.  NOTE(review): must match the\r
+; PROCESSOR_SMM_DESCRIPTOR layout - confirm against the C headers.\r
+DSC_OFFSET EQU 0fb00h\r
+DSC_GDTPTR EQU 30h\r
+DSC_GDTSIZ EQU 38h\r
+DSC_CS EQU 14\r
+DSC_DS EQU 16\r
+DSC_SS EQU 18\r
+DSC_OTHERSEG EQU 20\r
+\r
+; GDT selectors used by the SMI entry template.\r
+PROTECT_MODE_CS EQU 08h\r
+PROTECT_MODE_DS EQU 20h\r
+TSS_SEGMENT EQU 40h\r
+\r
+SmiRendezvous PROTO C\r
+\r
+EXTERNDEF gcSmiHandlerTemplate:BYTE\r
+EXTERNDEF gcSmiHandlerSize:WORD\r
+EXTERNDEF gSmiCr3:DWORD\r
+EXTERNDEF gSmiStack:DWORD\r
+EXTERNDEF gSmbase:DWORD\r
+EXTERNDEF FeaturePcdGet (PcdCpuSmmDebug):BYTE\r
+EXTERNDEF FeaturePcdGet (PcdCpuSmmStackGuard):BYTE\r
+EXTERNDEF gSmiHandlerIdtr:FWORD\r
+\r
+ .code\r
+\r
+gcSmiHandlerTemplate LABEL BYTE\r
+\r
+; Per-CPU SMI entry template: copied to each CPU's SMBASE + 8000h, with the\r
+; gSmbase/gSmiStack/gSmiCr3 dwords patched per CPU - presumably by the C\r
+; setup code; confirm.  Execution begins in 16-bit mode, so the 32-bit\r
+; mnemonics before @32bit execute as the 16-bit forms shown in the\r
+; trailing comments.\r
+_SmiEntryPoint:\r
+ DB 0bbh ; mov bx, imm16\r
+ DW offset _GdtDesc - _SmiEntryPoint + 8000h\r
+ DB 2eh, 0a1h ; mov ax, cs:[offset16]\r
+ DW DSC_OFFSET + DSC_GDTSIZ\r
+ dec eax ; GDT limit = size - 1\r
+ mov cs:[edi], eax ; mov cs:[bx], ax\r
+ DB 66h, 2eh, 0a1h ; mov eax, cs:[offset16]\r
+ DW DSC_OFFSET + DSC_GDTPTR\r
+ mov cs:[edi + 2], ax ; mov cs:[bx + 2], eax\r
+ mov bp, ax ; ebp = GDT base\r
+ DB 66h\r
+ lgdt fword ptr cs:[edi] ; lgdt fword ptr cs:[bx]\r
+; Patch ProtectedMode Segment\r
+ DB 0b8h ; mov ax, imm16\r
+ DW PROTECT_MODE_CS ; set AX for segment directly\r
+ mov cs:[edi - 2], eax ; mov cs:[bx - 2], ax\r
+; Patch ProtectedMode entry\r
+ DB 66h, 0bfh ; mov edi, SMBASE\r
+gSmbase DD ?\r
+ DB 67h\r
+ lea ax, [edi + (@32bit - _SmiEntryPoint) + 8000h]\r
+ mov cs:[edi - 6], ax ; mov cs:[bx - 6], eax\r
+ mov ebx, cr0\r
+ DB 66h\r
+ and ebx, 9ffafff3h ; clear CD, NW, WP, AM, TS, EM\r
+ DB 66h\r
+ or ebx, 23h ; set PE, MP, NE\r
+ mov cr0, ebx\r
+ DB 66h, 0eah ; far jmp to @32bit (patched above)\r
+ DD ?\r
+ DW ?\r
+_GdtDesc FWORD ?\r
+\r
+@32bit:\r
+ mov ax, PROTECT_MODE_DS\r
+ mov ds, ax\r
+ mov es, ax\r
+ mov fs, ax\r
+ mov gs, ax\r
+ mov ss, ax\r
+ DB 0bch ; mov esp, imm32\r
+gSmiStack DD ?\r
+ mov eax, offset gSmiHandlerIdtr\r
+ lidt fword ptr [eax]\r
+ jmp ProtFlatMode\r
+\r
+ProtFlatMode:\r
+ DB 0b8h ; mov eax, imm32\r
+gSmiCr3 DD ?\r
+ mov cr3, eax\r
+;\r
+; Need to test for CR4 specific bit support\r
+;\r
+ mov eax, 1\r
+ cpuid ; use CPUID to determine if specific CR4 bits are supported\r
+ xor eax, eax ; Clear EAX\r
+ test edx, BIT2 ; Check for DE capabilities\r
+ jz @f\r
+ or eax, BIT3\r
+@@:\r
+ test edx, BIT6 ; Check for PAE capabilities\r
+ jz @f\r
+ or eax, BIT5\r
+@@:\r
+ test edx, BIT7 ; Check for MCE capabilities\r
+ jz @f\r
+ or eax, BIT6\r
+@@:\r
+ test edx, BIT24 ; Check for FXSR capabilities\r
+ jz @f\r
+ or eax, BIT9\r
+@@:\r
+ test edx, BIT25 ; Check for SSE capabilities\r
+ jz @f\r
+ or eax, BIT10\r
+@@: ; as cr4.PGE is not set here, refresh cr3\r
+ mov cr4, eax ; in PreModifyMtrrs() to flush TLB.\r
+ mov ebx, cr0\r
+ or ebx, 080000000h ; enable paging\r
+ mov cr0, ebx\r
+; Load flat data/stack selectors from the per-CPU SMM descriptor.\r
+ lea ebx, [edi + DSC_OFFSET]\r
+ mov ax, [ebx + DSC_DS]\r
+ mov ds, eax\r
+ mov ax, [ebx + DSC_OTHERSEG]\r
+ mov es, eax\r
+ mov fs, eax\r
+ mov gs, eax\r
+ mov ax, [ebx + DSC_SS]\r
+ mov ss, eax\r
+\r
+ cmp FeaturePcdGet (PcdCpuSmmStackGuard), 0\r
+ jz @F\r
+\r
+; Load TSS\r
+ mov byte ptr [ebp + TSS_SEGMENT + 5], 89h ; clear busy flag\r
+ mov eax, TSS_SEGMENT\r
+ ltr ax\r
+@@:\r
+; jmp _SmiHandler ; instruction is not needed\r
+\r
+_SmiHandler PROC\r
+ cmp FeaturePcdGet (PcdCpuSmmDebug), 0\r
+ jz @3\r
+; Compute our runtime address position-independently (ebp = address of @1)\r
+; to locate what are presumably the save-state DR6/DR7 images near\r
+; SMBASE + 7fc8h - confirm against the SMRAM save state map.\r
+ call @1\r
+@1:\r
+ pop ebp\r
+ mov eax, 80000001h\r
+ cpuid\r
+ bt edx, 29 ; check cpuid to identify X64 or IA32\r
+ lea edi, [ebp - (@1 - _SmiEntryPoint) + 7fc8h]\r
+ lea esi, [edi + 4]\r
+ jnc @2\r
+ add esi, 4\r
+@2:\r
+ mov ecx, [esi]\r
+ mov edx, [edi]\r
+@5:\r
+ mov dr6, ecx\r
+ mov dr7, edx ; restore DR6 & DR7 before running C code\r
+@3:\r
+ mov ecx, [esp] ; CPU Index\r
+\r
+ push ecx\r
+ mov eax, SmiRendezvous\r
+ call eax\r
+ pop ecx\r
+\r
+ cmp FeaturePcdGet (PcdCpuSmmDebug), 0\r
+ jz @4\r
+\r
+; Write possibly-modified DR6/DR7 back to the save-state images.\r
+ mov ecx, dr6\r
+ mov edx, dr7\r
+ mov [esi], ecx\r
+ mov [edi], edx\r
+@4:\r
+ rsm\r
+_SmiHandler ENDP\r
+\r
+gcSmiHandlerSize DW $ - _SmiEntryPoint\r
+\r
+ END\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
+# This program and the accompanying materials\r
+# are licensed and made available under the terms and conditions of the BSD License\r
+# which accompanies this distribution. The full text of the license may be found at\r
+# http://opensource.org/licenses/bsd-license.php.\r
+#\r
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+#\r
+# Module Name:\r
+#\r
+# SmiException.S\r
+#\r
+# Abstract:\r
+#\r
+# Exception handlers used in SM mode\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+ASM_GLOBAL ASM_PFX(SmiPFHandler)\r
+ASM_GLOBAL ASM_PFX(PageFaultStubFunction)\r
+ASM_GLOBAL ASM_PFX(gSmiMtrrs)\r
+ASM_GLOBAL ASM_PFX(gcSmiIdtr)\r
+ASM_GLOBAL ASM_PFX(gcSmiGdtr)\r
+ASM_GLOBAL ASM_PFX(gcPsd)\r
+ASM_GLOBAL ASM_PFX(FeaturePcdGet (PcdCpuSmmProfileEnable))\r
+\r
+ .data\r
+\r
+# GDT used while in SMM.  Access bytes: 0x9b = present ring-0 code,\r
+# 0x93 = present ring-0 data; flags byte 0xcf = 4 KiB granularity 32-bit,\r
+# 0x8f = 4 KiB granularity 16-bit, 0xaf = long-mode (64-bit) code.\r
+NullSeg: .quad 0 # reserved by architecture\r
+CodeSeg32:\r
+ .word -1 # LimitLow\r
+ .word 0 # BaseLow\r
+ .byte 0 # BaseMid\r
+ .byte 0x9b\r
+ .byte 0xcf # LimitHigh\r
+ .byte 0 # BaseHigh\r
+ProtModeCodeSeg32:\r
+ .word -1 # LimitLow\r
+ .word 0 # BaseLow\r
+ .byte 0 # BaseMid\r
+ .byte 0x9b\r
+ .byte 0xcf # LimitHigh\r
+ .byte 0 # BaseHigh\r
+ProtModeSsSeg32:\r
+ .word -1 # LimitLow\r
+ .word 0 # BaseLow\r
+ .byte 0 # BaseMid\r
+ .byte 0x93\r
+ .byte 0xcf # LimitHigh\r
+ .byte 0 # BaseHigh\r
+DataSeg32:\r
+ .word -1 # LimitLow\r
+ .word 0 # BaseLow\r
+ .byte 0 # BaseMid\r
+ .byte 0x93\r
+ .byte 0xcf # LimitHigh\r
+ .byte 0 # BaseHigh\r
+CodeSeg16:\r
+ .word -1\r
+ .word 0\r
+ .byte 0\r
+ .byte 0x9b\r
+ .byte 0x8f\r
+ .byte 0\r
+DataSeg16:\r
+ .word -1\r
+ .word 0\r
+ .byte 0\r
+ .byte 0x93\r
+ .byte 0x8f\r
+ .byte 0\r
+CodeSeg64:\r
+ .word -1 # LimitLow\r
+ .word 0 # BaseLow\r
+ .byte 0 # BaseMid\r
+ .byte 0x9b\r
+ .byte 0xaf # LimitHigh\r
+ .byte 0 # BaseHigh\r
+.equ GDT_SIZE, .- NullSeg\r
+\r
+# TSS descriptors (type 0x89 = available 32-bit TSS).  They sit after\r
+# GDT_SIZE was computed, so gcSmiGdtr's limit does not cover them.\r
+# NOTE(review): the TSS selectors are presumably used with a larger\r
+# runtime GDT - confirm against the SMI entry code.\r
+TssSeg:\r
+ .word TSS_DESC_SIZE # LimitLow\r
+ .word 0 # BaseLow\r
+ .byte 0 # BaseMid\r
+ .byte 0x89\r
+ .byte 0x80 # LimitHigh\r
+ .byte 0 # BaseHigh\r
+ExceptionTssSeg:\r
+ .word TSS_DESC_SIZE # LimitLow\r
+ .word 0 # BaseLow\r
+ .byte 0 # BaseMid\r
+ .byte 0x89\r
+ .byte 0x80 # LimitHigh\r
+ .byte 0 # BaseHigh\r
+\r
+# Selector values derived from descriptor positions within the GDT above.\r
+.equ CODE_SEL, CodeSeg32 - NullSeg\r
+.equ DATA_SEL, DataSeg32 - NullSeg\r
+.equ TSS_SEL, TssSeg - NullSeg\r
+.equ EXCEPTION_TSS_SEL, ExceptionTssSeg - NullSeg\r
+\r
+# IA32 TSS fields\r
+.equ TSS_ESP0, 4\r
+.equ TSS_SS0, 8\r
+.equ TSS_ESP1, 12\r
+.equ TSS_SS1, 16\r
+.equ TSS_ESP2, 20\r
+.equ TSS_SS2, 24\r
+.equ TSS_CR3, 28\r
+.equ TSS_EIP, 32\r
+.equ TSS_EFLAGS, 36\r
+.equ TSS_EAX, 40\r
+.equ TSS_ECX, 44\r
+.equ TSS_EDX, 48\r
+.equ TSS_EBX, 52\r
+.equ TSS_ESP, 56\r
+.equ TSS_EBP, 60\r
+.equ TSS_ESI, 64\r
+.equ TSS_EDI, 68\r
+.equ TSS_ES, 72\r
+.equ TSS_CS, 76\r
+.equ TSS_SS, 80\r
+.equ TSS_DS, 84\r
+.equ TSS_FS, 88\r
+.equ TSS_GS, 92\r
+.equ TSS_LDT, 96\r
+\r
+# Create 2 TSS segments just after GDT\r
+# Zero-filled 32-bit TSS image.  NOTE(review): fields such as ESP0/SS0 are\r
+# presumably filled in at runtime before the TSS is used - confirm.\r
+TssDescriptor:\r
+ .word 0 # PreviousTaskLink\r
+ .word 0 # Reserved\r
+ .long 0 # ESP0\r
+ .word 0 # SS0\r
+ .word 0 # Reserved\r
+ .long 0 # ESP1\r
+ .word 0 # SS1\r
+ .word 0 # Reserved\r
+ .long 0 # ESP2\r
+ .word 0 # SS2\r
+ .word 0 # Reserved\r
+ .long 0 # CR3\r
+ .long 0 # EIP\r
+ .long 0 # EFLAGS\r
+ .long 0 # EAX\r
+ .long 0 # ECX\r
+ .long 0 # EDX\r
+ .long 0 # EBX\r
+ .long 0 # ESP\r
+ .long 0 # EBP\r
+ .long 0 # ESI\r
+ .long 0 # EDI\r
+ .word 0 # ES\r
+ .word 0 # Reserved\r
+ .word 0 # CS\r
+ .word 0 # Reserved\r
+ .word 0 # SS\r
+ .word 0 # Reserved\r
+ .word 0 # DS\r
+ .word 0 # Reserved\r
+ .word 0 # FS\r
+ .word 0 # Reserved\r
+ .word 0 # GS\r
+ .word 0 # Reserved\r
+ .word 0 # LDT Selector\r
+ .word 0 # Reserved\r
+ .word 0 # T\r
+ .word 0 # I/O Map Base\r
+.equ TSS_DESC_SIZE, . - TssDescriptor\r
+\r
+# Exception (stack-guard) TSS: pre-loaded so that a task switch on #PF\r
+# starts executing at PFHandlerEntry with flat CODE_SEL/DATA_SEL segments.\r
+ExceptionTssDescriptor:\r
+ .word 0 # PreviousTaskLink\r
+ .word 0 # Reserved\r
+ .long 0 # ESP0\r
+ .word 0 # SS0\r
+ .word 0 # Reserved\r
+ .long 0 # ESP1\r
+ .word 0 # SS1\r
+ .word 0 # Reserved\r
+ .long 0 # ESP2\r
+ .word 0 # SS2\r
+ .word 0 # Reserved\r
+ .long 0 # CR3\r
+ .long PFHandlerEntry # EIP\r
+ .long 00000002 # EFLAGS (leading 0 makes this octal in GAS; value is still 2)\r
+ .long 0 # EAX\r
+ .long 0 # ECX\r
+ .long 0 # EDX\r
+ .long 0 # EBX\r
+ .long 0 # ESP\r
+ .long 0 # EBP\r
+ .long 0 # ESI\r
+ .long 0 # EDI\r
+ .word DATA_SEL # ES\r
+ .word 0 # Reserved\r
+ .word CODE_SEL # CS\r
+ .word 0 # Reserved\r
+ .word DATA_SEL # SS\r
+ .word 0 # Reserved\r
+ .word DATA_SEL # DS\r
+ .word 0 # Reserved\r
+ .word DATA_SEL # FS\r
+ .word 0 # Reserved\r
+ .word DATA_SEL # GS\r
+ .word 0 # Reserved\r
+ .word 0 # LDT Selector\r
+ .word 0 # Reserved\r
+ .word 0 # T\r
+ .word 0 # I/O Map Base\r
+\r
+# Processor SMM Descriptor ("PSDSIG" block).  NOTE(review): field order and\r
+# sizes must match the PROCESSOR_SMM_DESCRIPTOR structure - confirm against\r
+# the C headers.\r
+ASM_PFX(gcPsd):\r
+ .ascii "PSDSIG "\r
+ .word PSD_SIZE\r
+ .word 2\r
+ .word 1 << 2\r
+ .word CODE_SEL\r
+ .word DATA_SEL\r
+ .word DATA_SEL\r
+ .word DATA_SEL\r
+ .word 0\r
+ .long 0\r
+ .long 0\r
+ .long 0\r
+ .long 0\r
+ .quad 0\r
+ .long NullSeg\r
+ .long 0\r
+ .long GDT_SIZE\r
+ .long 0\r
+ .space 24, 0\r
+ .long ASM_PFX(gSmiMtrrs)\r
+ .long 0\r
+.equ PSD_SIZE, . - ASM_PFX(gcPsd)\r
+\r
+# GDTR/IDTR pseudo-descriptor images: 16-bit limit (size - 1), 32-bit base.\r
+ASM_PFX(gcSmiGdtr): .word GDT_SIZE - 1\r
+ .long NullSeg\r
+\r
+ASM_PFX(gcSmiIdtr): .word IDT_SIZE - 1\r
+ .long _SmiIDT\r
+\r
+_SmiIDT:\r
# _SmiIDT continues here: 32 IDT entries (vectors 0-31), each an IA32
# interrupt-gate descriptor.  The handler offset halves are left 0 and are
# patched at runtime; CODE_SEL selects the flat 32-bit code segment.
# Emitted with .rept instead of 32 hand-written copies (the MASM twin of
# this file uses REPEAT 32); the generated bytes are identical.
.rept 32
    .word 0                             # Offset 0:15 (patched at runtime)
    .word CODE_SEL                      # Segment selector
    .byte 0                             # Unused
    .byte 0x8e                          # Interrupt Gate, Present, DPL = 0
    .word 0                             # Offset 16:31 (patched at runtime)
.endr

.equ IDT_SIZE, . - _SmiIDT
+\r
# Task-gate descriptor copied over the page-fault (vector 14) IDT entry by
# InitializeIDTSmmStackGuard when SMM Stack Guard is enabled.  A #PF then
# performs a hardware task switch to the exception TSS (EXCEPTION_TSS_SEL)
# instead of pushing a frame on a possibly exhausted stack.
TaskGateDescriptor:
    .word 0                             # Reserved
    .word EXCEPTION_TSS_SEL             # TSS Segment selector
    .byte 0                             # Reserved
    .byte 0x85                          # Task Gate, present, DPL = 0
    .word 0                             # Reserved
+\r
+ .text\r
+\r
+#------------------------------------------------------------------------------\r
+# PageFaultIdtHandlerSmmProfile is the entry point for all exceptions\r
+#\r
+# Stack:\r
+#+---------------------+\r
+#+ EFlags +\r
+#+---------------------+\r
+#+ CS +\r
+#+---------------------+\r
+#+ EIP +\r
+#+---------------------+\r
+#+ Error Code +\r
+#+---------------------+\r
+#+ Vector Number +\r
+#+---------------------+\r
+#+ EBP +\r
+#+---------------------+ <-- EBP\r
+#\r
+# RSP set to odd multiple of 8 means ErrCode PRESENT\r
+#------------------------------------------------------------------------------\r
+ASM_GLOBAL ASM_PFX(PageFaultIdtHandlerSmmProfile)\r
+ASM_PFX(PageFaultIdtHandlerSmmProfile):\r
+ pushl $0x0e # Page Fault\r
+ pushl %ebp\r
+ movl %esp, %ebp\r
+\r
+\r
+ #\r
+ # Align stack to make sure that EFI_FX_SAVE_STATE_IA32 of EFI_SYSTEM_CONTEXT_IA32\r
+ # is 16-byte aligned\r
+ #\r
+ andl $0xfffffff0, %esp\r
+ subl $12, %esp\r
+\r
+## UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;\r
+ pushl %eax\r
+ pushl %ecx\r
+ pushl %edx\r
+ pushl %ebx\r
+ leal (6*4)(%ebp), %ecx\r
+ pushl %ecx # ESP\r
+ pushl (%ebp) # EBP\r
+ pushl %esi\r
+ pushl %edi\r
+\r
+## UINT32 Gs, Fs, Es, Ds, Cs, Ss;\r
+ movl %ss, %eax\r
+ pushl %eax\r
+ movzwl (4*4)(%ebp), %eax\r
+ pushl %eax\r
+ movl %ds, %eax\r
+ pushl %eax\r
+ movl %es, %eax\r
+ pushl %eax\r
+ movl %fs, %eax\r
+ pushl %eax\r
+ movl %gs, %eax\r
+ pushl %eax\r
+\r
+## UINT32 Eip;\r
+ movl (3*4)(%ebp), %eax\r
+ pushl %eax\r
+\r
+## UINT32 Gdtr[2], Idtr[2];\r
+ subl $8, %esp\r
+ sidt (%esp)\r
+ movl 2(%esp), %eax\r
+ xchgl (%esp), %eax\r
+ andl $0xffff, %eax\r
+ movl %eax, 4(%esp)\r
+\r
+ subl $8, %esp\r
+ sgdt (%esp)\r
+ movl 2(%esp), %eax\r
+ xchgl (%esp), %eax\r
+ andl $0xffff, %eax\r
+ movl %eax, 4(%esp)\r
+\r
+## UINT32 Ldtr, Tr;\r
+ xorl %eax, %eax\r
+ strw %ax\r
+ pushl %eax\r
+ sldtw %ax\r
+ pushl %eax\r
+\r
+## UINT32 EFlags;\r
+ movl (5*4)(%ebp), %eax\r
+ pushl %eax\r
+\r
+## UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;\r
+ movl %cr4, %eax\r
+ orl $0x208, %eax\r
+ movl %eax, %cr4\r
+ pushl %eax\r
+ movl %cr3, %eax\r
+ pushl %eax\r
+ movl %cr2, %eax\r
+ pushl %eax\r
+ xorl %eax, %eax\r
+ pushl %eax\r
+ movl %cr0, %eax\r
+ pushl %eax\r
+\r
+## UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;\r
+ movl %dr7, %eax\r
+ pushl %eax\r
+ movl %dr6, %eax\r
+ pushl %eax\r
+ movl %dr3, %eax\r
+ pushl %eax\r
+ movl %dr2, %eax\r
+ pushl %eax\r
+ movl %dr1, %eax\r
+ pushl %eax\r
+ movl %dr0, %eax\r
+ pushl %eax\r
+\r
+## FX_SAVE_STATE_IA32 FxSaveState;\r
+ subl $512, %esp\r
+ movl %esp, %edi\r
+ .byte 0x0f, 0xae, 0x07 #fxsave [edi]\r
+\r
+# UEFI calling convention for IA32 requires that Direction flag in EFLAGs is clear\r
+ cld\r
+\r
+## UINT32 ExceptionData;\r
+ pushl (2*4)(%ebp)\r
+\r
+## call into exception handler\r
+\r
+## Prepare parameter and call\r
+ movl %esp, %edx\r
+ pushl %edx\r
+ movl (1*4)(%ebp), %edx\r
+ pushl %edx\r
+\r
+ #\r
+ # Call External Exception Handler\r
+ #\r
+ movl $ASM_PFX(SmiPFHandler), %eax\r
+ call *%eax\r
+ addl $8, %esp\r
+ jmp L4\r
+\r
+L4:\r
+## UINT32 ExceptionData;\r
+ addl $4, %esp\r
+\r
+## FX_SAVE_STATE_IA32 FxSaveState;\r
+ movl %esp, %esi\r
+ .byte 0xf, 0xae, 0xe # fxrstor [esi]\r
+ addl $512, %esp\r
+\r
+## UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;\r
+## Skip restoration of DRx registers to support debuggers\r
+## that set breakpoints in interrupt/exception context\r
+ addl $4*6, %esp\r
+\r
+## UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;\r
+ popl %eax\r
+ movl %eax, %cr0\r
+ addl $4, %esp # not for Cr1\r
+ popl %eax\r
+ movl %eax, %cr2\r
+ popl %eax\r
+ movl %eax, %cr3\r
+ popl %eax\r
+ movl %eax, %cr4\r
+\r
+## UINT32 EFlags;\r
+ popl (5*4)(%ebp)\r
+\r
+## UINT32 Ldtr, Tr;\r
+## UINT32 Gdtr[2], Idtr[2];\r
+## Best not let anyone mess with these particular registers...\r
+ addl $24, %esp\r
+\r
+## UINT32 Eip;\r
+ popl (3*4)(%ebp)\r
+\r
+## UINT32 Gs, Fs, Es, Ds, Cs, Ss;\r
+## NOTE - modified segment registers could hang the debugger... We\r
+## could attempt to insulate ourselves against this possibility,\r
+## but that poses risks as well.\r
+##\r
+ popl %gs\r
+ popl %fs\r
+ popl %es\r
+ popl %ds\r
+ popl (4*4)(%ebp)\r
+ popl %ss\r
+\r
+## UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;\r
+ popl %edi\r
+ popl %esi\r
+ addl $4, %esp # not for ebp\r
+ addl $4, %esp # not for esp\r
+ popl %ebx\r
+ popl %edx\r
+ popl %ecx\r
+ popl %eax\r
+\r
+ movl %ebp, %esp\r
+ popl %ebp\r
+\r
+# Enable TF bit after page fault handler runs\r
+ btsl $8, 16(%esp) # EFLAGS\r
+\r
+ addl $8, %esp # skip INT# & ErrCode\r
+Return:\r
+ iret\r
+#\r
+# Page Fault Exception Handler entry when SMM Stack Guard is enabled\r
# Execution starts here after a task switch
+#\r
# PFHandlerEntry: #PF handler body reached via the task gate (SMM Stack Guard
# enabled).  The faulting context lives in the interrupted task's TSS, not on
# the stack, so this code copies it from the TSS into an
# EFI_SYSTEM_CONTEXT_IA32, calls SmiPFHandler, and writes the (possibly
# modified) context back into the TSS before returning through the shared
# "Return:" iret, which performs the reverse task switch.
PFHandlerEntry:
#
# Get this processor's TSS
#
    subl    $8, %esp
    sgdt    2(%esp)                     # pseudo-descriptor at esp+2: limit, base
    movl    4(%esp), %eax               # GDT base
    addl    $8, %esp
    movl    (TSS_SEL+2)(%eax), %ecx     # descriptor bytes 2..5 (base 0:23 + access)
    shll    $8, %ecx
    movb    (TSS_SEL+7)(%eax), %cl      # descriptor byte 7 (base 24:31)
    rorl    $8, %ecx                    # ecx = TSS base
    # NOTE: here esp points at the error code pushed for the task switch
    movl    %esp, %ebp

    #
    # Align stack to make sure that EFI_FX_SAVE_STATE_IA32 of EFI_SYSTEM_CONTEXT_IA32
    # is 16-byte aligned
    #
    andl    $0xfffffff0, %esp
    subl    $12, %esp

## UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
    pushl   TSS_EAX(%ecx)
    pushl   TSS_ECX(%ecx)
    pushl   TSS_EDX(%ecx)
    pushl   TSS_EBX(%ecx)
    pushl   TSS_ESP(%ecx)
    pushl   TSS_EBP(%ecx)
    pushl   TSS_ESI(%ecx)
    pushl   TSS_EDI(%ecx)

## UINT32 Gs, Fs, Es, Ds, Cs, Ss;
    movzwl  TSS_SS(%ecx), %eax
    pushl   %eax
    movzwl  TSS_CS(%ecx), %eax
    pushl   %eax
    movzwl  TSS_DS(%ecx), %eax
    pushl   %eax
    movzwl  TSS_ES(%ecx), %eax
    pushl   %eax
    movzwl  TSS_FS(%ecx), %eax
    pushl   %eax
    movzwl  TSS_GS(%ecx), %eax
    pushl   %eax

## UINT32 Eip;
    pushl   TSS_EIP(%ecx)

## UINT32 Gdtr[2], Idtr[2];
    subl    $8, %esp
    sidt    (%esp)
    movl    2(%esp), %eax               # rearrange the 6-byte pseudo-descriptor
    xchgl   (%esp), %eax                # into two UINT32s: base, then limit
    andl    $0xFFFF, %eax
    movl    %eax, 4(%esp)

    subl    $8, %esp
    sgdt    (%esp)
    movl    2(%esp), %eax
    xchgl   (%esp), %eax
    andl    $0xFFFF, %eax
    movl    %eax, 4(%esp)

## UINT32 Ldtr, Tr;
    movl    $TSS_SEL, %eax              # TR of the interrupted context
    pushl   %eax
    movzwl  TSS_LDT(%ecx), %eax
    pushl   %eax

## UINT32 EFlags;
    pushl   TSS_EFLAGS(%ecx)

## UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
    movl    %cr4, %eax
    orl     $0x208, %eax                # set CR4.DE (bit 3) and CR4.OSFXSR (bit 9)
    movl    %eax, %cr4
    pushl   %eax
    movl    %cr3, %eax
    pushl   %eax
    movl    %cr2, %eax
    pushl   %eax
    xorl    %eax, %eax                  # CR1 does not exist; record 0
    pushl   %eax
    movl    %cr0, %eax
    pushl   %eax

## UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
    movl    %dr7, %eax
    pushl   %eax
    movl    %dr6, %eax
    pushl   %eax
    movl    %dr3, %eax
    pushl   %eax
    movl    %dr2, %eax
    pushl   %eax
    movl    %dr1, %eax
    pushl   %eax
    movl    %dr0, %eax
    pushl   %eax

## FX_SAVE_STATE_IA32 FxSaveState;
## Clear TS bit in CR0 to avoid Device Not Available Exception (#NM)
## when executing fxsave/fxrstor instruction
    clts
    subl    $512, %esp
    movl    %esp, %edi
    .byte   0x0f, 0xae, 0x07            # fxsave [edi]

# UEFI calling convention for IA32 requires that Direction flag in EFLAGs is clear
    cld

## UINT32 ExceptionData;
    pushl   (%ebp)                      # error code saved at task-switch time

## call into exception handler
    movl    %ecx, %ebx                  # preserve TSS base across the call
    movl    $ASM_PFX(SmiPFHandler), %eax

## Prepare parameter and call
    movl    %esp, %edx
    pushl   %edx                        # &SystemContext
    movl    $14, %edx                   # vector number: Page Fault
    pushl   %edx

    #
    # Call External Exception Handler
    #
    call    *%eax
    addl    $8, %esp

    movl    %ebx, %ecx                  # ecx = TSS base again
## UINT32 ExceptionData;
    addl    $4, %esp

## FX_SAVE_STATE_IA32 FxSaveState;
    movl    %esp, %esi
    .byte   0xf, 0xae, 0xe              # fxrstor [esi]
    addl    $512, %esp

## UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
## Skip restoration of DRx registers to support debuggers
## that set breakpoints in interrupt/exception context
    addl    $4*6, %esp

## UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
    popl    %eax
    movl    %eax, %cr0
    addl    $4, %esp                    # not for Cr1
    popl    %eax
    movl    %eax, %cr2
    popl    %eax
    movl    %eax, TSS_CR3(%ecx)         # CR3 is reloaded by the task switch
    popl    %eax
    movl    %eax, %cr4

## UINT32 EFlags;
    popl    TSS_EFLAGS(%ecx)

## UINT32 Ldtr, Tr;
## UINT32 Gdtr[2], Idtr[2];
## Best not let anyone mess with these particular registers...
    addl    $24, %esp

## UINT32 Eip;
    popl    TSS_EIP(%ecx)

## UINT32 Gs, Fs, Es, Ds, Cs, Ss;
## NOTE - modified segment registers could hang the debugger... We
## could attempt to insulate ourselves against this possibility,
## but that poses risks as well.
##
    popl    %eax
    movw    %ax, TSS_GS(%ecx)
    popl    %eax
    movw    %ax, TSS_FS(%ecx)
    popl    %eax
    movw    %ax, TSS_ES(%ecx)
    popl    %eax
    movw    %ax, TSS_DS(%ecx)
    popl    %eax
    movw    %ax, TSS_CS(%ecx)
    popl    %eax
    movw    %ax, TSS_SS(%ecx)

## UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
    popl    TSS_EDI(%ecx)
    popl    TSS_ESI(%ecx)
    addl    $4, %esp                    # not for ebp
    addl    $4, %esp                    # not for esp
    popl    TSS_EBX(%ecx)
    popl    TSS_EDX(%ecx)
    popl    TSS_ECX(%ecx)
    popl    TSS_EAX(%ecx)

    movl    %ebp, %esp

# Set single step DB# if SMM profile is enabled and page fault exception happens
    cmpb    $0, ASM_PFX(FeaturePcdGet (PcdCpuSmmProfileEnable))
    jz      Done2
# Create return context for iret in stub function
    movl    TSS_ESP(%ecx), %eax         # Get old stack pointer
    movl    TSS_EIP(%ecx), %ebx
    movl    %ebx, -0xc(%eax)            # create EIP in old stack
    movzwl  TSS_CS(%ecx), %ebx
    movl    %ebx, -0x8(%eax)            # create CS in old stack
    movl    TSS_EFLAGS(%ecx), %ebx
    btsl    $8,%ebx                     # set TF so a single-step #DB follows
    movl    %ebx, -0x4(%eax)            # create eflags in old stack
    movl    TSS_ESP(%ecx), %eax         # Get old stack pointer
    subl    $12, %eax                   # minus 12 byte
    movl    %eax, TSS_ESP(%ecx)         # Set new stack pointer

# Replace the EIP of interrupted task with stub function
    movl    $ASM_PFX(PageFaultStubFunction), %eax
    movl    %eax, TSS_EIP(%ecx)
# Jump to the iret so next page fault handler as a task will start again after iret.

Done2:

    addl    $4, %esp                    # skip ErrCode

    jmp     Return
+\r
# Stub the #PF task's EIP is redirected to (see PFHandlerEntry above): it runs
# in the interrupted task's context and immediately returns to the original
# interruption point via the iret frame fabricated on that task's stack.
ASM_PFX(PageFaultStubFunction):
#
# we need clean TS bit in CR0 to execute
# x87 FPU/MMX/SSE/SSE2/SSE3/SSSE3/SSE4 instructions.
#
    clts
    iret
+\r
# VOID InitializeIDTSmmStackGuard (VOID)
# Overwrites the page-fault entry of _SmiIDT with TaskGateDescriptor so that
# a #PF in SMM switches to a known-good stack via a task gate.
# Clobbers: eax, edx (ebx is saved/restored).
ASM_GLOBAL ASM_PFX(InitializeIDTSmmStackGuard)
ASM_PFX(InitializeIDTSmmStackGuard):
    pushl   %ebx
#
# If SMM Stack Guard feature is enabled, the Page Fault Exception entry in IDT
# is a Task Gate Descriptor so that when a Page Fault Exception occurs,
# the processors can use a known good stack in case stack ran out.
#
    leal    _SmiIDT + 14 * 8, %ebx      # vector 14 (#PF) * 8 bytes per gate
    leal    TaskGateDescriptor, %edx
    movl    (%edx), %eax                # copy the 8-byte descriptor
    movl    %eax, (%ebx)
    movl    4(%edx), %eax
    movl    %eax, 4(%ebx)

    popl    %ebx
    ret
--- /dev/null
+;------------------------------------------------------------------------------ ;\r
+; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
+; This program and the accompanying materials\r
+; are licensed and made available under the terms and conditions of the BSD License\r
+; which accompanies this distribution. The full text of the license may be found at\r
+; http://opensource.org/licenses/bsd-license.php.\r
+;\r
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+;\r
+; Module Name:\r
+;\r
+; SmiException.asm\r
+;\r
+; Abstract:\r
+;\r
+; Exception handlers used in SM mode\r
+;\r
+;-------------------------------------------------------------------------------\r
+\r
+ .686p\r
+ .model flat,C\r
+\r
+EXTERNDEF SmiPFHandler:PROC\r
+EXTERNDEF PageFaultStubFunction:PROC\r
+EXTERNDEF gSmiMtrrs:QWORD\r
+EXTERNDEF gcSmiIdtr:FWORD\r
+EXTERNDEF gcSmiGdtr:FWORD\r
+EXTERNDEF gcPsd:BYTE\r
+EXTERNDEF FeaturePcdGet (PcdCpuSmmProfileEnable):BYTE\r
+\r
+\r
+ .data\r
+\r
; GDT used while in SMM.  Access byte 9bh = present, DPL 0, code,
; execute/read; 93h = present, DPL 0, data, read/write.  Flags/limit-high
; byte 0cfh = G=1 (4 KB granularity), D/B=1 (32-bit), limit 19:16 = Fh;
; 8fh = G=1 with 16-bit default size; 0afh = G=1, L=1 (64-bit code).
NullSeg DQ 0                            ; reserved by architecture
CodeSeg32 LABEL QWORD
    DW -1                               ; LimitLow
    DW 0                                ; BaseLow
    DB 0                                ; BaseMid
    DB 9bh                              ; flat 32-bit code
    DB 0cfh                             ; LimitHigh
    DB 0                                ; BaseHigh
ProtModeCodeSeg32 LABEL QWORD
    DW -1                               ; LimitLow
    DW 0                                ; BaseLow
    DB 0                                ; BaseMid
    DB 9bh                              ; flat 32-bit code
    DB 0cfh                             ; LimitHigh
    DB 0                                ; BaseHigh
ProtModeSsSeg32 LABEL QWORD
    DW -1                               ; LimitLow
    DW 0                                ; BaseLow
    DB 0                                ; BaseMid
    DB 93h                              ; flat 32-bit data (stack)
    DB 0cfh                             ; LimitHigh
    DB 0                                ; BaseHigh
DataSeg32 LABEL QWORD
    DW -1                               ; LimitLow
    DW 0                                ; BaseLow
    DB 0                                ; BaseMid
    DB 93h                              ; flat 32-bit data
    DB 0cfh                             ; LimitHigh
    DB 0                                ; BaseHigh
CodeSeg16 LABEL QWORD
    DW -1                               ; 16-bit code segment
    DW 0
    DB 0
    DB 9bh
    DB 8fh
    DB 0
DataSeg16 LABEL QWORD
    DW -1                               ; 16-bit data segment
    DW 0
    DB 0
    DB 93h
    DB 8fh
    DB 0
CodeSeg64 LABEL QWORD
    DW -1                               ; LimitLow
    DW 0                                ; BaseLow
    DB 0                                ; BaseMid
    DB 9bh                              ; 64-bit code (L bit set below)
    DB 0afh                             ; LimitHigh
    DB 0                                ; BaseHigh
GDT_SIZE = $ - offset NullSeg

; NOTE(review): GDT_SIZE (and hence gcSmiGdtr's limit below) excludes the two
; TSS descriptors that follow, although TSS_SEL/EXCEPTION_TSS_SEL index past
; that limit -- confirm the GDT actually loaded at runtime covers them.
TssSeg LABEL QWORD
    DW TSS_DESC_SIZE                    ; LimitLow
    DW 0                                ; BaseLow (patched at runtime)
    DB 0                                ; BaseMid
    DB 89h                              ; 32-bit TSS, available, present
    DB 080h                             ; LimitHigh
    DB 0                                ; BaseHigh
ExceptionTssSeg LABEL QWORD
    DW TSS_DESC_SIZE                    ; LimitLow
    DW 0                                ; BaseLow (patched at runtime)
    DB 0                                ; BaseMid
    DB 89h                              ; 32-bit TSS, available, present
    DB 080h                             ; LimitHigh
    DB 0                                ; BaseHigh

; Selector values = byte offsets of the descriptors within this GDT.
CODE_SEL          = offset CodeSeg32 - offset NullSeg
DATA_SEL          = offset DataSeg32 - offset NullSeg
TSS_SEL           = offset TssSeg - offset NullSeg
EXCEPTION_TSS_SEL = offset ExceptionTssSeg - offset NullSeg
+\r
; Field layout of an IA32 32-bit Task-State Segment, used to address the
; interrupted task's saved registers from the #PF task-gate handler.
; Unnamed fields are the architecture's reserved/link words.
IA32_TSS STRUC
    DW ?                                ; previous task link
    DW ?                                ; reserved
    ESP0 DD ?
    SS0 DW ?
    DW ?                                ; reserved
    ESP1 DD ?
    SS1 DW ?
    DW ?                                ; reserved
    ESP2 DD ?
    SS2 DW ?
    DW ?                                ; reserved
    _CR3 DD ?
    EIP DD ?
    EFLAGS DD ?
    _EAX DD ?
    _ECX DD ?
    _EDX DD ?
    _EBX DD ?
    _ESP DD ?
    _EBP DD ?
    _ESI DD ?
    _EDI DD ?
    _ES DW ?
    DW ?                                ; reserved
    _CS DW ?
    DW ?                                ; reserved
    _SS DW ?
    DW ?                                ; reserved
    _DS DW ?
    DW ?                                ; reserved
    _FS DW ?
    DW ?                                ; reserved
    _GS DW ?
    DW ?                                ; reserved
    LDT DW ?
    DW ?                                ; reserved
    DW ?                                ; T flag
    DW ?                                ; I/O map base
IA32_TSS ENDS
+\r
; Create 2 TSS segments just after GDT.
; TssDescriptor is the TSS of the normal SMM task; ExceptionTssDescriptor is
; the TSS the #PF task gate switches to: its EIP points at PFHandlerEntry and
; its segment selectors/EFLAGS are pre-initialized (EFLAGS = 2: only the
; always-set reserved bit 1).
TssDescriptor LABEL BYTE
    DW 0                                ; PreviousTaskLink
    DW 0                                ; Reserved
    DD 0                                ; ESP0
    DW 0                                ; SS0
    DW 0                                ; Reserved
    DD 0                                ; ESP1
    DW 0                                ; SS1
    DW 0                                ; Reserved
    DD 0                                ; ESP2
    DW 0                                ; SS2
    DW 0                                ; Reserved
    DD 0                                ; CR3
    DD 0                                ; EIP
    DD 0                                ; EFLAGS
    DD 0                                ; EAX
    DD 0                                ; ECX
    DD 0                                ; EDX
    DD 0                                ; EBX
    DD 0                                ; ESP
    DD 0                                ; EBP
    DD 0                                ; ESI
    DD 0                                ; EDI
    DW 0                                ; ES
    DW 0                                ; Reserved
    DW 0                                ; CS
    DW 0                                ; Reserved
    DW 0                                ; SS
    DW 0                                ; Reserved
    DW 0                                ; DS
    DW 0                                ; Reserved
    DW 0                                ; FS
    DW 0                                ; Reserved
    DW 0                                ; GS
    DW 0                                ; Reserved
    DW 0                                ; LDT Selector
    DW 0                                ; Reserved
    DW 0                                ; T
    DW 0                                ; I/O Map Base
TSS_DESC_SIZE = $ - offset TssDescriptor

ExceptionTssDescriptor LABEL BYTE
    DW 0                                ; PreviousTaskLink
    DW 0                                ; Reserved
    DD 0                                ; ESP0
    DW 0                                ; SS0
    DW 0                                ; Reserved
    DD 0                                ; ESP1
    DW 0                                ; SS1
    DW 0                                ; Reserved
    DD 0                                ; ESP2
    DW 0                                ; SS2
    DW 0                                ; Reserved
    DD 0                                ; CR3
    DD offset PFHandlerEntry            ; EIP - the task starts in PFHandlerEntry
    DD 00000002                         ; EFLAGS - reserved bit 1 only (IF clear)
    DD 0                                ; EAX
    DD 0                                ; ECX
    DD 0                                ; EDX
    DD 0                                ; EBX
    DD 0                                ; ESP
    DD 0                                ; EBP
    DD 0                                ; ESI
    DD 0                                ; EDI
    DW DATA_SEL                         ; ES
    DW 0                                ; Reserved
    DW CODE_SEL                         ; CS
    DW 0                                ; Reserved
    DW DATA_SEL                         ; SS
    DW 0                                ; Reserved
    DW DATA_SEL                         ; DS
    DW 0                                ; Reserved
    DW DATA_SEL                         ; FS
    DW 0                                ; Reserved
    DW DATA_SEL                         ; GS
    DW 0                                ; Reserved
    DW 0                                ; LDT Selector
    DW 0                                ; Reserved
    DW 0                                ; T
    DW 0                                ; I/O Map Base
+\r
; SMM Processor Save-state Descriptor exported as gcPsd.  Field meanings are
; consumed outside this file -- NOTE(review): layout appears to follow the
; STM PROCESSOR_SMM_DESCRIPTOR convention (signature, size, selectors, GDT
; location, MTRR table pointer); confirm against the consumer's definition.
gcPsd LABEL BYTE
    DB 'PSDSIG '                        ; signature
    DW PSD_SIZE                         ; size of this structure
    DW 2
    DW 1 SHL 2
    DW CODE_SEL
    DW DATA_SEL
    DW DATA_SEL
    DW DATA_SEL
    DW 0
    DQ 0
    DQ 0
    DQ 0
    DQ offset NullSeg                   ; GDT base
    DD GDT_SIZE                         ; GDT size
    DD 0
    DB 24 dup (0)
    DQ offset gSmiMtrrs                 ; saved SMM MTRR table
PSD_SIZE = $ - offset gcPsd
+\r
; GDTR / IDTR pseudo-descriptor images for lgdt/lidt while in SMM.
gcSmiGdtr LABEL FWORD
    DW GDT_SIZE - 1                     ; limit
    DD offset NullSeg                   ; base

gcSmiIdtr LABEL FWORD
    DW IDT_SIZE - 1                     ; limit
    DD offset _SmiIDT                   ; base

; 32 interrupt-gate descriptors (vectors 0-31); the handler offset halves
; are 0 here and patched at runtime.
_SmiIDT LABEL QWORD
REPEAT 32
    DW 0                                ; Offset 0:15
    DW CODE_SEL                         ; Segment selector
    DB 0                                ; Unused
    DB 8eh                              ; Interrupt Gate, Present
    DW 0                                ; Offset 16:31
    ENDM
IDT_SIZE = $ - offset _SmiIDT

; Task gate copied over the #PF IDT entry by InitializeIDTSmmStackGuard so a
; page fault switches to the exception TSS (known-good stack).
TaskGateDescriptor LABEL DWORD
    DW 0                                ; Reserved
    DW EXCEPTION_TSS_SEL                ; TSS Segment selector
    DB 0                                ; Reserved
    DB 85h                              ; Task Gate, present, DPL = 0
    DW 0                                ; Reserved
+\r
+\r
+ .code\r
+;------------------------------------------------------------------------------\r
; PageFaultIdtHandlerSmmProfile is the entry point for page fault exceptions only
+;\r
+;\r
+; Stack:\r
+; +---------------------+\r
+; + EFlags +\r
+; +---------------------+\r
+; + CS +\r
+; +---------------------+\r
+; + EIP +\r
+; +---------------------+\r
+; + Error Code +\r
+; +---------------------+\r
+; + Vector Number +\r
+; +---------------------+\r
+; + EBP +\r
+; +---------------------+ <-- EBP\r
+;\r
+;\r
+;------------------------------------------------------------------------------\r
; Builds an EFI_SYSTEM_CONTEXT_IA32 on the stack, calls
; SmiPFHandler (VectorNumber, SystemContext), restores the context, then sets
; EFLAGS.TF (bit 8) in the return frame so a single-step trap follows iretd.
PageFaultIdtHandlerSmmProfile PROC
    push    0eh                         ; vector number: Page Fault

    push    ebp
    mov     ebp, esp


    ;
    ; Align stack to make sure that EFI_FX_SAVE_STATE_IA32 of EFI_SYSTEM_CONTEXT_IA32
    ; is 16-byte aligned
    ;
    and     esp, 0fffffff0h
    sub     esp, 12

;; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
    push    eax
    push    ecx
    push    edx
    push    ebx
    lea     ecx, [ebp + 6 * 4]          ; ESP value before the exception frame
    push    ecx                         ; ESP
    push    dword ptr [ebp]             ; EBP saved on entry
    push    esi
    push    edi

;; UINT32 Gs, Fs, Es, Ds, Cs, Ss;
    mov     eax, ss
    push    eax
    movzx   eax, word ptr [ebp + 4 * 4] ; CS from the exception frame
    push    eax
    mov     eax, ds
    push    eax
    mov     eax, es
    push    eax
    mov     eax, fs
    push    eax
    mov     eax, gs
    push    eax

;; UINT32 Eip;
    mov     eax, [ebp + 3 * 4]          ; EIP from the exception frame
    push    eax

;; UINT32 Gdtr[2], Idtr[2];
    sub     esp, 8
    sidt    [esp]
    mov     eax, [esp + 2]              ; rearrange the 6-byte pseudo-descriptor
    xchg    eax, [esp]                  ; into two UINT32s: base, then limit
    and     eax, 0FFFFh
    mov     [esp+4], eax

    sub     esp, 8
    sgdt    [esp]
    mov     eax, [esp + 2]
    xchg    eax, [esp]
    and     eax, 0FFFFh
    mov     [esp+4], eax

;; UINT32 Ldtr, Tr;
    xor     eax, eax
    str     ax
    push    eax
    sldt    ax
    push    eax

;; UINT32 EFlags;
    mov     eax, [ebp + 5 * 4]
    push    eax

;; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
    mov     eax, cr4
    or      eax, 208h                   ; set CR4.DE (bit 3) and CR4.OSFXSR (bit 9)
    mov     cr4, eax
    push    eax
    mov     eax, cr3
    push    eax
    mov     eax, cr2
    push    eax
    xor     eax, eax                    ; CR1 does not exist; record 0
    push    eax
    mov     eax, cr0
    push    eax

;; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
    mov     eax, dr7
    push    eax
    mov     eax, dr6
    push    eax
    mov     eax, dr3
    push    eax
    mov     eax, dr2
    push    eax
    mov     eax, dr1
    push    eax
    mov     eax, dr0
    push    eax

;; FX_SAVE_STATE_IA32 FxSaveState;
    sub     esp, 512
    mov     edi, esp
    db      0fh, 0aeh, 07h              ;fxsave [edi]

; UEFI calling convention for IA32 requires that Direction flag in EFLAGs is clear
    cld

;; UINT32 ExceptionData;
    push    dword ptr [ebp + 2 * 4]

;; call into exception handler

;; Prepare parameter and call
    mov     edx, esp
    push    edx                         ; &SystemContext
    mov     edx, dword ptr [ebp + 1 * 4]
    push    edx                         ; vector number

    ;
    ; Call External Exception Handler
    ;
    mov     eax, SmiPFHandler
    call    eax
    add     esp, 8                      ; drop the two parameters

;; UINT32 ExceptionData;
    add     esp, 4

;; FX_SAVE_STATE_IA32 FxSaveState;
    mov     esi, esp
    db      0fh, 0aeh, 0eh              ; fxrstor [esi]
    add     esp, 512

;; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
;; Skip restoration of DRx registers to support debuggers
;; that set breakpoint in interrupt/exception context
    add     esp, 4 * 6

;; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
    pop     eax
    mov     cr0, eax
    add     esp, 4                      ; not for Cr1
    pop     eax
    mov     cr2, eax
    pop     eax
    mov     cr3, eax
    pop     eax
    mov     cr4, eax

;; UINT32 EFlags;
    pop     dword ptr [ebp + 5 * 4]

;; UINT32 Ldtr, Tr;
;; UINT32 Gdtr[2], Idtr[2];
;; Best not let anyone mess with these particular registers...
    add     esp, 24

;; UINT32 Eip;
    pop     dword ptr [ebp + 3 * 4]

;; UINT32 Gs, Fs, Es, Ds, Cs, Ss;
;; NOTE - modified segment registers could hang the debugger... We
;; could attempt to insulate ourselves against this possibility,
;; but that poses risks as well.
;;
    pop     gs
    pop     fs
    pop     es
    pop     ds
    pop     dword ptr [ebp + 4 * 4]
    pop     ss

;; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
    pop     edi
    pop     esi
    add     esp, 4                      ; not for ebp
    add     esp, 4                      ; not for esp
    pop     ebx
    pop     edx
    pop     ecx
    pop     eax

    mov     esp, ebp
    pop     ebp

; Enable TF bit after page fault handler runs
    bts     dword ptr [esp + 16], 8     ; EFLAGS is at esp+16 (INT#, ErrCode, EIP, CS, EFLAGS)

    add     esp, 8                      ; skip INT# & ErrCode
Return:
    iretd
;
; Page Fault Exception Handler entry when SMM Stack Guard is enabled.
; Execution starts here after a task switch: the faulting context lives in
; the interrupted task's TSS, so it is copied into an EFI_SYSTEM_CONTEXT_IA32,
; SmiPFHandler is called, and the context is written back into the TSS before
; the reverse task switch via "Return:" above.
;
PFHandlerEntry::
;
; Get this processor's TSS
;
    sub     esp, 8
    sgdt    [esp + 2]                   ; pseudo-descriptor at esp+2: limit, base
    mov     eax, [esp + 4]              ; GDT base
    add     esp, 8
    mov     ecx, [eax + TSS_SEL + 2]    ; descriptor bytes 2..5 (base 0:23 + access)
    shl     ecx, 8
    mov     cl, [eax + TSS_SEL + 7]     ; descriptor byte 7 (base 24:31)
    ror     ecx, 8                      ; ecx = TSS base

    mov     ebp, esp

    ;
    ; Align stack to make sure that EFI_FX_SAVE_STATE_IA32 of EFI_SYSTEM_CONTEXT_IA32
    ; is 16-byte aligned
    ;
    and     esp, 0fffffff0h
    sub     esp, 12

;; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
    push    (IA32_TSS ptr [ecx])._EAX
    push    (IA32_TSS ptr [ecx])._ECX
    push    (IA32_TSS ptr [ecx])._EDX
    push    (IA32_TSS ptr [ecx])._EBX
    push    (IA32_TSS ptr [ecx])._ESP
    push    (IA32_TSS ptr [ecx])._EBP
    push    (IA32_TSS ptr [ecx])._ESI
    push    (IA32_TSS ptr [ecx])._EDI

;; UINT32 Gs, Fs, Es, Ds, Cs, Ss;
    movzx   eax, (IA32_TSS ptr [ecx])._SS
    push    eax
    movzx   eax, (IA32_TSS ptr [ecx])._CS
    push    eax
    movzx   eax, (IA32_TSS ptr [ecx])._DS
    push    eax
    movzx   eax, (IA32_TSS ptr [ecx])._ES
    push    eax
    movzx   eax, (IA32_TSS ptr [ecx])._FS
    push    eax
    movzx   eax, (IA32_TSS ptr [ecx])._GS
    push    eax

;; UINT32 Eip;
    push    (IA32_TSS ptr [ecx]).EIP

;; UINT32 Gdtr[2], Idtr[2];
    sub     esp, 8
    sidt    [esp]
    mov     eax, [esp + 2]              ; rearrange the 6-byte pseudo-descriptor
    xchg    eax, [esp]                  ; into two UINT32s: base, then limit
    and     eax, 0FFFFh
    mov     [esp+4], eax

    sub     esp, 8
    sgdt    [esp]
    mov     eax, [esp + 2]
    xchg    eax, [esp]
    and     eax, 0FFFFh
    mov     [esp+4], eax

;; UINT32 Ldtr, Tr;
    mov     eax, TSS_SEL                ; TR of the interrupted context
    push    eax
    movzx   eax, (IA32_TSS ptr [ecx]).LDT
    push    eax

;; UINT32 EFlags;
    push    (IA32_TSS ptr [ecx]).EFLAGS

;; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
    mov     eax, cr4
    or      eax, 208h                   ; set CR4.DE (bit 3) and CR4.OSFXSR (bit 9)
    mov     cr4, eax
    push    eax
    mov     eax, cr3
    push    eax
    mov     eax, cr2
    push    eax
    xor     eax, eax                    ; CR1 does not exist; record 0
    push    eax
    mov     eax, cr0
    push    eax

;; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
    mov     eax, dr7
    push    eax
    mov     eax, dr6
    push    eax
    mov     eax, dr3
    push    eax
    mov     eax, dr2
    push    eax
    mov     eax, dr1
    push    eax
    mov     eax, dr0
    push    eax

;; FX_SAVE_STATE_IA32 FxSaveState;
;; Clear TS bit in CR0 to avoid Device Not Available Exception (#NM)
;; when executing fxsave/fxrstor instruction
    clts
    sub     esp, 512
    mov     edi, esp
    db      0fh, 0aeh, 07h              ;fxsave [edi]

; UEFI calling convention for IA32 requires that Direction flag in EFLAGs is clear
    cld

;; UINT32 ExceptionData;
    push    dword ptr [ebp]             ; error code saved at task-switch time

;; call into exception handler
    mov     ebx, ecx                    ; preserve TSS base across the call
    mov     eax, SmiPFHandler

;; Prepare parameter and call
    mov     edx, esp
    push    edx                         ; &SystemContext
    mov     edx, 14                     ; vector number: Page Fault
    push    edx

    ;
    ; Call External Exception Handler
    ;
    call    eax
    add     esp, 8

    mov     ecx, ebx                    ; ecx = TSS base again
;; UINT32 ExceptionData;
    add     esp, 4

;; FX_SAVE_STATE_IA32 FxSaveState;
    mov     esi, esp
    db      0fh, 0aeh, 0eh              ; fxrstor [esi]
    add     esp, 512

;; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
;; Skip restoration of DRx registers to support debuggers
;; that set breakpoints in interrupt/exception context
    add     esp, 4 * 6

;; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
    pop     eax
    mov     cr0, eax
    add     esp, 4                      ; not for Cr1
    pop     eax
    mov     cr2, eax
    pop     eax
    mov     (IA32_TSS ptr [ecx])._CR3, eax ; CR3 is reloaded by the task switch
    pop     eax
    mov     cr4, eax

;; UINT32 EFlags;
    pop     (IA32_TSS ptr [ecx]).EFLAGS

;; UINT32 Ldtr, Tr;
;; UINT32 Gdtr[2], Idtr[2];
;; Best not let anyone mess with these particular registers...
    add     esp, 24

;; UINT32 Eip;
    pop     (IA32_TSS ptr [ecx]).EIP

;; UINT32 Gs, Fs, Es, Ds, Cs, Ss;
;; NOTE - modified segment registers could hang the debugger... We
;; could attempt to insulate ourselves against this possibility,
;; but that poses risks as well.
;;
    pop     eax
    mov     (IA32_TSS ptr [ecx])._GS, ax
    pop     eax
    mov     (IA32_TSS ptr [ecx])._FS, ax
    pop     eax
    mov     (IA32_TSS ptr [ecx])._ES, ax
    pop     eax
    mov     (IA32_TSS ptr [ecx])._DS, ax
    pop     eax
    mov     (IA32_TSS ptr [ecx])._CS, ax
    pop     eax
    mov     (IA32_TSS ptr [ecx])._SS, ax

;; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
    pop     (IA32_TSS ptr [ecx])._EDI
    pop     (IA32_TSS ptr [ecx])._ESI
    add     esp, 4                      ; not for ebp
    add     esp, 4                      ; not for esp
    pop     (IA32_TSS ptr [ecx])._EBX
    pop     (IA32_TSS ptr [ecx])._EDX
    pop     (IA32_TSS ptr [ecx])._ECX
    pop     (IA32_TSS ptr [ecx])._EAX

    mov     esp, ebp

; Set single step DB# if SMM profile is enabled and page fault exception happens
    cmp     FeaturePcdGet (PcdCpuSmmProfileEnable), 0
    jz      @Done2

; Create return context for iretd in stub function
    mov     eax, (IA32_TSS ptr [ecx])._ESP ; Get old stack pointer
    mov     ebx, (IA32_TSS ptr [ecx]).EIP
    mov     [eax - 0ch], ebx            ; create EIP in old stack
    movzx   ebx, (IA32_TSS ptr [ecx])._CS
    mov     [eax - 08h], ebx            ; create CS in old stack
    mov     ebx, (IA32_TSS ptr [ecx]).EFLAGS
    bts     ebx, 8                      ; set TF so a single-step #DB follows
    mov     [eax - 04h], ebx            ; create eflags in old stack
    mov     eax, (IA32_TSS ptr [ecx])._ESP ; Get old stack pointer
    sub     eax, 0ch                    ; minus 12 byte
    mov     (IA32_TSS ptr [ecx])._ESP, eax ; Set new stack pointer
; Replace the EIP of interrupted task with stub function
    mov     eax, PageFaultStubFunction
    mov     (IA32_TSS ptr [ecx]).EIP, eax
; Jump to the iretd so next page fault handler as a task will start again after iretd.
@Done2:
    add     esp, 4                      ; skip ErrCode

    jmp     Return
PageFaultIdtHandlerSmmProfile ENDP
+\r
; Stub the #PF task's EIP is redirected to (see PFHandlerEntry above): it runs
; in the interrupted task's context and immediately returns to the original
; interruption point via the iretd frame fabricated on that task's stack.
PageFaultStubFunction PROC
;
; we need clean TS bit in CR0 to execute
; x87 FPU/MMX/SSE/SSE2/SSE3/SSSE3/SSE4 instructions.
;
    clts
    iretd
PageFaultStubFunction ENDP
+\r
; VOID InitializeIDTSmmStackGuard (VOID)
; Overwrites the page-fault entry of _SmiIDT with TaskGateDescriptor so that
; a #PF in SMM switches to a known-good stack via a task gate.
; Clobbers: eax, edx (ebx saved/restored by USES).
InitializeIDTSmmStackGuard PROC USES ebx
;
; If SMM Stack Guard feature is enabled, the Page Fault Exception entry in IDT
; is a Task Gate Descriptor so that when a Page Fault Exception occurs,
; the processors can use a known good stack in case stack runs out.
;
    lea     ebx, _SmiIDT + 14 * 8       ; vector 14 (#PF) * 8 bytes per gate
    lea     edx, TaskGateDescriptor
    mov     eax, [edx]                  ; copy the 8-byte descriptor
    mov     [ebx], eax
    mov     eax, [edx + 4]
    mov     [ebx + 4], eax
    ret
InitializeIDTSmmStackGuard ENDP
+\r
+ END\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
+# This program and the accompanying materials\r
+# are licensed and made available under the terms and conditions of the BSD License\r
+# which accompanies this distribution. The full text of the license may be found at\r
+# http://opensource.org/licenses/bsd-license.php.\r
+#\r
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+#\r
+# Module Name:\r
+#\r
+# SmmInit.S\r
+#\r
+# Abstract:\r
+#\r
+# Functions for relocating SMBASE's for all processors\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+ASM_GLOBAL ASM_PFX(gSmmCr0)\r
+ASM_GLOBAL ASM_PFX(gSmmCr3)\r
+ASM_GLOBAL ASM_PFX(gSmmCr4)\r
+ASM_GLOBAL ASM_PFX(gcSmmInitTemplate)\r
+ASM_GLOBAL ASM_PFX(gcSmmInitSize)\r
+ASM_GLOBAL ASM_PFX(gSmmJmpAddr)\r
+ASM_GLOBAL ASM_PFX(SmmRelocationSemaphoreComplete)\r
+ASM_GLOBAL ASM_PFX(gSmmInitStack)\r
+ASM_GLOBAL ASM_PFX(gcSmiInitGdtr)\r
+\r
+.equ PROTECT_MODE_CS, 0x08\r
+.equ PROTECT_MODE_DS, 0x20\r
+\r
+ .text\r
+\r
# GDTR pseudo-descriptor image used by the lgdt in SmmStartup below.
# Zero here; NOTE(review): contents appear to be filled in at runtime before
# the first SMI -- confirm against the C code that exports gcSmiInitGdtr.
ASM_PFX(gcSmiInitGdtr):
    .word 0
    .quad 0
+\r
# First code executed on SMI entry (copied near SMBASE, runs in 16-bit mode,
# hence the hand-encoded 0x66/0x67 operand/address-size prefixes).  The
# .space 4 slots after each 0x66,0xb8 ("mov eax, imm32") are immediates
# patched at runtime with the CR3/CR4/CR0 values and initial stack pointer.
SmmStartup:
    .byte 0x66,0xb8                     # mov eax, imm32 (imm32 = gSmmCr3 slot)
ASM_PFX(gSmmCr3): .space 4
    movl    %eax, %cr3
    .byte 0x67,0x66                     # addr32/data32 prefixes for lgdt
    lgdt    %cs:(ASM_PFX(gcSmiInitGdtr) - SmmStartup)(%ebp)
    .byte 0x66,0xb8                     # mov eax, imm32 (imm32 = gSmmCr4 slot)
ASM_PFX(gSmmCr4): .space 4
    movl    %eax, %cr4
    .byte 0x66,0xb8                     # mov eax, imm32 (imm32 = gSmmCr0 slot)
ASM_PFX(gSmmCr0): .space 4
    .byte 0xbf, PROTECT_MODE_DS, 0      # mov di, PROTECT_MODE_DS
    movl    %eax, %cr0                  # enter protected mode
    .byte 0x66,0xea                     # jmp far [ptr48] to flush the pipeline
ASM_PFX(gSmmJmpAddr): .long Start32bit
    .word PROTECT_MODE_CS
Start32bit:
    # now in 32-bit protected mode; load flat data selectors (di set above)
    movl    %edi,%ds
    movl    %edi,%es
    movl    %edi,%fs
    movl    %edi,%gs
    movl    %edi,%ss
    .byte 0xbc                          # mov esp, imm32 (imm32 = gSmmInitStack slot)
ASM_PFX(gSmmInitStack): .space 4
    call    ASM_PFX(SmmInitHandler)
    rsm                                 # resume the interrupted context
+\r
# Relocatable 16-bit code template (copied to each CPU's SMI entry point);
# it loads ebp with the address of SmmStartup adjusted by -0x30000 and jumps
# there.  NOTE(review): the 0x30000 bias presumably accounts for where the
# template is placed relative to SmmStartup's linked address -- confirm
# against the C code that copies gcSmmInitTemplate.
ASM_PFX(gcSmmInitTemplate):

_SmmInitTemplate:
    .byte 0x66                          # data32 prefix: mov ebp, imm32 in 16-bit mode
    movl    $SmmStartup, %ebp
    .byte 0x66, 0x81, 0xed, 0, 0, 3, 0  # sub ebp, 0x30000
    jmp     *%bp                        # jmp ebp actually

# Size of the template in bytes, read by the C code that copies it.
ASM_PFX(gcSmmInitSize): .word . - ASM_PFX(gcSmmInitTemplate)
+\r
+\r
# Signals that this CPU's SMBASE relocation finished: sets *mRebasedFlag = 1
# (eax preserved), then tail-jumps to the original SMI handler address saved
# in mSmmRelocationOriginalAddress.
ASM_PFX(SmmRelocationSemaphoreComplete):
    pushl   %eax
    movl    ASM_PFX(mRebasedFlag), %eax
    movb    $1, (%eax)                  # *mRebasedFlag = 1
    popl    %eax
    jmp     *ASM_PFX(mSmmRelocationOriginalAddress)
--- /dev/null
+;------------------------------------------------------------------------------ ;\r
+; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
+; This program and the accompanying materials\r
+; are licensed and made available under the terms and conditions of the BSD License\r
+; which accompanies this distribution. The full text of the license may be found at\r
+; http://opensource.org/licenses/bsd-license.php.\r
+;\r
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+;\r
+; Module Name:\r
+;\r
+; SmmInit.Asm\r
+;\r
+; Abstract:\r
+;\r
+; Functions for relocating SMBASE's for all processors\r
+;\r
+;-------------------------------------------------------------------------------\r
+\r
+ .686p\r
+ .xmm\r
+ .model flat,C\r
+\r
+; C handler called from SmmStartup once protected mode is established.\r
+SmmInitHandler PROTO C\r
+\r
+; Symbols shared with the C side.  The gSmm* data labels are placeholders\r
+; embedded in the instruction stream (see SmmStartup below) and are filled\r
+; in before use; mRebasedFlag and mSmmRelocationOriginalAddress are defined\r
+; in C.\r
+EXTERNDEF C gSmmCr0:DWORD\r
+EXTERNDEF C gSmmCr3:DWORD\r
+EXTERNDEF C gSmmCr4:DWORD\r
+EXTERNDEF C gcSmmInitTemplate:BYTE\r
+EXTERNDEF C gcSmmInitSize:WORD\r
+EXTERNDEF C gSmmJmpAddr:QWORD\r
+EXTERNDEF C mRebasedFlag:PTR BYTE\r
+EXTERNDEF C mSmmRelocationOriginalAddress:DWORD\r
+EXTERNDEF C gSmmInitStack:DWORD\r
+EXTERNDEF C gcSmiInitGdtr:FWORD\r
+\r
+; Selectors into the GDT referenced by gcSmiInitGdtr; must match the GDT\r
+; layout installed by the C side.\r
+PROTECT_MODE_CS EQU 08h\r
+PROTECT_MODE_DS EQU 20h\r
+\r
+ .code\r
+\r
+; GDT descriptor (16-bit limit + 32-bit base) loaded by SmmStartup's lgdt;\r
+; zero placeholders here, contents filled in at runtime.\r
+gcSmiInitGdtr LABEL FWORD\r
+ DW 0\r
+ DQ 0\r
+\r
+;------------------------------------------------------------------------------\r
+; SMI handler entry.  The CPU begins here in real (16-bit) mode, so 32-bit\r
+; operations are hand-encoded with 66h/67h prefixes, and the imm32 fields\r
+; (gSmmCr3/gSmmCr4/gSmmCr0/gSmmInitStack/gSmmJmpAddr) are data labels whose\r
+; values are patched in by C code beforehand.  ebp is expected to hold this\r
+; routine's runtime base address (set up by _SmmInitTemplate below).\r
+;------------------------------------------------------------------------------\r
+SmmStartup PROC\r
+ DB 66h, 0b8h ; mov eax, imm32 -- imm32 is the gSmmCr3 field\r
+gSmmCr3 DD ?\r
+ mov cr3, eax\r
+ DB 67h, 66h ; addr32/data32 prefixes for the lgdt below\r
+ lgdt fword ptr cs:[ebp + (offset gcSmiInitGdtr - SmmStartup)]\r
+ DB 66h, 0b8h ; mov eax, imm32 -- imm32 is the gSmmCr4 field\r
+gSmmCr4 DD ?\r
+ mov cr4, eax\r
+ DB 66h, 0b8h ; mov eax, imm32 -- imm32 is the gSmmCr0 field\r
+gSmmCr0 DD ?\r
+ DB 0bfh, PROTECT_MODE_DS, 0 ; mov di, PROTECT_MODE_DS\r
+ mov cr0, eax ; load patched CR0 image; far jump below enters protected mode\r
+ DB 66h, 0eah ; jmp far ptr16:32 -- reloads CS with PROTECT_MODE_CS\r
+gSmmJmpAddr LABEL QWORD\r
+ DD @32bit\r
+ DW PROTECT_MODE_CS\r
+@32bit:\r
+ mov ds, edi ; di = PROTECT_MODE_DS (loaded above)\r
+ mov es, edi\r
+ mov fs, edi\r
+ mov gs, edi\r
+ mov ss, edi\r
+ DB 0bch ; mov esp, imm32 -- imm32 is the gSmmInitStack field\r
+gSmmInitStack DD ?\r
+ call SmmInitHandler\r
+ rsm ; resume from SMM to the interrupted context\r
+SmmStartup ENDP\r
+\r
+;------------------------------------------------------------------------------\r
+; 16-bit trampoline template.  gcSmmInitSize tells the C side how many bytes\r
+; to copy to the SMI entry location.  It computes the runtime address of\r
+; SmmStartup in ebp (link-time address minus 30000h) and jumps there.\r
+;------------------------------------------------------------------------------\r
+gcSmmInitTemplate LABEL BYTE\r
+\r
+_SmmInitTemplate PROC\r
+ DB 66h ; data32 prefix: next mov is 32-bit while in 16-bit mode\r
+ mov ebp, SmmStartup\r
+ DB 66h, 81h, 0edh, 00h, 00h, 03h, 00 ; sub ebp, 30000h -- rebase to runtime address\r
+ jmp bp ; jmp ebp actually (ebp already holds the full target)\r
+_SmmInitTemplate ENDP\r
+\r
+; Byte count of the template above, exported for the C-side copy.\r
+gcSmmInitSize DW $ - gcSmmInitTemplate\r
+\r
+;------------------------------------------------------------------------------\r
+; Signals that this CPU's SMBASE relocation finished: sets *mRebasedFlag = 1,\r
+; then jumps to the original address saved in mSmmRelocationOriginalAddress\r
+; to resume normal execution.  Both symbols are defined in C.\r
+;------------------------------------------------------------------------------\r
+SmmRelocationSemaphoreComplete PROC\r
+ push eax ; preserve eax for the resumed context\r
+ mov eax, mRebasedFlag\r
+ mov byte ptr [eax], 1 ; *mRebasedFlag = 1\r
+ pop eax\r
+ jmp [mSmmRelocationOriginalAddress]\r
+SmmRelocationSemaphoreComplete ENDP\r
+ END\r
--- /dev/null
+/** @file\r
+IA-32 processor specific functions to enable SMM profile.\r
+\r
+Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>\r
+This program and the accompanying materials\r
+are licensed and made available under the terms and conditions of the BSD License\r
+which accompanies this distribution. The full text of the license may be found at\r
+http://opensource.org/licenses/bsd-license.php\r
+\r
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include "PiSmmCpuDxeSmm.h"\r
+#include "SmmProfileInternal.h"\r
+\r
+/**\r
+  Create the SMM page table for the S3 resume path.\r
+\r
+  Stores the CR3 value produced by Gen4GPageTable() into\r
+  mSmmS3ResumeState->SmmS3Cr3 so the S3 resume code can activate SMM paging.\r
+\r
+**/\r
+VOID\r
+InitSmmS3Cr3 (\r
+  VOID\r
+  )\r
+{\r
+  mSmmS3ResumeState->SmmS3Cr3 = Gen4GPageTable (0);\r
+}\r
+\r
+/**\r
+  Allocate pages for creating 4KB-page based on 2MB-page when page fault happens.\r
+  32-bit firmware does not need it.\r
+\r
+**/\r
+VOID\r
+InitPagesForPFHandler (\r
+  VOID\r
+  )\r
+{\r
+  //\r
+  // Intentionally empty: 32-bit firmware does not pre-allocate page-split\r
+  // pages (see function header).\r
+  //\r
+}\r
+\r
+/**\r
+  Update page table to map the memory correctly in order to make the instruction\r
+  which caused page fault execute successfully. And it also save the original page\r
+  table to be restored in single-step exception. 32-bit firmware does not need it.\r
+\r
+  @param PageTable PageTable Address.\r
+  @param PFAddress The memory address which caused page fault exception.\r
+  @param CpuIndex The index of the processor.\r
+  @param ErrorCode The Error code of exception.\r
+  @param IsValidPFAddress The flag indicates if SMM profile data need be added.\r
+\r
+**/\r
+VOID\r
+RestorePageTableAbove4G (\r
+  UINT64 *PageTable,\r
+  UINT64 PFAddress,\r
+  UINTN CpuIndex,\r
+  UINTN ErrorCode,\r
+  BOOLEAN *IsValidPFAddress\r
+  )\r
+{\r
+  //\r
+  // Intentionally empty: 32-bit firmware does not need this remapping\r
+  // (see function header).  *IsValidPFAddress is deliberately left\r
+  // unchanged.\r
+  //\r
+}\r
+\r
+/**\r
+  Clear the Trap Flag (TF, bit 8) in the saved EFLAGS so single-stepping\r
+  stops when the interrupted context resumes.\r
+\r
+  @param SystemContext A pointer to the processor context when\r
+                       the interrupt occurred on the processor.\r
+\r
+**/\r
+VOID\r
+ClearTrapFlag (\r
+  IN OUT EFI_SYSTEM_CONTEXT SystemContext\r
+  )\r
+{\r
+  UINTN  Eflags;\r
+\r
+  Eflags = SystemContext.SystemContextIa32->Eflags;\r
+  SystemContext.SystemContextIa32->Eflags = Eflags & ~((UINTN) BIT8);\r
+}\r
--- /dev/null
+/** @file\r
+IA-32 processor specific header file to enable SMM profile.\r
+\r
+Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>\r
+This program and the accompanying materials\r
+are licensed and made available under the terms and conditions of the BSD License\r
+which accompanies this distribution. The full text of the license may be found at\r
+http://opensource.org/licenses/bsd-license.php\r
+\r
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#ifndef _SMM_PROFILE_ARCH_H_\r
+#define _SMM_PROFILE_ARCH_H_\r
+\r
+#pragma pack (1)\r
+\r
+//\r
+// 32-bit Debug Store (DS) save area used to configure the Branch Trace\r
+// Store (BTS) and PEBS buffers.  Layout is hardware-defined (Intel SDM DS\r
+// save area format) -- do not reorder or repack fields.\r
+//\r
+typedef struct _MSR_DS_AREA_STRUCT {\r
+  UINT32 BTSBufferBase;\r
+  UINT32 BTSIndex;\r
+  UINT32 BTSAbsoluteMaximum;\r
+  UINT32 BTSInterruptThreshold;\r
+  UINT32 PEBSBufferBase;\r
+  UINT32 PEBSIndex;\r
+  UINT32 PEBSAbsoluteMaximum;\r
+  UINT32 PEBSInterruptThreshold;\r
+  UINT32 PEBSCounterReset[4];\r
+  UINT32 Reserved;\r
+} MSR_DS_AREA_STRUCT;\r
+\r
+//\r
+// One Branch Trace Store record (32-bit format): source and destination\r
+// addresses of a taken branch plus a branch-predicted bit.  Hardware-defined\r
+// layout -- do not reorder fields.\r
+//\r
+typedef struct _BRANCH_TRACE_RECORD {\r
+  UINT32 LastBranchFrom;\r
+  UINT32 LastBranchTo;\r
+  UINT32 Rsvd0 : 4;\r
+  UINT32 BranchPredicted : 1;\r
+  UINT32 Rsvd1 : 27;\r
+} BRANCH_TRACE_RECORD;\r
+\r
+//\r
+// One PEBS record (32-bit format): the general-purpose register state\r
+// captured when the sampled event triggered.  Hardware-defined layout --\r
+// do not reorder fields.\r
+//\r
+typedef struct _PEBS_RECORD {\r
+  UINT32 Eflags;\r
+  UINT32 LinearIP;\r
+  UINT32 Eax;\r
+  UINT32 Ebx;\r
+  UINT32 Ecx;\r
+  UINT32 Edx;\r
+  UINT32 Esi;\r
+  UINT32 Edi;\r
+  UINT32 Ebp;\r
+  UINT32 Esp;\r
+} PEBS_RECORD;\r
+\r
+#pragma pack ()\r
+\r
+//\r
+// Mask selecting the 4KB-aligned physical-address bits (12..31) of a 32-bit\r
+// page-table entry: ((1 << 32) - SIZE_4KB) == 0xFFFFF000.\r
+//\r
+#define PHYSICAL_ADDRESS_MASK ((1ull << 32) - SIZE_4KB)\r
+\r
+/**\r
+  Update page table to map the memory correctly in order to make the instruction\r
+  which caused page fault execute successfully. And it also save the original page\r
+  table to be restored in single-step exception. 32-bit firmware does not need it.\r
+\r
+  @param PageTable PageTable Address.\r
+  @param PFAddress The memory address which caused page fault exception.\r
+  @param CpuIndex The index of the processor.\r
+  @param ErrorCode The Error code of exception.\r
+  @param IsValidPFAddress The flag indicates if SMM profile data need be added.\r
+\r
+**/\r
+VOID\r
+RestorePageTableAbove4G (\r
+  UINT64 *PageTable,\r
+  UINT64 PFAddress,\r
+  UINTN CpuIndex,\r
+  UINTN ErrorCode,\r
+  BOOLEAN *IsValidPFAddress\r
+  );\r
+\r
+/**\r
+  Create SMM page table for S3 path.\r
+\r
+**/\r
+VOID\r
+InitSmmS3Cr3 (\r
+  VOID\r
+  );\r
+\r
+/**\r
+  Allocate pages for creating 4KB-page based on 2MB-page when page fault happens.\r
+  32-bit firmware does not need it.\r
+\r
+**/\r
+VOID\r
+InitPagesForPFHandler (\r
+  VOID\r
+  );\r
+\r
+#endif // _SMM_PROFILE_ARCH_H_\r