--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>\r
+# This program and the accompanying materials\r
+# are licensed and made available under the terms and conditions of the BSD License\r
+# which accompanies this distribution. The full text of the license may be found at\r
+# http://opensource.org/licenses/bsd-license.php.\r
+#\r
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+#\r
+# Module Name:\r
+#\r
+# SmiEntry.S\r
+#\r
+# Abstract:\r
+#\r
+# Code template of the SMI handler for a particular processor\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+ASM_GLOBAL ASM_PFX(gcStmSmiHandlerTemplate)\r
+ASM_GLOBAL ASM_PFX(gcStmSmiHandlerSize)\r
+ASM_GLOBAL ASM_PFX(gcStmSmiHandlerOffset)\r
+ASM_GLOBAL ASM_PFX(gStmSmiCr3)\r
+ASM_GLOBAL ASM_PFX(gStmSmiStack)\r
+ASM_GLOBAL ASM_PFX(gStmSmbase)\r
+ASM_GLOBAL ASM_PFX(gStmXdSupported)\r
+ASM_GLOBAL ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))\r
+ASM_GLOBAL ASM_PFX(gStmSmiHandlerIdtr)\r
+\r
+.equ MSR_IA32_MISC_ENABLE, 0x1A0\r
+.equ MSR_EFER, 0xc0000080\r
+.equ MSR_EFER_XD, 0x800\r
+\r
+#\r
+# Constants relating to TXT_PROCESSOR_SMM_DESCRIPTOR\r
+#\r
+.equ DSC_OFFSET, 0xfb00\r
+.equ DSC_GDTPTR, 0x48\r
+.equ DSC_GDTSIZ, 0x50\r
+.equ DSC_CS, 0x14\r
+.equ DSC_DS, 0x16\r
+.equ DSC_SS, 0x18\r
+.equ DSC_OTHERSEG, 0x1A\r
+\r
+.equ PROTECT_MODE_CS, 0x08\r
+.equ PROTECT_MODE_DS, 0x20\r
+.equ TSS_SEGMENT, 0x40\r
+\r
+ .text\r
+# Template copied to each CPU's SMBASE+0x8000; entered in 16-bit real mode on\r
+# SMI. NOTE: assembled as 32-bit code but executed in 16-bit mode, so the\r
+# %edi-based operands actually encode the 16-bit [bx] forms shown in the\r
+# trailing comments, and the raw .byte/.word sequences emit 16-bit opcodes.\r
+ASM_PFX(gcStmSmiHandlerTemplate):\r
+\r
+_StmSmiEntryPoint:\r
+ .byte 0xbb # mov bx, imm16\r
+ .word _StmGdtDesc - _StmSmiEntryPoint + 0x8000\r
+ .byte 0x2e,0xa1 # mov ax, cs:[offset16]\r
+ .word DSC_OFFSET + DSC_GDTSIZ\r
+ decl %eax # (runs as dec ax) GDT limit = size - 1\r
+ movl %eax, %cs:(%edi) # mov cs:[bx], ax\r
+ .byte 0x66,0x2e,0xa1 # mov eax, cs:[offset16]\r
+ .word DSC_OFFSET + DSC_GDTPTR\r
+ movw %ax, %cs:2(%edi)\r
+ movw %ax, %bp # ebp = GDT base\r
+ .byte 0x66\r
+ lgdt %cs:(%edi)\r
+# Patch ProtectedMode Segment\r
+ .byte 0xb8 # mov ax, imm16\r
+ .word PROTECT_MODE_CS # set AX for segment directly\r
+ movl %eax, %cs:-2(%edi) # mov cs:[bx - 2], ax\r
+# Patch ProtectedMode entry\r
+ .byte 0x66, 0xbf # mov edi, SMBASE\r
+# imm32 below is presumably patched with this CPU's SMBASE before first SMI -- confirm in C init code\r
+ASM_PFX(gStmSmbase): .space 4\r
+ .byte 0x67\r
+ lea ((Start32bit - _StmSmiEntryPoint) + 0x8000)(%edi), %ax\r
+ movw %ax, %cs:-6(%edi) # patch far-jump offset below with 32-bit entry\r
+ movl %cr0, %ebx\r
+ .byte 0x66\r
+ andl $0x9ffafff3, %ebx # clear PG, CD, NW, TS, EM\r
+ .byte 0x66\r
+ orl $0x23, %ebx # set PE, NE, MP\r
+ movl %ebx, %cr0\r
+ .byte 0x66,0xea # far jmp; offset/segment patched above\r
+ .space 4\r
+ .space 2\r
+_StmGdtDesc: .space 4\r
+ .space 2\r
+\r
+# First 32-bit protected-mode code: load flat data segments, switch to the\r
+# per-CPU SMI stack (imm32 patched at gStmSmiStack), and load the SMI IDT.\r
+Start32bit:\r
+ movw $PROTECT_MODE_DS, %ax\r
+ movl %eax,%ds\r
+ movl %eax,%es\r
+ movl %eax,%fs\r
+ movl %eax,%gs\r
+ movl %eax,%ss\r
+ .byte 0xbc # mov esp, imm32\r
+ASM_PFX(gStmSmiStack): .space 4\r
+ movl $ASM_PFX(gStmSmiHandlerIdtr), %eax\r
+ lidt (%eax)\r
+ jmp ProtFlatMode\r
+\r
+# Load CR3 (imm32 patched at gStmSmiCr3), then probe CPUID.1:EDX and enable\r
+# only the CR4 features this CPU reports (DE/PAE/MCE/FXSR/SSE). Optionally\r
+# loads the TSS when the stack-guard feature PCD is set.\r
+ProtFlatMode:\r
+ .byte 0xb8 # mov eax, imm32\r
+ASM_PFX(gStmSmiCr3): .space 4\r
+ movl %eax, %cr3\r
+#\r
+# Need to test for CR4 specific bit support\r
+#\r
+ movl $1, %eax\r
+ cpuid # use CPUID to determine if specific CR4 bits are supported\r
+ xorl %eax, %eax # Clear EAX\r
+ testl $BIT2, %edx # Check for DE capabilities\r
+ jz L8\r
+ orl $BIT3, %eax # CR4.DE\r
+L8:\r
+ testl $BIT6, %edx # Check for PAE capabilities\r
+ jz L9\r
+ orl $BIT5, %eax # CR4.PAE\r
+L9:\r
+ testl $BIT7, %edx # Check for MCE capabilities\r
+ jz L10\r
+ orl $BIT6, %eax # CR4.MCE\r
+L10:\r
+ testl $BIT24, %edx # Check for FXSR capabilities\r
+ jz L11\r
+ orl $BIT9, %eax # CR4.OSFXSR\r
+L11:\r
+ testl $BIT25, %edx # Check for SSE capabilities\r
+ jz L12\r
+ orl $BIT10, %eax # CR4.OSXMMEXCPT\r
+L12: # as cr4.PGE is not set here, refresh cr3\r
+ movl %eax, %cr4 # in PreModifyMtrrs() to flush TLB.\r
+\r
+ cmpb $0, ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))\r
+ jz L5\r
+# Load TSS\r
+ movb $0x89, (TSS_SEGMENT + 5)(%ebp) # clear busy flag\r
+ movl $TSS_SEGMENT, %eax\r
+ ltrw %ax\r
+L5:\r
+\r
+# enable NXE if supported\r
+ .byte 0xb0 # mov al, imm8\r
+# gStmXdSupported is patched to 0 when the CPU lacks XD support.\r
+ASM_PFX(gStmXdSupported): .byte 1\r
+ cmpb $0, %al\r
+ jz SkipXd\r
+#\r
+# Check XD disable bit\r
+#\r
+ movl $MSR_IA32_MISC_ENABLE, %ecx\r
+ rdmsr\r
+ pushl %edx # save MSR_IA32_MISC_ENABLE[63-32]\r
+ testl $BIT2, %edx # MSR_IA32_MISC_ENABLE[34]\r
+ jz L13\r
+ andw $0x0FFFB, %dx # clear XD Disable bit if it is set\r
+ wrmsr\r
+L13:\r
+ movl $MSR_EFER, %ecx\r
+ rdmsr\r
+ orw $MSR_EFER_XD,%ax # enable NXE\r
+ wrmsr\r
+ jmp XdDone\r
+SkipXd:\r
+ subl $4, %esp # keep same stack depth as the push on the XD path,\r
+ # so CommonHandler finds CpuIndex at 4(%esp)\r
+XdDone:\r
+\r
+ movl %cr0, %ebx\r
+ orl $0x080010023, %ebx # enable paging + WP + NE + MP + PE\r
+ movl %ebx, %cr0\r
+# Load data segments from the TXT_PROCESSOR_SMM_DESCRIPTOR at SMBASE+DSC_OFFSET.\r
+ leal DSC_OFFSET(%edi),%ebx\r
+ movw DSC_DS(%ebx),%ax\r
+ movl %eax, %ds\r
+ movw DSC_OTHERSEG(%ebx),%ax\r
+ movl %eax, %es\r
+ movl %eax, %fs\r
+ movl %eax, %gs\r
+ movw DSC_SS(%ebx),%ax\r
+ movl %eax, %ss\r
+\r
+# Common C-handler dispatch: calls CpuSmmDebugEntry, SmiRendezvous and\r
+# CpuSmmDebugExit with CpuIndex, restores the saved XD-disable state, and RSMs.\r
+# Calls go through a register so they are absolute, not IP-relative --\r
+# presumably because this template runs from its SMRAM copy (confirm).\r
+CommonHandler:\r
+ movl 4(%esp), %ebx # CPU Index (same slot on both XD paths above)\r
+\r
+ pushl %ebx\r
+ movl $ASM_PFX(CpuSmmDebugEntry), %eax\r
+ call *%eax\r
+ addl $4, %esp\r
+\r
+ pushl %ebx\r
+ movl $ASM_PFX(SmiRendezvous), %eax\r
+ call *%eax\r
+ addl $4, %esp\r
+\r
+ pushl %ebx\r
+ movl $ASM_PFX(CpuSmmDebugExit), %eax\r
+ call *%eax\r
+ addl $4, %esp\r
+\r
+ movl $ASM_PFX(gStmXdSupported), %eax\r
+ movb (%eax), %al\r
+ cmpb $0, %al\r
+ jz L16\r
+ popl %edx # get saved MSR_IA32_MISC_ENABLE[63-32]\r
+ testl $BIT2, %edx\r
+ jz L16\r
+ movl $MSR_IA32_MISC_ENABLE, %ecx\r
+ rdmsr\r
+ orw $BIT2, %dx # set XD Disable bit if it was set before entering into SMM\r
+ wrmsr\r
+\r
+L16:\r
+ rsm\r
+\r
+# Entry used when an STM is active: the STM delivers the SMI directly here\r
+# (gcStmSmiHandlerOffset), so the real-mode setup above is skipped and the\r
+# IDT/CR0/CR4 setup must be repeated before falling into CommonHandler.\r
+_StmSmiHandler:\r
+#\r
+# Check XD disable bit\r
+#\r
+ xorl %esi, %esi\r
+ movl $ASM_PFX(gStmXdSupported), %eax\r
+ movb (%eax), %al\r
+ cmpb $0, %al\r
+ jz StmXdDone\r
+ movl $MSR_IA32_MISC_ENABLE, %ecx\r
+ rdmsr\r
+ movl %edx, %esi # save MSR_IA32_MISC_ENABLE[63-32]\r
+ testl $BIT2, %edx # MSR_IA32_MISC_ENABLE[34]\r
+ jz L14\r
+ andw $0x0FFFB, %dx # clear XD Disable bit if it is set\r
+ wrmsr\r
+L14:\r
+ movl $MSR_EFER, %ecx\r
+ rdmsr\r
+ orw $MSR_EFER_XD,%ax # enable NXE\r
+ wrmsr\r
+StmXdDone:\r
+ push %esi # saved MISC_ENABLE high dword; popped in CommonHandler\r
+\r
+ # below step is needed, because STM does not run above code.\r
+ # we have to run below code to set IDT/CR0/CR4\r
+ movl $ASM_PFX(gStmSmiHandlerIdtr), %eax\r
+ lidt (%eax)\r
+\r
+ movl %cr0, %eax\r
+ orl $0x80010023, %eax # enable paging + WP + NE + MP + PE\r
+ movl %eax, %cr0\r
+#\r
+# Need to test for CR4 specific bit support\r
+#\r
+ movl $1, %eax\r
+ cpuid # use CPUID to determine if specific CR4 bits are supported\r
+ movl %cr4, %eax # init EAX\r
+ testl $BIT2, %edx # Check for DE capabilities\r
+ jz L28\r
+ orl $BIT3, %eax\r
+L28:\r
+ testl $BIT6, %edx # Check for PAE capabilities\r
+ jz L29\r
+ orl $BIT5, %eax\r
+L29:\r
+ testl $BIT7, %edx # Check for MCE capabilities\r
+ jz L30\r
+ orl $BIT6, %eax\r
+L30:\r
+ testl $BIT24, %edx # Check for FXSR capabilities\r
+ jz L31\r
+ orl $BIT9, %eax\r
+L31:\r
+ testl $BIT25, %edx # Check for SSE capabilities\r
+ jz L32\r
+ orl $BIT10, %eax\r
+L32: # as cr4.PGE is not set here, refresh cr3\r
+ movl %eax, %cr4 # in PreModifyMtrrs() to flush TLB.\r
+ # STM init finish\r
+ jmp CommonHandler\r
+\r
+\r
+# Byte size of the whole template and offset of the STM entry within it.\r
+ASM_PFX(gcStmSmiHandlerSize) : .word . - _StmSmiEntryPoint\r
+ASM_PFX(gcStmSmiHandlerOffset): .word _StmSmiHandler - _StmSmiEntryPoint\r
+\r
--- /dev/null
+;------------------------------------------------------------------------------ ;\r
+; Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>\r
+; This program and the accompanying materials\r
+; are licensed and made available under the terms and conditions of the BSD License\r
+; which accompanies this distribution. The full text of the license may be found at\r
+; http://opensource.org/licenses/bsd-license.php.\r
+;\r
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+;\r
+; Module Name:\r
+;\r
+; SmiEntry.asm\r
+;\r
+; Abstract:\r
+;\r
+; Code template of the SMI handler for a particular processor\r
+;\r
+;-------------------------------------------------------------------------------\r
+\r
+ .686p\r
+ .model flat,C\r
+ .xmm\r
+\r
+MSR_IA32_MISC_ENABLE EQU 1A0h\r
+MSR_EFER EQU 0c0000080h\r
+MSR_EFER_XD EQU 0800h\r
+\r
+;\r
+; Constants relating to TXT_PROCESSOR_SMM_DESCRIPTOR\r
+;\r
+DSC_OFFSET EQU 0fb00h\r
+DSC_GDTPTR EQU 48h\r
+DSC_GDTSIZ EQU 50h\r
+DSC_CS EQU 14h\r
+DSC_DS EQU 16h\r
+DSC_SS EQU 18h\r
+DSC_OTHERSEG EQU 1Ah\r
+\r
+PROTECT_MODE_CS EQU 08h\r
+PROTECT_MODE_DS EQU 20h\r
+TSS_SEGMENT EQU 40h\r
+\r
+SmiRendezvous PROTO C\r
+CpuSmmDebugEntry PROTO C\r
+CpuSmmDebugExit PROTO C\r
+\r
+EXTERNDEF gcStmSmiHandlerTemplate:BYTE\r
+EXTERNDEF gcStmSmiHandlerSize:WORD\r
+EXTERNDEF gcStmSmiHandlerOffset:WORD\r
+EXTERNDEF gStmSmiCr3:DWORD\r
+EXTERNDEF gStmSmiStack:DWORD\r
+EXTERNDEF gStmSmbase:DWORD\r
+EXTERNDEF gStmXdSupported:BYTE\r
+EXTERNDEF FeaturePcdGet (PcdCpuSmmStackGuard):BYTE\r
+EXTERNDEF gStmSmiHandlerIdtr:FWORD\r
+\r
+ .code\r
+\r
+; Template copied to each CPU's SMBASE+0x8000; entered in 16-bit real mode on\r
+; SMI. Assembled as 32-bit code but executed in 16-bit mode, so the [edi]\r
+; operands actually encode the 16-bit [bx] forms shown in the comments.\r
+gcStmSmiHandlerTemplate LABEL BYTE\r
+\r
+_StmSmiEntryPoint:\r
+ DB 0bbh ; mov bx, imm16\r
+ DW offset _StmGdtDesc - _StmSmiEntryPoint + 8000h\r
+ DB 2eh, 0a1h ; mov ax, cs:[offset16]\r
+ DW DSC_OFFSET + DSC_GDTSIZ\r
+ dec eax ; (runs as dec ax) GDT limit = size - 1\r
+ mov cs:[edi], eax ; mov cs:[bx], ax\r
+ DB 66h, 2eh, 0a1h ; mov eax, cs:[offset16]\r
+ DW DSC_OFFSET + DSC_GDTPTR\r
+ mov cs:[edi + 2], ax ; mov cs:[bx + 2], eax\r
+ mov bp, ax ; ebp = GDT base\r
+ DB 66h\r
+ lgdt fword ptr cs:[edi] ; lgdt fword ptr cs:[bx]\r
+; Patch ProtectedMode Segment\r
+ DB 0b8h ; mov ax, imm16\r
+ DW PROTECT_MODE_CS ; set AX for segment directly\r
+ mov cs:[edi - 2], eax ; mov cs:[bx - 2], ax\r
+; Patch ProtectedMode entry\r
+ DB 66h, 0bfh ; mov edi, SMBASE\r
+; imm32 below is presumably patched with this CPU's SMBASE before first SMI\r
+gStmSmbase DD ?\r
+ DB 67h\r
+ lea ax, [edi + (@32bit - _StmSmiEntryPoint) + 8000h]\r
+ mov cs:[edi - 6], ax ; mov cs:[bx - 6], eax - patch far-jump offset\r
+ mov ebx, cr0\r
+ DB 66h\r
+ and ebx, 9ffafff3h ; clear PG, CD, NW, TS, EM\r
+ DB 66h\r
+ or ebx, 23h ; set PE, NE, MP\r
+ mov cr0, ebx\r
+ DB 66h, 0eah ; far jmp; offset/segment patched above\r
+ DD ?\r
+ DW ?\r
+_StmGdtDesc FWORD ?\r
+\r
+; First 32-bit protected-mode code: load flat data segments, switch to the\r
+; per-CPU SMI stack (imm32 patched at gStmSmiStack), and load the SMI IDT.\r
+@32bit:\r
+ mov ax, PROTECT_MODE_DS\r
+ mov ds, ax\r
+ mov es, ax\r
+ mov fs, ax\r
+ mov gs, ax\r
+ mov ss, ax\r
+ DB 0bch ; mov esp, imm32\r
+gStmSmiStack DD ?\r
+ mov eax, offset gStmSmiHandlerIdtr\r
+ lidt fword ptr [eax]\r
+ jmp ProtFlatMode\r
+\r
+; Load CR3 (imm32 patched at gStmSmiCr3), then probe CPUID.1:EDX and enable\r
+; only the CR4 features this CPU reports. Optionally loads the TSS when the\r
+; stack-guard feature PCD is set.\r
+ProtFlatMode:\r
+ DB 0b8h ; mov eax, imm32\r
+gStmSmiCr3 DD ?\r
+ mov cr3, eax\r
+;\r
+; Need to test for CR4 specific bit support\r
+;\r
+ mov eax, 1\r
+ cpuid ; use CPUID to determine if specific CR4 bits are supported\r
+ xor eax, eax ; Clear EAX\r
+ test edx, BIT2 ; Check for DE capabilities\r
+ jz @f\r
+ or eax, BIT3 ; CR4.DE\r
+@@:\r
+ test edx, BIT6 ; Check for PAE capabilities\r
+ jz @f\r
+ or eax, BIT5 ; CR4.PAE\r
+@@:\r
+ test edx, BIT7 ; Check for MCE capabilities\r
+ jz @f\r
+ or eax, BIT6 ; CR4.MCE\r
+@@:\r
+ test edx, BIT24 ; Check for FXSR capabilities\r
+ jz @f\r
+ or eax, BIT9 ; CR4.OSFXSR\r
+@@:\r
+ test edx, BIT25 ; Check for SSE capabilities\r
+ jz @f\r
+ or eax, BIT10 ; CR4.OSXMMEXCPT\r
+@@: ; as cr4.PGE is not set here, refresh cr3\r
+ mov cr4, eax ; in PreModifyMtrrs() to flush TLB.\r
+\r
+ cmp FeaturePcdGet (PcdCpuSmmStackGuard), 0\r
+ jz @F\r
+; Load TSS\r
+ mov byte ptr [ebp + TSS_SEGMENT + 5], 89h ; clear busy flag\r
+ mov eax, TSS_SEGMENT\r
+ ltr ax\r
+@@:\r
+\r
+; enable NXE if supported\r
+ DB 0b0h ; mov al, imm8\r
+; gStmXdSupported is patched to 0 when the CPU lacks XD support.\r
+gStmXdSupported DB 1\r
+ cmp al, 0\r
+ jz @SkipXd\r
+;\r
+; Check XD disable bit\r
+;\r
+ mov ecx, MSR_IA32_MISC_ENABLE\r
+ rdmsr\r
+ push edx ; save MSR_IA32_MISC_ENABLE[63-32]\r
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]\r
+ jz @f\r
+ and dx, 0FFFBh ; clear XD Disable bit if it is set\r
+ wrmsr\r
+@@:\r
+ mov ecx, MSR_EFER\r
+ rdmsr\r
+ or ax, MSR_EFER_XD ; enable NXE\r
+ wrmsr\r
+ jmp @XdDone\r
+@SkipXd:\r
+ sub esp, 4 ; keep same stack depth as the push on the XD path,\r
+ ; so CommonHandler finds CpuIndex at [esp + 4]\r
+@XdDone:\r
+\r
+ mov ebx, cr0\r
+ or ebx, 080010023h ; enable paging + WP + NE + MP + PE\r
+ mov cr0, ebx\r
+; Load data segments from the TXT_PROCESSOR_SMM_DESCRIPTOR at SMBASE+DSC_OFFSET.\r
+ lea ebx, [edi + DSC_OFFSET]\r
+ mov ax, [ebx + DSC_DS]\r
+ mov ds, eax\r
+ mov ax, [ebx + DSC_OTHERSEG]\r
+ mov es, eax\r
+ mov fs, eax\r
+ mov gs, eax\r
+ mov ax, [ebx + DSC_SS]\r
+ mov ss, eax\r
+\r
+; Common C-handler dispatch: calls CpuSmmDebugEntry, SmiRendezvous and\r
+; CpuSmmDebugExit with CpuIndex, restores the saved XD-disable state, and RSMs.\r
+; Calls go through a register so they are absolute, not IP-relative.\r
+CommonHandler:\r
+ mov ebx, [esp + 4] ; CPU Index\r
+ push ebx\r
+ mov eax, CpuSmmDebugEntry\r
+ call eax\r
+ add esp, 4\r
+\r
+ push ebx\r
+ mov eax, SmiRendezvous\r
+ call eax\r
+ add esp, 4\r
+\r
+ push ebx\r
+ mov eax, CpuSmmDebugExit\r
+ call eax\r
+ add esp, 4\r
+\r
+ mov eax, gStmXdSupported\r
+ mov al, [eax]\r
+ cmp al, 0\r
+ jz @f\r
+ pop edx ; get saved MSR_IA32_MISC_ENABLE[63-32]\r
+ test edx, BIT2\r
+ jz @f\r
+ mov ecx, MSR_IA32_MISC_ENABLE\r
+ rdmsr\r
+ or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM\r
+ wrmsr\r
+\r
+@@:\r
+ rsm\r
+\r
+; Entry used when an STM is active: the STM delivers the SMI directly here\r
+; (gcStmSmiHandlerOffset), so the real-mode setup above is skipped and the\r
+; IDT/CR0/CR4 setup must be repeated before falling into CommonHandler.\r
+_StmSmiHandler:\r
+;\r
+; Check XD disable bit\r
+;\r
+ xor esi, esi\r
+ mov eax, gStmXdSupported\r
+ mov al, [eax]\r
+ cmp al, 0\r
+ jz @StmXdDone\r
+ mov ecx, MSR_IA32_MISC_ENABLE\r
+ rdmsr\r
+ mov esi, edx ; save MSR_IA32_MISC_ENABLE[63-32]\r
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]\r
+ jz @f\r
+ and dx, 0FFFBh ; clear XD Disable bit if it is set\r
+ wrmsr\r
+@@:\r
+ mov ecx, MSR_EFER\r
+ rdmsr\r
+ or ax, MSR_EFER_XD ; enable NXE\r
+ wrmsr\r
+@StmXdDone:\r
+ push esi ; saved MISC_ENABLE high dword; popped in CommonHandler\r
+\r
+ ; below step is needed, because STM does not run above code.\r
+ ; we have to run below code to set IDT/CR0/CR4\r
+ mov eax, offset gStmSmiHandlerIdtr\r
+ lidt fword ptr [eax]\r
+\r
+\r
+ mov eax, cr0\r
+ or eax, 80010023h ; enable paging + WP + NE + MP + PE\r
+ mov cr0, eax\r
+;\r
+; Need to test for CR4 specific bit support\r
+;\r
+ mov eax, 1\r
+ cpuid ; use CPUID to determine if specific CR4 bits are supported\r
+ mov eax, cr4 ; init EAX\r
+ test edx, BIT2 ; Check for DE capabilities\r
+ jz @f\r
+ or eax, BIT3\r
+@@:\r
+ test edx, BIT6 ; Check for PAE capabilities\r
+ jz @f\r
+ or eax, BIT5\r
+@@:\r
+ test edx, BIT7 ; Check for MCE capabilities\r
+ jz @f\r
+ or eax, BIT6\r
+@@:\r
+ test edx, BIT24 ; Check for FXSR capabilities\r
+ jz @f\r
+ or eax, BIT9\r
+@@:\r
+ test edx, BIT25 ; Check for SSE capabilities\r
+ jz @f\r
+ or eax, BIT10\r
+@@: ; as cr4.PGE is not set here, refresh cr3\r
+ mov cr4, eax ; in PreModifyMtrrs() to flush TLB.\r
+ ; STM init finish\r
+ jmp CommonHandler\r
+\r
+; Byte size of the whole template and offset of the STM entry within it.\r
+gcStmSmiHandlerSize DW $ - _StmSmiEntryPoint\r
+gcStmSmiHandlerOffset DW _StmSmiHandler - _StmSmiEntryPoint\r
+\r
+ END\r
--- /dev/null
+;------------------------------------------------------------------------------ ;\r
+; Copyright (c) 2016, Intel Corporation. All rights reserved.<BR>\r
+; This program and the accompanying materials\r
+; are licensed and made available under the terms and conditions of the BSD License\r
+; which accompanies this distribution. The full text of the license may be found at\r
+; http://opensource.org/licenses/bsd-license.php.\r
+;\r
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+;\r
+; Module Name:\r
+;\r
+; SmiEntry.nasm\r
+;\r
+; Abstract:\r
+;\r
+; Code template of the SMI handler for a particular processor\r
+;\r
+;-------------------------------------------------------------------------------\r
+\r
+%define MSR_IA32_MISC_ENABLE 0x1A0\r
+%define MSR_EFER 0xc0000080\r
+%define MSR_EFER_XD 0x800\r
+\r
+;\r
+; Constants relating to TXT_PROCESSOR_SMM_DESCRIPTOR\r
+;\r
+%define DSC_OFFSET 0xfb00\r
+%define DSC_GDTPTR 0x48\r
+%define DSC_GDTSIZ 0x50\r
+%define DSC_CS 0x14\r
+%define DSC_DS 0x16\r
+%define DSC_SS 0x18\r
+%define DSC_OTHERSEG 0x1a\r
+\r
+%define PROTECT_MODE_CS 0x8\r
+%define PROTECT_MODE_DS 0x20\r
+%define TSS_SEGMENT 0x40\r
+\r
+extern ASM_PFX(SmiRendezvous)\r
+extern ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))\r
+extern ASM_PFX(CpuSmmDebugEntry)\r
+extern ASM_PFX(CpuSmmDebugExit)\r
+\r
+global ASM_PFX(gcStmSmiHandlerTemplate)\r
+global ASM_PFX(gcStmSmiHandlerSize)\r
+global ASM_PFX(gcStmSmiHandlerOffset)\r
+global ASM_PFX(gStmSmiCr3)\r
+global ASM_PFX(gStmSmiStack)\r
+global ASM_PFX(gStmSmbase)\r
+global ASM_PFX(gStmXdSupported)\r
+extern ASM_PFX(gStmSmiHandlerIdtr)\r
+\r
+ SECTION .text\r
+\r
+; Template copied to each CPU's SMBASE+0x8000; entered in 16-bit real mode on\r
+; SMI (hence BITS 16). Patches its own far jump and descriptors in place.\r
+BITS 16\r
+ASM_PFX(gcStmSmiHandlerTemplate):\r
+_StmSmiEntryPoint:\r
+ mov bx, _StmGdtDesc - _StmSmiEntryPoint + 0x8000\r
+ mov ax,[cs:DSC_OFFSET + DSC_GDTSIZ]\r
+ dec ax ; GDT limit = size - 1\r
+ mov [cs:bx], ax\r
+ mov eax, [cs:DSC_OFFSET + DSC_GDTPTR]\r
+ mov [cs:bx + 2], eax\r
+ mov ebp, eax ; ebp = GDT base\r
+o32 lgdt [cs:bx] ; lgdt fword ptr cs:[bx]\r
+ mov ax, PROTECT_MODE_CS\r
+ mov [cs:bx-0x2],ax ; patch far-jump segment below\r
+ DB 0x66, 0xbf ; mov edi, SMBASE\r
+; imm32 below is presumably patched with this CPU's SMBASE before first SMI\r
+ASM_PFX(gStmSmbase): DD 0\r
+ lea eax, [edi + (@32bit - _StmSmiEntryPoint) + 0x8000]\r
+ mov [cs:bx-0x6],eax ; patch far-jump offset below\r
+ mov ebx, cr0\r
+ and ebx, 0x9ffafff3 ; clear PG, CD, NW, TS, EM\r
+ or ebx, 0x23 ; set PE, NE, MP\r
+ mov cr0, ebx\r
+ jmp dword 0x0:0x0 ; offset/segment patched above\r
+_StmGdtDesc:\r
+ DW 0\r
+ DD 0\r
+\r
+; First 32-bit protected-mode code: load flat data segments, switch to the\r
+; per-CPU SMI stack (imm32 patched at gStmSmiStack), and load the SMI IDT.\r
+BITS 32\r
+@32bit:\r
+ mov ax, PROTECT_MODE_DS\r
+o16 mov ds, ax\r
+o16 mov es, ax\r
+o16 mov fs, ax\r
+o16 mov gs, ax\r
+o16 mov ss, ax\r
+ DB 0xbc ; mov esp, imm32\r
+ASM_PFX(gStmSmiStack): DD 0\r
+ mov eax, ASM_PFX(gStmSmiHandlerIdtr)\r
+ lidt [eax]\r
+ jmp ProtFlatMode\r
+\r
+; Load CR3 (imm32 patched at gStmSmiCr3), then probe CPUID.1:EDX and enable\r
+; only the CR4 features this CPU reports. Optionally loads the TSS when the\r
+; stack-guard feature PCD is set.\r
+ProtFlatMode:\r
+ DB 0xb8 ; mov eax, imm32\r
+ASM_PFX(gStmSmiCr3): DD 0\r
+ mov cr3, eax\r
+;\r
+; Need to test for CR4 specific bit support\r
+;\r
+ mov eax, 1\r
+ cpuid ; use CPUID to determine if specific CR4 bits are supported\r
+ xor eax, eax ; Clear EAX\r
+ test edx, BIT2 ; Check for DE capabilities\r
+ jz .0\r
+ or eax, BIT3 ; CR4.DE\r
+.0:\r
+ test edx, BIT6 ; Check for PAE capabilities\r
+ jz .1\r
+ or eax, BIT5 ; CR4.PAE\r
+.1:\r
+ test edx, BIT7 ; Check for MCE capabilities\r
+ jz .2\r
+ or eax, BIT6 ; CR4.MCE\r
+.2:\r
+ test edx, BIT24 ; Check for FXSR capabilities\r
+ jz .3\r
+ or eax, BIT9 ; CR4.OSFXSR\r
+.3:\r
+ test edx, BIT25 ; Check for SSE capabilities\r
+ jz .4\r
+ or eax, BIT10 ; CR4.OSXMMEXCPT\r
+.4: ; as cr4.PGE is not set here, refresh cr3\r
+ mov cr4, eax ; in PreModifyMtrrs() to flush TLB.\r
+\r
+ cmp byte [dword ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))], 0\r
+ jz .6\r
+; Load TSS\r
+ mov byte [ebp + TSS_SEGMENT + 5], 0x89 ; clear busy flag\r
+ mov eax, TSS_SEGMENT\r
+ ltr ax\r
+.6:\r
+\r
+; enable NXE if supported\r
+ DB 0b0h ; mov al, imm8\r
+; gStmXdSupported is patched to 0 when the CPU lacks XD support.\r
+ASM_PFX(gStmXdSupported): DB 1\r
+ cmp al, 0\r
+ jz @SkipXd\r
+;\r
+; Check XD disable bit\r
+;\r
+ mov ecx, MSR_IA32_MISC_ENABLE\r
+ rdmsr\r
+ push edx ; save MSR_IA32_MISC_ENABLE[63-32]\r
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]\r
+ jz .5\r
+ and dx, 0xFFFB ; clear XD Disable bit if it is set\r
+ wrmsr\r
+.5:\r
+ mov ecx, MSR_EFER\r
+ rdmsr\r
+ or ax, MSR_EFER_XD ; enable NXE\r
+ wrmsr\r
+ jmp @XdDone\r
+@SkipXd:\r
+ sub esp, 4 ; keep same stack depth as the push on the XD path,\r
+ ; so CommonHandler finds CpuIndex at [esp + 4]\r
+@XdDone:\r
+\r
+ mov ebx, cr0\r
+ or ebx, 0x80010023 ; enable paging + WP + NE + MP + PE\r
+ mov cr0, ebx\r
+; Load data segments from the TXT_PROCESSOR_SMM_DESCRIPTOR at SMBASE+DSC_OFFSET.\r
+ lea ebx, [edi + DSC_OFFSET]\r
+ mov ax, [ebx + DSC_DS]\r
+ mov ds, eax\r
+ mov ax, [ebx + DSC_OTHERSEG]\r
+ mov es, eax\r
+ mov fs, eax\r
+ mov gs, eax\r
+ mov ax, [ebx + DSC_SS]\r
+ mov ss, eax\r
+\r
+; Common C-handler dispatch: calls CpuSmmDebugEntry, SmiRendezvous and\r
+; CpuSmmDebugExit with CpuIndex, restores the saved XD-disable state, and RSMs.\r
+; Calls go through a register so they are absolute, not IP-relative.\r
+CommonHandler:\r
+ mov ebx, [esp + 4] ; CPU Index\r
+ push ebx\r
+ mov eax, ASM_PFX(CpuSmmDebugEntry)\r
+ call eax\r
+ add esp, 4\r
+\r
+ push ebx\r
+ mov eax, ASM_PFX(SmiRendezvous)\r
+ call eax\r
+ add esp, 4\r
+\r
+ push ebx\r
+ mov eax, ASM_PFX(CpuSmmDebugExit)\r
+ call eax\r
+ add esp, 4\r
+\r
+ mov eax, ASM_PFX(gStmXdSupported)\r
+ mov al, [eax]\r
+ cmp al, 0\r
+ jz .7\r
+ pop edx ; get saved MSR_IA32_MISC_ENABLE[63-32]\r
+ test edx, BIT2\r
+ jz .7\r
+ mov ecx, MSR_IA32_MISC_ENABLE\r
+ rdmsr\r
+ or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM\r
+ wrmsr\r
+\r
+.7:\r
+ rsm\r
+\r
+\r
+; Entry used when an STM is active: the STM delivers the SMI directly here\r
+; (gcStmSmiHandlerOffset), so the real-mode setup above is skipped and the\r
+; IDT/CR0/CR4 setup must be repeated before falling into CommonHandler.\r
+_StmSmiHandler:\r
+;\r
+; Check XD disable bit\r
+;\r
+ xor esi, esi\r
+ mov eax, ASM_PFX(gStmXdSupported)\r
+ mov al, [eax]\r
+ cmp al, 0\r
+ jz @StmXdDone\r
+ mov ecx, MSR_IA32_MISC_ENABLE\r
+ rdmsr\r
+ mov esi, edx ; save MSR_IA32_MISC_ENABLE[63-32]\r
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]\r
+ jz .5\r
+ and dx, 0xFFFB ; clear XD Disable bit if it is set\r
+ wrmsr\r
+.5:\r
+ mov ecx, MSR_EFER\r
+ rdmsr\r
+ or ax, MSR_EFER_XD ; enable NXE\r
+ wrmsr\r
+@StmXdDone:\r
+ push esi ; saved MISC_ENABLE high dword; popped in CommonHandler\r
+\r
+ ; below step is needed, because STM does not run above code.\r
+ ; we have to run below code to set IDT/CR0/CR4\r
+ mov eax, ASM_PFX(gStmSmiHandlerIdtr)\r
+ lidt [eax]\r
+\r
+ mov eax, cr0\r
+ or eax, 0x80010023 ; enable paging + WP + NE + MP + PE\r
+ mov cr0, eax\r
+;\r
+; Need to test for CR4 specific bit support\r
+;\r
+ mov eax, 1\r
+ cpuid ; use CPUID to determine if specific CR4 bits are supported\r
+ mov eax, cr4 ; init EAX\r
+ test edx, BIT2 ; Check for DE capabilities\r
+ jz .0\r
+ or eax, BIT3\r
+.0:\r
+ test edx, BIT6 ; Check for PAE capabilities\r
+ jz .1\r
+ or eax, BIT5\r
+.1:\r
+ test edx, BIT7 ; Check for MCE capabilities\r
+ jz .2\r
+ or eax, BIT6\r
+.2:\r
+ test edx, BIT24 ; Check for FXSR capabilities\r
+ jz .3\r
+ or eax, BIT9\r
+.3:\r
+ test edx, BIT25 ; Check for SSE capabilities\r
+ jz .4\r
+ or eax, BIT10\r
+.4: ; as cr4.PGE is not set here, refresh cr3\r
+ mov cr4, eax ; in PreModifyMtrrs() to flush TLB.\r
+ ; STM init finish\r
+ jmp CommonHandler\r
+\r
+; Byte size of the whole template and offset of the STM entry within it.\r
+ASM_PFX(gcStmSmiHandlerSize) : DW $ - _StmSmiEntryPoint\r
+ASM_PFX(gcStmSmiHandlerOffset) : DW _StmSmiHandler - _StmSmiEntryPoint\r
+\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>\r
+# This program and the accompanying materials\r
+# are licensed and made available under the terms and conditions of the BSD License\r
+# which accompanies this distribution. The full text of the license may be found at\r
+# http://opensource.org/licenses/bsd-license.php.\r
+#\r
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+#\r
+# Module Name:\r
+#\r
+# SmiException.S\r
+#\r
+# Abstract:\r
+#\r
+# Exception handlers used in SM mode\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+ASM_GLOBAL ASM_PFX(gcStmPsd)\r
+\r
+ASM_GLOBAL ASM_PFX(SmmStmExceptionHandler)\r
+ASM_GLOBAL ASM_PFX(SmmStmSetup)\r
+ASM_GLOBAL ASM_PFX(SmmStmTeardown)\r
+\r
+.equ MSR_IA32_MISC_ENABLE, 0x1A0\r
+.equ MSR_EFER, 0xc0000080\r
+.equ MSR_EFER_XD, 0x800\r
+\r
+.equ CODE_SEL, 0x08\r
+.equ DATA_SEL, 0x20\r
+.equ TSS_SEL, 0x40\r
+\r
+ .data\r
+\r
+# TXT processor SMM descriptor (PSD) consumed by the STM; field layout must\r
+# match the TXT_PROCESSOR_SMM_DESCRIPTOR structure byte-for-byte -- do not\r
+# reorder or resize entries.\r
+ASM_PFX(gcStmPsd):\r
+ .ascii "TXTPSSIG"\r
+ .word PSD_SIZE\r
+ .word 1 # Version\r
+ .long 0 # LocalApicId\r
+ .byte 0x5 # Cr4Pse;Cr4Pae;Intel64Mode;ExecutionDisableOutsideSmrr\r
+ .byte 0 # BIOS to STM\r
+ .byte 0 # STM to BIOS\r
+ .byte 0\r
+ .word CODE_SEL\r
+ .word DATA_SEL\r
+ .word DATA_SEL\r
+ .word DATA_SEL\r
+ .word TSS_SEL\r
+ .word 0\r
+ .quad 0 # SmmCr3\r
+ .long ASM_PFX(_OnStmSetup) # low dword of 64-bit entry pointer\r
+ .long 0\r
+ .long ASM_PFX(_OnStmTeardown) # low dword of 64-bit entry pointer\r
+ .long 0\r
+ .quad 0 # SmmSmiHandlerRip - SMM guest entrypoint\r
+ .quad 0 # SmmSmiHandlerRsp\r
+ .quad 0\r
+ .long 0\r
+ .long 0x80010100 # RequiredStmSmmRevId\r
+ .long ASM_PFX(_OnException) # low dword of 64-bit handler pointer\r
+ .long 0\r
+ .quad 0 # ExceptionStack\r
+ .word DATA_SEL\r
+ .word 0x1F # ExceptionFilter\r
+ .long 0\r
+ .quad 0\r
+ .quad 0 # BiosHwResourceRequirementsPtr\r
+ .quad 0 # AcpiRsdp\r
+ .byte 0 # PhysicalAddressBits\r
+.equ PSD_SIZE, . - ASM_PFX(gcStmPsd)\r
+\r
+ .text\r
+\r
+#------------------------------------------------------------------------------\r
+# SMM Exception handlers\r
+#------------------------------------------------------------------------------\r
+# STM exception entry: passes the current stack pointer (exception context)\r
+# to SmmStmExceptionHandler, then returns its result to the STM via VMCALL\r
+# (eax = 4). The trailing jmp should never execute.\r
+ASM_GLOBAL ASM_PFX(_OnException)\r
+ASM_PFX(_OnException):\r
+ movl %esp, %ecx\r
+ pushl %ecx\r
+ call ASM_PFX(SmmStmExceptionHandler)\r
+ addl $4, %esp\r
+\r
+ movl %eax, %ebx # handler result returned to STM in ebx\r
+ movl $4, %eax\r
+ .byte 0xf, 0x1, 0xc1 # VMCALL\r
+ jmp . # not expected to be reached\r
+\r
+# STM setup entry: temporarily enables NXE (saving the XD-disable state in\r
+# esi/stack), calls SmmStmSetup, restores the XD state, and RSMs. The dword\r
+# pushed at StmXdDone1 is not popped on the XD-unsupported path; that is\r
+# harmless because rsm restores the saved processor state.\r
+ASM_GLOBAL ASM_PFX(_OnStmSetup)\r
+ASM_PFX(_OnStmSetup):\r
+#\r
+# Check XD disable bit\r
+#\r
+ xorl %esi, %esi\r
+ movl $ASM_PFX(gStmXdSupported), %eax\r
+ movb (%eax), %al\r
+ cmpb $0, %al\r
+ jz StmXdDone1\r
+ movl $MSR_IA32_MISC_ENABLE, %ecx\r
+ rdmsr\r
+ movl %edx, %esi # save MSR_IA32_MISC_ENABLE[63-32]\r
+ testl $BIT2, %edx # MSR_IA32_MISC_ENABLE[34]\r
+ jz L13\r
+ andw $0x0FFFB, %dx # clear XD Disable bit if it is set\r
+ wrmsr\r
+L13:\r
+ movl $MSR_EFER, %ecx\r
+ rdmsr\r
+ orw $MSR_EFER_XD,%ax # enable NXE\r
+ wrmsr\r
+StmXdDone1:\r
+ push %esi\r
+\r
+ call ASM_PFX(SmmStmSetup)\r
+\r
+ movl $ASM_PFX(gStmXdSupported), %eax\r
+ movb (%eax), %al\r
+ cmpb $0, %al\r
+ jz L14\r
+ popl %edx # get saved MSR_IA32_MISC_ENABLE[63-32]\r
+ testl $BIT2, %edx\r
+ jz L14\r
+ movl $MSR_IA32_MISC_ENABLE, %ecx\r
+ rdmsr\r
+ orw $BIT2, %dx # set XD Disable bit if it was set before entering into SMM\r
+ wrmsr\r
+L14:\r
+\r
+ rsm\r
+\r
+# STM teardown entry: same XD save/enable/restore bracket as _OnStmSetup,\r
+# around a call to SmmStmTeardown, then RSM.\r
+ASM_GLOBAL ASM_PFX(_OnStmTeardown)\r
+ASM_PFX(_OnStmTeardown):\r
+#\r
+# Check XD disable bit\r
+#\r
+ xorl %esi, %esi\r
+ movl $ASM_PFX(gStmXdSupported), %eax\r
+ movb (%eax), %al\r
+ cmpb $0, %al\r
+ jz StmXdDone2\r
+ movl $MSR_IA32_MISC_ENABLE, %ecx\r
+ rdmsr\r
+ movl %edx, %esi # save MSR_IA32_MISC_ENABLE[63-32]\r
+ testl $BIT2, %edx # MSR_IA32_MISC_ENABLE[34]\r
+ jz L15\r
+ andw $0x0FFFB, %dx # clear XD Disable bit if it is set\r
+ wrmsr\r
+L15:\r
+ movl $MSR_EFER, %ecx\r
+ rdmsr\r
+ orw $MSR_EFER_XD,%ax # enable NXE\r
+ wrmsr\r
+StmXdDone2:\r
+ push %esi\r
+\r
+ call ASM_PFX(SmmStmTeardown)\r
+\r
+ movl $ASM_PFX(gStmXdSupported), %eax\r
+ movb (%eax), %al\r
+ cmpb $0, %al\r
+ jz L16\r
+ popl %edx # get saved MSR_IA32_MISC_ENABLE[63-32]\r
+ testl $BIT2, %edx\r
+ jz L16\r
+ movl $MSR_IA32_MISC_ENABLE, %ecx\r
+ rdmsr\r
+ orw $BIT2, %dx # set XD Disable bit if it was set before entering into SMM\r
+ wrmsr\r
+L16:\r
+\r
+ rsm\r
--- /dev/null
+;------------------------------------------------------------------------------ ;\r
+; Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>\r
+; This program and the accompanying materials\r
+; are licensed and made available under the terms and conditions of the BSD License\r
+; which accompanies this distribution. The full text of the license may be found at\r
+; http://opensource.org/licenses/bsd-license.php.\r
+;\r
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+;\r
+; Module Name:\r
+;\r
+; SmiException.asm\r
+;\r
+; Abstract:\r
+;\r
+; Exception handlers used in SM mode\r
+;\r
+;-------------------------------------------------------------------------------\r
+\r
+ .686p\r
+ .model flat,C\r
+\r
+EXTERNDEF gcStmPsd:BYTE\r
+\r
+EXTERNDEF SmmStmExceptionHandler:PROC\r
+EXTERNDEF SmmStmSetup:PROC\r
+EXTERNDEF SmmStmTeardown:PROC\r
+; gStmXdSupported is defined in SmiEntry.asm and read by the XD save/restore\r
+; code below; without this EXTERNDEF MASM reports an undefined symbol.\r
+EXTERNDEF gStmXdSupported:BYTE\r
+\r
+; MSR indices/bits used by the XD (execute-disable) handling below. These\r
+; equates were missing from this dialect, while the .S and .nasm variants of\r
+; this file define them; MASM would otherwise fail on MSR_IA32_MISC_ENABLE,\r
+; MSR_EFER and MSR_EFER_XD.\r
+MSR_IA32_MISC_ENABLE EQU 1A0h\r
+MSR_EFER EQU 0c0000080h\r
+MSR_EFER_XD EQU 0800h\r
+\r
+CODE_SEL = 08h\r
+DATA_SEL = 20h\r
+TSS_SEL = 40h\r
+\r
+ .data\r
+\r
+; TXT processor SMM descriptor (PSD) consumed by the STM; field layout must\r
+; match the TXT_PROCESSOR_SMM_DESCRIPTOR structure byte-for-byte -- do not\r
+; reorder or resize entries.\r
+gcStmPsd LABEL BYTE\r
+ DB 'TXTPSSIG'\r
+ DW PSD_SIZE\r
+ DW 1 ; Version\r
+ DD 0 ; LocalApicId\r
+ DB 05h ; Cr4Pse;Cr4Pae;Intel64Mode;ExecutionDisableOutsideSmrr\r
+ DB 0 ; BIOS to STM\r
+ DB 0 ; STM to BIOS\r
+ DB 0\r
+ DW CODE_SEL\r
+ DW DATA_SEL\r
+ DW DATA_SEL\r
+ DW DATA_SEL\r
+ DW TSS_SEL\r
+ DW 0\r
+ DQ 0 ; SmmCr3\r
+ DQ _OnStmSetup ; 64-bit entry pointer (zero-extended 32-bit address)\r
+ DQ _OnStmTeardown ; 64-bit entry pointer (zero-extended 32-bit address)\r
+ DQ 0 ; SmmSmiHandlerRip - SMM guest entrypoint\r
+ DQ 0 ; SmmSmiHandlerRsp\r
+ DQ 0\r
+ DD 0\r
+ DD 80010100h ; RequiredStmSmmRevId\r
+ DQ _OnException ; 64-bit handler pointer (zero-extended 32-bit address)\r
+ DQ 0 ; ExceptionStack\r
+ DW DATA_SEL\r
+ DW 01Fh ; ExceptionFilter\r
+ DD 0\r
+ DQ 0\r
+ DQ 0 ; BiosHwResourceRequirementsPtr\r
+ DQ 0 ; AcpiRsdp\r
+ DB 0 ; PhysicalAddressBits\r
+PSD_SIZE = $ - offset gcStmPsd\r
+\r
+ .code\r
+;------------------------------------------------------------------------------\r
+; SMM Exception handlers\r
+;------------------------------------------------------------------------------\r
+; STM exception entry: passes the current stack pointer (exception context)\r
+; to SmmStmExceptionHandler, then returns its result to the STM via VMCALL\r
+; (eax = 4). The trailing jmp should never execute.\r
+_OnException PROC\r
+ mov ecx, esp\r
+ push ecx\r
+ call SmmStmExceptionHandler\r
+ add esp, 4\r
+\r
+ mov ebx, eax ; handler result returned to STM in ebx\r
+ mov eax, 4\r
+ DB 0fh, 01h, 0c1h ; VMCALL\r
+ jmp $ ; not expected to be reached\r
+_OnException ENDP\r
+\r
+; STM setup entry: temporarily enables NXE (saving the XD-disable state),\r
+; calls SmmStmSetup, restores the XD state, and RSMs. The dword pushed at\r
+; @StmXdDone1 is not popped on the XD-unsupported path; that is harmless\r
+; because rsm restores the saved processor state.\r
+_OnStmSetup PROC\r
+;\r
+; Check XD disable bit\r
+;\r
+ xor esi, esi\r
+ mov eax, gStmXdSupported\r
+ mov al, [eax]\r
+ cmp al, 0\r
+ jz @StmXdDone1\r
+ mov ecx, MSR_IA32_MISC_ENABLE\r
+ rdmsr\r
+ mov esi, edx ; save MSR_IA32_MISC_ENABLE[63-32]\r
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]\r
+ jz @f\r
+ and dx, 0FFFBh ; clear XD Disable bit if it is set\r
+ wrmsr\r
+@@:\r
+ mov ecx, MSR_EFER\r
+ rdmsr\r
+ or ax, MSR_EFER_XD ; enable NXE\r
+ wrmsr\r
+@StmXdDone1:\r
+ push esi\r
+\r
+ call SmmStmSetup\r
+\r
+ mov eax, gStmXdSupported\r
+ mov al, [eax]\r
+ cmp al, 0\r
+ jz @f\r
+ pop edx ; get saved MSR_IA32_MISC_ENABLE[63-32]\r
+ test edx, BIT2\r
+ jz @f\r
+ mov ecx, MSR_IA32_MISC_ENABLE\r
+ rdmsr\r
+ or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM\r
+ wrmsr\r
+@@:\r
+\r
+ rsm\r
+_OnStmSetup ENDP\r
+\r
+; STM teardown entry: same XD save/enable/restore bracket as _OnStmSetup,\r
+; around a call to SmmStmTeardown, then RSM.\r
+_OnStmTeardown PROC\r
+;\r
+; Check XD disable bit\r
+;\r
+ xor esi, esi\r
+ mov eax, gStmXdSupported\r
+ mov al, [eax]\r
+ cmp al, 0\r
+ jz @StmXdDone2\r
+ mov ecx, MSR_IA32_MISC_ENABLE\r
+ rdmsr\r
+ mov esi, edx ; save MSR_IA32_MISC_ENABLE[63-32]\r
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]\r
+ jz @f\r
+ and dx, 0FFFBh ; clear XD Disable bit if it is set\r
+ wrmsr\r
+@@:\r
+ mov ecx, MSR_EFER\r
+ rdmsr\r
+ or ax, MSR_EFER_XD ; enable NXE\r
+ wrmsr\r
+@StmXdDone2:\r
+ push esi\r
+\r
+ call SmmStmTeardown\r
+\r
+ mov eax, gStmXdSupported\r
+ mov al, [eax]\r
+ cmp al, 0\r
+ jz @f\r
+ pop edx ; get saved MSR_IA32_MISC_ENABLE[63-32]\r
+ test edx, BIT2\r
+ jz @f\r
+ mov ecx, MSR_IA32_MISC_ENABLE\r
+ rdmsr\r
+ or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM\r
+ wrmsr\r
+@@:\r
+\r
+ rsm\r
+_OnStmTeardown ENDP\r
+\r
+ END\r
--- /dev/null
+;------------------------------------------------------------------------------ ;\r
+; Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>\r
+; This program and the accompanying materials\r
+; are licensed and made available under the terms and conditions of the BSD License\r
+; which accompanies this distribution. The full text of the license may be found at\r
+; http://opensource.org/licenses/bsd-license.php.\r
+;\r
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+;\r
+; Module Name:\r
+;\r
+; SmiException.nasm\r
+;\r
+; Abstract:\r
+;\r
+; Exception handlers used in SM mode\r
+;\r
+;-------------------------------------------------------------------------------\r
+\r
+global ASM_PFX(gcStmPsd)\r
+\r
+extern ASM_PFX(SmmStmExceptionHandler)\r
+extern ASM_PFX(SmmStmSetup)\r
+extern ASM_PFX(SmmStmTeardown)\r
+extern ASM_PFX(gStmXdSupported)\r
+extern ASM_PFX(gStmSmiHandlerIdtr)\r
+\r
+%define MSR_IA32_MISC_ENABLE 0x1A0\r
+%define MSR_EFER 0xc0000080\r
+%define MSR_EFER_XD 0x800\r
+\r
+CODE_SEL equ 0x08\r
+DATA_SEL equ 0x20\r
+TSS_SEL equ 0x40\r
+\r
+ SECTION .data\r
+\r
+; TXT processor SMM descriptor (PSD) consumed by the STM; field layout must\r
+; match the TXT_PROCESSOR_SMM_DESCRIPTOR structure byte-for-byte -- do not\r
+; reorder or resize entries.\r
+ASM_PFX(gcStmPsd):\r
+ DB 'TXTPSSIG'\r
+ DW PSD_SIZE\r
+ DW 1 ; Version\r
+ DD 0 ; LocalApicId\r
+ DB 0x05 ; Cr4Pse;Cr4Pae;Intel64Mode;ExecutionDisableOutsideSmrr\r
+ DB 0 ; BIOS to STM\r
+ DB 0 ; STM to BIOS\r
+ DB 0\r
+ DW CODE_SEL\r
+ DW DATA_SEL\r
+ DW DATA_SEL\r
+ DW DATA_SEL\r
+ DW TSS_SEL\r
+ DW 0\r
+ DQ 0 ; SmmCr3\r
+ DD ASM_PFX(OnStmSetup) ; low dword of 64-bit entry pointer\r
+ DD 0\r
+ DD ASM_PFX(OnStmTeardown) ; low dword of 64-bit entry pointer\r
+ DD 0\r
+ DQ 0 ; SmmSmiHandlerRip - SMM guest entrypoint\r
+ DQ 0 ; SmmSmiHandlerRsp\r
+ DQ 0\r
+ DD 0\r
+ DD 0x80010100 ; RequiredStmSmmRevId\r
+ DD ASM_PFX(OnException) ; low dword of 64-bit handler pointer\r
+ DD 0\r
+ DQ 0 ; ExceptionStack\r
+ DW DATA_SEL\r
+ DW 0x01F ; ExceptionFilter\r
+ DD 0\r
+ DD 0\r
+ DD 0\r
+ DQ 0 ; BiosHwResourceRequirementsPtr\r
+ DQ 0 ; AcpiRsdp\r
+ DB 0 ; PhysicalAddressBits\r
+PSD_SIZE equ $ - ASM_PFX(gcStmPsd)\r
+\r
+ SECTION .text\r
+;------------------------------------------------------------------------------\r
+; SMM Exception handlers\r
+;------------------------------------------------------------------------------\r
+; STM exception entry: passes the current stack pointer (exception context)\r
+; to SmmStmExceptionHandler, then returns its result to the STM via VMCALL\r
+; (eax = 4). The trailing jmp should never execute.\r
+global ASM_PFX(OnException)\r
+ASM_PFX(OnException):\r
+ mov ecx, esp\r
+ push ecx\r
+ call ASM_PFX(SmmStmExceptionHandler)\r
+ add esp, 4\r
+\r
+ mov ebx, eax ; handler result returned to STM in ebx\r
+ mov eax, 4\r
+ DB 0x0f, 0x01, 0x0c1 ; VMCALL\r
+ jmp $ ; not expected to be reached\r
+\r
+; STM setup entry: temporarily enables NXE (saving the XD-disable state),\r
+; calls SmmStmSetup, restores the XD state, and RSMs. The dword pushed at\r
+; @StmXdDone1 is not popped on the XD-unsupported path; that is harmless\r
+; because rsm restores the saved processor state.\r
+global ASM_PFX(OnStmSetup)\r
+ASM_PFX(OnStmSetup):\r
+;\r
+; Check XD disable bit\r
+;\r
+ xor esi, esi\r
+ mov eax, ASM_PFX(gStmXdSupported)\r
+ mov al, [eax]\r
+ cmp al, 0\r
+ jz @StmXdDone1\r
+ mov ecx, MSR_IA32_MISC_ENABLE\r
+ rdmsr\r
+ mov esi, edx ; save MSR_IA32_MISC_ENABLE[63-32]\r
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]\r
+ jz .51\r
+ and dx, 0xFFFB ; clear XD Disable bit if it is set\r
+ wrmsr\r
+.51:\r
+ mov ecx, MSR_EFER\r
+ rdmsr\r
+ or ax, MSR_EFER_XD ; enable NXE\r
+ wrmsr\r
+@StmXdDone1:\r
+ push esi\r
+\r
+ call ASM_PFX(SmmStmSetup)\r
+\r
+ mov eax, ASM_PFX(gStmXdSupported)\r
+ mov al, [eax]\r
+ cmp al, 0\r
+ jz .71\r
+ pop edx ; get saved MSR_IA32_MISC_ENABLE[63-32]\r
+ test edx, BIT2\r
+ jz .71\r
+ mov ecx, MSR_IA32_MISC_ENABLE\r
+ rdmsr\r
+ or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM\r
+ wrmsr\r
+\r
+.71:\r
+ rsm\r
+\r
+global ASM_PFX(OnStmTeardown)
+;------------------------------------------------------------------------------
+; STM teardown entry point (referenced from gcStmPsd).  Mirrors OnStmSetup:
+; temporarily enables NXE around the C worker when XD is supported, restores
+; the XD-disable state, then resumes via RSM.
+;------------------------------------------------------------------------------
+ASM_PFX(OnStmTeardown):
+;
+; Check XD disable bit
+;
+ xor esi, esi ; esi = saved MISC_ENABLE[63:32] (0 when XD unsupported)
+ mov eax, ASM_PFX(gStmXdSupported) ; eax = &gStmXdSupported
+ mov al, [eax]
+ cmp al, 0
+ jz @StmXdDone2 ; NOTE(review): '@' is not a valid first character for a NASM label per the NASM manual - confirm this assembles
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ mov esi, edx ; save MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
+ jz .52
+ and dx, 0xFFFB ; clear XD Disable bit if it is set
+ wrmsr
+.52:
+ mov ecx, MSR_EFER
+ rdmsr
+ or ax, MSR_EFER_XD ; enable NXE
+ wrmsr
+@StmXdDone2:
+ push esi ; keep saved MISC_ENABLE[63:32] across the C call
+
+ call ASM_PFX(SmmStmTeardown)
+
+ mov eax, ASM_PFX(gStmXdSupported) ; restore XD-disable state if needed
+ mov al, [eax]
+ cmp al, 0
+ jz .72
+ pop edx ; get saved MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2
+ jz .72
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
+ wrmsr
+
+.72:
+ rsm ; return to the SMM/STM environment
+\r
--- /dev/null
+/** @file\r
+ SMM STM support functions\r
+\r
+ Copyright (c) 2015 - 2016, Intel Corporation. All rights reserved.<BR>\r
+ This program and the accompanying materials\r
+ are licensed and made available under the terms and conditions of the BSD License\r
+ which accompanies this distribution. The full text of the license may be found at\r
+ http://opensource.org/licenses/bsd-license.php.\r
+\r
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include <PiSmm.h>\r
+#include <Library/DebugLib.h>\r
+\r
+#include "SmmStm.h"\r
+\r
+///\r
+/// Page Table Entry\r
+///\r
+#define IA32_PG_P BIT0\r
+#define IA32_PG_RW BIT1\r
+#define IA32_PG_PS BIT7\r
+\r
+/**
+
+  Create 4G page table for STM.
+  4M Non-PAE page table in IA32 version.
+
+  @param PageTableBase The page table base in MSEG
+
+**/
+VOID
+StmGen4GPageTable (
+  IN UINTN  PageTableBase
+  )
+{
+  UINT32  *PageDirectory;
+  UINTN   EntryIndex;
+
+  //
+  // Fill one 4 KB page directory with 1024 4 MB PSE entries, identity
+  // mapping 0..4 GB, each marked present and writable.
+  //
+  PageDirectory = (UINT32 *)(UINTN)PageTableBase;
+  for (EntryIndex = 0; EntryIndex < SIZE_4KB / sizeof (*PageDirectory); EntryIndex++) {
+    PageDirectory[EntryIndex] = (UINT32)(EntryIndex * SIZE_4MB) | IA32_PG_PS | IA32_PG_RW | IA32_PG_P;
+  }
+}
+\r
+/**
+  This is SMM exception handle.
+  Consumed by STM when exception happen.
+
+  @param Context STM protection exception stack frame
+
+  @return the EBX value for STM reference.
+          EBX = 0: resume SMM guest using register state found on exception stack.
+          EBX = 1 to 0x0F: EBX contains a BIOS error code which the STM must record in the
+                           TXT.ERRORCODE register and subsequently reset the system via
+                           TXT.CMD.SYS_RESET. The value of the TXT.ERRORCODE register is calculated as
+                           follows: TXT.ERRORCODE = (EBX & 0x0F) | STM_CRASH_BIOS_PANIC
+          EBX = 0x10 to 0xFFFFFFFF - reserved, do not use.
+
+**/
+UINT32
+EFIAPI
+SmmStmExceptionHandler (
+  IN OUT STM_PROTECTION_EXCEPTION_STACK_FRAME  Context
+  )
+{
+  // TBD - SmmStmExceptionHandler, record information
+  DEBUG ((DEBUG_ERROR, "SmmStmExceptionHandler ...\n"));
+
+  //
+  // Advance the saved guest RIP past the instruction that triggered the
+  // exception so execution continues at the next instruction on resume.
+  //
+  Context.Ia32StackFrame->Rip = Context.Ia32StackFrame->Rip + Context.Ia32StackFrame->VmcsExitInstructionLength;
+
+  //
+  // 0 = resume the SMM guest with the register state on the exception stack.
+  //
+  return 0;
+}
--- /dev/null
+## @file\r
+# The CPU specific programming for PiSmmCpuDxeSmm module when STM support\r
+# is included.\r
+#\r
+# Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>\r
+# This program and the accompanying materials\r
+# are licensed and made available under the terms and conditions of the BSD License\r
+# which accompanies this distribution. The full text of the license may be found at\r
+# http://opensource.org/licenses/bsd-license.php\r
+#\r
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+#\r
+##\r
+\r
+[Defines]\r
+ INF_VERSION = 0x00010005\r
+ BASE_NAME = SmmCpuFeaturesLibStm\r
+ MODULE_UNI_FILE = SmmCpuFeaturesLib.uni\r
+ FILE_GUID = 374DE830-81C5-4CC8-B2AB-28F0AB73710B\r
+ MODULE_TYPE = DXE_SMM_DRIVER\r
+ VERSION_STRING = 1.0\r
+ LIBRARY_CLASS = SmmCpuFeaturesLib\r
+ CONSTRUCTOR = SmmCpuFeaturesLibStmConstructor\r
+\r
+[Sources]\r
+ SmmCpuFeaturesLib.c\r
+ SmmStm.c\r
+ SmmStm.h\r
+\r
+[Sources.Ia32]\r
+ Ia32/SmmStmSupport.c\r
+\r
+ Ia32/SmiEntry.asm\r
+ Ia32/SmiException.asm\r
+\r
+ Ia32/SmiEntry.nasm\r
+ Ia32/SmiException.nasm\r
+\r
+ Ia32/SmiEntry.S\r
+ Ia32/SmiException.S\r
+\r
+[Sources.X64]\r
+ X64/SmmStmSupport.c\r
+\r
+ X64/SmiEntry.asm\r
+ X64/SmiException.asm\r
+\r
+ X64/SmiEntry.nasm\r
+ X64/SmiException.nasm\r
+\r
+ X64/SmiEntry.S\r
+ X64/SmiException.S\r
+\r
+[Packages]\r
+ MdePkg/MdePkg.dec\r
+ MdeModulePkg/MdeModulePkg.dec\r
+ UefiCpuPkg/UefiCpuPkg.dec\r
+\r
+[LibraryClasses]\r
+ BaseLib\r
+ BaseMemoryLib\r
+ PcdLib\r
+ HobLib\r
+ MemoryAllocationLib\r
+ DebugLib\r
+ UefiBootServicesTableLib\r
+ SmmServicesTableLib\r
+ TpmMeasurementLib\r
+\r
+[Protocols]\r
+ gEfiMpServiceProtocolGuid ## CONSUMES\r
+ gEfiSmmEndOfDxeProtocolGuid ## CONSUMES\r
+ gEfiSmMonitorInitProtocolGuid ## PRODUCES\r
+\r
+[Guids]\r
+ gMsegSmramGuid ## SOMETIMES_CONSUMES ## HOB\r
+ gEfiAcpi20TableGuid ## SOMETIMES_CONSUMES ## SystemTable\r
+ gEfiAcpi10TableGuid ## SOMETIMES_CONSUMES ## SystemTable\r
+\r
+[Pcd]\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuMaxLogicalProcessorNumber ## SOMETIMES_CONSUMES\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuMsegSize ## SOMETIMES_CONSUMES\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmStmExceptionStackSize ## SOMETIMES_CONSUMES\r
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmStackGuard ## CONSUMES\r
+\r
+[Depex]\r
+ gEfiMpServiceProtocolGuid\r
--- /dev/null
+/** @file\r
+ SMM STM support functions\r
+\r
+ Copyright (c) 2015 - 2016, Intel Corporation. All rights reserved.<BR>\r
+ This program and the accompanying materials\r
+ are licensed and made available under the terms and conditions of the BSD License\r
+ which accompanies this distribution. The full text of the license may be found at\r
+ http://opensource.org/licenses/bsd-license.php.\r
+\r
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include <PiSmm.h>\r
+#include <Library/BaseLib.h>\r
+#include <Library/BaseMemoryLib.h>\r
+#include <Library/MemoryAllocationLib.h>\r
+#include <Library/HobLib.h>\r
+#include <Library/DebugLib.h>\r
+#include <Library/UefiBootServicesTableLib.h>\r
+#include <Library/SmmServicesTableLib.h>\r
+#include <Library/TpmMeasurementLib.h>\r
+#include <Register/Cpuid.h>\r
+#include <Register/ArchitecturalMsr.h>\r
+#include <Register/SmramSaveStateMap.h>\r
+\r
+#include <Protocol/MpService.h>\r
+\r
+#include "SmmStm.h"\r
+\r
+#define TXT_EVTYPE_BASE 0x400\r
+#define TXT_EVTYPE_STM_HASH (TXT_EVTYPE_BASE + 14)\r
+\r
+#define RDWR_ACCS 3\r
+#define FULL_ACCS 7\r
+\r
+/**
+  The constructor function
+
+  (Common constructor shared with the non-STM library instance; defined in
+  SmmCpuFeaturesLib.c and invoked by SmmCpuFeaturesLibStmConstructor below.)
+
+  @param[in] ImageHandle The firmware allocated handle for the EFI image.
+  @param[in] SystemTable A pointer to the EFI System Table.
+
+  @retval EFI_SUCCESS The constructor always returns EFI_SUCCESS.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmCpuFeaturesLibConstructor (
+  IN EFI_HANDLE ImageHandle,
+  IN EFI_SYSTEM_TABLE *SystemTable
+  );
+
+// Handle on which gEfiSmMonitorInitProtocolGuid is installed by
+// StmSmmConfigurationTableInit().
+EFI_HANDLE mStmSmmCpuHandle = NULL;
+
+// Set TRUE at SMM End Of DXE (SmmEndOfDxeEventNotify); presumably consulted
+// by LoadMonitor() to reject late loads - consumer is outside this view,
+// confirm in SmmStm.c/SmmStm.h.
+BOOLEAN mLockLoadMonitor = FALSE;
+
+//
+// Template of STM_RSC_END structure for copying.
+//
+GLOBAL_REMOVE_IF_UNREFERENCED STM_RSC_END mRscEndNode = {\r
+  {END_OF_RESOURCES, sizeof (STM_RSC_END)},\r
+};
+
+// STM resource database: a heap buffer holding a packed list of STM_RSC
+// records terminated by an STM_RSC_END node (see AddPiResource/AddResource).
+GLOBAL_REMOVE_IF_UNREFERENCED UINT8 *mStmResourcesPtr = NULL;
+GLOBAL_REMOVE_IF_UNREFERENCED UINTN mStmResourceTotalSize = 0x0;    // allocated bytes
+GLOBAL_REMOVE_IF_UNREFERENCED UINTN mStmResourceSizeUsed = 0x0;     // bytes used, incl. END node
+GLOBAL_REMOVE_IF_UNREFERENCED UINTN mStmResourceSizeAvailable = 0x0; // TotalSize - SizeUsed
+
+// Current STM state, reported by GetMonitorState().
+GLOBAL_REMOVE_IF_UNREFERENCED UINT32 mStmState = 0;
+
+//
+// System Configuration Table pointing to STM Configuration Table
+//
+GLOBAL_REMOVE_IF_UNREFERENCED
+EFI_SM_MONITOR_INIT_PROTOCOL mSmMonitorInitProtocol = {
+  LoadMonitor,
+  AddPiResource,
+  DeletePiResource,
+  GetPiResource,
+  GetMonitorState,
+};
+
+
+
+
+// CPUID.80000001h:EDX bit 20 - Execute Disable support.
+#define CPUID1_EDX_XD_SUPPORT 0x100000
+
+//
+// External global variables associated with SMI Handler Template
+// (defined in the Ia32/X64 SmiEntry assembly files; patched before the
+// template is copied into each CPU's SMRAM).
+//
+extern CONST TXT_PROCESSOR_SMM_DESCRIPTOR gcStmPsd;
+extern UINT32 gStmSmbase;
+extern volatile UINT32 gStmSmiStack;
+extern UINT32 gStmSmiCr3;
+extern volatile UINT8 gcStmSmiHandlerTemplate[];
+extern CONST UINT16 gcStmSmiHandlerSize;
+extern UINT16 gcStmSmiHandlerOffset;
+extern BOOLEAN gStmXdSupported;
+
+//
+// Variables used by SMI Handler
+//
+IA32_DESCRIPTOR gStmSmiHandlerIdtr;
+
+//
+// MP Services Protocol
+//
+EFI_MP_SERVICES_PROTOCOL *mSmmCpuFeaturesLibMpService = NULL;
+
+//
+// MSEG Base and Length in SMRAM
+//
+UINTN mMsegBase = 0;
+UINTN mMsegSize = 0;
+
+// Ensures StmSmmConfigurationTableInit() runs only once (first call to
+// SmmCpuFeaturesInstallSmiHandler).
+BOOLEAN mStmConfigurationTableInitialized = FALSE;
+\r
+\r
+/**
+  The constructor function
+
+  Runs the common SmmCpuFeaturesLib constructor, caches the MP Services
+  Protocol, and - when the CPU supports VMX - determines the MSEG region
+  either from the MSEG SMRAM HOB or by allocating PcdCpuMsegSize bytes.
+
+  @param[in] ImageHandle The firmware allocated handle for the EFI image.
+  @param[in] SystemTable A pointer to the EFI System Table.
+
+  @retval EFI_SUCCESS The constructor always returns EFI_SUCCESS.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmCpuFeaturesLibStmConstructor (
+  IN EFI_HANDLE        ImageHandle,
+  IN EFI_SYSTEM_TABLE  *SystemTable
+  )
+{
+  EFI_STATUS              Status;
+  CPUID_VERSION_INFO_ECX  VersionInfoEcx;
+  EFI_HOB_GUID_TYPE       *MsegHob;
+  EFI_SMRAM_DESCRIPTOR    *MsegDescriptor;
+
+  //
+  // Call the common constructor function
+  //
+  Status = SmmCpuFeaturesLibConstructor (ImageHandle, SystemTable);
+  ASSERT_EFI_ERROR (Status);
+
+  //
+  // Lookup the MP Services Protocol
+  //
+  Status = gBS->LocateProtocol (
+                  &gEfiMpServiceProtocolGuid,
+                  NULL,
+                  (VOID **)&mSmmCpuFeaturesLibMpService
+                  );
+  ASSERT_EFI_ERROR (Status);
+
+  //
+  // MSEG is only meaningful when the CPU supports VMX.
+  //
+  AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, &VersionInfoEcx.Uint32, NULL);
+  if (VersionInfoEcx.Bits.VMX != 1) {
+    return EFI_SUCCESS;
+  }
+
+  MsegHob = GetFirstGuidHob (&gMsegSmramGuid);
+  if (MsegHob != NULL) {
+    //
+    // Retrieve MSEG location from MSEG SRAM HOB
+    //
+    MsegDescriptor = (EFI_SMRAM_DESCRIPTOR *)GET_GUID_HOB_DATA (MsegHob);
+    if (MsegDescriptor->PhysicalSize > 0) {
+      mMsegBase = (UINTN)MsegDescriptor->CpuStart;
+      mMsegSize = (UINTN)MsegDescriptor->PhysicalSize;
+    }
+  } else if (PcdGet32 (PcdCpuMsegSize) > 0) {
+    //
+    // No HOB: allocate MSEG from SMRAM memory instead.
+    //
+    mMsegBase = (UINTN)AllocatePages (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuMsegSize)));
+    if (mMsegBase > 0) {
+      mMsegSize = ALIGN_VALUE (PcdGet32 (PcdCpuMsegSize), EFI_PAGE_SIZE);
+    } else {
+      DEBUG ((DEBUG_ERROR, "Not enough SMRAM resource to allocate MSEG size %08x\n", PcdGet32 (PcdCpuMsegSize)));
+    }
+  }
+
+  if (mMsegBase > 0) {
+    DEBUG ((DEBUG_INFO, "MsegBase: 0x%08x, MsegSize: 0x%08x\n", mMsegBase, mMsegSize));
+  }
+
+  return EFI_SUCCESS;
+}
+\r
+/**
+  Internal worker function that is called to complete CPU initialization at the
+  end of SmmCpuFeaturesInitializeProcessor()
+
+  Programs IA32_SMM_MONITOR_CTL with the MSEG base (page-frame number in
+  bits 31:12) and the Valid bit, but only when an MSEG region was found or
+  allocated by the constructor.
+
+**/
+VOID
+FinishSmmCpuFeaturesInitializeProcessor (
+  VOID
+  )
+{
+  MSR_IA32_SMM_MONITOR_CTL_REGISTER  MonitorCtl;
+
+  if (mMsegBase == 0) {
+    //
+    // No MSEG configured; leave the SMM Monitor Control MSR untouched.
+    //
+    return;
+  }
+
+  //
+  // Set MSEG Base Address in SMM Monitor Control MSR.
+  //
+  MonitorCtl.Uint64        = 0;
+  MonitorCtl.Bits.MsegBase = (UINT32)mMsegBase >> 12;
+  MonitorCtl.Bits.Valid    = 1;
+  AsmWriteMsr64 (MSR_IA32_SMM_MONITOR_CTL, MonitorCtl.Uint64);
+}
+\r
+/**
+  Return the size, in bytes, of a custom SMI Handler in bytes. If 0 is
+  returned, then a custom SMI handler is not provided by this library,
+  and the default SMI handler must be used.
+
+  @retval 0    Use the default SMI handler.
+  @retval > 0  Use the SMI handler installed by SmmCpuFeaturesInstallSmiHandler()
+               The caller is required to allocate enough SMRAM for each CPU to
+               support the size of the custom SMI handler.
+**/
+UINTN
+EFIAPI
+SmmCpuFeaturesGetSmiHandlerSize (
+  VOID
+  )
+{
+  //
+  // The STM-aware SMI entry template (SmiEntry assembly) defines the size
+  // of the custom handler; always non-zero for this library instance.
+  //
+  return (UINTN)gcStmSmiHandlerSize;
+}
+\r
+/**
+  Install a custom SMI handler for the CPU specified by CpuIndex. This function
+  is only called if SmmCpuFeaturesGetSmiHandlerSize() returns a size is greater
+  than zero and is called by the CPU that was elected as monarch during System
+  Management Mode initialization.
+
+  @param[in] CpuIndex   The index of the CPU to install the custom SMI handler.
+                        The value must be between 0 and the NumberOfCpus field
+                        in the System Management System Table (SMST).
+  @param[in] SmBase     The SMBASE address for the CPU specified by CpuIndex.
+  @param[in] SmiStack   The stack to use when an SMI is processed by the
+                        the CPU specified by CpuIndex.
+  @param[in] StackSize  The size, in bytes, if the stack used when an SMI is
+                        processed by the CPU specified by CpuIndex.
+  @param[in] GdtBase    The base address of the GDT to use when an SMI is
+                        processed by the CPU specified by CpuIndex.
+  @param[in] GdtSize    The size, in bytes, of the GDT used when an SMI is
+                        processed by the CPU specified by CpuIndex.
+  @param[in] IdtBase    The base address of the IDT to use when an SMI is
+                        processed by the CPU specified by CpuIndex.
+  @param[in] IdtSize    The size, in bytes, of the IDT used when an SMI is
+                        processed by the CPU specified by CpuIndex.
+  @param[in] Cr3        The base address of the page tables to use when an SMI
+                        is processed by the CPU specified by CpuIndex.
+**/
+VOID
+EFIAPI
+SmmCpuFeaturesInstallSmiHandler (
+  IN UINTN   CpuIndex,
+  IN UINT32  SmBase,
+  IN VOID    *SmiStack,
+  IN UINTN   StackSize,
+  IN UINTN   GdtBase,
+  IN UINTN   GdtSize,
+  IN UINTN   IdtBase,
+  IN UINTN   IdtSize,
+  IN UINT32  Cr3
+  )
+{
+  EFI_STATUS                    Status;
+  TXT_PROCESSOR_SMM_DESCRIPTOR  *Psd;
+  VOID                          *Hob;
+  UINT32                        RegEax;
+  UINT32                        RegEdx;
+  EFI_PROCESSOR_INFORMATION     ProcessorInfo;
+
+  //
+  // Copy the PSD template into this CPU's SMRAM, then patch its GDT fields.
+  //
+  CopyMem ((VOID *)(UINTN)(SmBase + TXT_SMM_PSD_OFFSET), &gcStmPsd, sizeof (gcStmPsd));
+  Psd = (TXT_PROCESSOR_SMM_DESCRIPTOR *)(VOID *)(UINTN)(SmBase + TXT_SMM_PSD_OFFSET);
+  Psd->SmmGdtPtr = GdtBase;
+  Psd->SmmGdtSize = (UINT32)GdtSize;
+
+  //
+  // Initialize values in template before copy
+  // (these globals are referenced by the SmiEntry assembly template).
+  //
+  gStmSmiStack             = (UINT32)((UINTN)SmiStack + StackSize - sizeof (UINTN));
+  gStmSmiCr3               = Cr3;
+  gStmSmbase               = SmBase;
+  gStmSmiHandlerIdtr.Base  = IdtBase;
+  gStmSmiHandlerIdtr.Limit = (UINT16)(IdtSize - 1);
+
+  if (gStmXdSupported) {
+    //
+    // Double-check XD capability via CPUID; clear the flag (and thus the
+    // XD handling in the assembly paths) if the CPU cannot support it.
+    //
+    AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
+    if (RegEax <= CPUID_EXTENDED_FUNCTION) {
+      //
+      // Extended CPUID functions are not supported on this processor.
+      //
+      gStmXdSupported = FALSE;
+    }
+
+    AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
+    if ((RegEdx & CPUID1_EDX_XD_SUPPORT) == 0) {
+      //
+      // Execute Disable Bit feature is not supported on this processor.
+      //
+      gStmXdSupported = FALSE;
+    }
+  }
+
+  //
+  // Set the value at the top of the CPU stack to the CPU Index
+  //
+  *(UINTN *)(UINTN)gStmSmiStack = CpuIndex;
+
+  //
+  // Copy template to CPU specific SMI handler location
+  //
+  CopyMem (
+    (VOID *)(UINTN)(SmBase + SMM_HANDLER_OFFSET),
+    (VOID *)gcStmSmiHandlerTemplate,
+    gcStmSmiHandlerSize
+    );
+
+  //
+  // Patch the PSD with this CPU's handler entry point, stack, and CR3.
+  //
+  Psd->SmmSmiHandlerRip = SmBase + SMM_HANDLER_OFFSET + gcStmSmiHandlerOffset;
+  Psd->SmmSmiHandlerRsp = (UINTN)SmiStack + StackSize - sizeof (UINTN);
+  Psd->SmmCr3           = Cr3;
+
+  DEBUG ((DEBUG_ERROR, "CpuSmmStmExceptionStackSize - %x\n", PcdGet32 (PcdCpuSmmStmExceptionStackSize)));
+  DEBUG ((DEBUG_ERROR, "Pages - %x\n", EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStmExceptionStackSize))));
+  // NOTE(review): AllocatePages() result is not checked for NULL before the
+  // stack top is computed - confirm whether failure is tolerable here.
+  Psd->StmProtectionExceptionHandler.SpeRsp = (UINT64)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStmExceptionStackSize)));
+  // Stack grows down: point SpeRsp at the top of the allocation.
+  Psd->StmProtectionExceptionHandler.SpeRsp += EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStmExceptionStackSize)));
+
+  Psd->BiosHwResourceRequirementsPtr = (UINT64)(UINTN)GetStmResource ();
+
+  //
+  // Get the APIC ID for the CPU specified by CpuIndex
+  //
+  Status = mSmmCpuFeaturesLibMpService->GetProcessorInfo (
+             mSmmCpuFeaturesLibMpService,
+             CpuIndex,
+             &ProcessorInfo
+             );
+  ASSERT_EFI_ERROR (Status);
+
+  Psd->LocalApicId = (UINT32)ProcessorInfo.ProcessorId;
+  Psd->AcpiRsdp = 0;
+
+  //
+  // Determine the physical address width, preferring the CPU HOB, then
+  // CPUID.80000008h, and finally a 36-bit default.
+  //
+  Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
+  if (Hob != NULL) {
+    Psd->PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
+  } else {
+    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
+    if (RegEax >= 0x80000008) {
+      AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
+      Psd->PhysicalAddressBits = (UINT8) RegEax;
+    } else {
+      Psd->PhysicalAddressBits = 36;
+    }
+  }
+
+  //
+  // One-time setup: publish the SM Monitor Init protocol and register the
+  // End Of DXE notification.
+  //
+  if (!mStmConfigurationTableInitialized) {
+    StmSmmConfigurationTableInit ();
+    mStmConfigurationTableInitialized = TRUE;
+  }
+}
+\r
+/**
+  SMM End Of Dxe event notification handler.
+
+  STM support need patch AcpiRsdp in TXT_PROCESSOR_SMM_DESCRIPTOR.
+
+  Scans the system configuration table for the ACPI RSD PTR (preferring the
+  ACPI 2.0 entry over the ACPI 1.0 entry), writes it into every CPU's PSD,
+  and locks further monitor loads.
+
+  @param[in] Protocol   Points to the protocol's unique identifier.
+  @param[in] Interface  Points to the interface instance.
+  @param[in] Handle     The handle on which the interface was installed.
+
+  @retval EFI_SUCCESS   Notification handler runs successfully.
+**/
+EFI_STATUS
+EFIAPI
+SmmEndOfDxeEventNotify (
+  IN CONST EFI_GUID  *Protocol,
+  IN VOID            *Interface,
+  IN EFI_HANDLE      Handle
+  )
+{
+  VOID                          *Rsdp;
+  UINTN                         Index;
+  TXT_PROCESSOR_SMM_DESCRIPTOR  *Psd;
+  CONST EFI_GUID                *AcpiGuids[2];
+  UINTN                         GuidIndex;
+
+  DEBUG ((DEBUG_INFO, "SmmEndOfDxeEventNotify\n"));
+
+  //
+  // found ACPI table RSD_PTR from system table.
+  // One scan per GUID, in priority order (ACPI 2.0 first, then ACPI 1.0),
+  // instead of two duplicated loops.
+  //
+  AcpiGuids[0] = &gEfiAcpi20TableGuid;
+  AcpiGuids[1] = &gEfiAcpi10TableGuid;
+  Rsdp         = NULL;
+  for (GuidIndex = 0; (GuidIndex < ARRAY_SIZE (AcpiGuids)) && (Rsdp == NULL); GuidIndex++) {
+    for (Index = 0; Index < gST->NumberOfTableEntries; Index++) {
+      if (CompareGuid (&(gST->ConfigurationTable[Index].VendorGuid), AcpiGuids[GuidIndex])) {
+        //
+        // A match was found.
+        //
+        Rsdp = gST->ConfigurationTable[Index].VendorTable;
+        break;
+      }
+    }
+  }
+
+  //
+  // Patch AcpiRsdp in each CPU's PSD (located relative to its save state map).
+  //
+  for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
+    Psd = (TXT_PROCESSOR_SMM_DESCRIPTOR *)((UINTN)gSmst->CpuSaveState[Index] - SMRAM_SAVE_STATE_MAP_OFFSET + TXT_SMM_PSD_OFFSET);
+    DEBUG ((DEBUG_INFO, "Index=%d  Psd=%p  Rsdp=%p\n", Index, Psd, Rsdp));
+    Psd->AcpiRsdp = (UINT64)(UINTN)Rsdp;
+  }
+
+  //
+  // No further monitor loads are allowed after End Of DXE.
+  //
+  mLockLoadMonitor = TRUE;
+
+  return EFI_SUCCESS;
+}
+\r
+/**
+  This function initializes the STM configuration table.
+
+  Publishes the SM Monitor Init protocol and registers for the SMM End Of
+  DXE notification so the PSD AcpiRsdp fields can be patched at that point.
+**/
+VOID
+StmSmmConfigurationTableInit (
+  VOID
+  )
+{
+  EFI_STATUS  Status;
+  VOID        *Registration;
+
+  //
+  // Publish gEfiSmMonitorInitProtocolGuid so callers can load and
+  // configure an STM.
+  //
+  Status = gSmst->SmmInstallProtocolInterface (
+                    &mStmSmmCpuHandle,
+                    &gEfiSmMonitorInitProtocolGuid,
+                    EFI_NATIVE_INTERFACE,
+                    &mSmMonitorInitProtocol
+                    );
+  ASSERT_EFI_ERROR (Status);
+
+  //
+  // Register SMM End of DXE Event
+  //
+  Status = gSmst->SmmRegisterProtocolNotify (
+                    &gEfiSmmEndOfDxeProtocolGuid,
+                    SmmEndOfDxeEventNotify,
+                    &Registration
+                    );
+  ASSERT_EFI_ERROR (Status);
+}
+\r
+/**
+
+  Get STM state.
+
+  @return STM state
+
+**/
+EFI_SM_MONITOR_STATE
+EFIAPI
+GetMonitorState (
+  VOID
+  )
+{
+  //
+  // Report the module-level state as maintained by the monitor load path.
+  //
+  return (EFI_SM_MONITOR_STATE)mStmState;
+}
+\r
+/**
+
+  Handle single Resource to see if it can be merged into Record.
+
+  A TRUE return means Resource is fully accounted for by Record (either it
+  was already covered, or Record was widened/merged in place); FALSE means
+  the caller must keep scanning or append Resource as a new record.
+
+  @param Resource  A pointer to resource node to be added
+  @param Record    A pointer to record node to be merged
+
+  @retval TRUE  resource handled
+  @retval FALSE resource is not handled
+
+**/
+BOOLEAN
+HandleSingleResource (
+  IN STM_RSC  *Resource,
+  IN STM_RSC  *Record
+  )
+{
+  UINT64  ResourceLo;
+  UINT64  ResourceHi;
+  UINT64  RecordLo;
+  UINT64  RecordHi;
+
+  ResourceLo = 0;
+  ResourceHi = 0;
+  RecordLo   = 0;
+  RecordHi   = 0;
+
+  //
+  // Calling code is responsible for making sure that
+  // Resource->Header.RscType == (*Record)->Header.RscType
+  // thus we use just one of them as switch variable.
+  //
+  switch (Resource->Header.RscType) {
+  case MEM_RANGE:
+  case MMIO_RANGE:
+    // [Lo, Hi) ranges; attribute mismatch is only mergeable on an exact match.
+    ResourceLo = Resource->Mem.Base;
+    ResourceHi = Resource->Mem.Base + Resource->Mem.Length;
+    RecordLo   = Record->Mem.Base;
+    RecordHi   = Record->Mem.Base + Record->Mem.Length;
+    if (Resource->Mem.RWXAttributes != Record->Mem.RWXAttributes) {
+      if ((ResourceLo == RecordLo) && (ResourceHi == RecordHi)) {
+        // Identical range: union the RWX attributes in place.
+        Record->Mem.RWXAttributes = Resource->Mem.RWXAttributes | Record->Mem.RWXAttributes;
+        return TRUE;
+      } else {
+        return FALSE;
+      }
+    }
+    break;
+  case IO_RANGE:
+  case TRAPPED_IO_RANGE:
+    ResourceLo = (UINT64) Resource->Io.Base;
+    ResourceHi = (UINT64) Resource->Io.Base + (UINT64) Resource->Io.Length;
+    RecordLo   = (UINT64) Record->Io.Base;
+    RecordHi   = (UINT64) Record->Io.Base + (UINT64) Record->Io.Length;
+    break;
+  case PCI_CFG_RANGE:
+    // Only ranges on the exact same device path can interact at all.
+    if ((Resource->PciCfg.OriginatingBusNumber != Record->PciCfg.OriginatingBusNumber) ||
+        (Resource->PciCfg.LastNodeIndex != Record->PciCfg.LastNodeIndex)) {
+      return FALSE;
+    }
+    if (CompareMem (Resource->PciCfg.PciDevicePath, Record->PciCfg.PciDevicePath, sizeof(STM_PCI_DEVICE_PATH_NODE) * (Resource->PciCfg.LastNodeIndex + 1)) != 0) {
+      return FALSE;
+    }
+    ResourceLo = (UINT64) Resource->PciCfg.Base;
+    ResourceHi = (UINT64) Resource->PciCfg.Base + (UINT64) Resource->PciCfg.Length;
+    RecordLo   = (UINT64) Record->PciCfg.Base;
+    RecordHi   = (UINT64) Record->PciCfg.Base + (UINT64) Record->PciCfg.Length;
+    if (Resource->PciCfg.RWAttributes != Record->PciCfg.RWAttributes) {
+      if ((ResourceLo == RecordLo) && (ResourceHi == RecordHi)) {
+        Record->PciCfg.RWAttributes = Resource->PciCfg.RWAttributes | Record->PciCfg.RWAttributes;
+        return TRUE;
+      } else {
+        return FALSE;
+      }
+    }
+    break;
+  case MACHINE_SPECIFIC_REG:
+    //
+    // Special case - merge MSR masks in place.
+    //
+    if (Resource->Msr.MsrIndex != Record->Msr.MsrIndex) {
+      return FALSE;
+    }
+    Record->Msr.ReadMask  |= Resource->Msr.ReadMask;
+    Record->Msr.WriteMask |= Resource->Msr.WriteMask;
+    return TRUE;
+  default:
+    return FALSE;
+  }
+  //
+  // If resources are disjoint
+  //
+  if ((ResourceHi < RecordLo) || (ResourceLo > RecordHi)) {
+    return FALSE;
+  }
+
+  //
+  // If resource is consumed by record.
+  //
+  if ((ResourceLo >= RecordLo) && (ResourceHi <= RecordHi)) {
+    return TRUE;
+  }
+  //
+  // Resources are overlapping.
+  // Resource and record are merged.
+  //
+  ResourceLo = (ResourceLo < RecordLo) ? ResourceLo : RecordLo;
+  ResourceHi = (ResourceHi > RecordHi) ? ResourceHi : RecordHi;
+
+  // Write the widened [Lo, Hi) back into Record, per resource type.
+  switch (Resource->Header.RscType) {
+  case MEM_RANGE:
+  case MMIO_RANGE:
+    Record->Mem.Base   = ResourceLo;
+    Record->Mem.Length = ResourceHi - ResourceLo;
+    break;
+  case IO_RANGE:
+  case TRAPPED_IO_RANGE:
+    Record->Io.Base   = (UINT16) ResourceLo;
+    Record->Io.Length = (UINT16) (ResourceHi - ResourceLo);
+    break;
+  case PCI_CFG_RANGE:
+    Record->PciCfg.Base   = (UINT16) ResourceLo;
+    Record->PciCfg.Length = (UINT16) (ResourceHi - ResourceLo);
+    break;
+  default:
+    return FALSE;
+  }
+
+  return TRUE;
+}
+\r
+/**
+
+  Add resource node.
+
+  Walks the resource database trying to merge Resource into an existing
+  record of the same type (via HandleSingleResource); if no record absorbs
+  it, appends Resource just before the END_OF_RESOURCES node and re-writes
+  the END node after it.  Caller (AddPiResource) guarantees enough free
+  space is available.
+
+  @param Resource  A pointer to resource node to be added
+
+**/
+VOID
+AddSingleResource (
+  IN STM_RSC  *Resource
+  )
+{
+  STM_RSC  *Record;
+
+  Record = (STM_RSC *)mStmResourcesPtr;
+
+  while (TRUE) {
+    if (Record->Header.RscType == END_OF_RESOURCES) {
+      break;
+    }
+    //
+    // Go to next record if resource and record types don't match.
+    //
+    if (Resource->Header.RscType != Record->Header.RscType) {
+      Record = (STM_RSC *)((UINTN)Record + Record->Header.Length);
+      continue;
+    }
+    //
+    // Record is handled inside of procedure - don't adjust.
+    //
+    if (HandleSingleResource (Resource, Record)) {
+      return ;
+    }
+    Record = (STM_RSC *)((UINTN)Record + Record->Header.Length);
+  }
+
+  //
+  // Add resource to the end of area.
+  // The new record overwrites the current END node ...
+  //
+  CopyMem (
+    mStmResourcesPtr + mStmResourceSizeUsed - sizeof(mRscEndNode),
+    Resource,
+    Resource->Header.Length
+    );
+  // ... and a fresh END node is written immediately after it.
+  CopyMem (
+    mStmResourcesPtr + mStmResourceSizeUsed - sizeof(mRscEndNode) + Resource->Header.Length,
+    &mRscEndNode,
+    sizeof(mRscEndNode)
+    );
+  mStmResourceSizeUsed      += Resource->Header.Length;
+  mStmResourceSizeAvailable  = mStmResourceTotalSize - mStmResourceSizeUsed;
+
+  return ;
+}
+\r
+/**
+
+  Add resource list.
+
+  Feeds each entry of ResourceList to AddSingleResource, stopping at an
+  END_OF_RESOURCES node or after NumEntries entries.
+
+  @param ResourceList  A pointer to resource list to be added
+  @param NumEntries    Optional number of entries.
+                       If 0, list must be terminated by END_OF_RESOURCES.
+
+**/
+VOID
+AddResource (
+  IN STM_RSC  *ResourceList,
+  IN UINT32   NumEntries OPTIONAL
+  )
+{
+  UINT32   Remaining;
+  STM_RSC  *Current;
+
+  //
+  // NumEntries == 0 means "scan until the END_OF_RESOURCES terminator";
+  // model that as an effectively unbounded count.
+  //
+  Remaining = (NumEntries == 0) ? 0xFFFFFFFF : NumEntries;
+
+  Current = ResourceList;
+  while (Remaining > 0) {
+    if (Current->Header.RscType == END_OF_RESOURCES) {
+      return;
+    }
+    AddSingleResource (Current);
+    Current = (STM_RSC *)((UINTN)Current + Current->Header.Length);
+    Remaining--;
+  }
+}
+\r
+/**
+
+  Validate resource list.
+
+  Checks each entry's declared length against its type's structure size and
+  enforces per-type value limits before the list is accepted into the
+  database.
+
+  @param ResourceList  A pointer to resource list to be added
+  @param NumEntries    Optional number of entries.
+                       If 0, list must be terminated by END_OF_RESOURCES.
+
+  @retval TRUE  resource valid
+  @retval FALSE resource invalid
+
+**/
+BOOLEAN
+ValidateResource (
+  IN STM_RSC  *ResourceList,
+  IN UINT32   NumEntries OPTIONAL
+  )
+{
+  UINT32   Count;
+  UINTN    Index;
+  STM_RSC  *Resource;
+  UINTN    SubIndex;
+
+  //
+  // If NumEntries == 0 make it very big. Scan will be terminated by
+  // END_OF_RESOURCES.
+  //
+  if (NumEntries == 0) {
+    Count = 0xFFFFFFFF;
+  } else {
+    Count = NumEntries;
+  }
+
+  //
+  // Start from beginning of resource list.
+  //
+  Resource = ResourceList;
+
+  for (Index = 0; Index < Count; Index++) {
+    DEBUG ((DEBUG_ERROR, "ValidateResource (%d) - RscType(%x)\n", Index, Resource->Header.RscType));
+    //
+    // Validate resource.
+    //
+    switch (Resource->Header.RscType) {
+      case END_OF_RESOURCES:
+        if (Resource->Header.Length != sizeof (STM_RSC_END)) {
+          return FALSE;
+        }
+        //
+        // If we are passed actual number of resources to add,
+        // END_OF_RESOURCES structure between them is considered an
+        // error. If NumEntries == 0 END_OF_RESOURCES is a termination.
+        //
+        if (NumEntries != 0) {
+          return FALSE;
+        } else {
+          //
+          // If NumEntries == 0 and list reached end - return success.
+          //
+          return TRUE;
+        }
+        break;
+
+      case MEM_RANGE:
+      case MMIO_RANGE:
+        if (Resource->Header.Length != sizeof (STM_RSC_MEM_DESC)) {
+          return FALSE;
+        }
+
+        // RWX attribute mask must not exceed full (R|W|X) access.
+        if (Resource->Mem.RWXAttributes > FULL_ACCS) {
+          return FALSE;
+        }
+        break;
+
+      case IO_RANGE:
+      case TRAPPED_IO_RANGE:
+        if (Resource->Header.Length != sizeof (STM_RSC_IO_DESC)) {
+          return FALSE;
+        }
+
+        // I/O range must stay within the 16-bit port space.
+        if ((Resource->Io.Base + Resource->Io.Length) > 0xFFFF) {
+          return FALSE;
+        }
+        break;
+
+      case PCI_CFG_RANGE:
+        DEBUG ((DEBUG_ERROR, "ValidateResource - PCI (0x%02x, 0x%08x, 0x%02x, 0x%02x)\n", Resource->PciCfg.OriginatingBusNumber, Resource->PciCfg.LastNodeIndex, Resource->PciCfg.PciDevicePath[0].PciDevice, Resource->PciCfg.PciDevicePath[0].PciFunction));
+        // Length must cover the base descriptor plus LastNodeIndex extra path nodes.
+        if (Resource->Header.Length != sizeof (STM_RSC_PCI_CFG_DESC) + (sizeof(STM_PCI_DEVICE_PATH_NODE) * Resource->PciCfg.LastNodeIndex)) {
+          return FALSE;
+        }
+        // Each path node: device <= 0x1F, function <= 7 (PCI limits).
+        for (SubIndex = 0; SubIndex <= Resource->PciCfg.LastNodeIndex; SubIndex++) {
+          if ((Resource->PciCfg.PciDevicePath[SubIndex].PciDevice > 0x1F) || (Resource->PciCfg.PciDevicePath[SubIndex].PciFunction > 7)) {
+            return FALSE;
+          }
+        }
+        // Config range must stay within the 4 KB extended config space.
+        if ((Resource->PciCfg.Base + Resource->PciCfg.Length) > 0x1000) {
+          return FALSE;
+        }
+        break;
+
+      case MACHINE_SPECIFIC_REG:
+        if (Resource->Header.Length != sizeof (STM_RSC_MSR_DESC)) {
+          return FALSE;
+        }
+        break;
+
+      default :
+        DEBUG ((DEBUG_ERROR, "ValidateResource - Unknown RscType(%x)\n", Resource->Header.RscType));
+        return FALSE;
+    }
+    Resource = (STM_RSC *)((UINTN)Resource + Resource->Header.Length);
+  }
+  return TRUE;
+}
+\r
+/**
+
+  Get resource list.
+  EndResource is excluded.
+
+  Walks the list until an END_OF_RESOURCES node or NumEntries entries and
+  returns the number of bytes covered, not counting the END node itself.
+
+  @param ResourceList  A pointer to resource list to be added
+  @param NumEntries    Optional number of entries.
+                       If 0, list must be terminated by END_OF_RESOURCES.
+
+  @return Size, in bytes, of the resource list excluding END_OF_RESOURCES.
+
+**/
+UINTN
+GetResourceSize (
+  IN STM_RSC  *ResourceList,
+  IN UINT32   NumEntries OPTIONAL
+  )
+{
+  UINT32   Count;
+  UINTN    Index;
+  STM_RSC  *Resource;
+
+  //
+  // If NumEntries == 0 make it very big. Scan will be terminated by
+  // END_OF_RESOURCES.
+  //
+  if (NumEntries == 0) {
+    Count = 0xFFFFFFFF;
+  } else {
+    Count = NumEntries;
+  }
+
+  //
+  // Start from beginning of resource list.
+  //
+  Resource = ResourceList;
+
+  for (Index = 0; Index < Count; Index++) {
+    if (Resource->Header.RscType == END_OF_RESOURCES) {
+      break;
+    }
+    Resource = (STM_RSC *)((UINTN)Resource + Resource->Header.Length);
+  }
+
+  return (UINTN)Resource - (UINTN)ResourceList;
+}
+\r
+/**
+
+  Add resources in list to database. Allocate new memory areas as needed.
+
+  Validates the list, sizes it, then ensures the resource database buffer
+  exists and is large enough (allocating or growing it from SMRAM) before
+  merging the entries in via AddResource.
+
+  @param ResourceList  A pointer to resource list to be added
+  @param NumEntries    Optional number of entries.
+                       If 0, list must be terminated by END_OF_RESOURCES.
+
+  @retval EFI_SUCCESS            If resources are added
+  @retval EFI_INVALID_PARAMETER  If nested procedure detected resource failer
+  @retval EFI_OUT_OF_RESOURCES   If nested procedure returned it and we cannot allocate more areas.
+
+**/
+EFI_STATUS
+EFIAPI
+AddPiResource (
+  IN STM_RSC  *ResourceList,
+  IN UINT32   NumEntries OPTIONAL
+  )
+{
+  EFI_STATUS            Status;
+  UINTN                 ResourceSize;
+  EFI_PHYSICAL_ADDRESS  NewResource;
+  UINTN                 NewResourceSize;
+
+  DEBUG ((DEBUG_INFO, "AddPiResource - Enter\n"));
+
+  if (!ValidateResource (ResourceList, NumEntries)) {
+    return EFI_INVALID_PARAMETER;
+  }
+
+  // Payload size of the incoming list, excluding its END node.
+  ResourceSize = GetResourceSize (ResourceList, NumEntries);
+  DEBUG ((DEBUG_INFO, "ResourceSize - 0x%08x\n", ResourceSize));
+  if (ResourceSize == 0) {
+    return EFI_INVALID_PARAMETER;
+  }
+
+  if (mStmResourcesPtr == NULL) {
+    //
+    // First time allocation
+    // (room for the list plus the trailing END node, rounded to pages).
+    //
+    NewResourceSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (ResourceSize + sizeof(mRscEndNode)));
+    DEBUG ((DEBUG_INFO, "Allocate - 0x%08x\n", NewResourceSize));
+    Status = gSmst->SmmAllocatePages (
+                      AllocateAnyPages,
+                      EfiRuntimeServicesData,
+                      EFI_SIZE_TO_PAGES (NewResourceSize),
+                      &NewResource
+                      );
+    if (EFI_ERROR (Status)) {
+      return Status;
+    }
+
+    //
+    // Copy EndResource for intialization
+    //
+    mStmResourcesPtr = (UINT8 *)(UINTN)NewResource;
+    mStmResourceTotalSize = NewResourceSize;
+    CopyMem (mStmResourcesPtr, &mRscEndNode, sizeof(mRscEndNode));
+    mStmResourceSizeUsed      = sizeof(mRscEndNode);
+    mStmResourceSizeAvailable = mStmResourceTotalSize - sizeof(mRscEndNode);
+
+    //
+    // Let SmmCore change resource ptr
+    //
+    NotifyStmResourceChange (mStmResourcesPtr);
+  } else if (mStmResourceSizeAvailable < ResourceSize) {
+    //
+    // Need enlarge: grow by the shortfall, round to pages, copy the used
+    // portion into the new buffer, then free the old one.
+    //
+    NewResourceSize = mStmResourceTotalSize + (ResourceSize - mStmResourceSizeAvailable);
+    NewResourceSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (NewResourceSize));
+    DEBUG ((DEBUG_INFO, "ReAllocate - 0x%08x\n", NewResourceSize));
+    Status = gSmst->SmmAllocatePages (
+                      AllocateAnyPages,
+                      EfiRuntimeServicesData,
+                      EFI_SIZE_TO_PAGES (NewResourceSize),
+                      &NewResource
+                      );
+    if (EFI_ERROR (Status)) {
+      return Status;
+    }
+    CopyMem ((VOID *)(UINTN)NewResource, mStmResourcesPtr, mStmResourceSizeUsed);
+    mStmResourceSizeAvailable = NewResourceSize - mStmResourceSizeUsed;
+
+    gSmst->SmmFreePages (
+             (EFI_PHYSICAL_ADDRESS)(UINTN)mStmResourcesPtr,
+             EFI_SIZE_TO_PAGES (mStmResourceTotalSize)
+             );
+
+    mStmResourceTotalSize = NewResourceSize;
+    mStmResourcesPtr = (UINT8 *)(UINTN)NewResource;
+
+    //
+    // Let SmmCore change resource ptr
+    //
+    NotifyStmResourceChange (mStmResourcesPtr);
+  }
+
+  //
+  // Check duplication
+  // (AddResource merges overlapping/duplicate entries into existing records).
+  //
+  AddResource (ResourceList, NumEntries);
+
+  return EFI_SUCCESS;
+}
+\r
+/**
+
+  Delete resources in list from the database.
+
+  @param ResourceList  A pointer to resource list to be deleted.
+                       NULL means delete all resources.
+  @param NumEntries    Optional number of entries.
+                       If 0, list must be terminated by END_OF_RESOURCES.
+
+  @retval EFI_SUCCESS            If resources are deleted
+  @retval EFI_INVALID_PARAMETER  If nested procedure detected resource failure
+
+**/
+EFI_STATUS
+EFIAPI
+DeletePiResource (
+  IN STM_RSC  *ResourceList,
+  IN UINT32   NumEntries OPTIONAL
+  )
+{
+  if (ResourceList == NULL) {
+    //
+    // Delete all: reset the database to contain only the END_OF_RESOURCES node.
+    //
+    CopyMem (mStmResourcesPtr, &mRscEndNode, sizeof(mRscEndNode));
+    mStmResourceSizeUsed = sizeof(mRscEndNode);
+    mStmResourceSizeAvailable = mStmResourceTotalSize - sizeof(mRscEndNode);
+    return EFI_SUCCESS;
+  }
+
+  //
+  // Selective deletion is not implemented yet. (TBD)
+  //
+  ASSERT (FALSE);
+  return EFI_UNSUPPORTED;
+}
+\r
+/**
+
+  Get BIOS resources.
+
+  @param ResourceList  A pointer to resource list to be filled
+  @param ResourceSize  On input it means size of resource list input.
+                       On output it means size of resource list filled,
+                       or the size of resource list to be filled if size is too small.
+
+  @retval EFI_SUCCESS           If resources are returned.
+  @retval EFI_BUFFER_TOO_SMALL  If resource list buffer is too small to hold the whole resources.
+
+**/
+EFI_STATUS
+EFIAPI
+GetPiResource (
+  OUT     STM_RSC  *ResourceList,
+  IN OUT  UINT32   *ResourceSize
+  )
+{
+  UINTN  UsedSize;
+
+  //
+  // Always report the size actually required, whether or not we can copy.
+  //
+  UsedSize = mStmResourceSizeUsed;
+  if (*ResourceSize >= UsedSize) {
+    CopyMem (ResourceList, mStmResourcesPtr, UsedSize);
+    *ResourceSize = (UINT32)UsedSize;
+    return EFI_SUCCESS;
+  }
+
+  *ResourceSize = (UINT32)UsedSize;
+  return EFI_BUFFER_TOO_SMALL;
+}
+\r
+/**
+
+  Set valid bit for MSEG MSR.
+
+  Read-modify-write of IA32_SMM_MONITOR_CTL on the calling processor:
+  only the Valid bit is changed.
+
+  @param Buffer Ap function buffer. (not used)
+
+**/
+VOID
+EFIAPI
+EnableMsegMsr (
+  IN VOID  *Buffer
+  )
+{
+  MSR_IA32_SMM_MONITOR_CTL_REGISTER  SmmMonitorCtl;
+
+  SmmMonitorCtl.Uint64 = AsmReadMsr64 (MSR_IA32_SMM_MONITOR_CTL);
+  SmmMonitorCtl.Bits.Valid = 1;
+  AsmWriteMsr64 (MSR_IA32_SMM_MONITOR_CTL, SmmMonitorCtl.Uint64);
+}
+\r
+/**
+
+  Get 4K page aligned VMCS size.
+
+  @return 4K page aligned VMCS size
+
+**/
+UINT32
+GetVmcsSize (
+  VOID
+  )
+{
+  MSR_IA32_VMX_BASIC_REGISTER  VmxBasic;
+
+  //
+  // Read the VMCS region size from IA32_VMX_BASIC and round it up to 4KB.
+  //
+  VmxBasic.Uint64 = AsmReadMsr64 (MSR_IA32_VMX_BASIC);
+  return ALIGN_VALUE (VmxBasic.Bits.VmcsSize, SIZE_4KB);
+}
+\r
+/**
+
+  Check STM image size.
+
+  Verifies the image's MSEG header revision matches the CPU's expected
+  revision, then checks that the configured MSEG is large enough for the
+  image plus its declared dynamic memory requirements.
+
+  @param StmImage      STM image
+  @param StmImageSize  STM image size
+
+  @retval TRUE   check pass
+  @retval FALSE  check fail
+**/
+BOOLEAN
+StmCheckStmImage (
+  IN EFI_PHYSICAL_ADDRESS  StmImage,
+  IN UINTN                 StmImageSize
+  )
+{
+  UINTN                   MinMsegSize;
+  STM_HEADER              *StmHeader;
+  IA32_VMX_MISC_REGISTER  VmxMiscMsr;
+
+  //
+  // Check to see if STM image is compatible with CPU
+  //
+  StmHeader = (STM_HEADER *)(UINTN)StmImage;
+  VmxMiscMsr.Uint64 = AsmReadMsr64 (MSR_IA32_VMX_MISC);
+  if (StmHeader->HwStmHdr.MsegHeaderRevision != VmxMiscMsr.Bits.MsegRevisionIdentifier) {
+    DEBUG ((DEBUG_ERROR, "STM Image not compatible with CPU\n"));
+    DEBUG ((DEBUG_ERROR, " StmHeader->HwStmHdr.MsegHeaderRevision = %08x\n", StmHeader->HwStmHdr.MsegHeaderRevision));
+    DEBUG ((DEBUG_ERROR, " VmxMiscMsr.Bits.MsegRevisionIdentifier = %08x\n", VmxMiscMsr.Bits.MsegRevisionIdentifier));
+    return FALSE;
+  }
+
+  //
+  // Get Minimal required Mseg size: page-aligned static image + global
+  // dynamic memory + (per-CPU dynamic memory + two VMCS regions) per CPU.
+  //
+  MinMsegSize = (EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (StmHeader->SwStmHdr.StaticImageSize)) +
+                 StmHeader->SwStmHdr.AdditionalDynamicMemorySize +
+                 (StmHeader->SwStmHdr.PerProcDynamicMemorySize + GetVmcsSize () * 2) * gSmst->NumberOfCpus);
+  if (MinMsegSize < StmImageSize) {
+    MinMsegSize = StmImageSize;
+  }
+
+  if (StmHeader->HwStmHdr.Cr3Offset >= StmHeader->SwStmHdr.StaticImageSize) {
+    //
+    // We will create page table, just in case that SINIT does not create it.
+    // Reserve 6 pages at Cr3Offset for that page table.
+    //
+    if (MinMsegSize < StmHeader->HwStmHdr.Cr3Offset + EFI_PAGES_TO_SIZE(6)) {
+      MinMsegSize = StmHeader->HwStmHdr.Cr3Offset + EFI_PAGES_TO_SIZE(6);
+    }
+  }
+
+  //
+  // Check if it exceeds MSEG size
+  //
+  if (MinMsegSize > mMsegSize) {
+    DEBUG ((DEBUG_ERROR, "MSEG too small. Min MSEG Size = %08x Current MSEG Size = %08x\n", MinMsegSize, mMsegSize));
+    DEBUG ((DEBUG_ERROR, " StmHeader->SwStmHdr.StaticImageSize = %08x\n", StmHeader->SwStmHdr.StaticImageSize));
+    DEBUG ((DEBUG_ERROR, " StmHeader->SwStmHdr.AdditionalDynamicMemorySize = %08x\n", StmHeader->SwStmHdr.AdditionalDynamicMemorySize));
+    DEBUG ((DEBUG_ERROR, " StmHeader->SwStmHdr.PerProcDynamicMemorySize = %08x\n", StmHeader->SwStmHdr.PerProcDynamicMemorySize));
+    DEBUG ((DEBUG_ERROR, " VMCS Size = %08x\n", GetVmcsSize ()));
+    DEBUG ((DEBUG_ERROR, " Max CPUs = %08x\n", gSmst->NumberOfCpus));
+    DEBUG ((DEBUG_ERROR, " StmHeader->HwStmHdr.Cr3Offset = %08x\n", StmHeader->HwStmHdr.Cr3Offset));
+    return FALSE;
+  }
+
+  return TRUE;
+}
+\r
+/**
+
+  Load STM image to MSEG.
+
+  Zeroes MSEG, copies the image into it, and pre-builds the STM page table
+  at the Cr3Offset indicated by the image header.
+
+  @param StmImage      STM image
+  @param StmImageSize  STM image size
+
+**/
+VOID
+StmLoadStmImage (
+  IN EFI_PHYSICAL_ADDRESS  StmImage,
+  IN UINTN                 StmImageSize
+  )
+{
+  MSR_IA32_SMM_MONITOR_CTL_REGISTER  SmmMonitorCtl;
+  UINT32                             MsegBase;
+  STM_HEADER                         *StmHeader;
+
+  //
+  // Get MSEG base address from MSR_IA32_SMM_MONITOR_CTL
+  // (the MsegBase field holds bits 31:12 of the physical address).
+  //
+  SmmMonitorCtl.Uint64 = AsmReadMsr64 (MSR_IA32_SMM_MONITOR_CTL);
+  MsegBase = SmmMonitorCtl.Bits.MsegBase << 12;
+
+  //
+  // Zero all of MSEG base address
+  //
+  ZeroMem ((VOID *)(UINTN)MsegBase, mMsegSize);
+
+  //
+  // Copy STM Image into MSEG
+  //
+  CopyMem ((VOID *)(UINTN)MsegBase, (VOID *)(UINTN)StmImage, StmImageSize);
+
+  //
+  // STM Header is at the beginning of the STM Image
+  //
+  StmHeader = (STM_HEADER *)(UINTN)StmImage;
+
+  //
+  // Build the 4G page table inside MSEG at the header's Cr3Offset.
+  //
+  StmGen4GPageTable ((UINTN)MsegBase + StmHeader->HwStmHdr.Cr3Offset);
+}
+\r
+/**
+
+  Load STM image to MSEG.
+
+  @param StmImage      STM image
+  @param StmImageSize  STM image size
+
+  @retval EFI_SUCCESS           Load STM to MSEG successfully
+  @retval EFI_ACCESS_DENIED     STM monitor loading has been locked
+  @retval EFI_BUFFER_TOO_SMALL  MSEG is smaller than minimal requirement of STM image
+  @retval EFI_UNSUPPORTED       MSEG is not enabled
+
+**/
+EFI_STATUS
+EFIAPI
+LoadMonitor (
+  IN EFI_PHYSICAL_ADDRESS  StmImage,
+  IN UINTN                 StmImageSize
+  )
+{
+  MSR_IA32_SMM_MONITOR_CTL_REGISTER  SmmMonitorCtl;
+
+  if (mLockLoadMonitor) {
+    return EFI_ACCESS_DENIED;
+  }
+
+  //
+  // MSEG must be configured (non-zero base) before an STM can be loaded.
+  //
+  SmmMonitorCtl.Uint64 = AsmReadMsr64 (MSR_IA32_SMM_MONITOR_CTL);
+  if (SmmMonitorCtl.Bits.MsegBase == 0) {
+    return EFI_UNSUPPORTED;
+  }
+
+  //
+  // Reject images that do not match the CPU or do not fit in MSEG.
+  //
+  if (!StmCheckStmImage (StmImage, StmImageSize)) {
+    return EFI_BUFFER_TOO_SMALL;
+  }
+
+  // Record STM_HASH to PCR 0, just in case it is NOT TXT launch, we still need provide the evidence.
+  TpmMeasureAndLogData(
+    0, // PcrIndex
+    TXT_EVTYPE_STM_HASH, // EventType
+    NULL, // EventLog
+    0, // LogLen
+    (VOID *)(UINTN)StmImage, // HashData
+    StmImageSize // HashDataLen
+    );
+
+  StmLoadStmImage (StmImage, StmImageSize);
+
+  mStmState |= EFI_SM_MONITOR_STATE_ENABLED;
+
+  return EFI_SUCCESS;
+}
+\r
+/**
+  This function returns BIOS STM resource.
+  Produced by SmmStm.
+  Consumed by SmmMpService when Init.
+
+  @return BIOS STM resource (NULL until the first resource has been added)
+
+**/
+VOID *
+GetStmResource(
+  VOID
+  )
+{
+  return mStmResourcesPtr;
+}
+\r
+/**
+  This function notifies the STM of a resource database change by updating
+  every logical processor's TXT PSD to point at the new resource list.
+
+  @param StmResource BIOS STM resource
+
+**/
+VOID
+NotifyStmResourceChange (
+  VOID  *StmResource
+  )
+{
+  UINTN                         CpuIndex;
+  UINTN                         SaveStateBase;
+  TXT_PROCESSOR_SMM_DESCRIPTOR  *Psd;
+
+  for (CpuIndex = 0; CpuIndex < gSmst->NumberOfCpus; CpuIndex++) {
+    //
+    // The PSD lives at a fixed offset relative to each CPU's save state map.
+    //
+    SaveStateBase = (UINTN)gSmst->CpuSaveState[CpuIndex];
+    Psd = (TXT_PROCESSOR_SMM_DESCRIPTOR *)(SaveStateBase - SMRAM_SAVE_STATE_MAP_OFFSET + TXT_SMM_PSD_OFFSET);
+    Psd->BiosHwResourceRequirementsPtr = (UINT64)(UINTN)StmResource;
+  }
+}
+\r
+\r
+/**
+  This is STM setup BIOS callback.
+
+  Marks the monitor as activated in the module-level state mask.
+**/
+VOID
+EFIAPI
+SmmStmSetup (
+  VOID
+  )
+{
+  mStmState |= EFI_SM_MONITOR_STATE_ACTIVATED;
+}
+\r
+/**
+  This is STM teardown BIOS callback.
+
+  Clears the activated flag in the module-level state mask.
+**/
+VOID
+EFIAPI
+SmmStmTeardown (
+  VOID
+  )
+{
+  mStmState &= ~EFI_SM_MONITOR_STATE_ACTIVATED;
+}
+\r
--- /dev/null
+/** @file\r
+ SMM STM support\r
+\r
+ Copyright (c) 2015 - 2016, Intel Corporation. All rights reserved.<BR>\r
+ This program and the accompanying materials\r
+ are licensed and made available under the terms and conditions of the BSD License\r
+ which accompanies this distribution. The full text of the license may be found at\r
+ http://opensource.org/licenses/bsd-license.php.\r
+\r
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#ifndef _SMM_STM_H_\r
+#define _SMM_STM_H_\r
+\r
+#include <Protocol/SmMonitorInit.h>\r
+\r
+/**\r
+\r
+ Create 4G page table for STM.\r
+ 2M PAE page table in X64 version.\r
+\r
+ @param PageTableBase The page table base in MSEG\r
+\r
+**/\r
+VOID\r
+StmGen4GPageTable (\r
+ IN UINTN PageTableBase\r
+ );\r
+\r
+/**\r
+ This is the SMM exception handler.
+ Consumed by STM when exception happen.\r
+\r
+ @param Context STM protection exception stack frame\r
+\r
+ @return the EBX value for STM reference.\r
+ EBX = 0: resume SMM guest using register state found on exception stack.\r
+ EBX = 1 to 0x0F: EBX contains a BIOS error code which the STM must record in the\r
+ TXT.ERRORCODE register and subsequently reset the system via\r
+ TXT.CMD.SYS_RESET. The value of the TXT.ERRORCODE register is calculated as\r
+ follows: TXT.ERRORCODE = (EBX & 0x0F) | STM_CRASH_BIOS_PANIC\r
+ EBX = 0x10 to 0xFFFFFFFF - reserved, do not use.\r
+\r
+**/\r
+UINT32\r
+EFIAPI\r
+SmmStmExceptionHandler (\r
+ IN OUT STM_PROTECTION_EXCEPTION_STACK_FRAME Context\r
+ );\r
+\r
+\r
+/**\r
+\r
+ Get STM state.\r
+\r
+ @return STM state\r
+\r
+**/\r
+EFI_SM_MONITOR_STATE\r
+EFIAPI\r
+GetMonitorState (\r
+ VOID\r
+ );\r
+\r
+/**\r
+\r
+ Load STM image to MSEG.\r
+\r
+ @param StmImage STM image\r
+ @param StmImageSize STM image size\r
+\r
+ @retval EFI_SUCCESS Load STM to MSEG successfully\r
+ @retval EFI_BUFFER_TOO_SMALL MSEG is smaller than minimal requirement of STM image\r
+\r
+**/\r
+EFI_STATUS\r
+EFIAPI\r
+LoadMonitor (\r
+ IN EFI_PHYSICAL_ADDRESS StmImage,\r
+ IN UINTN StmImageSize\r
+ );\r
+\r
+/**\r
+\r
+ Add resources in list to database. Allocate new memory areas as needed.\r
+\r
+ @param ResourceList A pointer to resource list to be added\r
+ @param NumEntries Optional number of entries.\r
+ If 0, list must be terminated by END_OF_RESOURCES.\r
+\r
+ @retval EFI_SUCCESS If resources are added\r
+ @retval EFI_INVALID_PARAMETER If nested procedure detected resource failure
+ @retval EFI_OUT_OF_RESOURCES If nested procedure returned it and we cannot allocate more areas.\r
+\r
+**/\r
+EFI_STATUS\r
+EFIAPI\r
+AddPiResource (\r
+ IN STM_RSC *ResourceList,\r
+ IN UINT32 NumEntries OPTIONAL\r
+ );\r
+\r
+/**\r
+\r
+ Delete resources in list to database.\r
+\r
+ @param ResourceList A pointer to resource list to be deleted\r
+ NULL means delete all resources.\r
+ @param NumEntries Optional number of entries.\r
+ If 0, list must be terminated by END_OF_RESOURCES.\r
+\r
+ @retval EFI_SUCCESS If resources are deleted\r
+ @retval EFI_INVALID_PARAMETER If nested procedure detected resource failure
+\r
+**/\r
+EFI_STATUS\r
+EFIAPI\r
+DeletePiResource (\r
+ IN STM_RSC *ResourceList,\r
+ IN UINT32 NumEntries OPTIONAL\r
+ );\r
+\r
+/**\r
+\r
+ Get BIOS resources.\r
+\r
+ @param ResourceList A pointer to resource list to be filled\r
+ @param ResourceSize On input it means size of resource list input.\r
+ On output it means size of resource list filled,\r
+ or the size of resource list to be filled if size is too small.
+\r
+ @retval EFI_SUCCESS If resources are returned.\r
+ @retval EFI_BUFFER_TOO_SMALL If resource list buffer is too small to hold the whole resources.\r
+\r
+**/\r
+EFI_STATUS\r
+EFIAPI\r
+GetPiResource (\r
+ OUT STM_RSC *ResourceList,\r
+ IN OUT UINT32 *ResourceSize\r
+ );\r
+\r
+/**\r
+ This function initializes the STM configuration table.
+**/\r
+VOID\r
+StmSmmConfigurationTableInit (\r
+ VOID\r
+ );\r
+\r
+/**\r
+ This function notify STM resource change.\r
+\r
+ @param StmResource BIOS STM resource\r
+\r
+**/\r
+VOID\r
+NotifyStmResourceChange (\r
+ IN VOID *StmResource\r
+ );\r
+\r
+/**\r
+ This function return BIOS STM resource.\r
+\r
+ @return BIOS STM resource\r
+\r
+**/\r
+VOID *\r
+GetStmResource (\r
+ VOID\r
+ );\r
+\r
+#endif\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>\r
+# This program and the accompanying materials\r
+# are licensed and made available under the terms and conditions of the BSD License\r
+# which accompanies this distribution. The full text of the license may be found at\r
+# http://opensource.org/licenses/bsd-license.php.\r
+#\r
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+#\r
+# Module Name:\r
+#\r
+# SmiEntry.S\r
+#\r
+# Abstract:\r
+#\r
+# Code template of the SMI handler for a particular processor\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+# Symbols shared with (and some patched by) the C code.
+ASM_GLOBAL ASM_PFX(gcStmSmiHandlerTemplate)
+ASM_GLOBAL ASM_PFX(gcStmSmiHandlerSize)
+ASM_GLOBAL ASM_PFX(gcStmSmiHandlerOffset)
+ASM_GLOBAL ASM_PFX(gStmSmiCr3)
+ASM_GLOBAL ASM_PFX(gStmSmiStack)
+ASM_GLOBAL ASM_PFX(gStmSmbase)
+ASM_GLOBAL ASM_PFX(gStmXdSupported)
+ASM_GLOBAL ASM_PFX(gStmSmiHandlerIdtr)
+
+# Model-specific registers used below.
+.equ MSR_IA32_MISC_ENABLE, 0x1A0
+.equ MSR_EFER, 0xc0000080
+.equ MSR_EFER_XD, 0x800
+
+#
+# Constants relating to TXT_PROCESSOR_SMM_DESCRIPTOR
+#
+.equ DSC_OFFSET, 0xfb00
+.equ DSC_GDTPTR, 0x48
+.equ DSC_GDTSIZ, 0x50
+.equ DSC_CS, 0x14
+.equ DSC_DS, 0x16
+.equ DSC_SS, 0x18
+.equ DSC_OTHERSEG, 0x1a
+#
+# Constants relating to CPU State Save Area
+#
+.equ SSM_DR6, 0xffd0
+.equ SSM_DR7, 0xffc8
+
+# Selectors within the GDT referenced by the mode-switch code below.
+.equ PROTECT_MODE_CS, 0x08
+.equ PROTECT_MODE_DS, 0x20
+.equ LONG_MODE_CS, 0x38
+.equ TSS_SEGMENT, 0x40
+.equ GDT_SIZE, 0x50
+ .text\r
+\r
+ASM_PFX(gcStmSmiHandlerTemplate):
+
+_StmSmiEntryPoint:
+    #
+    # The encoding of BX in 16-bit addressing mode is the same as of RDI in 64-
+    # bit addressing mode. And that coincidence has been used in the following
+    # "64-bit like" 16-bit code. Be aware that once RDI is referenced as a
+    # base address register, it is actually BX that is referenced.
+    # Likewise, 66h/67h prefixes emitted by the 64-bit encodings select 32-bit
+    # operand/address size when this code executes in 16-bit SMM entry mode.
+    #
+    .byte 0xbb                              # mov bx, imm16
+    .word _StmGdtDesc - _StmSmiEntryPoint + 0x8000
+    #
+    # fix GDT descriptor
+    #
+    .byte 0x2e,0xa1                         # mov ax, cs:[offset16]
+    .word DSC_OFFSET + DSC_GDTSIZ
+    .byte 0x48                              # dec ax  (limit = size - 1)
+    .byte 0x2e
+    movl %eax, (%rdi)                       # mov cs:[bx], ax
+    .byte 0x66,0x2e,0xa1                    # mov eax, cs:[offset16]
+    .word DSC_OFFSET + DSC_GDTPTR
+    .byte 0x2e
+    movw %ax, 2(%rdi)                       # executes as: mov cs:[bx + 2], eax
+    .byte 0x66,0x2e
+    lgdt (%rdi)                             # lgdt fword ptr cs:[bx]
+    #
+    # Patch ProtectedMode Segment
+    #
+    .byte 0xb8                              # mov ax, imm16
+    .word PROTECT_MODE_CS
+    .byte 0x2e
+    movl %eax, -2(%rdi)                     # executes as: mov cs:[bx - 2], ax
+    #
+    # Patch ProtectedMode entry
+    #
+    .byte 0x66, 0xbf                        # mov edi, SMBASE
+ASM_PFX(gStmSmbase): .space 4
+    lea ((ProtectedMode - _StmSmiEntryPoint) + 0x8000)(%edi), %ax
+    .byte 0x2e
+    movw %ax, -6(%rdi)                      # executes as: mov cs:[bx - 6], eax
+    #
+    # Switch into ProtectedMode
+    #
+    movq %cr0, %rbx
+    .byte 0x66
+    andl $0x9ffafff3, %ebx                  # clear PG/CD/NW/TS/EM
+    .byte 0x66
+    orl $0x00000023, %ebx                   # set PE + NE + MP
+
+    movq %rbx, %cr0
+    .byte 0x66, 0xea                        # far jmp to the patched seg:offset
+    .space 6
+
+_StmGdtDesc: .space 6
+\r
+ProtectedMode:
+    # Reload data segments and switch to the per-CPU SMI stack (patched in).
+    movw $PROTECT_MODE_DS, %ax
+    movl %eax, %ds
+    movl %eax, %es
+    movl %eax, %fs
+    movl %eax, %gs
+    movl %eax, %ss
+    .byte 0xbc                              # mov esp, imm32
+ASM_PFX(gStmSmiStack): .space 4
+    jmp ProtFlatMode
+\r
+ProtFlatMode:
+    # Load the SMM page table root (imm32 patched in at gStmSmiCr3).
+    .byte 0xb8
+ASM_PFX(gStmSmiCr3): .space 4
+    movq %rax, %cr3
+    movl $0x668,%eax                        # as cr4.PGE is not set here, refresh cr3
+    movq %rax, %cr4                         # in PreModifyMtrrs() to flush TLB.
+# Load TSS
+    subl $8, %esp                           # reserve room in stack
+    sgdt (%rsp)
+    movl 2(%rsp), %eax                      # eax = GDT base
+    addl $8, %esp
+    movb $0x89, %dl
+    movb %dl, (TSS_SEGMENT + 5)(%rax)       # clear busy flag
+    movl $TSS_SEGMENT, %eax
+    ltr %ax
+
+# enable NXE if supported (imm8 patched at build: 1 = XD supported)
+    .byte 0xb0                              # mov al, imm8
+ASM_PFX(gStmXdSupported): .byte 1
+    cmpb $0, %al
+    jz SkipXd
+#
+# Check XD disable bit
+#
+    movl $MSR_IA32_MISC_ENABLE, %ecx
+    rdmsr
+    subl $4, %esp
+    pushq %rdx                              # save MSR_IA32_MISC_ENABLE[63-32]
+    testl $BIT2, %edx                       # MSR_IA32_MISC_ENABLE[34]
+    jz L13
+    andw $0x0FFFB, %dx                      # clear XD Disable bit if it is set
+    wrmsr
+L13:
+    movl $MSR_EFER, %ecx
+    rdmsr
+    orw $MSR_EFER_XD,%ax                    # enable NXE
+    wrmsr
+    jmp XdDone
+SkipXd:
+    subl $8, %esp                           # keep stack layout identical to XD path
+XdDone:
+
+    #
+    # Switch to LongMode
+    #
+    pushq $LONG_MODE_CS                     # push cs hardcoded here
+    call Base                               # push return address for retf later
+Base:
+    addl $(LongMode - Base), (%rsp)         # offset for far retf, seg is the 1st arg
+
+    movl $MSR_EFER, %ecx
+    rdmsr
+    orb $1,%ah                              # enable LME
+    wrmsr
+    movq %cr0, %rbx
+    orl $0x080010023, %ebx                  # enable paging + WP + NE + MP + PE
+    movq %rbx, %cr0
+    retf
+LongMode:                                   # long mode (64-bit code) starts here
+    movabsq $ASM_PFX(gStmSmiHandlerIdtr), %rax
+    lidt (%rax)
+    lea (DSC_OFFSET)(%rdi), %ebx            # rbx -> TXT_PROCESSOR_SMM_DESCRIPTOR
+    movw DSC_DS(%rbx), %ax
+    movl %eax,%ds
+    movw DSC_OTHERSEG(%rbx), %ax
+    movl %eax,%es
+    movl %eax,%fs
+    movl %eax,%gs
+    movw DSC_SS(%rbx), %ax
+    movl %eax,%ss
+\r
+CommonHandler:
+    movq 8(%rsp), %rbx                      # rbx <- CpuIndex
+    # Save FP registers
+
+    subq $0x200, %rsp                       # 512-byte FXSAVE area
+    .byte 0x48                              # FXSAVE64
+    fxsave (%rsp)
+
+    addq $-0x20, %rsp                       # 32-byte shadow space for MS ABI calls
+
+    movq %rbx, %rcx                         # rcx = CpuIndex (1st arg)
+    movabsq $ASM_PFX(CpuSmmDebugEntry), %rax
+    call *%rax
+
+    movq %rbx, %rcx
+    movabsq $ASM_PFX(SmiRendezvous), %rax
+    call *%rax
+
+    movq %rbx, %rcx
+    movabsq $ASM_PFX(CpuSmmDebugExit), %rax
+    call *%rax
+
+    addq $0x20, %rsp
+
+    #
+    # Restore FP registers
+    #
+    .byte 0x48                              # FXRSTOR64
+    fxrstor (%rsp)
+
+    addq $0x200, %rsp
+
+    # Restore the guest's XD Disable state saved on entry, if XD is supported.
+    movabsq $ASM_PFX(gStmXdSupported), %rax
+    movb (%rax), %al
+    cmpb $0, %al
+    jz L16
+    popq %rdx                               # get saved MSR_IA32_MISC_ENABLE[63-32]
+    testl $BIT2, %edx
+    jz L16
+    movl $MSR_IA32_MISC_ENABLE, %ecx
+    rdmsr
+    orw $BIT2, %dx                          # set XD Disable bit if it was set before entering into SMM
+    wrmsr
+
+L16:
+    rsm
+\r
+_StmSmiHandler:
+#
+# Check XD disable bit
+#
+    xorq %r8, %r8
+    movabsq $ASM_PFX(gStmXdSupported), %rax
+    movb (%rax), %al
+    cmpb $0, %al
+    jz StmXdDone
+    movl $MSR_IA32_MISC_ENABLE, %ecx
+    rdmsr
+    movq %rdx, %r8                          # save MSR_IA32_MISC_ENABLE[63-32]
+    testl $BIT2, %edx                       # MSR_IA32_MISC_ENABLE[34]
+    jz L14
+    andw $0x0FFFB, %dx                      # clear XD Disable bit if it is set
+    wrmsr
+L14:
+    movl $MSR_EFER, %ecx
+    rdmsr
+    orw $MSR_EFER_XD,%ax                    # enable NXE
+    wrmsr
+StmXdDone:
+    pushq %r8                               # CommonHandler pops this on exit
+
+    # below step is needed, because STM does not run above code.
+    # we have to run below code to set IDT/CR0/CR4
+    movabsq $ASM_PFX(gStmSmiHandlerIdtr), %rax
+    lidt (%rax)
+
+    movq %cr0, %rax
+    orl $0x80010023, %eax                   # enable paging + WP + NE + MP + PE
+    movq %rax, %cr0
+    movq %cr4, %rax
+    movl $0x668, %eax                       # as cr4.PGE is not set here, refresh cr3
+    movq %rax, %cr4                         # in PreModifyMtrrs() to flush TLB.
+    # STM init finish
+    jmp CommonHandler
+
+# Size of the template and offset of the STM entry, consumed by C code.
+ASM_PFX(gcStmSmiHandlerSize) : .word . - _StmSmiEntryPoint
+ASM_PFX(gcStmSmiHandlerOffset): .word _StmSmiHandler - _StmSmiEntryPoint
--- /dev/null
+;------------------------------------------------------------------------------ ;\r
+; Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>\r
+; This program and the accompanying materials\r
+; are licensed and made available under the terms and conditions of the BSD License\r
+; which accompanies this distribution. The full text of the license may be found at\r
+; http://opensource.org/licenses/bsd-license.php.\r
+;\r
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+;\r
+; Module Name:\r
+;\r
+; SmiEntry.asm\r
+;\r
+; Abstract:\r
+;\r
+; Code template of the SMI handler for a particular processor\r
+;\r
+;-------------------------------------------------------------------------------\r
+\r
+;
+; Variables referenced by C code
+;
+EXTERNDEF SmiRendezvous:PROC
+EXTERNDEF CpuSmmDebugEntry:PROC
+EXTERNDEF CpuSmmDebugExit:PROC
+EXTERNDEF gcStmSmiHandlerTemplate:BYTE
+EXTERNDEF gcStmSmiHandlerSize:WORD
+EXTERNDEF gcStmSmiHandlerOffset:WORD
+EXTERNDEF gStmSmiCr3:DWORD
+EXTERNDEF gStmSmiStack:DWORD
+EXTERNDEF gStmSmbase:DWORD
+EXTERNDEF gStmXdSupported:BYTE
+EXTERNDEF gStmSmiHandlerIdtr:FWORD
+
+; Model-specific registers used below.
+MSR_IA32_MISC_ENABLE  EQU  1A0h
+MSR_EFER              EQU  0c0000080h
+MSR_EFER_XD           EQU  0800h
+
+;
+; Constants relating to TXT_PROCESSOR_SMM_DESCRIPTOR
+;
+DSC_OFFSET    EQU  0fb00h
+DSC_GDTPTR    EQU  48h
+DSC_GDTSIZ    EQU  50h
+DSC_CS        EQU  14h
+DSC_DS        EQU  16h
+DSC_SS        EQU  18h
+DSC_OTHERSEG  EQU  1ah
+;
+; Constants relating to CPU State Save Area
+;
+SSM_DR6  EQU  0ffd0h
+SSM_DR7  EQU  0ffc8h
+
+; Selectors within the GDT referenced by the mode-switch code below.
+PROTECT_MODE_CS  EQU  08h
+PROTECT_MODE_DS  EQU  20h
+LONG_MODE_CS     EQU  38h
+TSS_SEGMENT      EQU  40h
+GDT_SIZE         EQU  50h
+\r
+ .code\r
+\r
+gcStmSmiHandlerTemplate LABEL BYTE
+
+_StmSmiEntryPoint:
+    ;
+    ; The encoding of BX in 16-bit addressing mode is the same as of RDI in 64-
+    ; bit addressing mode. And that coincidence has been used in the following
+    ; "64-bit like" 16-bit code. Be aware that once RDI is referenced as a
+    ; base address register, it is actually BX that is referenced.
+    ;
+    DB      0bbh                        ; mov bx, imm16
+    DW      offset _StmGdtDesc - _StmSmiEntryPoint + 8000h ; bx = GdtDesc offset
+; fix GDT descriptor
+    DB      2eh, 0a1h                   ; mov ax, cs:[offset16]
+    DW      DSC_OFFSET + DSC_GDTSIZ
+    DB      48h                         ; dec ax (limit = size - 1)
+    DB      2eh
+    mov     [rdi], eax                  ; mov cs:[bx], ax
+    DB      66h, 2eh, 0a1h              ; mov eax, cs:[offset16]
+    DW      DSC_OFFSET + DSC_GDTPTR
+    DB      2eh
+    mov     [rdi + 2], ax               ; mov cs:[bx + 2], eax
+    DB      66h, 2eh
+    lgdt    fword ptr [rdi]             ; lgdt fword ptr cs:[bx]
+; Patch ProtectedMode Segment
+    DB      0b8h                        ; mov ax, imm16
+    DW      PROTECT_MODE_CS             ; set AX for segment directly
+    DB      2eh
+    mov     [rdi - 2], eax              ; mov cs:[bx - 2], ax
+; Patch ProtectedMode entry
+    DB      66h, 0bfh                   ; mov edi, SMBASE
+gStmSmbase  DD ?
+    lea     ax, [edi + (@ProtectedMode - _StmSmiEntryPoint) + 8000h]
+    DB      2eh
+    mov     [rdi - 6], ax               ; mov cs:[bx - 6], eax
+; Switch into @ProtectedMode
+    mov     rbx, cr0
+    DB      66h
+    and     ebx, 9ffafff3h              ; clear PG/CD/NW/TS/EM
+    DB      66h
+    or      ebx, 00000023h              ; set PE + NE + MP
+
+    mov     cr0, rbx
+    DB      66h, 0eah                   ; far jmp to the patched seg:offset
+    DD      ?
+    DW      ?
+
+_StmGdtDesc FWORD ?
+@ProtectedMode:
+    ; Reload data segments and switch to the per-CPU SMI stack (patched in).
+    mov     ax, PROTECT_MODE_DS
+    mov     ds, ax
+    mov     es, ax
+    mov     fs, ax
+    mov     gs, ax
+    mov     ss, ax
+    DB      0bch                        ; mov esp, imm32
+gStmSmiStack  DD ?
+    jmp     ProtFlatMode
+\r
+ProtFlatMode:
+    DB      0b8h                        ; mov eax, offset gStmSmiCr3
+gStmSmiCr3  DD ?
+    mov     cr3, rax
+    mov     eax, 668h                   ; as cr4.PGE is not set here, refresh cr3
+    mov     cr4, rax                    ; in PreModifyMtrrs() to flush TLB.
+; Load TSS
+    sub     esp, 8                      ; reserve room in stack
+    sgdt    fword ptr [rsp]
+    mov     eax, [rsp + 2]              ; eax = GDT base
+    add     esp, 8
+    mov     dl, 89h
+    mov     [rax + TSS_SEGMENT + 5], dl ; clear busy flag
+    mov     eax, TSS_SEGMENT
+    ltr     ax
+
+; enable NXE if supported (imm8 patched at build: 1 = XD supported)
+    DB      0b0h                        ; mov al, imm8
+gStmXdSupported  DB 1
+    cmp     al, 0
+    jz      @SkipXd
+;
+; Check XD disable bit
+;
+    mov     ecx, MSR_IA32_MISC_ENABLE
+    rdmsr
+    sub     esp, 4
+    push    rdx                         ; save MSR_IA32_MISC_ENABLE[63-32]
+    test    edx, BIT2                   ; MSR_IA32_MISC_ENABLE[34]
+    jz      @f
+    and     dx, 0FFFBh                  ; clear XD Disable bit if it is set
+    wrmsr
+@@:
+    mov     ecx, MSR_EFER
+    rdmsr
+    or      ax, MSR_EFER_XD             ; enable NXE
+    wrmsr
+    jmp     @XdDone
+@SkipXd:
+    sub     esp, 8                      ; keep stack layout identical to XD path
+@XdDone:
+
+; Switch into @LongMode
+    push    LONG_MODE_CS                ; push cs hardcoded here
+    call    Base                        ; push return address for retf later
+Base:
+    add     dword ptr [rsp], @LongMode - Base; offset for far retf, seg is the 1st arg
+
+    mov     ecx, MSR_EFER
+    rdmsr
+    or      ah, 1                       ; enable LME
+    wrmsr
+    mov     rbx, cr0
+    or      ebx, 080010023h             ; enable paging + WP + NE + MP + PE
+    mov     cr0, rbx
+    retf
+@LongMode:                              ; long mode (64-bit code) starts here
+    mov     rax, offset gStmSmiHandlerIdtr
+    lidt    fword ptr [rax]
+    lea     ebx, [rdi + DSC_OFFSET]     ; rbx -> TXT_PROCESSOR_SMM_DESCRIPTOR
+    mov     ax, [rbx + DSC_DS]
+    mov     ds, eax
+    mov     ax, [rbx + DSC_OTHERSEG]
+    mov     es, eax
+    mov     fs, eax
+    mov     gs, eax
+    mov     ax, [rbx + DSC_SS]
+    mov     ss, eax
+\r
+CommonHandler:
+    ; MASM has no 0x prefix for hex literals; "[rsp + 0x08]" would not
+    ; assemble, so the offset is written with the h-suffix radix.
+    mov     rbx, [rsp + 08h]            ; rbx <- CpuIndex
+
+    ;
+    ; Save FP registers (512-byte FXSAVE area)
+    ;
+    sub     rsp, 200h
+    DB      48h                         ; FXSAVE64
+    fxsave  [rsp]
+
+    add     rsp, -20h                   ; 32-byte shadow space for the calls below
+
+    mov     rcx, rbx                    ; rcx = CpuIndex (1st arg)
+    mov     rax, CpuSmmDebugEntry
+    call    rax
+
+    mov     rcx, rbx
+    mov     rax, SmiRendezvous          ; rax <- absolute addr of SmiRendezvous
+    call    rax
+
+    mov     rcx, rbx
+    mov     rax, CpuSmmDebugExit
+    call    rax
+
+    add     rsp, 20h
+
+    ;
+    ; Restore FP registers
+    ;
+    DB      48h                         ; FXRSTOR64
+    fxrstor [rsp]
+
+    add     rsp, 200h
+
+    ; Restore the XD Disable state saved on entry, if XD is supported.
+    ; (ASM_PFX() is a GCC-ism; MASM uses the plain symbol name.)
+    mov     rax, offset gStmXdSupported
+    mov     al, [rax]
+    cmp     al, 0
+    jz      @f
+    pop     rdx                         ; get saved MSR_IA32_MISC_ENABLE[63-32]
+    test    edx, BIT2
+    jz      @f
+    mov     ecx, MSR_IA32_MISC_ENABLE
+    rdmsr
+    or      dx, BIT2                    ; set XD Disable bit if it was set before entering into SMM
+    wrmsr
+
+@@:
+    rsm
+\r
+_StmSmiHandler:
+;
+; Check XD disable bit
+;
+    xor     r8, r8
+    ; (ASM_PFX() is a GCC-ism; MASM uses the plain symbol name.)
+    mov     rax, offset gStmXdSupported
+    mov     al, [rax]
+    cmp     al, 0
+    jz      @StmXdDone
+    mov     ecx, MSR_IA32_MISC_ENABLE
+    rdmsr
+    mov     r8, rdx                     ; save MSR_IA32_MISC_ENABLE[63-32]
+    test    edx, BIT2                   ; MSR_IA32_MISC_ENABLE[34]
+    jz      @f
+    and     dx, 0FFFBh                  ; clear XD Disable bit if it is set
+    wrmsr
+@@:
+    mov     ecx, MSR_EFER
+    rdmsr
+    or      ax, MSR_EFER_XD             ; enable NXE
+    wrmsr
+@StmXdDone:
+    push    r8                          ; CommonHandler pops this on exit
+
+    ; below step is needed, because STM does not run above code.
+    ; we have to run below code to set IDT/CR0/CR4
+    mov     rax, offset gStmSmiHandlerIdtr
+    lidt    fword ptr [rax]
+
+    mov     rax, cr0
+    or      eax, 80010023h              ; enable paging + WP + NE + MP + PE
+    mov     cr0, rax
+    mov     rax, cr4
+    mov     eax, 668h                   ; as cr4.PGE is not set here, refresh cr3
+    mov     cr4, rax                    ; in PreModifyMtrrs() to flush TLB.
+    ; STM init finish
+    jmp     CommonHandler
+
+; Size of the template and offset of the STM entry, consumed by C code.
+gcStmSmiHandlerSize    DW $ - _StmSmiEntryPoint
+gcStmSmiHandlerOffset  DW _StmSmiHandler - _StmSmiEntryPoint
+\r
+ END\r
--- /dev/null
+;------------------------------------------------------------------------------ ;\r
+; Copyright (c) 2016, Intel Corporation. All rights reserved.<BR>\r
+; This program and the accompanying materials\r
+; are licensed and made available under the terms and conditions of the BSD License\r
+; which accompanies this distribution. The full text of the license may be found at\r
+; http://opensource.org/licenses/bsd-license.php.\r
+;\r
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+;\r
+; Module Name:\r
+;\r
+; SmiEntry.nasm\r
+;\r
+; Abstract:\r
+;\r
+; Code template of the SMI handler for a particular processor\r
+;\r
+;-------------------------------------------------------------------------------\r
+\r
+;\r
+; Variables referenced by C code
+;\r
+\r
+; Model-specific registers used below.
+%define MSR_IA32_MISC_ENABLE 0x1A0
+%define MSR_EFER      0xc0000080
+%define MSR_EFER_XD   0x800
+
+;
+; Constants relating to TXT_PROCESSOR_SMM_DESCRIPTOR
+;
+%define DSC_OFFSET 0xfb00
+%define DSC_GDTPTR 0x48
+%define DSC_GDTSIZ 0x50
+%define DSC_CS 0x14
+%define DSC_DS 0x16
+%define DSC_SS 0x18
+%define DSC_OTHERSEG 0x1a
+;
+; Constants relating to CPU State Save Area
+;
+%define SSM_DR6 0xffd0
+%define SSM_DR7 0xffc8
+
+; Selectors within the GDT referenced by the mode-switch code below.
+%define PROTECT_MODE_CS 0x8
+%define PROTECT_MODE_DS 0x20
+%define LONG_MODE_CS 0x38
+%define TSS_SEGMENT 0x40
+%define GDT_SIZE 0x50
+
+extern ASM_PFX(SmiRendezvous)
+extern ASM_PFX(gStmSmiHandlerIdtr)
+extern ASM_PFX(CpuSmmDebugEntry)
+extern ASM_PFX(CpuSmmDebugExit)
+
+global ASM_PFX(gStmSmbase)
+global ASM_PFX(gStmXdSupported)
+global ASM_PFX(gStmSmiStack)
+global ASM_PFX(gStmSmiCr3)
+global ASM_PFX(gcStmSmiHandlerTemplate)
+global ASM_PFX(gcStmSmiHandlerSize)
+global ASM_PFX(gcStmSmiHandlerOffset)
+
+    DEFAULT REL
+    SECTION .text
+\r
+; SMI entry starts in 16-bit real-address mode at SMBASE + 0x8000.
+BITS 16
+ASM_PFX(gcStmSmiHandlerTemplate):
+_StmSmiEntryPoint:
+    mov     bx, _StmGdtDesc - _StmSmiEntryPoint + 0x8000
+    ; Fix the GDT descriptor from TXT_PROCESSOR_SMM_DESCRIPTOR (limit = size - 1).
+    mov     ax, [cs:DSC_OFFSET + DSC_GDTSIZ]
+    dec     ax
+    mov     [cs:bx], ax
+    mov     eax, [cs:DSC_OFFSET + DSC_GDTPTR]
+    mov     [cs:bx + 2], eax
+o32 lgdt    [cs:bx]                     ; lgdt fword ptr cs:[bx]
+    ; Patch the far-jump selector and offset below with the real values.
+    mov     ax, PROTECT_MODE_CS
+    mov     [cs:bx-0x2],ax
+    DB      0x66, 0xbf                  ; mov edi, SMBASE
+ASM_PFX(gStmSmbase): DD 0
+    lea     eax, [edi + (@ProtectedMode - _StmSmiEntryPoint) + 0x8000]
+    mov     [cs:bx-0x6],eax
+    ; Switch into protected mode (set PE + NE + MP, clear PG/CD/NW/TS/EM).
+    mov     ebx, cr0
+    and     ebx, 0x9ffafff3
+    or      ebx, 0x23
+    mov     cr0, ebx
+    jmp     dword 0x0:0x0               ; seg:offset placeholder patched above
+_StmGdtDesc:
+    DW      0
+    DD      0
+\r
+BITS 32
+@ProtectedMode:
+    ; Reload data segments and switch to the per-CPU SMI stack (patched in).
+    mov     ax, PROTECT_MODE_DS
+o16 mov     ds, ax
+o16 mov     es, ax
+o16 mov     fs, ax
+o16 mov     gs, ax
+o16 mov     ss, ax
+    DB      0xbc                        ; mov esp, imm32
+ASM_PFX(gStmSmiStack): DD 0
+    jmp     ProtFlatMode
+\r
+BITS 64
+ProtFlatMode:
+    DB      0xb8                        ; mov eax, offset gStmSmiCr3
+ASM_PFX(gStmSmiCr3): DD 0
+    mov     cr3, rax
+    mov     eax, 0x668                  ; as cr4.PGE is not set here, refresh cr3
+    mov     cr4, rax                    ; in PreModifyMtrrs() to flush TLB.
+; Load TSS
+    sub     esp, 8                      ; reserve room in stack
+    sgdt    [rsp]
+    mov     eax, [rsp + 2]              ; eax = GDT base
+    add     esp, 8
+    mov     dl, 0x89
+    mov     [rax + TSS_SEGMENT + 5], dl ; clear busy flag
+    mov     eax, TSS_SEGMENT
+    ltr     ax
+
+; enable NXE if supported (imm8 patched at build: 1 = XD supported)
+    DB      0xb0                        ; mov al, imm8
+ASM_PFX(gStmXdSupported): DB 1
+    cmp     al, 0
+    jz      @SkipXd
+;
+; Check XD disable bit
+;
+    mov     ecx, MSR_IA32_MISC_ENABLE
+    rdmsr
+    sub     esp, 4
+    push    rdx                         ; save MSR_IA32_MISC_ENABLE[63-32]
+    test    edx, BIT2                   ; MSR_IA32_MISC_ENABLE[34]
+    jz      .0
+    and     dx, 0xFFFB                  ; clear XD Disable bit if it is set
+    wrmsr
+.0:
+    mov     ecx, MSR_EFER
+    rdmsr
+    or      ax, MSR_EFER_XD             ; enable NXE
+    wrmsr
+    jmp     @XdDone
+@SkipXd:
+    sub     esp, 8                      ; keep stack layout identical to XD path
+@XdDone:
+\r
+; Switch into @LongMode\r
+ push LONG_MODE_CS ; push cs hardcore here\r
+ call Base ; push return address for retf later\r
+Base:\r
+ add dword [rsp], @LongMode - Base; offset for far retf, seg is the 1st arg\r
+\r
+ mov ecx, MSR_EFER\r
+ rdmsr\r
+ or ah, 1 ; enable LME\r
+ wrmsr\r
+ mov rbx, cr0\r
+ or ebx, 0x80010023 ; enable paging + WP + NE + MP + PE\r
+ mov cr0, rbx\r
+ retf\r
+@LongMode: ; long mode (64-bit code) starts here\r
+ mov rax, ASM_PFX(gStmSmiHandlerIdtr)\r
+ lidt [rax]\r
+ lea ebx, [rdi + DSC_OFFSET]\r
+ mov ax, [rbx + DSC_DS]\r
+ mov ds, eax\r
+ mov ax, [rbx + DSC_OTHERSEG]\r
+ mov es, eax\r
+ mov fs, eax\r
+ mov gs, eax\r
+ mov ax, [rbx + DSC_SS]\r
+ mov ss, eax\r
+\r
;
; CommonHandler: common long-mode SMI handling path, shared by the non-STM
; entry above and the STM entry (_StmSmiHandler) below.
; On entry: [rsp+8] = CpuIndex; below that (pushed earlier) the saved high
; dword of MSR_IA32_MISC_ENABLE if XD is supported.
;
CommonHandler:
 mov rbx, [rsp + 0x08] ; rbx <- CpuIndex (callee-saved; live across all calls below)

 ;
 ; Save FP registers
 ;
 sub rsp, 0x200 ; FXSAVE64 area is 512 bytes
 DB 0x48 ; REX.W prefix: turns the next fxsave into FXSAVE64
 fxsave [rsp]

 add rsp, -0x20 ; shadow space for the EFIAPI (MS x64 ABI) calls below

 mov rcx, rbx ; arg1 = CpuIndex
 mov rax, CpuSmmDebugEntry
 call rax

 mov rcx, rbx ; arg1 = CpuIndex
 mov rax, SmiRendezvous ; rax <- absolute addr of SmiRendezvous
 call rax

 mov rcx, rbx ; arg1 = CpuIndex
 mov rax, CpuSmmDebugExit
 call rax

 add rsp, 0x20

 ;
 ; Restore FP registers
 ;
 DB 0x48 ; REX.W prefix: turns the next fxrstor into FXRSTOR64
 fxrstor [rsp]

 add rsp, 0x200

 mov rax, ASM_PFX(gStmXdSupported)
 mov al, [rax]
 cmp al, 0
 jz .1 ; XD not supported - nothing to restore
 pop rdx ; get saved MSR_IA32_MISC_ENABLE[63-32]
 test edx, BIT2
 jz .1 ; XD Disable was clear before SMM - leave it clear
 mov ecx, MSR_IA32_MISC_ENABLE
 rdmsr
 or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
 wrmsr

.1:
 rsm
+\r
;
; _StmSmiHandler: SMI entrypoint used when an STM is active - the STM
; delivers the SMI to the SMM guest here (see gcStmSmiHandlerOffset below),
; so the real-mode/protected-mode setup above is not executed.  Re-applies
; the XD handling and the IDT/CR0/CR4 setup, then joins CommonHandler.
;
_StmSmiHandler:
;
; Check XD disable bit
;
 xor r8, r8
 mov rax, ASM_PFX(gStmXdSupported)
 mov al, [rax]
 cmp al, 0
 jz @StmXdDone
 mov ecx, MSR_IA32_MISC_ENABLE
 rdmsr
 mov r8, rdx ; save MSR_IA32_MISC_ENABLE[63-32]
 test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34] = XD Disable
 jz .0
 and dx, 0xFFFB ; clear XD Disable bit if it is set
 wrmsr
.0:
 mov ecx, MSR_EFER
 rdmsr
 or ax, MSR_EFER_XD ; enable NXE
 wrmsr
@StmXdDone:
 push r8 ; saved high dword; popped by CommonHandler's restore path

 ; below step is needed, because STM does not run above code.
 ; we have to run below code to set IDT/CR0/CR4

 mov rax, ASM_PFX(gStmSmiHandlerIdtr)
 lidt [rax]

 mov rax, cr0
 or eax, 0x80010023 ; enable paging + WP + NE + MP + PE
 mov cr0, rax
 mov rax, cr4 ; (read value is discarded - eax is overwritten below)
 mov eax, 0x668 ; as cr4.PGE is not set here, refresh cr3
 mov cr4, rax ; in PreModifyMtrrs() to flush TLB.
 ; STM init finish
 jmp CommonHandler

; Exported size of the whole template and offset of the STM entry within it.
ASM_PFX(gcStmSmiHandlerSize) : DW $ - _StmSmiEntryPoint
ASM_PFX(gcStmSmiHandlerOffset) : DW _StmSmiHandler - _StmSmiEntryPoint
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>\r
+# This program and the accompanying materials\r
+# are licensed and made available under the terms and conditions of the BSD License\r
+# which accompanies this distribution. The full text of the license may be found at\r
+# http://opensource.org/licenses/bsd-license.php.\r
+#\r
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+#\r
+# Module Name:\r
+#\r
+# SmiException.S\r
+#\r
+# Abstract:\r
+#\r
+# Exception handlers used in SM mode\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+ASM_GLOBAL ASM_PFX(gcStmPsd)\r
+\r
+ASM_GLOBAL ASM_PFX(SmmStmExceptionHandler)\r
+ASM_GLOBAL ASM_PFX(SmmStmSetup)\r
+ASM_GLOBAL ASM_PFX(SmmStmTeardown)\r
+\r
+.equ CODE_SEL, 0x38\r
+.equ DATA_SEL, 0x20\r
+.equ TR_SEL, 0x40\r
+\r
+.equ MSR_IA32_MISC_ENABLE, 0x1A0\r
+.equ MSR_EFER, 0x0c0000080\r
+.equ MSR_EFER_XD, 0x0800\r
+\r
+ .data\r
+\r
+#\r
+# This structure serves as a template for all processors.\r
+#\r
+ASM_PFX(gcStmPsd):\r
+ .ascii "TXTPSSIG"\r
+ .word PSD_SIZE\r
+ .word 1 # Version\r
+ .long 0 # LocalApicId\r
+ .byte 0xF # Cr4Pse;Cr4Pae;Intel64Mode;ExecutionDisableOutsideSmrr\r
+ .byte 0 # BIOS to STM\r
+ .byte 0 # STM to BIOS\r
+ .byte 0\r
+ .word CODE_SEL\r
+ .word DATA_SEL\r
+ .word DATA_SEL\r
+ .word DATA_SEL\r
+ .word TR_SEL\r
+ .word 0\r
+ .quad 0 # SmmCr3\r
+ .quad ASM_PFX(_OnStmSetup)\r
+ .quad ASM_PFX(_OnStmTeardown)\r
+ .quad 0 # SmmSmiHandlerRip - SMM guest entrypoint\r
+ .quad 0 # SmmSmiHandlerRsp\r
+ .quad 0\r
+ .long 0\r
+ .long 0x80010100 # RequiredStmSmmRevId\r
+ .quad ASM_PFX(_OnException)\r
+ .quad 0 # ExceptionStack\r
+ .word DATA_SEL\r
+ .word 0x1F # ExceptionFilter\r
+ .long 0\r
+ .quad 0\r
+ .quad 0 # BiosHwResourceRequirementsPtr\r
+ .quad 0 # AcpiRsdp\r
+ .byte 0 # PhysicalAddressBits\r
+.equ PSD_SIZE, . - ASM_PFX(gcStmPsd)\r
+\r
+ .text\r
+#------------------------------------------------------------------------------\r
+# SMM Exception handlers\r
+#------------------------------------------------------------------------------\r
+\r
#------------------------------------------------------------------------------
# _OnException - STM protection exception handler entrypoint (referenced by
# gcStmPsd).  Passes the exception stack frame (current rsp) to the C handler
# SmmStmExceptionHandler, then hands its result back to the STM via VMCALL.
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(_OnException)
ASM_PFX(_OnException):
 movq %rsp, %rcx # arg1 = STM exception stack frame (EFIAPI/MS x64 ABI)
 subq $0x28, %rsp # shadow space + keep rsp 16-byte aligned at the call
 call ASM_PFX(SmmStmExceptionHandler)
 addq $0x28, %rsp
 movl %eax, %ebx # ebx = handler return code for the STM
 movl $4, %eax # STM service id (return from protection exception - confirm vs STM spec)
 .byte 0xf, 0x1, 0xc1 # VMCALL
 jmp . # VMCALL should not return; spin if it does
+\r
#------------------------------------------------------------------------------
# _OnStmSetup - STM setup entrypoint (referenced by gcStmPsd).
# Temporarily clears the XD-disable bit in IA32_MISC_ENABLE (saving bits
# 63:32 in r8), enables IA32_EFER.NXE, calls the C function SmmStmSetup,
# restores the prior XD-disable state, then resumes from SMM via RSM.
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(_OnStmSetup)
ASM_PFX(_OnStmSetup):
#
# Check XD disable bit
#
 xorq %r8, %r8
 movabsq $ASM_PFX(gStmXdSupported), %rax
 movb (%rax), %al
 cmpb $0, %al
 jz StmXdDone1 # XD not supported - skip MSR handling
 movl $MSR_IA32_MISC_ENABLE, %ecx
 rdmsr
 movq %rdx, %r8 # save MSR_IA32_MISC_ENABLE[63-32]
 testl $BIT2, %edx # MSR_IA32_MISC_ENABLE[34] = XD Disable
 jz L13
 andw $0x0FFFB, %dx # clear XD Disable bit if it is set
 wrmsr
L13:
 movl $MSR_EFER, %ecx
 rdmsr
 orw $MSR_EFER_XD,%ax # enable NXE
 wrmsr
StmXdDone1:
 pushq %r8 # keep saved high dword on the stack across the call

 subq $0x20, %rsp # shadow space for the EFIAPI (MS x64 ABI) call
 call ASM_PFX(SmmStmSetup)
 addq $0x20, %rsp # BUGFIX: was 'addq 0x20, %rsp' - without '$' AT&T syntax
 # treats 0x20 as a memory operand (load from absolute
 # address 0x20), corrupting rsp instead of popping the
 # shadow space

 movabsq $ASM_PFX(gStmXdSupported), %rax
 movb (%rax), %al
 cmpb $0, %al
 jz L14
 popq %rdx # get saved MSR_IA32_MISC_ENABLE[63-32]
 testl $BIT2, %edx
 jz L14
 movl $MSR_IA32_MISC_ENABLE, %ecx
 rdmsr
 orw $BIT2, %dx # set XD Disable bit if it was set before entering into SMM
 wrmsr
L14:

 rsm
+\r
#------------------------------------------------------------------------------
# _OnStmTeardown - STM teardown entrypoint (referenced by gcStmPsd).
# Mirrors _OnStmSetup: temporarily clears the XD-disable bit in
# IA32_MISC_ENABLE (saving bits 63:32 in r8), enables IA32_EFER.NXE, calls
# SmmStmTeardown, restores the prior XD-disable state, then resumes via RSM.
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(_OnStmTeardown)
ASM_PFX(_OnStmTeardown):
#
# Check XD disable bit
#
 xorq %r8, %r8
 movabsq $ASM_PFX(gStmXdSupported), %rax
 movb (%rax), %al
 cmpb $0, %al
 jz StmXdDone2 # XD not supported - skip MSR handling
 movl $MSR_IA32_MISC_ENABLE, %ecx
 rdmsr
 movq %rdx, %r8 # save MSR_IA32_MISC_ENABLE[63-32]
 testl $BIT2, %edx # MSR_IA32_MISC_ENABLE[34] = XD Disable
 jz L15
 andw $0x0FFFB, %dx # clear XD Disable bit if it is set
 wrmsr
L15:
 movl $MSR_EFER, %ecx
 rdmsr
 orw $MSR_EFER_XD,%ax # enable NXE
 wrmsr
StmXdDone2:
 pushq %r8 # keep saved high dword on the stack across the call

 subq $0x20, %rsp # shadow space for the EFIAPI (MS x64 ABI) call
 call ASM_PFX(SmmStmTeardown)
 addq $0x20, %rsp

 movabsq $ASM_PFX(gStmXdSupported), %rax
 movb (%rax), %al
 cmpb $0, %al
 jz L16
 popq %rdx # get saved MSR_IA32_MISC_ENABLE[63-32]
 testl $BIT2, %edx
 jz L16
 movl $MSR_IA32_MISC_ENABLE, %ecx
 rdmsr
 orw $BIT2, %dx # set XD Disable bit if it was set before entering into SMM
 wrmsr
L16:

 rsm
+\r
--- /dev/null
+;------------------------------------------------------------------------------ ;\r
+; Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>\r
+; This program and the accompanying materials\r
+; are licensed and made available under the terms and conditions of the BSD License\r
+; which accompanies this distribution. The full text of the license may be found at\r
+; http://opensource.org/licenses/bsd-license.php.\r
+;\r
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+;\r
+; Module Name:\r
+;\r
+; SmiException.asm\r
+;\r
+; Abstract:\r
+;\r
+; Exception handlers used in SM mode\r
+;\r
+;-------------------------------------------------------------------------------\r
+\r
+EXTERNDEF gcStmPsd:BYTE\r
+\r
+EXTERNDEF SmmStmExceptionHandler:PROC\r
+EXTERNDEF SmmStmSetup:PROC\r
+EXTERNDEF SmmStmTeardown:PROC\r
+EXTERNDEF gStmXdSupported:BYTE\r
+\r
+CODE_SEL EQU 38h\r
+DATA_SEL EQU 20h\r
+TR_SEL EQU 40h\r
+\r
+MSR_IA32_MISC_ENABLE EQU 1A0h\r
+MSR_EFER EQU 0c0000080h\r
+MSR_EFER_XD EQU 0800h\r
+\r
+ .data\r
+\r
+;\r
+; This structure serves as a template for all processors.\r
+;\r
+gcStmPsd LABEL BYTE\r
+ DB 'TXTPSSIG'\r
+ DW PSD_SIZE\r
+ DW 1 ; Version\r
+ DD 0 ; LocalApicId\r
+ DB 0Fh ; Cr4Pse;Cr4Pae;Intel64Mode;ExecutionDisableOutsideSmrr\r
+ DB 0 ; BIOS to STM\r
+ DB 0 ; STM to BIOS\r
+ DB 0\r
+ DW CODE_SEL\r
+ DW DATA_SEL\r
+ DW DATA_SEL\r
+ DW DATA_SEL\r
+ DW TR_SEL\r
+ DW 0\r
+ DQ 0 ; SmmCr3\r
+ DQ _OnStmSetup\r
+ DQ _OnStmTeardown\r
+ DQ 0 ; SmmSmiHandlerRip - SMM guest entrypoint\r
+ DQ 0 ; SmmSmiHandlerRsp\r
+ DQ 0\r
+ DD 0\r
+ DD 80010100h ; RequiredStmSmmRevId\r
+ DQ _OnException\r
+ DQ 0 ; ExceptionStack\r
+ DW DATA_SEL\r
+ DW 01Fh ; ExceptionFilter\r
+ DD 0\r
+ DQ 0\r
+ DQ 0 ; BiosHwResourceRequirementsPtr\r
+ DQ 0 ; AcpiRsdp\r
+ DB 0 ; PhysicalAddressBits\r
+PSD_SIZE = $ - offset gcStmPsd\r
+\r
+ .code\r
+;------------------------------------------------------------------------------\r
+; SMM Exception handlers\r
+;------------------------------------------------------------------------------\r
;------------------------------------------------------------------------------
; _OnException - STM protection exception handler entrypoint (referenced by
; gcStmPsd).  Passes the exception stack frame (current rsp) to the C handler
; SmmStmExceptionHandler, then hands its result back to the STM via VMCALL.
;------------------------------------------------------------------------------
_OnException PROC
 mov rcx, rsp ; arg1 = STM exception stack frame (MS x64 ABI)
 add rsp, -28h ; shadow space + keep rsp 16-byte aligned at the call
 call SmmStmExceptionHandler
 add rsp, 28h
 mov ebx, eax ; ebx = handler return code for the STM
 mov eax, 4 ; STM service id (return from protection exception - confirm vs STM spec)
 DB 0fh, 01h, 0c1h ; VMCALL
 jmp $ ; VMCALL should not return; spin if it does
_OnException ENDP
+\r
;------------------------------------------------------------------------------
; _OnStmSetup - STM setup entrypoint (referenced by gcStmPsd).
; Temporarily clears the XD-disable bit in IA32_MISC_ENABLE (saving bits
; 63:32 in r8), enables IA32_EFER.NXE, calls the C function SmmStmSetup,
; restores the prior XD-disable state, then resumes from SMM via RSM.
;------------------------------------------------------------------------------
_OnStmSetup PROC
;
; Check XD disable bit
;
 xor r8, r8
 mov rax, offset gStmXdSupported ; BUGFIX: was "offset ASM_PFX(gStmXdSupported)" -
 ; ASM_PFX is a GAS/NASM build macro and is
 ; undefined under MASM; the symbol is declared
 ; plain via EXTERNDEF above
 mov al, [rax]
 cmp al, 0
 jz @StmXdDone1 ; XD not supported - skip MSR handling
 mov ecx, MSR_IA32_MISC_ENABLE
 rdmsr
 mov r8, rdx ; save MSR_IA32_MISC_ENABLE[63-32]
 test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34] = XD Disable
 jz @f
 and dx, 0FFFBh ; clear XD Disable bit if it is set
 wrmsr
@@:
 mov ecx, MSR_EFER
 rdmsr
 or ax, MSR_EFER_XD ; enable NXE
 wrmsr
@StmXdDone1:
 push r8 ; keep saved high dword on the stack across the call

 add rsp, -20h ; shadow space for the MS x64 ABI call
 call SmmStmSetup
 add rsp, 20h

 mov rax, offset gStmXdSupported ; BUGFIX: same ASM_PFX removal as above
 mov al, [rax]
 cmp al, 0
 jz @f
 pop rdx ; get saved MSR_IA32_MISC_ENABLE[63-32]
 test edx, BIT2
 jz @f
 mov ecx, MSR_IA32_MISC_ENABLE
 rdmsr
 or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
 wrmsr
@@:

 rsm
_OnStmSetup ENDP
+\r
;------------------------------------------------------------------------------
; _OnStmTeardown - STM teardown entrypoint (referenced by gcStmPsd).
; Mirrors _OnStmSetup: temporarily clears the XD-disable bit in
; IA32_MISC_ENABLE (saving bits 63:32 in r8), enables IA32_EFER.NXE, calls
; SmmStmTeardown, restores the prior XD-disable state, then resumes via RSM.
;------------------------------------------------------------------------------
_OnStmTeardown PROC
;
; Check XD disable bit
;
 xor r8, r8
 mov rax, offset gStmXdSupported ; BUGFIX: was "offset ASM_PFX(gStmXdSupported)" -
 ; ASM_PFX is a GAS/NASM build macro and is
 ; undefined under MASM; the symbol is declared
 ; plain via EXTERNDEF above
 mov al, [rax]
 cmp al, 0
 jz @StmXdDone2 ; XD not supported - skip MSR handling
 mov ecx, MSR_IA32_MISC_ENABLE
 rdmsr
 mov r8, rdx ; save MSR_IA32_MISC_ENABLE[63-32]
 test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34] = XD Disable
 jz @f
 and dx, 0FFFBh ; clear XD Disable bit if it is set
 wrmsr
@@:
 mov ecx, MSR_EFER
 rdmsr
 or ax, MSR_EFER_XD ; enable NXE
 wrmsr
@StmXdDone2:
 push r8 ; keep saved high dword on the stack across the call

 add rsp, -20h ; shadow space for the MS x64 ABI call
 call SmmStmTeardown
 add rsp, 20h

 mov rax, offset gStmXdSupported ; BUGFIX: same ASM_PFX removal as above
 mov al, [rax]
 cmp al, 0
 jz @f
 pop rdx ; get saved MSR_IA32_MISC_ENABLE[63-32]
 test edx, BIT2
 jz @f
 mov ecx, MSR_IA32_MISC_ENABLE
 rdmsr
 or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
 wrmsr
@@:

 rsm
_OnStmTeardown ENDP
+\r
+ END\r
--- /dev/null
+;------------------------------------------------------------------------------ ;\r
+; Copyright (c) 2016, Intel Corporation. All rights reserved.<BR>\r
+; This program and the accompanying materials\r
+; are licensed and made available under the terms and conditions of the BSD License\r
+; which accompanies this distribution. The full text of the license may be found at\r
+; http://opensource.org/licenses/bsd-license.php.\r
+;\r
+; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+;\r
+; Module Name:\r
+;\r
+; SmiException.nasm\r
+;\r
+; Abstract:\r
+;\r
+; Exception handlers used in SM mode\r
+;\r
+;-------------------------------------------------------------------------------\r
+\r
+global ASM_PFX(gcStmPsd)\r
+\r
+extern ASM_PFX(SmmStmExceptionHandler)\r
+extern ASM_PFX(SmmStmSetup)\r
+extern ASM_PFX(SmmStmTeardown)\r
+extern ASM_PFX(gStmXdSupported)\r
+extern ASM_PFX(gStmSmiHandlerIdtr)\r
+\r
+%define MSR_IA32_MISC_ENABLE 0x1A0\r
+%define MSR_EFER 0xc0000080\r
+%define MSR_EFER_XD 0x800\r
+\r
+CODE_SEL equ 0x38\r
+DATA_SEL equ 0x20\r
+TR_SEL equ 0x40\r
+\r
+ SECTION .data\r
+\r
+;\r
+; This structure serves as a template for all processors.\r
+;\r
+ASM_PFX(gcStmPsd):\r
+ DB 'TXTPSSIG'\r
+ DW PSD_SIZE\r
+ DW 1 ; Version\r
+ DD 0 ; LocalApicId\r
+ DB 0x0F ; Cr4Pse;Cr4Pae;Intel64Mode;ExecutionDisableOutsideSmrr\r
+ DB 0 ; BIOS to STM\r
+ DB 0 ; STM to BIOS\r
+ DB 0\r
+ DW CODE_SEL\r
+ DW DATA_SEL\r
+ DW DATA_SEL\r
+ DW DATA_SEL\r
+ DW TR_SEL\r
+ DW 0\r
+ DQ 0 ; SmmCr3\r
+ DQ ASM_PFX(OnStmSetup)\r
+ DQ ASM_PFX(OnStmTeardown)\r
+ DQ 0 ; SmmSmiHandlerRip - SMM guest entrypoint\r
+ DQ 0 ; SmmSmiHandlerRsp\r
+ DQ 0\r
+ DD 0\r
+ DD 0x80010100 ; RequiredStmSmmRevId\r
+ DQ ASM_PFX(OnException)\r
+ DQ 0 ; ExceptionStack\r
+ DW DATA_SEL\r
+ DW 0x01F ; ExceptionFilter\r
+ DD 0\r
+ DQ 0\r
+ DQ 0 ; BiosHwResourceRequirementsPtr\r
+ DQ 0 ; AcpiRsdp\r
+ DB 0 ; PhysicalAddressBits\r
+PSD_SIZE equ $ - ASM_PFX(gcStmPsd)\r
+\r
+ DEFAULT REL\r
+ SECTION .text\r
+;------------------------------------------------------------------------------\r
+; SMM Exception handlers\r
+;------------------------------------------------------------------------------\r
;------------------------------------------------------------------------------
; OnException - STM protection exception handler entrypoint (referenced by
; gcStmPsd).  Passes the exception stack frame (current rsp) to the C handler
; SmmStmExceptionHandler, then hands its result back to the STM via VMCALL.
;------------------------------------------------------------------------------
global ASM_PFX(OnException)
ASM_PFX(OnException):
 mov rcx, rsp ; arg1 = STM exception stack frame (MS x64 ABI)
 add rsp, -0x28 ; shadow space + keep rsp 16-byte aligned at the call
 call ASM_PFX(SmmStmExceptionHandler)
 add rsp, 0x28
 mov ebx, eax ; ebx = handler return code for the STM
 mov eax, 4 ; STM service id (return from protection exception - confirm vs STM spec)
 DB 0x0f, 0x01, 0x0c1 ; VMCALL
 jmp $ ; VMCALL should not return; spin if it does
+\r
;------------------------------------------------------------------------------
; OnStmSetup - STM setup entrypoint (referenced by gcStmPsd).
; Temporarily clears the XD-disable bit in IA32_MISC_ENABLE (saving bits
; 63:32 in r8), enables IA32_EFER.NXE, calls the C function SmmStmSetup,
; restores the prior XD-disable state, then resumes from SMM via RSM.
;------------------------------------------------------------------------------
global ASM_PFX(OnStmSetup)
ASM_PFX(OnStmSetup):
;
; Check XD disable bit
;
 xor r8, r8
 mov rax, ASM_PFX(gStmXdSupported)
 mov al, [rax]
 cmp al, 0
 jz @StmXdDone1 ; XD not supported - skip MSR handling
 mov ecx, MSR_IA32_MISC_ENABLE
 rdmsr
 mov r8, rdx ; save MSR_IA32_MISC_ENABLE[63-32]
 test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34] = XD Disable
 jz .01
 and dx, 0xFFFB ; clear XD Disable bit if it is set
 wrmsr
.01:
 mov ecx, MSR_EFER
 rdmsr
 or ax, MSR_EFER_XD ; enable NXE
 wrmsr
@StmXdDone1:
 push r8 ; keep saved high dword on the stack across the call

 add rsp, -0x20 ; shadow space for the MS x64 ABI call
 call ASM_PFX(SmmStmSetup)
 add rsp, 0x20

 mov rax, ASM_PFX(gStmXdSupported)
 mov al, [rax]
 cmp al, 0
 jz .11
 pop rdx ; get saved MSR_IA32_MISC_ENABLE[63-32]
 test edx, BIT2
 jz .11
 mov ecx, MSR_IA32_MISC_ENABLE
 rdmsr
 or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
 wrmsr

.11:
 rsm
+\r
;------------------------------------------------------------------------------
; OnStmTeardown - STM teardown entrypoint (referenced by gcStmPsd).
; Mirrors OnStmSetup: temporarily clears the XD-disable bit in
; IA32_MISC_ENABLE (saving bits 63:32 in r8), enables IA32_EFER.NXE, calls
; SmmStmTeardown, restores the prior XD-disable state, then resumes via RSM.
;------------------------------------------------------------------------------
global ASM_PFX(OnStmTeardown)
ASM_PFX(OnStmTeardown):
;
; Check XD disable bit
;
 xor r8, r8
 mov rax, ASM_PFX(gStmXdSupported)
 mov al, [rax]
 cmp al, 0
 jz @StmXdDone2 ; XD not supported - skip MSR handling
 mov ecx, MSR_IA32_MISC_ENABLE
 rdmsr
 mov r8, rdx ; save MSR_IA32_MISC_ENABLE[63-32]
 test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34] = XD Disable
 jz .02
 and dx, 0xFFFB ; clear XD Disable bit if it is set
 wrmsr
.02:
 mov ecx, MSR_EFER
 rdmsr
 or ax, MSR_EFER_XD ; enable NXE
 wrmsr
@StmXdDone2:
 push r8 ; keep saved high dword on the stack across the call

 add rsp, -0x20 ; shadow space for the MS x64 ABI call
 call ASM_PFX(SmmStmTeardown)
 add rsp, 0x20

 mov rax, ASM_PFX(gStmXdSupported)
 mov al, [rax]
 cmp al, 0
 jz .12
 pop rdx ; get saved MSR_IA32_MISC_ENABLE[63-32]
 test edx, BIT2
 jz .12
 mov ecx, MSR_IA32_MISC_ENABLE
 rdmsr
 or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
 wrmsr

.12:
 rsm
+\r
--- /dev/null
+/** @file\r
+ SMM STM support functions\r
+\r
+ Copyright (c) 2015 - 2016, Intel Corporation. All rights reserved.<BR>\r
+ This program and the accompanying materials\r
+ are licensed and made available under the terms and conditions of the BSD License\r
+ which accompanies this distribution. The full text of the license may be found at\r
+ http://opensource.org/licenses/bsd-license.php.\r
+\r
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include <PiSmm.h>\r
+#include <Library/DebugLib.h>\r
+\r
+#include "SmmStm.h"\r
+\r
+///\r
+/// Page Table Entry\r
+///\r
+#define IA32_PG_P BIT0\r
+#define IA32_PG_RW BIT1\r
+#define IA32_PG_PS BIT7\r
+\r
+/**\r
+\r
+ Create 4G page table for STM.\r
+ 2M PAE page table in X64 version.\r
+\r
+ @param PageTableBase The page table base in MSEG\r
+\r
+**/\r
+VOID\r
+StmGen4GPageTable (\r
+ IN UINTN PageTableBase\r
+ )\r
+{\r
+ UINTN Index;\r
+ UINTN SubIndex;\r
+ UINT64 *Pde;\r
+ UINT64 *Pte;\r
+ UINT64 *Pml4;\r
+\r
+ Pml4 = (UINT64*)(UINTN)PageTableBase;\r
+ PageTableBase += SIZE_4KB;\r
+ *Pml4 = PageTableBase | IA32_PG_RW | IA32_PG_P;\r
+\r
+ Pde = (UINT64*)(UINTN)PageTableBase;\r
+ PageTableBase += SIZE_4KB;\r
+ Pte = (UINT64 *)(UINTN)PageTableBase;\r
+\r
+ for (Index = 0; Index < 4; Index++) {\r
+ *Pde = PageTableBase | IA32_PG_RW | IA32_PG_P;\r
+ Pde++;\r
+ PageTableBase += SIZE_4KB;\r
+\r
+ for (SubIndex = 0; SubIndex < SIZE_4KB / sizeof (*Pte); SubIndex++) {\r
+ *Pte = (((Index << 9) + SubIndex) << 21) | IA32_PG_PS | IA32_PG_RW | IA32_PG_P;\r
+ Pte++;\r
+ }\r
+ }\r
+}\r
+\r
+/**\r
+ This is SMM exception handle.\r
+ Consumed by STM when exception happen.\r
+\r
+ @param Context STM protection exception stack frame\r
+\r
+ @return the EBX value for STM reference.\r
+ EBX = 0: resume SMM guest using register state found on exception stack.\r
+ EBX = 1 to 0x0F: EBX contains a BIOS error code which the STM must record in the\r
+ TXT.ERRORCODE register and subsequently reset the system via\r
+ TXT.CMD.SYS_RESET. The value of the TXT.ERRORCODE register is calculated as\r
+ follows: TXT.ERRORCODE = (EBX & 0x0F) | STM_CRASH_BIOS_PANIC\r
+ EBX = 0x10 to 0xFFFFFFFF - reserved, do not use.\r
+\r
+**/\r
+UINT32\r
+EFIAPI\r
+SmmStmExceptionHandler (\r
+ IN OUT STM_PROTECTION_EXCEPTION_STACK_FRAME Context\r
+ )\r
+{\r
+ // TBD - SmmStmExceptionHandler, record information\r
+ DEBUG ((DEBUG_ERROR, "SmmStmExceptionHandler ...\n"));\r
+ //\r
+ // Skip this instruction and continue;\r
+ //\r
+ Context.X64StackFrame->Rip += Context.X64StackFrame->VmcsExitInstructionLength;\r
+\r
+ return 0;\r
+}\r
PeCoffGetEntryPointLib|MdePkg/Library/BasePeCoffGetEntryPointLib/BasePeCoffGetEntryPointLib.inf\r
PeCoffExtraActionLib|MdePkg/Library/BasePeCoffExtraActionLibNull/BasePeCoffExtraActionLibNull.inf\r
MicrocodeFlashAccessLib|UefiCpuPkg/Feature/Capsule/Library/MicrocodeFlashAccessLibNull/MicrocodeFlashAccessLibNull.inf\r
+ TpmMeasurementLib|MdeModulePkg/Library/TpmMeasurementLibNull/TpmMeasurementLibNull.inf\r
\r
[LibraryClasses.common.SEC]\r
PlatformSecLib|UefiCpuPkg/Library/PlatformSecLibNull/PlatformSecLibNull.inf\r
UefiCpuPkg/Library/PlatformSecLibNull/PlatformSecLibNull.inf\r
UefiCpuPkg/Library/SmmCpuPlatformHookLibNull/SmmCpuPlatformHookLibNull.inf\r
UefiCpuPkg/Library/SmmCpuFeaturesLib/SmmCpuFeaturesLib.inf\r
+ UefiCpuPkg/Library/SmmCpuFeaturesLib/SmmCpuFeaturesLibStm.inf\r
UefiCpuPkg/PiSmmCommunication/PiSmmCommunicationPei.inf\r
UefiCpuPkg/PiSmmCommunication/PiSmmCommunicationSmm.inf\r
UefiCpuPkg/SecCore/SecCore.inf\r
UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf\r
+ UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf {\r
+ <Defines>\r
+ FILE_GUID = D1D74FE9-7A4E-41D3-A0B3-67F13AD34D94\r
+ <LibraryClasses>\r
+ SmmCpuFeaturesLib|UefiCpuPkg/Library/SmmCpuFeaturesLib/SmmCpuFeaturesLibStm.inf\r
+ }\r
UefiCpuPkg/Universal/Acpi/S3Resume2Pei/S3Resume2Pei.inf\r
UefiCpuPkg/Feature/Capsule/MicrocodeUpdateDxe/MicrocodeUpdateDxe.inf\r
\r