#------------------------------------------------------------------------------\r
#\r
-# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>\r
+# Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>\r
# This program and the accompanying materials\r
# are licensed and made available under the terms and conditions of the BSD License\r
# which accompanies this distribution. The full text of the license may be found at\r
ASM_GLOBAL ASM_PFX(gSmiCr3)\r
ASM_GLOBAL ASM_PFX(gSmiStack)\r
ASM_GLOBAL ASM_PFX(gSmbase)\r
-ASM_GLOBAL ASM_PFX(FeaturePcdGet (PcdCpuSmmDebug))\r
+ASM_GLOBAL ASM_PFX(mXdSupported)\r
ASM_GLOBAL ASM_PFX(gSmiHandlerIdtr)\r
\r
+.equ MSR_IA32_MISC_ENABLE, 0x1A0\r
+.equ MSR_EFER, 0xc0000080\r
+.equ MSR_EFER_XD, 0x800\r
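+#
+# MSR_IA32_MISC_ENABLE (0x1A0) bit 34 is the "XD Bit Disable" flag (bit 2 of
+# the high dword returned in EDX by RDMSR); MSR_EFER (0xC0000080) bit 11
+# (MSR_EFER_XD, 0x800) is NXE, which turns on execute-disable page protection.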
+\r
#\r
# Constants relating to PROCESSOR_SMM_DESCRIPTOR\r
#\r
sgdt (%rsp)\r
movl 2(%rsp), %eax # eax = GDT base\r
addl $8, %esp\r
- movl %eax, %edx\r
- addl $GDT_SIZE, %edx\r
- movb %dl, (TSS_SEGMENT + 2)(%rax)\r
- movb %dh, (TSS_SEGMENT + 3)(%rax)\r
- .byte 0xc1, 0xea, 0x10 # shr edx, 16\r
- movb %dl, (TSS_SEGMENT + 4)(%rax)\r
- movb %dh, (TSS_SEGMENT + 7)(%rax)\r
- movl %eax, %edx\r
movb $0x89, %dl\r
movb %dl, (TSS_SEGMENT + 5)(%rax) # clear busy flag\r
movl $TSS_SEGMENT, %eax\r
ltr %ax\r
\r
+# Enable NXE if supported\r
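+# mXdSupported is the imm8 operand of the "mov al, imm8" opcode byte emitted
+# below. Because the label is ASM_GLOBAL, the byte can be cleared from outside
+# this file (presumably by the driver's CPUID feature check) when the
+# processor does not support XD, so testing the feature here costs only a
+# register compare.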
+ .byte 0xb0 # mov al, imm8\r
+ASM_PFX(mXdSupported): .byte 1\r
+ cmpb $0, %al\r
+ jz SkipNxe\r
+#\r
+# Check XD disable bit\r
+#\r
+ movl $MSR_IA32_MISC_ENABLE, %ecx\r
+ rdmsr\r
+ subl $4, %esp\r
+ pushq %rdx # save MSR_IA32_MISC_ENABLE[63-32]\r
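+# Note: this code still executes in 32-bit protected mode, so the push above
+# stores only 4 bytes; together with the 4 bytes reserved by "subl $4, %esp"
+# it takes 8 bytes, matching the SkipNxe path below and the 64-bit
+# "popq %rdx" that retrieves the saved value before RSM.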
+ testl $BIT2, %edx # MSR_IA32_MISC_ENABLE[34]\r
+ jz L13\r
+ andw $0x0FFFB, %dx # clear XD Disable bit if it is set\r
+ wrmsr\r
+L13:\r
+ movl $MSR_EFER, %ecx\r
+ rdmsr\r
+ orw $MSR_EFER_XD,%ax # enable NXE\r
+ wrmsr\r
+ jmp NxeDone\r
+SkipNxe:\r
+ subl $8, %esp # keep the stack layout consistent with the XD-supported path\r
+NxeDone:\r
+\r
#\r
# Switch to LongMode\r
#\r
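# The switch follows the usual sequence: compute the 64-bit entry offset for
# a far return, set EFER.LME, enable paging in CR0, and "retf" through the
# long-mode code segment selector that is already on the stack (see the
# "seg is the 1st arg" comment below).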
call Base # push return address for retf later\r
Base:\r
addl $(LongMode - Base), (%rsp) # offset for far retf, seg is the 1st arg\r
- movl $0xc0000080, %ecx\r
+\r
+ movl $MSR_EFER, %ecx\r
rdmsr\r
- orb $1,%ah\r
+ orb $1,%ah # enable LME\r
wrmsr\r
movq %cr0, %rbx\r
- btsl $31, %ebx\r
+ orl $0x080010023, %ebx # enable paging + WP + NE + MP + PE\r
movq %rbx, %cr0\r
retf\r
LongMode: # long mode (64-bit code) starts here\r
# jmp _SmiHandler ; instruction is not needed\r
\r
_SmiHandler:\r
- movabsq $ASM_PFX(FeaturePcdGet (PcdCpuSmmDebug)), %rax\r
- cmpb $0, (%rax)\r
- jz L1\r
-\r
- .byte 0x48, 0x8b, 0x0d # mov rcx, [rip + disp32]\r
- .long SSM_DR6 - (. + 4 - _SmiEntryPoint + 0x8000)\r
- .byte 0x48, 0x8b, 0x15 # mov rdx, [rip + disp32]\r
- .long SSM_DR7 - (. + 4 - _SmiEntryPoint + 0x8000)\r
- movq %rcx, %dr6\r
- movq %rdx, %dr7\r
-L1:\r
-\r
- movabsq $ASM_PFX(SmiRendezvous), %rax\r
- movq (%rsp), %rcx\r
+ movq 8(%rsp), %rbx # rbx <- CpuIndex (now at 8(%rsp) because of the 8 bytes reserved above for the XD state)\r
# Save FP registers\r
\r
- subq $0x208, %rsp\r
+ subq $0x200, %rsp\r
.byte 0x48 # FXSAVE64\r
fxsave (%rsp)\r
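# The 0x48 byte above is a REX.W prefix that turns FXSAVE into FXSAVE64, so
# the full 64-bit register image is saved; it is emitted by hand, presumably
# because the assembler in use does not accept the fxsave64 mnemonic.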
\r
addq $-0x20, %rsp # reserve the 32-byte shadow space required by the Microsoft x64 calling convention\r
+\r
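+# CpuSmmDebugEntry/CpuSmmDebugExit take over the DR6/DR7 save/restore that
+# the removed PcdCpuSmmDebug block above performed inline; the CpuIndex kept
+# in rbx is passed as the first argument (rcx, per the Microsoft x64 calling
+# convention) to each of the three calls below.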
+ movq %rbx, %rcx\r
+ movabsq $ASM_PFX(CpuSmmDebugEntry), %rax\r
+ call *%rax\r
+\r
+ movq %rbx, %rcx\r
+ movabsq $ASM_PFX(SmiRendezvous), %rax\r
+ call *%rax\r
+\r
+ movq %rbx, %rcx\r
+ movabsq $ASM_PFX(CpuSmmDebugExit), %rax\r
call *%rax\r
+\r
addq $0x20, %rsp\r
\r
#\r
.byte 0x48 # FXRSTOR64\r
fxrstor (%rsp)\r
\r
- movabsq $ASM_PFX(FeaturePcdGet (PcdCpuSmmDebug)), %rax\r
- cmpb $0, (%rax)\r
- jz L2\r
-\r
- movq %dr7, %rdx\r
- movq %dr6, %rcx\r
- .byte 0x48, 0x89, 0x15 # mov [rip + disp32], rdx\r
- .long SSM_DR7 - (. + 4 - _SmiEntryPoint + 0x8000)\r
- .byte 0x48, 0x89, 0x0d # mov [rip + disp32], rcx\r
- .long SSM_DR6 - (. + 4 - _SmiEntryPoint + 0x8000)\r
-L2:\r
+ addq $0x200, %rsp # free the 512-byte FXSAVE64 save area\r
+\r
+ movabsq $ASM_PFX(mXdSupported), %rax\r
+ movb (%rax), %al\r
+ cmpb $0, %al\r
+ jz L16\r
+ popq %rdx # get saved MSR_IA32_MISC_ENABLE[63-32]\r
+ testl $BIT2, %edx\r
+ jz L16\r
+ movl $MSR_IA32_MISC_ENABLE, %ecx\r
+ rdmsr\r
+ orw $BIT2, %dx # set XD Disable bit if it was set before entering SMM\r
+ wrmsr\r
+\r
+L16:\r
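# RSM returns from SMM by restoring the processor state captured in the SMRAM
# save state area when the SMI was taken.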
rsm\r
\r
ASM_PFX(gcSmiHandlerSize): .word . - _SmiEntryPoint\r