--- /dev/null
+/** @file\r
+ SMM CPU misc functions specific to the IA32 arch.\r
+ \r
+Copyright (c) 2015, Intel Corporation. All rights reserved.<BR>\r
+This program and the accompanying materials\r
+are licensed and made available under the terms and conditions of the BSD License\r
+which accompanies this distribution. The full text of the license may be found at\r
+http://opensource.org/licenses/bsd-license.php\r
+\r
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include "PiSmmCpuDxeSmm.h"\r
+\r
+/**\r
+ Initialize Gdt for all processors.\r
+ \r
+ @param[in] Cr3 CR3 value.\r
+ @param[out] GdtStepSize The step size for GDT table.\r
+\r
+ @return GdtBase for processor 0.\r
+ GdtBase for processor X is: GdtBase + (GdtStepSize * X)\r
+**/\r
+VOID *\r
+InitGdt (\r
+ IN UINTN Cr3,\r
+ OUT UINTN *GdtStepSize\r
+ )\r
+{\r
+ UINTN Index;\r
+ IA32_SEGMENT_DESCRIPTOR *GdtDescriptor;\r
+ UINTN TssBase;\r
+ UINTN GdtTssTableSize;\r
+ UINT8 *GdtTssTables;\r
+ UINTN GdtTableStepSize;\r
+\r
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
+ //\r
+ // For IA32 SMM, if the SMM Stack Guard feature is enabled, we use 2 TSSs.\r
+ // In this case, we allocate a separate GDT/TSS for each CPU to avoid TSS\r
+ // load contention on each SMI entry.\r
+ //\r
+\r
+ //\r
+ // Enlarge GDT to contain 2 TSS descriptors\r
+ //\r
+ gcSmiGdtr.Limit += (UINT16)(2 * sizeof (IA32_SEGMENT_DESCRIPTOR));\r
+\r
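+ //\r
+ // Each per-CPU block is the enlarged GDT copy followed by the two TSSs,\r
+ // rounded up to 8 bytes so every GDT copy stays 8-byte aligned.\r
+ //\r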
+ GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE * 2 + 7) & ~7; // 8-byte aligned\r
+ GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));\r
+ ASSERT (GdtTssTables != NULL);\r
+ GdtTableStepSize = GdtTssTableSize;\r
+\r
+ for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {\r
+ CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE * 2);\r
+ //\r
+ // Fixup TSS descriptors\r
+ //\r
+ TssBase = (UINTN)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1);\r
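+ //\r
+ // The two TSS descriptors are the last two GDT entries and end exactly\r
+ // at TssBase, so step back two descriptors to reach the first one.\r
+ //\r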
+ GdtDescriptor = (IA32_SEGMENT_DESCRIPTOR *)(TssBase) - 2;\r
+ GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;\r
+ GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);\r
+ GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);\r
+\r
+ TssBase += TSS_SIZE;\r
+ GdtDescriptor++;\r
+ GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;\r
+ GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);\r
+ GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);\r
+ //\r
+ // Fixup TSS segments\r
+ //\r
+ // ESP as known good stack\r
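+ // and CR3 as the SMM page table, so a task switch into this TSS lands\r
+ // on a valid stack with SMM paging even after the guard page is hit\r
+ // (assuming the first page of each CPU's stack region is the known\r
+ // good stack).\r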
+ //\r
+ *(UINTN *)(TssBase + TSS_IA32_ESP_OFFSET) = mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize;\r
+ *(UINT32 *)(TssBase + TSS_IA32_CR3_OFFSET) = Cr3;\r
+ }\r
+ } else {\r
+ //\r
+ // Just use the original GDT; allocate pages and copy it here so that the GDT is covered by whole pages.\r
+ //\r
+ GdtTssTableSize = gcSmiGdtr.Limit + 1;\r
+ GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));\r
+ ASSERT (GdtTssTables != NULL);\r
+ GdtTableStepSize = GdtTssTableSize;\r
+\r
+ for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {\r
+ CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1);\r
+ }\r
+ }\r
+\r
+ *GdtStepSize = GdtTableStepSize;\r
+ return GdtTssTables;\r
+}\r
UINTN Index;\r
MTRR_SETTINGS *Mtrr;\r
PROCESSOR_SMM_DESCRIPTOR *Psd;\r
- UINTN GdtTssTableSize;\r
UINT8 *GdtTssTables;\r
- IA32_SEGMENT_DESCRIPTOR *GdtDescriptor;\r
- UINTN TssBase;\r
UINTN GdtTableStepSize;\r
\r
//\r
//\r
Cr3 = SmmInitPageTable ();\r
\r
- GdtTssTables = NULL;\r
- GdtTssTableSize = 0;\r
- GdtTableStepSize = 0;\r
- //\r
- // For X64 SMM, we allocate separate GDT/TSS for each CPUs to avoid TSS load contention\r
- // on each SMI entry.\r
- //\r
- if (EFI_IMAGE_MACHINE_TYPE_SUPPORTED(EFI_IMAGE_MACHINE_X64)) {\r
- GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE + 7) & ~7; // 8 bytes aligned\r
- GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));\r
- ASSERT (GdtTssTables != NULL);\r
- GdtTableStepSize = GdtTssTableSize;\r
-\r
- for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {\r
- CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE);\r
- if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
- //\r
- // Setup top of known good stack as IST1 for each processor.\r
- //\r
- *(UINTN *)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1 + TSS_X64_IST1_OFFSET) = (mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize);\r
- }\r
- }\r
- } else if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
-\r
- //\r
- // For IA32 SMM, if SMM Stack Guard feature is enabled, we use 2 TSS.\r
- // in this case, we allocate separate GDT/TSS for each CPUs to avoid TSS load contention\r
- // on each SMI entry.\r
- //\r
-\r
- //\r
- // Enlarge GDT to contain 2 TSS descriptors\r
- //\r
- gcSmiGdtr.Limit += (UINT16)(2 * sizeof (IA32_SEGMENT_DESCRIPTOR));\r
-\r
- GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE * 2 + 7) & ~7; // 8 bytes aligned\r
- GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));\r
- ASSERT (GdtTssTables != NULL);\r
- GdtTableStepSize = GdtTssTableSize;\r
-\r
- for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {\r
- CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE * 2);\r
- //\r
- // Fixup TSS descriptors\r
- //\r
- TssBase = (UINTN)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1);\r
- GdtDescriptor = (IA32_SEGMENT_DESCRIPTOR *)(TssBase) - 2;\r
- GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;\r
- GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);\r
- GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);\r
-\r
- TssBase += TSS_SIZE;\r
- GdtDescriptor++;\r
- GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;\r
- GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);\r
- GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);\r
- //\r
- // Fixup TSS segments\r
- //\r
- // ESP as known good stack\r
- //\r
- *(UINTN *)(TssBase + TSS_IA32_ESP_OFFSET) = mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize;\r
- *(UINT32 *)(TssBase + TSS_IA32_CR3_OFFSET) = Cr3;\r
- }\r
- }\r
+ GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);\r
\r
//\r
// Initialize PROCESSOR_SMM_DESCRIPTOR for each CPU\r
for (Index = 0; Index < mMaxNumberOfCpus; Index++) {\r
Psd = (PROCESSOR_SMM_DESCRIPTOR *)(VOID *)(UINTN)(mCpuHotPlugData.SmBase[Index] + SMM_PSD_OFFSET);\r
CopyMem (Psd, &gcPsd, sizeof (gcPsd));\r
- if (EFI_IMAGE_MACHINE_TYPE_SUPPORTED (EFI_IMAGE_MACHINE_X64)) {\r
- //\r
- // For X64 SMM, set GDT to the copy allocated above.\r
- //\r
- Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);\r
- } else if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
- //\r
- // For IA32 SMM, if SMM Stack Guard feature is enabled, set GDT to the copy allocated above.\r
- //\r
- Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);\r
- Psd->SmmGdtSize = gcSmiGdtr.Limit + 1;\r
- }\r
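+ //\r
+ // Each CPU now gets its own GDT/TSS copy on both IA32 and X64; point the\r
+ // PSD at this CPU's copy so the SMI entry code loads it.\r
+ //\r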
+ Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index);\r
+ Psd->SmmGdtSize = gcSmiGdtr.Limit + 1;\r
\r
//\r
// Install SMI handler\r
VOID\r
);\r
\r
+/**\r
+ Initialize Gdt for all processors.\r
+ \r
+ @param[in] Cr3 CR3 value.\r
+ @param[out] GdtStepSize The step size for GDT table.\r
+\r
+ @return GdtBase for processor 0.\r
+ GdtBase for processor X is: GdtBase + (GdtStepSize * X)\r
+**/\r
+VOID *\r
+InitGdt (\r
+ IN UINTN Cr3,\r
+ OUT UINTN *GdtStepSize\r
+ );\r
+\r
/**\r
\r
Register the SMM Foundation entry point.\r
[Sources.Ia32]\r
Ia32/Semaphore.c\r
Ia32/PageTbl.c\r
+ Ia32/SmmFuncsArch.c\r
Ia32/SmmProfileArch.c\r
Ia32/SmmProfileArch.h\r
Ia32/SmmInit.asm | MSFT\r
[Sources.X64]\r
X64/Semaphore.c\r
X64/PageTbl.c\r
+ X64/SmmFuncsArch.c\r
X64/SmmProfileArch.c\r
X64/SmmProfileArch.h\r
X64/SmmInit.asm | MSFT\r
sgdt (%rsp)\r
movl 2(%rsp), %eax # eax = GDT base\r
addl $8, %esp\r
- movl %eax, %edx\r
- addl $GDT_SIZE, %edx\r
- movb %dl, (TSS_SEGMENT + 2)(%rax)\r
- movb %dh, (TSS_SEGMENT + 3)(%rax)\r
- .byte 0xc1, 0xea, 0x10 # shr edx, 16\r
- movb %dl, (TSS_SEGMENT + 4)(%rax)\r
- movb %dh, (TSS_SEGMENT + 7)(%rax)\r
- movl %eax, %edx\r
movb $0x89, %dl\r
movb %dl, (TSS_SEGMENT + 5)(%rax) # clear busy flag\r
movl $TSS_SEGMENT, %eax\r
sgdt fword ptr [rsp]\r
mov eax, [rsp + 2] ; eax = GDT base\r
add esp, 8\r
- mov edx, eax\r
- add edx, GDT_SIZE\r
- mov [rax + TSS_SEGMENT + 2], dl\r
- mov [rax + TSS_SEGMENT + 3], dh\r
- DB 0c1h, 0eah, 10h ; shr edx, 16\r
- mov [rax + TSS_SEGMENT + 4], dl\r
- mov [rax + TSS_SEGMENT + 7], dh\r
- mov edx, eax\r
mov dl, 89h\r
mov [rax + TSS_SEGMENT + 5], dl ; clear busy flag\r
mov eax, TSS_SEGMENT\r
--- /dev/null
+/** @file\r
+ SMM CPU misc functions specific to the X64 arch.\r
+ \r
+Copyright (c) 2015, Intel Corporation. All rights reserved.<BR>\r
+This program and the accompanying materials\r
+are licensed and made available under the terms and conditions of the BSD License\r
+which accompanies this distribution. The full text of the license may be found at\r
+http://opensource.org/licenses/bsd-license.php\r
+\r
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include "PiSmmCpuDxeSmm.h"\r
+\r
+/**\r
+ Initialize Gdt for all processors.\r
+ \r
+ @param[in] Cr3 CR3 value.\r
+ @param[out] GdtStepSize The step size for GDT table.\r
+\r
+ @return GdtBase for processor 0.\r
+ GdtBase for processor X is: GdtBase + (GdtStepSize * X)\r
+**/\r
+VOID *\r
+InitGdt (\r
+ IN UINTN Cr3,\r
+ OUT UINTN *GdtStepSize\r
+ )\r
+{\r
+ UINTN Index;\r
+ IA32_SEGMENT_DESCRIPTOR *GdtDescriptor;\r
+ UINTN TssBase;\r
+ UINTN GdtTssTableSize;\r
+ UINT8 *GdtTssTables;\r
+ UINTN GdtTableStepSize;\r
+\r
+ //\r
+ // For X64 SMM, we allocate a separate GDT/TSS for each CPU to avoid TSS\r
+ // load contention on each SMI entry.\r
+ //\r
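+ //\r
+ // Each per-CPU block is a GDT copy immediately followed by its TSS,\r
+ // rounded up to 8 bytes so every GDT copy stays 8-byte aligned.\r
+ //\r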
+ GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE + 7) & ~7; // 8-byte aligned\r
+ GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));\r
+ ASSERT (GdtTssTables != NULL);\r
+ GdtTableStepSize = GdtTssTableSize;\r
+\r
+ for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {\r
+ CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE);\r
+\r
+ //\r
+ // Fixup the TSS descriptor\r
+ //\r
+ TssBase = (UINTN)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1);\r
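+ //\r
+ // In X64 the TSS descriptor occupies 16 bytes (two GDT slots) and is\r
+ // the last GDT entry, ending at TssBase, so step back two 8-byte\r
+ // descriptors to reach its start. Only the low 32 bits of the base are\r
+ // patched below, on the assumption that the allocation sits below 4GB.\r
+ //\r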
+ GdtDescriptor = (IA32_SEGMENT_DESCRIPTOR *)(TssBase) - 2;\r
+ GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;\r
+ GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);\r
+ GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);\r
+\r
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
+ //\r
+ // Setup top of known good stack as IST1 for each processor.\r
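+ // Any IDT entry that selects IST1 makes the CPU switch to this stack\r
+ // before pushing the exception frame, so a stack overflow into the\r
+ // guard page can still be handled.\r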
+ //\r
+ *(UINTN *)(TssBase + TSS_X64_IST1_OFFSET) = (mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize);\r
+ }\r
+ }\r
+\r
+ *GdtStepSize = GdtTableStepSize;\r
+ return GdtTssTables;\r
+}\r