--- /dev/null
+/** @file\r
+ SMM CPU misc functions specific to the IA32 architecture.\r
+ \r
+Copyright (c) 2015, Intel Corporation. All rights reserved.<BR>\r
+This program and the accompanying materials\r
+are licensed and made available under the terms and conditions of the BSD License\r
+which accompanies this distribution. The full text of the license may be found at\r
+http://opensource.org/licenses/bsd-license.php\r
+\r
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include "PiSmmCpuDxeSmm.h"\r
+\r
+/**\r
+ Initialize Gdt for all processors.\r
+\r
+ Allocates one page-backed copy of the SMI GDT per CPU. When the SMM Stack\r
+ Guard feature is enabled, each per-CPU copy is enlarged to hold two extra\r
+ TSS descriptors plus two TSS segments, and the descriptor base addresses,\r
+ known-good ESP and CR3 are fixed up for that CPU. Otherwise the original\r
+ GDT is simply duplicated per CPU so it is covered by page-aligned memory.\r
+\r
+ NOTE(review): this function grows the global gcSmiGdtr.Limit in place, so\r
+ it must only be called once — confirm against the driver entry point.\r
+\r
+ @param[in] Cr3 CR3 value; written into the CR3 field of each CPU's\r
+ second TSS.\r
+ @param[out] GdtStepSize The step size for GDT table.\r
+\r
+ @return GdtBase for processor 0.\r
+ GdtBase for processor X is: GdtBase + (GdtStepSize * X)\r
+**/\r
+VOID *\r
+InitGdt (\r
+ IN UINTN Cr3,\r
+ OUT UINTN *GdtStepSize\r
+ )\r
+{\r
+ UINTN Index;\r
+ IA32_SEGMENT_DESCRIPTOR *GdtDescriptor;\r
+ UINTN TssBase;\r
+ UINTN GdtTssTableSize;\r
+ UINT8 *GdtTssTables;\r
+ UINTN GdtTableStepSize;\r
+\r
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {\r
+ //\r
+ // For IA32 SMM, if SMM Stack Guard feature is enabled, we use 2 TSS.\r
+ // in this case, we allocate separate GDT/TSS for each CPUs to avoid TSS load contention\r
+ // on each SMI entry.\r
+ //\r
+\r
+ //\r
+ // Enlarge GDT to contain 2 TSS descriptors\r
+ //\r
+ gcSmiGdtr.Limit += (UINT16)(2 * sizeof (IA32_SEGMENT_DESCRIPTOR));\r
+\r
+ //\r
+ // Per-CPU layout is [enlarged GDT][TSS 1][TSS 2], rounded up so every\r
+ // copy starts on an 8-byte boundary.\r
+ //\r
+ GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE * 2 + 7) & ~7; // 8 bytes aligned\r
+ GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));\r
+ ASSERT (GdtTssTables != NULL);\r
+ GdtTableStepSize = GdtTssTableSize;\r
+\r
+ for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {\r
+ //\r
+ // NOTE(review): this copy assumes the template at gcSmiGdtr.Base is\r
+ // immediately followed by two TSS_SIZE images — verify against the\r
+ // assembly file that defines gcSmiGdtr.\r
+ //\r
+ CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE * 2);\r
+ //\r
+ // Fixup TSS descriptors: TssBase is the end of this CPU's GDT (start\r
+ // of its first TSS), and the two TSS descriptors are the last two GDT\r
+ // entries — i.e. the two entries immediately below TssBase.\r
+ //\r
+ TssBase = (UINTN)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1);\r
+ GdtDescriptor = (IA32_SEGMENT_DESCRIPTOR *)(TssBase) - 2;\r
+ GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;\r
+ GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);\r
+ GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);\r
+\r
+ //\r
+ // Advance to the second TSS and patch its descriptor the same way.\r
+ //\r
+ TssBase += TSS_SIZE;\r
+ GdtDescriptor++;\r
+ GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;\r
+ GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);\r
+ GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);\r
+ //\r
+ // Fixup TSS segments\r
+ //\r
+ // ESP as known good stack\r
+ //\r
+ // NOTE(review): only the second TSS gets ESP/CR3 patched — presumably\r
+ // it is the task switched to on a stack-guard fault, so it needs a\r
+ // known-good stack pointer (one guard page above this CPU's stack\r
+ // base) and the SMM page tables; confirm against the SMI entry code.\r
+ //\r
+ *(UINTN *)(TssBase + TSS_IA32_ESP_OFFSET) = mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize;\r
+ *(UINT32 *)(TssBase + TSS_IA32_CR3_OFFSET) = Cr3;\r
+ }\r
+ } else {\r
+ //\r
+ // Just use original table, AllocatePage and copy them here to make sure GDTs are covered in page memory.\r
+ //\r
+ GdtTssTableSize = gcSmiGdtr.Limit + 1;\r
+ GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));\r
+ ASSERT (GdtTssTables != NULL);\r
+ GdtTableStepSize = GdtTssTableSize;\r
+\r
+ for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {\r
+ CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1);\r
+ }\r
+ }\r
+\r
+ *GdtStepSize = GdtTableStepSize;\r
+ return GdtTssTables;\r
+}\r