/** @file
SMM CPU misc functions specific to the Ia32 arch.

Copyright (c) 2015, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

/**
  Initialize Gdt for all processors.

  @param[in]   Cr3          CR3 value.
  @param[out]  GdtStepSize  The step size for GDT table.

  @return GdtBase for processor 0.
          GdtBase for processor X is: GdtBase + (GdtStepSize * X)
          (see the illustrative usage sketch after this function)
**/
VOID *
InitGdt (
  IN  UINTN  Cr3,
  OUT UINTN  *GdtStepSize
  )
{
  UINTN                    Index;
  IA32_SEGMENT_DESCRIPTOR  *GdtDescriptor;
  UINTN                    TssBase;
  UINTN                    GdtTssTableSize;
  UINT8                    *GdtTssTables;
  UINTN                    GdtTableStepSize;

  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    //
    // For IA32 SMM, if the SMM Stack Guard feature is enabled, we use 2 TSS.
    // In this case, we allocate a separate GDT/TSS for each CPU to avoid TSS load contention
    // on each SMI entry.
    //

    //
    // Enlarge the GDT to contain 2 TSS descriptors
    //
    gcSmiGdtr.Limit += (UINT16)(2 * sizeof (IA32_SEGMENT_DESCRIPTOR));

    GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE * 2 + 7) & ~7; // 8 bytes aligned
    GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    ASSERT (GdtTssTables != NULL);
    GdtTableStepSize = GdtTssTableSize;

    for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
      CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE * 2);
      //
      // Fixup TSS descriptors
      //
      TssBase = (UINTN)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1);
      GdtDescriptor = (IA32_SEGMENT_DESCRIPTOR *)(TssBase) - 2;
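      //
      // Note: TssBase is the address just past the enlarged GDT, which is where
      // the first TSS was copied; the two TSS descriptors patched below are the
      // last two entries of that GDT (hence the "- 2" from TssBase).
      //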
      GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;
      GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);
      GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);

      TssBase += TSS_SIZE;
      GdtDescriptor++;
      GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;
      GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);
      GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);
      //
      // Fixup TSS segments
      //
      // ESP as known good stack
      //
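      //
      // At this point TssBase refers to the second TSS (presumably the
      // exception TSS that the stack guard's task-gated fault handler switches
      // to), so the known good ESP and CR3 below are written into that TSS.
      //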
      *(UINTN *)(TssBase + TSS_IA32_ESP_OFFSET) = mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize;
      *(UINT32 *)(TssBase + TSS_IA32_CR3_OFFSET) = Cr3;
    }
  } else {
    //
    // Just use the original table; use AllocatePages and copy it here to make sure the GDTs are covered by page memory.
    //
    GdtTssTableSize = gcSmiGdtr.Limit + 1;
    GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
    ASSERT (GdtTssTables != NULL);
    GdtTableStepSize = GdtTssTableSize;

    for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
      CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1);
    }
  }

  *GdtStepSize = GdtTableStepSize;
  return GdtTssTables;
}
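
//
// Illustrative usage sketch (not part of the original file): it shows how a
// caller could apply the contract documented above, where the GDT for
// processor X lives at GdtBase + (GdtStepSize * X). The helper name and the
// act of loading the GDT directly with AsmWriteGdtr () are assumptions for
// illustration only; the real driver records the per-CPU GDT base for each
// CPU's SMI entry rather than loading it like this.
//
STATIC
VOID
ExampleLoadGdtForCpu (
  IN VOID   *GdtBase,
  IN UINTN  GdtStepSize,
  IN UINTN  CpuIndex
  )
{
  IA32_DESCRIPTOR  Gdtr;

  //
  // Per-CPU GDT base per the InitGdt () contract; the limit matches the
  // (possibly enlarged) gcSmiGdtr.Limit that InitGdt () copied from.
  //
  Gdtr.Base  = (UINTN)GdtBase + GdtStepSize * CpuIndex;
  Gdtr.Limit = gcSmiGdtr.Limit;
  AsmWriteGdtr (&Gdtr);
}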