+**/\r
+EFI_STATUS\r
+ArchSetupExceptionStack (\r
+ IN CPU_EXCEPTION_INIT_DATA *StackSwitchData\r
+ )\r
+{\r
+ IA32_DESCRIPTOR Gdtr;\r
+ IA32_DESCRIPTOR Idtr;\r
+ IA32_IDT_GATE_DESCRIPTOR *IdtTable;\r
+ IA32_TSS_DESCRIPTOR *TssDesc;\r
+ IA32_TASK_STATE_SEGMENT *Tss;\r
+ UINTN StackTop;\r
+ UINTN Index;\r
+ UINTN Vector;\r
+ UINTN TssBase;\r
+ UINTN GdtSize;\r
+ EXCEPTION_HANDLER_TEMPLATE_MAP TemplateMap;\r
+\r
+ //\r
+ // Validate every caller-supplied field up front: a NULL pointer, a zero\r
+ // size/count, a revision mismatch, or an exception count above\r
+ // CPU_EXCEPTION_NUM is rejected with EFI_INVALID_PARAMETER.\r
+ //\r
+ if (StackSwitchData == NULL ||\r
+ StackSwitchData->Ia32.Revision != CPU_EXCEPTION_INIT_DATA_REV ||\r
+ StackSwitchData->Ia32.KnownGoodStackTop == 0 ||\r
+ StackSwitchData->Ia32.KnownGoodStackSize == 0 ||\r
+ StackSwitchData->Ia32.StackSwitchExceptions == NULL ||\r
+ StackSwitchData->Ia32.StackSwitchExceptionNumber == 0 ||\r
+ StackSwitchData->Ia32.StackSwitchExceptionNumber > CPU_EXCEPTION_NUM ||\r
+ StackSwitchData->Ia32.GdtTable == NULL ||\r
+ StackSwitchData->Ia32.IdtTable == NULL ||\r
+ StackSwitchData->Ia32.ExceptionTssDesc == NULL ||\r
+ StackSwitchData->Ia32.ExceptionTss == NULL) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ //\r
+ // The caller is responsible for that the GDT table, no matter the existing\r
+ // one or newly allocated, has enough space to hold descriptors for exception\r
+ // task-state segments.\r
+ //\r
+ // The GDT base must meet IA32_GDT_ALIGNMENT; a misaligned table is rejected.\r
+ //\r
+ if (((UINTN)StackSwitchData->Ia32.GdtTable & (IA32_GDT_ALIGNMENT - 1)) != 0) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ //\r
+ // The TSS descriptor area must lie entirely inside the supplied GDT table:\r
+ // it may not start before the GDT base ...\r
+ //\r
+ if ((UINTN)StackSwitchData->Ia32.ExceptionTssDesc < (UINTN)(StackSwitchData->Ia32.GdtTable)) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ //\r
+ // ... nor extend past the end of the GDT table.\r
+ //\r
+ if ((UINTN)StackSwitchData->Ia32.ExceptionTssDesc + StackSwitchData->Ia32.ExceptionTssDescSize >\r
+ ((UINTN)(StackSwitchData->Ia32.GdtTable) + StackSwitchData->Ia32.GdtTableSize)) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ //\r
+ // We need one descriptor and one TSS for current task and every exception\r
+ // specified.\r
+ //\r
+ if (StackSwitchData->Ia32.ExceptionTssDescSize <\r
+ sizeof (IA32_TSS_DESCRIPTOR) * (StackSwitchData->Ia32.StackSwitchExceptionNumber + 1)) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+ if (StackSwitchData->Ia32.ExceptionTssSize <\r
+ sizeof (IA32_TASK_STATE_SEGMENT) * (StackSwitchData->Ia32.StackSwitchExceptionNumber + 1)) {\r
+ return EFI_INVALID_PARAMETER;\r
+ }\r
+\r
+ TssDesc = StackSwitchData->Ia32.ExceptionTssDesc;\r
+ Tss = StackSwitchData->Ia32.ExceptionTss;\r
+\r
+ //\r
+ // Initialize new GDT table and/or IDT table, if any\r
+ //\r
+ AsmReadIdtr (&Idtr);\r
+ AsmReadGdtr (&Gdtr);\r
+\r
+ //\r
+ // GdtSize = byte offset of the end of the last TSS descriptor (current task\r
+ // + one per stack-switch exception) from the start of the new GDT table.\r
+ //\r
+ GdtSize = (UINTN)TssDesc +\r
+ sizeof (IA32_TSS_DESCRIPTOR) *\r
+ (StackSwitchData->Ia32.StackSwitchExceptionNumber + 1) -\r
+ (UINTN)(StackSwitchData->Ia32.GdtTable);\r
+ if ((UINTN)StackSwitchData->Ia32.GdtTable != Gdtr.Base) {\r
+ //\r
+ // Caller supplied a new GDT location: clone the active GDT into it\r
+ // (Gdtr.Limit + 1 is the byte size of the current table), then extend\r
+ // the limit to cover the appended TSS descriptors.\r
+ //\r
+ CopyMem (StackSwitchData->Ia32.GdtTable, (VOID *)Gdtr.Base, Gdtr.Limit + 1);\r
+ Gdtr.Base = (UINTN)StackSwitchData->Ia32.GdtTable;\r
+ Gdtr.Limit = (UINT16)GdtSize - 1;\r
+ }\r
+\r
+ //\r
+ // Retarget the IDT register at the caller's table; the limit is only\r
+ // updated when an explicit size was supplied.\r
+ //\r
+ if ((UINTN)StackSwitchData->Ia32.IdtTable != Idtr.Base) {\r
+ Idtr.Base = (UINTN)StackSwitchData->Ia32.IdtTable;\r
+ }\r
+ if (StackSwitchData->Ia32.IdtTableSize > 0) {\r
+ Idtr.Limit = (UINT16)(StackSwitchData->Ia32.IdtTableSize - 1);\r
+ }\r
+\r
+ //\r
+ // Fixup current task descriptor. Task-state segment for current task will\r
+ // be filled by processor during task switching.\r
+ //\r
+ TssBase = (UINTN)Tss;\r
+\r
+ TssDesc->Uint64 = 0;\r
+ TssDesc->Bits.LimitLow = sizeof(IA32_TASK_STATE_SEGMENT) - 1;\r
+ TssDesc->Bits.BaseLow = (UINT16)TssBase;\r
+ TssDesc->Bits.BaseMid = (UINT8)(TssBase >> 16);\r
+ TssDesc->Bits.Type = IA32_GDT_TYPE_TSS;\r
+ TssDesc->Bits.P = 1;\r
+ TssDesc->Bits.LimitHigh = 0;\r
+ TssDesc->Bits.BaseHigh = (UINT8)(TssBase >> 24);\r
+\r
+ //\r
+ // Fixup exception task descriptor and task-state segment\r
+ //\r
+ AsmGetTssTemplateMap (&TemplateMap);\r
+ //\r
+ // Round the known-good stack top down to CPU_STACK_ALIGNMENT (drop one\r
+ // alignment unit first so an already-aligned top is not rounded up past it).\r
+ //\r
+ StackTop = StackSwitchData->Ia32.KnownGoodStackTop - CPU_STACK_ALIGNMENT;\r
+ StackTop = (UINTN)ALIGN_POINTER (StackTop, CPU_STACK_ALIGNMENT);\r
+ IdtTable = StackSwitchData->Ia32.IdtTable;\r
+ for (Index = 0; Index < StackSwitchData->Ia32.StackSwitchExceptionNumber; ++Index) {\r
+ //\r
+ // Slot 0 was used for the current task above; exception N uses slot N+1.\r
+ //\r
+ TssDesc += 1;\r
+ Tss += 1;\r
+\r
+ //\r
+ // Fixup TSS descriptor\r
+ //\r
+ TssBase = (UINTN)Tss;\r
+\r
+ TssDesc->Uint64 = 0;\r
+ TssDesc->Bits.LimitLow = sizeof(IA32_TASK_STATE_SEGMENT) - 1;\r
+ TssDesc->Bits.BaseLow = (UINT16)TssBase;\r
+ TssDesc->Bits.BaseMid = (UINT8)(TssBase >> 16);\r
+ TssDesc->Bits.Type = IA32_GDT_TYPE_TSS;\r
+ TssDesc->Bits.P = 1;\r
+ TssDesc->Bits.LimitHigh = 0;\r
+ TssDesc->Bits.BaseHigh = (UINT8)(TssBase >> 24);\r
+\r
+ //\r
+ // Fixup TSS\r
+ //\r
+ // Skip vectors outside the supported exception range or beyond the\r
+ // number of gates the (possibly resized) IDT can hold.\r
+ //\r
+ Vector = StackSwitchData->Ia32.StackSwitchExceptions[Index];\r
+ if (Vector >= CPU_EXCEPTION_NUM ||\r
+ Vector >= (Idtr.Limit + 1) / sizeof (IA32_IDT_GATE_DESCRIPTOR)) {\r
+ continue;\r
+ }\r
+\r
+ ZeroMem (Tss, sizeof (*Tss));\r
+ //\r
+ // Entry point is this vector's stub inside the handler template block.\r
+ //\r
+ Tss->EIP = (UINT32)(TemplateMap.ExceptionStart\r
+ + Vector * TemplateMap.ExceptionStubHeaderSize);\r
+ //\r
+ // EFLAGS bit 1 is reserved and must read as 1; all other flags clear.\r
+ //\r
+ Tss->EFLAGS = 0x2;\r
+ Tss->ESP = StackTop;\r
+ Tss->CR3 = AsmReadCr3 ();\r
+ Tss->ES = AsmReadEs ();\r
+ Tss->CS = AsmReadCs ();\r
+ Tss->SS = AsmReadSs ();\r
+ Tss->DS = AsmReadDs ();\r
+ Tss->FS = AsmReadFs ();\r
+ Tss->GS = AsmReadGs ();\r
+\r
+ //\r
+ // Carve a disjoint KnownGoodStackSize region for the next vector, working\r
+ // downward from the common stack top.\r
+ //\r
+ StackTop -= StackSwitchData->Ia32.KnownGoodStackSize;\r
+\r
+ //\r
+ // Update IDT to use Task Gate for given exception\r
+ //\r
+ // For a task gate only the TSS selector is meaningful; both offset\r
+ // fields are ignored by the processor and zeroed here.\r
+ //\r
+ IdtTable[Vector].Bits.OffsetLow = 0;\r
+ IdtTable[Vector].Bits.Selector = (UINT16)((UINTN)TssDesc - Gdtr.Base);\r
+ IdtTable[Vector].Bits.Reserved_0 = 0;\r
+ IdtTable[Vector].Bits.GateType = IA32_IDT_GATE_TYPE_TASK;\r
+ IdtTable[Vector].Bits.OffsetHigh = 0;\r
+ }\r
+\r
+ //\r
+ // Publish GDT\r
+ //\r
+ // Ordering matters: the GDT must be live before TR is loaded from it,\r
+ // and the IDT (whose task gates reference these selectors) goes last.\r
+ //\r
+ AsmWriteGdtr (&Gdtr);\r
+\r
+ //\r
+ // Load current task\r
+ //\r
+ AsmWriteTr ((UINT16)((UINTN)StackSwitchData->Ia32.ExceptionTssDesc - Gdtr.Base));\r
+\r
+ //\r
+ // Publish IDT\r
+ //\r
+ AsmWriteIdtr (&Idtr);\r
+\r
+ return EFI_SUCCESS;\r
+}\r
+\r
+/**\r
+ Display processor context.\r
+\r
+ @param[in] ExceptionType Exception type.\r
+ @param[in] SystemContext Processor context to be display.\r