This commit is a code optimization to allow a bigger separate stack size in
ArchSetupExceptionStack. In the previous code logic, CPU_STACK_ALIGNMENT bytes
were wasted if StackTop was already CPU_STACK_ALIGNMENT aligned.
Signed-off-by: Dun Tan <dun.tan@intel.com>
Cc: Eric Dong <eric.dong@intel.com>
Reviewed-by: Ray Ni <ray.ni@intel.com>
Cc: Rahul Kumar <rahul1.kumar@intel.com>
Reviewed-by: Abner Chang <abner.chang@amd.com>
// Fixup exception task descriptor and task-state segment\r
//\r
AsmGetTssTemplateMap (&TemplateMap);\r
- StackTop = StackTop - CPU_STACK_ALIGNMENT;\r
+ //\r
+ // Plus 1 byte is for compact stack layout in case StackTop is already aligned.\r
+ //\r
+ StackTop = StackTop - CPU_STACK_ALIGNMENT + 1;\r
StackTop = (UINTN)ALIGN_POINTER (StackTop, CPU_STACK_ALIGNMENT);\r
IdtTable = (IA32_IDT_GATE_DESCRIPTOR *)Idtr.Base;\r
for (Index = 0; Index < CPU_STACK_SWITCH_EXCEPTION_NUMBER; ++Index) {\r
// Fixup exception task descriptor and task-state segment\r
//\r
ZeroMem (Tss, sizeof (*Tss));\r
- StackTop = StackTop - CPU_STACK_ALIGNMENT;\r
+ //\r
+ // Plus 1 byte is for compact stack layout in case StackTop is already aligned.\r
+ //\r
+ StackTop = StackTop - CPU_STACK_ALIGNMENT + 1;\r
StackTop = (UINTN)ALIGN_POINTER (StackTop, CPU_STACK_ALIGNMENT);\r
IdtTable = (IA32_IDT_GATE_DESCRIPTOR *)Idtr.Base;\r
for (Index = 0; Index < CPU_STACK_SWITCH_EXCEPTION_NUMBER; ++Index) {\r