diff --git a/patches/kernel/0011-KVM-x86-emulator-smm-use-smram-struct-for-32-bit-smr.patch b/patches/kernel/0011-KVM-x86-emulator-smm-use-smram-struct-for-32-bit-smr.patch
new file mode 100644
index 0000000..40234cf
--- /dev/null
+++ b/patches/kernel/0011-KVM-x86-emulator-smm-use-smram-struct-for-32-bit-smr.patch
@@ -0,0 +1,268 @@
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Maxim Levitsky <mlevitsk@redhat.com>
+Date: Wed, 3 Aug 2022 18:50:07 +0300
+Subject: [PATCH] KVM: x86: emulator/smm: use smram struct for 32 bit smram
+ load/restore
+
+Use kvm_smram_state_32 struct to save/restore 32 bit SMM state
+(used when X86_FEATURE_LM is not present in the guest CPUID).
+
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
+---
+ arch/x86/kvm/emulate.c | 81 +++++++++++++++---------------------------
+ arch/x86/kvm/x86.c     | 75 +++++++++++++++++---------------------
+ 2 files changed, 60 insertions(+), 96 deletions(-)
+
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 7294dffa794a..65d82292ccec 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -2359,25 +2359,17 @@ static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
+ 	desc->type = (flags >> 8) & 15;
+ }
+ 
+-static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
++static void rsm_load_seg_32(struct x86_emulate_ctxt *ctxt,
++			    const struct kvm_smm_seg_state_32 *state,
++			    u16 selector,
+ 			   int n)
+ {
+ 	struct desc_struct desc;
+-	int offset;
+-	u16 selector;
+-
+-	selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);
+-
+-	if (n < 3)
+-		offset = 0x7f84 + n * 12;
+-	else
+-		offset = 0x7f2c + (n - 3) * 12;
+ 
+-	set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
+-	set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
+-	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
++	set_desc_base(&desc, state->base);
++	set_desc_limit(&desc, state->limit);
++	rsm_set_desc_flags(&desc, state->flags);
+ 	ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
+-	return X86EMUL_CONTINUE;
+ }
+ 
+ #ifdef CONFIG_X86_64
+@@ -2448,63 +2440,46 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
+ }
+ 
+ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
+-			     const char *smstate)
++			     const struct kvm_smram_state_32 *smstate)
+ {
+-	struct desc_struct desc;
+ 	struct desc_ptr dt;
+-	u16 selector;
+-	u32 val, cr0, cr3, cr4;
+ 	int i;
+ 
+-	cr0 = GET_SMSTATE(u32, smstate, 0x7ffc);
+-	cr3 = GET_SMSTATE(u32, smstate, 0x7ff8);
+-	ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
+-	ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0);
++	ctxt->eflags = smstate->eflags | X86_EFLAGS_FIXED;
++	ctxt->_eip = smstate->eip;
+ 
+ 	for (i = 0; i < 8; i++)
+-		*reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
+-
+-	val = GET_SMSTATE(u32, smstate, 0x7fcc);
++		*reg_write(ctxt, i) = smstate->gprs[i];
+ 
+-	if (ctxt->ops->set_dr(ctxt, 6, val))
++	if (ctxt->ops->set_dr(ctxt, 6, smstate->dr6))
+ 		return X86EMUL_UNHANDLEABLE;
+-
+-	val = GET_SMSTATE(u32, smstate, 0x7fc8);
+-
+-	if (ctxt->ops->set_dr(ctxt, 7, val))
++	if (ctxt->ops->set_dr(ctxt, 7, smstate->dr7))
+ 		return X86EMUL_UNHANDLEABLE;
+ 
+-	selector = GET_SMSTATE(u32, smstate, 0x7fc4);
+-	set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
+-	set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f60));
+-	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c));
+-	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
++	rsm_load_seg_32(ctxt, &smstate->tr, smstate->tr_sel, VCPU_SREG_TR);
++	rsm_load_seg_32(ctxt, &smstate->ldtr, smstate->ldtr_sel, VCPU_SREG_LDTR);
+ 
+-	selector = GET_SMSTATE(u32, smstate, 0x7fc0);
+-	set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f80));
+-	set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f7c));
+-	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78));
+-	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
+ 
+-	dt.address = GET_SMSTATE(u32, smstate, 0x7f74);
+-	dt.size = GET_SMSTATE(u32, smstate, 0x7f70);
++	dt.address = smstate->gdtr.base;
++	dt.size = smstate->gdtr.limit;
+ 	ctxt->ops->set_gdt(ctxt, &dt);
+ 
+-	dt.address = GET_SMSTATE(u32, smstate, 0x7f58);
+-	dt.size = GET_SMSTATE(u32, smstate, 0x7f54);
++	dt.address = smstate->idtr.base;
++	dt.size = smstate->idtr.limit;
+ 	ctxt->ops->set_idt(ctxt, &dt);
+ 
+-	for (i = 0; i < 6; i++) {
+-		int r = rsm_load_seg_32(ctxt, smstate, i);
+-		if (r != X86EMUL_CONTINUE)
+-			return r;
+-	}
++	rsm_load_seg_32(ctxt, &smstate->es, smstate->es_sel, VCPU_SREG_ES);
++	rsm_load_seg_32(ctxt, &smstate->cs, smstate->cs_sel, VCPU_SREG_CS);
++	rsm_load_seg_32(ctxt, &smstate->ss, smstate->ss_sel, VCPU_SREG_SS);
+ 
+-	cr4 = GET_SMSTATE(u32, smstate, 0x7f14);
++	rsm_load_seg_32(ctxt, &smstate->ds, smstate->ds_sel, VCPU_SREG_DS);
++	rsm_load_seg_32(ctxt, &smstate->fs, smstate->fs_sel, VCPU_SREG_FS);
++	rsm_load_seg_32(ctxt, &smstate->gs, smstate->gs_sel, VCPU_SREG_GS);
+ 
+-	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));
++	ctxt->ops->set_smbase(ctxt, smstate->smbase);
+ 
+-	return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
++	return rsm_enter_protected_mode(ctxt, smstate->cr0,
++					smstate->cr3, smstate->cr4);
+ }
+ 
+ #ifdef CONFIG_X86_64
+@@ -2651,7 +2626,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
+ 		ret = rsm_load_state_64(ctxt, (const char *)&smram);
+ 	else
+ #endif
+-		ret = rsm_load_state_32(ctxt, (const char *)&smram);
++		ret = rsm_load_state_32(ctxt, &smram.smram32);
+ 
+ 	if (ret != X86EMUL_CONTINUE)
+ 		goto emulate_shutdown;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 94c29391b065..579a1cb6a7c8 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -10100,22 +10100,18 @@ static u32 enter_smm_get_segment_flags(struct kvm_segment *seg)
+ 	return flags;
+ }
+ 
+-static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n)
++static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu,
++				  struct kvm_smm_seg_state_32 *state,
++				  u32 *selector,
++				  int n)
+ {
+ 	struct kvm_segment seg;
+-	int offset;
+ 
+ 	kvm_get_segment(vcpu, &seg, n);
+-	put_smstate(u32, buf, 0x7fa8 + n * 4, seg.selector);
+-
+-	if (n < 3)
+-		offset = 0x7f84 + n * 12;
+-	else
+-		offset = 0x7f2c + (n - 3) * 12;
+-
+-	put_smstate(u32, buf, offset + 8, seg.base);
+-	put_smstate(u32, buf, offset + 4, seg.limit);
+-	put_smstate(u32, buf, offset, enter_smm_get_segment_flags(&seg));
++	*selector = seg.selector;
++	state->base = seg.base;
++	state->limit = seg.limit;
++	state->flags = enter_smm_get_segment_flags(&seg);
+ }
+ 
+ #ifdef CONFIG_X86_64
+@@ -10136,54 +10132,47 @@ static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
+ }
+ #endif
+ 
+-static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
++static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, struct kvm_smram_state_32 *smram)
+ {
+ 	struct desc_ptr dt;
+-	struct kvm_segment seg;
+ 	unsigned long val;
+ 	int i;
+ 
+-	put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu));
+-	put_smstate(u32, buf, 0x7ff8, kvm_read_cr3(vcpu));
+-	put_smstate(u32, buf, 0x7ff4, kvm_get_rflags(vcpu));
+-	put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu));
++	smram->cr0 = kvm_read_cr0(vcpu);
++	smram->cr3 = kvm_read_cr3(vcpu);
++	smram->eflags = kvm_get_rflags(vcpu);
++	smram->eip = kvm_rip_read(vcpu);
+ 
+ 	for (i = 0; i < 8; i++)
+-		put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read_raw(vcpu, i));
++		smram->gprs[i] = kvm_register_read_raw(vcpu, i);
+ 
+ 	kvm_get_dr(vcpu, 6, &val);
+-	put_smstate(u32, buf, 0x7fcc, (u32)val);
++	smram->dr6 = (u32)val;
+ 	kvm_get_dr(vcpu, 7, &val);
+-	put_smstate(u32, buf, 0x7fc8, (u32)val);
++	smram->dr7 = (u32)val;
+ 
+-	kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
+-	put_smstate(u32, buf, 0x7fc4, seg.selector);
+-	put_smstate(u32, buf, 0x7f64, seg.base);
+-	put_smstate(u32, buf, 0x7f60, seg.limit);
+-	put_smstate(u32, buf, 0x7f5c, enter_smm_get_segment_flags(&seg));
+-
+-	kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
+-	put_smstate(u32, buf, 0x7fc0, seg.selector);
+-	put_smstate(u32, buf, 0x7f80, seg.base);
+-	put_smstate(u32, buf, 0x7f7c, seg.limit);
+-	put_smstate(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg));
++	enter_smm_save_seg_32(vcpu, &smram->tr, &smram->tr_sel, VCPU_SREG_TR);
++	enter_smm_save_seg_32(vcpu, &smram->ldtr, &smram->ldtr_sel, VCPU_SREG_LDTR);
+ 
+ 	static_call(kvm_x86_get_gdt)(vcpu, &dt);
+-	put_smstate(u32, buf, 0x7f74, dt.address);
+-	put_smstate(u32, buf, 0x7f70, dt.size);
++	smram->gdtr.base = dt.address;
++	smram->gdtr.limit = dt.size;
+ 
+ 	static_call(kvm_x86_get_idt)(vcpu, &dt);
+-	put_smstate(u32, buf, 0x7f58, dt.address);
+-	put_smstate(u32, buf, 0x7f54, dt.size);
++	smram->idtr.base = dt.address;
++	smram->idtr.limit = dt.size;
+ 
+-	for (i = 0; i < 6; i++)
+-		enter_smm_save_seg_32(vcpu, buf, i);
++	enter_smm_save_seg_32(vcpu, &smram->es, &smram->es_sel, VCPU_SREG_ES);
++	enter_smm_save_seg_32(vcpu, &smram->cs, &smram->cs_sel, VCPU_SREG_CS);
++	enter_smm_save_seg_32(vcpu, &smram->ss, &smram->ss_sel, VCPU_SREG_SS);
+ 
+-	put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu));
++	enter_smm_save_seg_32(vcpu, &smram->ds, &smram->ds_sel, VCPU_SREG_DS);
++	enter_smm_save_seg_32(vcpu, &smram->fs, &smram->fs_sel, VCPU_SREG_FS);
++	enter_smm_save_seg_32(vcpu, &smram->gs, &smram->gs_sel, VCPU_SREG_GS);
+ 
+-	/* revision id */
+-	put_smstate(u32, buf, 0x7efc, 0x00020000);
+-	put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
++	smram->cr4 = kvm_read_cr4(vcpu);
++	smram->smm_revision = 0x00020000;
++	smram->smbase = vcpu->arch.smbase;
+ }
+ 
+ #ifdef CONFIG_X86_64
+@@ -10254,7 +10243,7 @@ static void enter_smm(struct kvm_vcpu *vcpu)
+ 		enter_smm_save_state_64(vcpu, (char *)&smram);
+ 	else
+ #endif
+-		enter_smm_save_state_32(vcpu, (char *)&smram);
++		enter_smm_save_state_32(vcpu, &smram.smram32);
+ 
+ 	/*
+ 	 * Give enter_smm() a chance to make ISA-specific changes to the vCPU
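
Note: the kvm_smm_seg_state_32 and kvm_smram_state_32 structs that this patch
starts using are introduced by an earlier patch in the series and do not appear
in this diff. The sketch below is a rough, self-checking model of the 512-byte
32-bit state-save area (SMRAM offsets 0x7e00..0x7fff), reconstructed purely
from the hard-coded offsets in the GET_SMSTATE()/put_smstate() calls removed
above; the reserved-gap sizes are inferred here, not authoritative. It compiles
standalone (e.g. "cc -std=c11 -c smram32_sketch.c"), and the _Static_asserts
verify that each typed field lands on the offset the old code used.

/* smram32_sketch.c: illustrative only, not the kernel's actual definition. */
#include <stddef.h>
#include <stdint.h>

typedef uint32_t u32;

/* 12-byte selector-less segment slot, as consumed by rsm_load_seg_32() above. */
struct kvm_smm_seg_state_32 {
	u32 flags;
	u32 limit;
	u32 base;
} __attribute__((packed));

struct kvm_smram_state_32 {
	u32 reserved1[62];			/* 0x7e00 */
	u32 smbase;				/* 0x7ef8 */
	u32 smm_revision;			/* 0x7efc */
	u32 reserved2[5];			/* 0x7f00 */
	u32 cr4;				/* 0x7f14 */
	u32 reserved3[5];			/* 0x7f18 */
	struct kvm_smm_seg_state_32 ds;		/* 0x7f2c */
	struct kvm_smm_seg_state_32 fs;		/* 0x7f38 */
	struct kvm_smm_seg_state_32 gs;		/* 0x7f44 */
	struct kvm_smm_seg_state_32 idtr;	/* 0x7f50, only limit/base used */
	struct kvm_smm_seg_state_32 tr;		/* 0x7f5c */
	u32 reserved4;				/* 0x7f68 */
	struct kvm_smm_seg_state_32 gdtr;	/* 0x7f6c, only limit/base used */
	struct kvm_smm_seg_state_32 ldtr;	/* 0x7f78 */
	struct kvm_smm_seg_state_32 es;		/* 0x7f84 */
	struct kvm_smm_seg_state_32 cs;		/* 0x7f90 */
	struct kvm_smm_seg_state_32 ss;		/* 0x7f9c */
	u32 es_sel;				/* 0x7fa8 */
	u32 cs_sel;
	u32 ss_sel;
	u32 ds_sel;
	u32 fs_sel;
	u32 gs_sel;
	u32 ldtr_sel;				/* 0x7fc0 */
	u32 tr_sel;				/* 0x7fc4 */
	u32 dr7;				/* 0x7fc8 */
	u32 dr6;				/* 0x7fcc */
	u32 gprs[8];				/* 0x7fd0, EAX..EDI in VCPU_REGS order */
	u32 eip;				/* 0x7ff0 */
	u32 eflags;				/* 0x7ff4 */
	u32 cr3;				/* 0x7ff8 */
	u32 cr0;				/* 0x7ffc */
} __attribute__((packed));

/*
 * GET_SMSTATE()/put_smstate() address a 512-byte buffer that holds SMRAM
 * offsets 0x7e00..0x7fff and subtract 0x7e00 internally, so a field at
 * offsetof() X corresponds to the old hard-coded offset X + 0x7e00.
 */
#define SMRAM_OFF(field) (offsetof(struct kvm_smram_state_32, field) + 0x7e00)
_Static_assert(SMRAM_OFF(smbase) == 0x7ef8, "smbase offset");
_Static_assert(SMRAM_OFF(cr4) == 0x7f14, "cr4 offset");
_Static_assert(SMRAM_OFF(es) == 0x7f84, "es offset");
_Static_assert(SMRAM_OFF(es_sel) == 0x7fa8, "es_sel offset");
_Static_assert(SMRAM_OFF(tr_sel) == 0x7fc4, "tr_sel offset");
_Static_assert(SMRAM_OFF(gprs) == 0x7fd0, "gprs offset");
_Static_assert(SMRAM_OFF(cr0) == 0x7ffc, "cr0 offset");
_Static_assert(sizeof(struct kvm_smram_state_32) == 512, "image size");

The point of the conversion is visible in the asserts: the save path in x86.c
and the restore path in emulate.c now share one typed description of the
layout instead of two independent sets of magic offsets.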