From 832507353ad7381e3cbe2aeb29cf3f222b990df8 Mon Sep 17 00:00:00 2001 From: Thomas Lamprecht Date: Mon, 8 Aug 2022 15:08:52 +0200 Subject: [PATCH] update SMM shadow fixes to v3 Link: https://lore.kernel.org/kvm/20220803155011.43721-1-mlevitsk@redhat.com/ Signed-off-by: Thomas Lamprecht --- ...5-bug-introduce-ASSERT_STRUCT_OFFSET.patch | 53 +++ ...-em_sysexit-should-update-ctxt-mode.patch} | 2 +- ...introduce-emulator_recalc_and_set_m.patch} | 10 +- ...-emulator-remove-assign_eip_near-far.patch | 127 ------- ...-update-the-emulation-mode-after-rsm.patch | 8 +- ...-update-the-emulation-mode-after-CR0.patch | 17 +- ...-smm-add-structs-for-KVM-s-smram-lay.patch | 324 ++++++++++++------ ...-smm-use-smram-structs-in-the-common.patch | 214 ++++++++++++ ...smm-use-smram-struct-for-32-bit-smr.patch} | 28 +- ...smm-use-smram-struct-for-64-bit-smr.patch} | 30 +- ... 0024-KVM-x86-SVM-use-smram-structs.patch} | 55 ++- ...t-save-SVM-state-to-SMRAM-when-VM-is.patch | 40 +++ ...smm-preserve-interrupt-shadow-in-SM.patch} | 95 ++--- 13 files changed, 650 insertions(+), 353 deletions(-) create mode 100644 patches/kernel/0015-bug-introduce-ASSERT_STRUCT_OFFSET.patch rename patches/kernel/{0015-KVM-x86-emulator-em_sysexit-should-update-ctxt-mode.patch => 0016-KVM-x86-emulator-em_sysexit-should-update-ctxt-mode.patch} (96%) rename patches/kernel/{0016-KVM-x86-emulator-introduce-update_emulation_mode.patch => 0017-KVM-x86-emulator-introduce-emulator_recalc_and_set_m.patch} (94%) delete mode 100644 patches/kernel/0017-KVM-x86-emulator-remove-assign_eip_near-far.patch create mode 100644 patches/kernel/0021-KVM-x86-emulator-smm-use-smram-structs-in-the-common.patch rename patches/kernel/{0021-KVM-x86-emulator-smm-use-smram-struct-for-32-bit-smr.patch => 0022-KVM-x86-emulator-smm-use-smram-struct-for-32-bit-smr.patch} (92%) rename patches/kernel/{0022-KVM-x86-emulator-smm-use-smram-struct-for-64-bit-smr.patch => 0023-KVM-x86-emulator-smm-use-smram-struct-for-64-bit-smr.patch} (92%) rename patches/kernel/{0023-KVM-x86-SVM-use-smram-structs.patch => 0024-KVM-x86-SVM-use-smram-structs.patch} (58%) create mode 100644 patches/kernel/0025-KVM-x86-SVM-don-t-save-SVM-state-to-SMRAM-when-VM-is.patch rename patches/kernel/{0024-KVM-x86-emulator-smm-preserve-interrupt-shadow-in-SM.patch => 0026-KVM-x86-emulator-smm-preserve-interrupt-shadow-in-SM.patch} (67%) diff --git a/patches/kernel/0015-bug-introduce-ASSERT_STRUCT_OFFSET.patch b/patches/kernel/0015-bug-introduce-ASSERT_STRUCT_OFFSET.patch new file mode 100644 index 0000000..00c4974 --- /dev/null +++ b/patches/kernel/0015-bug-introduce-ASSERT_STRUCT_OFFSET.patch @@ -0,0 +1,53 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Maxim Levitsky +Date: Wed, 3 Aug 2022 18:49:59 +0300 +Subject: [PATCH] bug: introduce ASSERT_STRUCT_OFFSET + +ASSERT_STRUCT_OFFSET allows to assert during the build of +the kernel that a field in a struct have an expected offset. + +KVM used to have such macro, but there is almost nothing KVM specific +in it so move it to build_bug.h, so that it can be used in other +places in KVM. 
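+
+A minimal usage sketch (the struct, its fields and the offsets below are
+made up for illustration and are not part of this patch; it assumes the
+usual <linux/types.h>, <linux/stddef.h> and <linux/build_bug.h> includes):
+
+  struct example_abi {
+          u32 magic;      /* expected at offset 0 */
+          u32 flags;      /* expected at offset 4 */
+          u64 payload;    /* expected at offset 8 */
+  } __packed;
+
+  static inline void check_example_abi_offsets(void)
+  {
+          /* fails the build if the layout ever drifts */
+          ASSERT_STRUCT_OFFSET(struct example_abi, magic,   0);
+          ASSERT_STRUCT_OFFSET(struct example_abi, flags,   4);
+          ASSERT_STRUCT_OFFSET(struct example_abi, payload, 8);
+  }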
+ +Signed-off-by: Maxim Levitsky +Signed-off-by: Thomas Lamprecht +--- + arch/x86/kvm/vmx/vmcs12.h | 5 ++--- + include/linux/build_bug.h | 9 +++++++++ + 2 files changed, 11 insertions(+), 3 deletions(-) + +diff --git a/arch/x86/kvm/vmx/vmcs12.h b/arch/x86/kvm/vmx/vmcs12.h +index 2a45f026ee11..ba8617964982 100644 +--- a/arch/x86/kvm/vmx/vmcs12.h ++++ b/arch/x86/kvm/vmx/vmcs12.h +@@ -208,9 +208,8 @@ struct __packed vmcs12 { + /* + * For save/restore compatibility, the vmcs12 field offsets must not change. + */ +-#define CHECK_OFFSET(field, loc) \ +- BUILD_BUG_ON_MSG(offsetof(struct vmcs12, field) != (loc), \ +- "Offset of " #field " in struct vmcs12 has changed.") ++#define CHECK_OFFSET(field, loc) \ ++ ASSERT_STRUCT_OFFSET(struct vmcs12, field, loc) + + static inline void vmx_check_vmcs12_offsets(void) + { +diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h +index e3a0be2c90ad..3aa3640f8c18 100644 +--- a/include/linux/build_bug.h ++++ b/include/linux/build_bug.h +@@ -77,4 +77,13 @@ + #define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr) + #define __static_assert(expr, msg, ...) _Static_assert(expr, msg) + ++ ++/* ++ * Compile time check that field has an expected offset ++ */ ++#define ASSERT_STRUCT_OFFSET(type, field, expected_offset) \ ++ BUILD_BUG_ON_MSG(offsetof(type, field) != (expected_offset), \ ++ "Offset of " #field " in " #type " has changed.") ++ ++ + #endif /* _LINUX_BUILD_BUG_H */ diff --git a/patches/kernel/0015-KVM-x86-emulator-em_sysexit-should-update-ctxt-mode.patch b/patches/kernel/0016-KVM-x86-emulator-em_sysexit-should-update-ctxt-mode.patch similarity index 96% rename from patches/kernel/0015-KVM-x86-emulator-em_sysexit-should-update-ctxt-mode.patch rename to patches/kernel/0016-KVM-x86-emulator-em_sysexit-should-update-ctxt-mode.patch index 732349d..f7f8c6a 100644 --- a/patches/kernel/0015-KVM-x86-emulator-em_sysexit-should-update-ctxt-mode.patch +++ b/patches/kernel/0016-KVM-x86-emulator-em_sysexit-should-update-ctxt-mode.patch @@ -1,6 +1,6 @@ From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 From: Maxim Levitsky -Date: Tue, 21 Jun 2022 18:08:52 +0300 +Date: Wed, 3 Aug 2022 18:50:00 +0300 Subject: [PATCH] KVM: x86: emulator: em_sysexit should update ctxt->mode This is one of the instructions that can change the diff --git a/patches/kernel/0016-KVM-x86-emulator-introduce-update_emulation_mode.patch b/patches/kernel/0017-KVM-x86-emulator-introduce-emulator_recalc_and_set_m.patch similarity index 94% rename from patches/kernel/0016-KVM-x86-emulator-introduce-update_emulation_mode.patch rename to patches/kernel/0017-KVM-x86-emulator-introduce-emulator_recalc_and_set_m.patch index cb54bf1..a95e51e 100644 --- a/patches/kernel/0016-KVM-x86-emulator-introduce-update_emulation_mode.patch +++ b/patches/kernel/0017-KVM-x86-emulator-introduce-emulator_recalc_and_set_m.patch @@ -1,7 +1,7 @@ From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 From: Maxim Levitsky -Date: Tue, 21 Jun 2022 18:08:53 +0300 -Subject: [PATCH] KVM: x86: emulator: introduce update_emulation_mode +Date: Wed, 3 Aug 2022 18:50:01 +0300 +Subject: [PATCH] KVM: x86: emulator: introduce emulator_recalc_and_set_mode Some instructions update the cpu execution mode, which needs to update the emulation mode. 
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Lamprecht 1 file changed, 57 insertions(+), 28 deletions(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c -index 35b12692739c..1b5123a882a1 100644 +index 35b12692739c..6a597d68d456 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -795,8 +795,7 @@ static int linearize(struct x86_emulate_ctxt *ctxt, @@ -45,7 +45,7 @@ index 35b12692739c..1b5123a882a1 100644 return rc; } -+static inline int update_emulation_mode(struct x86_emulate_ctxt *ctxt) ++static inline int emulator_recalc_and_set_mode(struct x86_emulate_ctxt *ctxt) +{ + u64 efer; + struct desc_struct cs; @@ -103,7 +103,7 @@ index 35b12692739c..1b5123a882a1 100644 { - enum x86emul_mode mode = ctxt->mode; - int rc; -+ int rc = update_emulation_mode(ctxt); ++ int rc = emulator_recalc_and_set_mode(ctxt); -#ifdef CONFIG_X86_64 - if (ctxt->mode >= X86EMUL_MODE_PROT16) { diff --git a/patches/kernel/0017-KVM-x86-emulator-remove-assign_eip_near-far.patch b/patches/kernel/0017-KVM-x86-emulator-remove-assign_eip_near-far.patch deleted file mode 100644 index 97ba427..0000000 --- a/patches/kernel/0017-KVM-x86-emulator-remove-assign_eip_near-far.patch +++ /dev/null @@ -1,127 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Maxim Levitsky -Date: Tue, 21 Jun 2022 18:08:54 +0300 -Subject: [PATCH] KVM: x86: emulator: remove assign_eip_near/far - -Now the assign_eip_far just updates the emulation mode in addition to -updating the rip, it doesn't make sense to keep that function. - -Move mode update to the callers and remove these functions. - -No functional change is intended. - -Signed-off-by: Maxim Levitsky -Signed-off-by: Thomas Lamprecht ---- - arch/x86/kvm/emulate.c | 47 +++++++++++++++++++++--------------------- - 1 file changed, 24 insertions(+), 23 deletions(-) - -diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c -index 1b5123a882a1..9e305e0cd815 100644 ---- a/arch/x86/kvm/emulate.c -+++ b/arch/x86/kvm/emulate.c -@@ -857,24 +857,9 @@ static inline int update_emulation_mode(struct x86_emulate_ctxt *ctxt) - return X86EMUL_CONTINUE; - } - --static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst) --{ -- return assign_eip(ctxt, dst); --} -- --static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst) --{ -- int rc = update_emulation_mode(ctxt); -- -- if (rc != X86EMUL_CONTINUE) -- return rc; -- -- return assign_eip(ctxt, dst); --} -- - static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) - { -- return assign_eip_near(ctxt, ctxt->_eip + rel); -+ return assign_eip(ctxt, ctxt->_eip + rel); - } - - static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear, -@@ -2183,7 +2168,12 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt) - if (rc != X86EMUL_CONTINUE) - return rc; - -- rc = assign_eip_far(ctxt, ctxt->src.val); -+ rc = update_emulation_mode(ctxt); -+ if (rc != X86EMUL_CONTINUE) -+ return rc; -+ -+ rc = assign_eip(ctxt, ctxt->src.val); -+ - /* Error handling is not implemented. 
*/ - if (rc != X86EMUL_CONTINUE) - return X86EMUL_UNHANDLEABLE; -@@ -2193,7 +2183,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt) - - static int em_jmp_abs(struct x86_emulate_ctxt *ctxt) - { -- return assign_eip_near(ctxt, ctxt->src.val); -+ return assign_eip(ctxt, ctxt->src.val); - } - - static int em_call_near_abs(struct x86_emulate_ctxt *ctxt) -@@ -2202,7 +2192,7 @@ static int em_call_near_abs(struct x86_emulate_ctxt *ctxt) - long int old_eip; - - old_eip = ctxt->_eip; -- rc = assign_eip_near(ctxt, ctxt->src.val); -+ rc = assign_eip(ctxt, ctxt->src.val); - if (rc != X86EMUL_CONTINUE) - return rc; - ctxt->src.val = old_eip; -@@ -2240,7 +2230,7 @@ static int em_ret(struct x86_emulate_ctxt *ctxt) - if (rc != X86EMUL_CONTINUE) - return rc; - -- return assign_eip_near(ctxt, eip); -+ return assign_eip(ctxt, eip); - } - - static int em_ret_far(struct x86_emulate_ctxt *ctxt) -@@ -2264,7 +2254,13 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt) - &new_desc); - if (rc != X86EMUL_CONTINUE) - return rc; -- rc = assign_eip_far(ctxt, eip); -+ -+ rc = update_emulation_mode(ctxt); -+ if (rc != X86EMUL_CONTINUE) -+ return rc; -+ -+ rc = assign_eip(ctxt, eip); -+ - /* Error handling is not implemented. */ - if (rc != X86EMUL_CONTINUE) - return X86EMUL_UNHANDLEABLE; -@@ -3488,7 +3484,12 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt) - if (rc != X86EMUL_CONTINUE) - return rc; - -- rc = assign_eip_far(ctxt, ctxt->src.val); -+ rc = update_emulation_mode(ctxt); -+ if (rc != X86EMUL_CONTINUE) -+ return rc; -+ -+ rc = assign_eip(ctxt, ctxt->src.val); -+ - if (rc != X86EMUL_CONTINUE) - goto fail; - -@@ -3521,7 +3522,7 @@ static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) - rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); - if (rc != X86EMUL_CONTINUE) - return rc; -- rc = assign_eip_near(ctxt, eip); -+ rc = assign_eip(ctxt, eip); - if (rc != X86EMUL_CONTINUE) - return rc; - rsp_increment(ctxt, ctxt->src.val); diff --git a/patches/kernel/0018-KVM-x86-emulator-update-the-emulation-mode-after-rsm.patch b/patches/kernel/0018-KVM-x86-emulator-update-the-emulation-mode-after-rsm.patch index b18ccf5..0f1c9ca 100644 --- a/patches/kernel/0018-KVM-x86-emulator-update-the-emulation-mode-after-rsm.patch +++ b/patches/kernel/0018-KVM-x86-emulator-update-the-emulation-mode-after-rsm.patch @@ -1,6 +1,6 @@ From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 From: Maxim Levitsky -Date: Tue, 21 Jun 2022 18:08:55 +0300 +Date: Wed, 3 Aug 2022 18:50:02 +0300 Subject: [PATCH] KVM: x86: emulator: update the emulation mode after rsm This ensures that RIP will be correctly written back, @@ -17,15 +17,15 @@ Signed-off-by: Thomas Lamprecht 1 file changed, 5 insertions(+) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c -index 9e305e0cd815..c582639ea2b4 100644 +index 6a597d68d456..49697d589f87 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c -@@ -2635,6 +2635,11 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt) +@@ -2639,6 +2639,11 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt) if (ret != X86EMUL_CONTINUE) goto emulate_shutdown; + -+ ret = update_emulation_mode(ctxt); ++ ret = emulator_recalc_and_set_mode(ctxt); + if (ret != X86EMUL_CONTINUE) + goto emulate_shutdown; + diff --git a/patches/kernel/0019-KVM-x86-emulator-update-the-emulation-mode-after-CR0.patch b/patches/kernel/0019-KVM-x86-emulator-update-the-emulation-mode-after-CR0.patch index 21c6732..f9a69ce 100644 --- a/patches/kernel/0019-KVM-x86-emulator-update-the-emulation-mode-after-CR0.patch +++ 
b/patches/kernel/0019-KVM-x86-emulator-update-the-emulation-mode-after-CR0.patch @@ -1,6 +1,6 @@ From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 From: Maxim Levitsky -Date: Tue, 21 Jun 2022 18:08:56 +0300 +Date: Wed, 3 Aug 2022 18:50:03 +0300 Subject: [PATCH] KVM: x86: emulator: update the emulation mode after CR0 write CR0.PE toggles real/protected mode, thus its update @@ -10,17 +10,19 @@ This is likely a benign bug because there is no writeback of state, other than the RIP increment, and when toggling CR0.PE, the CPU has to execute code from a very low memory address. +Also CR0.PG toggle when EFER.LMA is set, toggles the long mode. + Signed-off-by: Maxim Levitsky Signed-off-by: Thomas Lamprecht --- - arch/x86/kvm/emulate.c | 13 ++++++++++++- - 1 file changed, 12 insertions(+), 1 deletion(-) + arch/x86/kvm/emulate.c | 14 +++++++++++++- + 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c -index c582639ea2b4..38d9bfa650ec 100644 +index 49697d589f87..89f035fc52e7 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c -@@ -3636,11 +3636,22 @@ static int em_movbe(struct x86_emulate_ctxt *ctxt) +@@ -3635,11 +3635,23 @@ static int em_movbe(struct x86_emulate_ctxt *ctxt) static int em_cr_write(struct x86_emulate_ctxt *ctxt) { @@ -35,8 +37,9 @@ index c582639ea2b4..38d9bfa650ec 100644 ctxt->dst.type = OP_NONE; + + if (cr_num == 0) { -+ /* CR0 write might have updated CR0.PE */ -+ r = update_emulation_mode(ctxt); ++ /* CR0 write might have updated CR0.PE and/or CR0.PG ++ * which can affect the cpu execution mode */ ++ r = emulator_recalc_and_set_mode(ctxt); + if (r != X86EMUL_CONTINUE) + return r; + } diff --git a/patches/kernel/0020-KVM-x86-emulator-smm-add-structs-for-KVM-s-smram-lay.patch b/patches/kernel/0020-KVM-x86-emulator-smm-add-structs-for-KVM-s-smram-lay.patch index e2e51f7..c7c086f 100644 --- a/patches/kernel/0020-KVM-x86-emulator-smm-add-structs-for-KVM-s-smram-lay.patch +++ b/patches/kernel/0020-KVM-x86-emulator-smm-add-structs-for-KVM-s-smram-lay.patch @@ -1,6 +1,6 @@ From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 From: Maxim Levitsky -Date: Tue, 21 Jun 2022 18:08:58 +0300 +Date: Wed, 3 Aug 2022 18:50:05 +0300 Subject: [PATCH] KVM: x86: emulator/smm: add structs for KVM's smram layout Those structs will be used to read/write the smram state image. @@ -11,22 +11,43 @@ layout that is used by real Intel/AMD cpus. 
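+
+A usage sketch of the end result (the helper below is hypothetical and
+assumes the usual kernel includes; union kvm_smram, its smram64 member
+and the efer/svm_guest_flag fields are the ones introduced by this
+series, EFER_SVME is the existing architectural bit):
+
+  static bool example_smi_came_from_svm_guest(const union kvm_smram *smram)
+  {
+          /* was: GET_SMSTATE(u64, smstate, 0x7ed8) */
+          if (!smram->smram64.svm_guest_flag)
+                  return false;
+
+          /* was: GET_SMSTATE(u64, smstate, 0x7ed0) & EFER_SVME */
+          return !!(smram->smram64.efer & EFER_SVME);
+  }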
Signed-off-by: Maxim Levitsky Signed-off-by: Thomas Lamprecht --- - arch/x86/kvm/kvm_emulate.h | 139 +++++++++++++++++++++++++++++++++++++ - 1 file changed, 139 insertions(+) + arch/x86/kvm/emulate.c | 6 + + arch/x86/kvm/kvm_emulate.h | 218 +++++++++++++++++++++++++++++++++++++ + arch/x86/kvm/x86.c | 1 + + 3 files changed, 225 insertions(+) +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c +index 89f035fc52e7..bfaf5d24bf1e 100644 +--- a/arch/x86/kvm/emulate.c ++++ b/arch/x86/kvm/emulate.c +@@ -5825,3 +5825,9 @@ bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt) + + return true; + } ++ ++void __init kvm_emulator_init(void) ++{ ++ __check_smram32_offsets(); ++ __check_smram64_offsets(); ++} diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h -index fb09cd22cb7f..d16b377be70b 100644 +index fb09cd22cb7f..0b2bbcce321a 100644 --- a/arch/x86/kvm/kvm_emulate.h +++ b/arch/x86/kvm/kvm_emulate.h -@@ -482,6 +482,145 @@ enum x86_intercept { +@@ -13,6 +13,7 @@ + #define _ASM_X86_KVM_X86_EMULATE_H + + #include ++#include + #include "fpu.h" + + struct x86_emulate_ctxt; +@@ -482,6 +483,223 @@ enum x86_intercept { nr_x86_intercepts }; + -+/* -+ * 32 bit KVM's emulated SMM layout -+ * Loosely based on Intel's layout -+ */ ++/* 32 bit KVM's emulated SMM layout. Loosely based on Intel's layout */ + +struct kvm_smm_seg_state_32 { + u32 flags; @@ -35,58 +56,89 @@ index fb09cd22cb7f..d16b377be70b 100644 +} __packed; + +struct kvm_smram_state_32 { -+ -+ u32 reserved1[62]; /* FE00 - FEF7 */ -+ u32 smbase; /* FEF8 */ -+ u32 smm_revision; /* FEFC */ -+ u32 reserved2[5]; /* FF00-FF13 */ -+ /* CR4 is not present in Intel/AMD SMRAM image*/ -+ u32 cr4; /* FF14 */ -+ u32 reserved3[5]; /* FF18 */ ++ u32 reserved1[62]; ++ u32 smbase; ++ u32 smm_revision; ++ u32 reserved2[5]; ++ u32 cr4; /* CR4 is not present in Intel/AMD SMRAM image */ ++ u32 reserved3[5]; + + /* -+ * Segment state is not present/documented in the -+ * Intel/AMD SMRAM image ++ * Segment state is not present/documented in the Intel/AMD SMRAM image ++ * Instead this area on Intel/AMD contains IO/HLT restart flags. 
+ */ -+ struct kvm_smm_seg_state_32 ds; /* FF2C */ -+ struct kvm_smm_seg_state_32 fs; /* FF38 */ -+ struct kvm_smm_seg_state_32 gs; /* FF44 */ -+ /* idtr has only base and limit*/ -+ struct kvm_smm_seg_state_32 idtr; /* FF50 */ -+ struct kvm_smm_seg_state_32 tr; /* FF5C */ -+ u32 reserved; /* FF68 */ -+ /* gdtr has only base and limit*/ -+ struct kvm_smm_seg_state_32 gdtr; /* FF6C */ -+ struct kvm_smm_seg_state_32 ldtr; /* FF78 */ -+ struct kvm_smm_seg_state_32 es; /* FF84 */ -+ struct kvm_smm_seg_state_32 cs; /* FF90 */ -+ struct kvm_smm_seg_state_32 ss; /* FF9C */ -+ -+ u32 es_sel; /* FFA8 */ -+ u32 cs_sel; /* FFAC */ -+ u32 ss_sel; /* FFB0 */ -+ u32 ds_sel; /* FFB4 */ -+ u32 fs_sel; /* FFB8 */ -+ u32 gs_sel; /* FFBC */ -+ u32 ldtr_sel; /* FFC0 */ -+ u32 tr_sel; /* FFC4 */ -+ -+ u32 dr7; /* FFC8 */ -+ u32 dr6; /* FFCC */ -+ -+ /* GPRS in the "natural" X86 order (RAX/RCX/RDX.../RDI)*/ -+ u32 gprs[8]; /* FFD0-FFEC */ -+ -+ u32 eip; /* FFF0 */ -+ u32 eflags; /* FFF4 */ -+ u32 cr3; /* FFF8 */ -+ u32 cr0; /* FFFC */ ++ struct kvm_smm_seg_state_32 ds; ++ struct kvm_smm_seg_state_32 fs; ++ struct kvm_smm_seg_state_32 gs; ++ struct kvm_smm_seg_state_32 idtr; /* IDTR has only base and limit */ ++ struct kvm_smm_seg_state_32 tr; ++ u32 reserved; ++ struct kvm_smm_seg_state_32 gdtr; /* GDTR has only base and limit */ ++ struct kvm_smm_seg_state_32 ldtr; ++ struct kvm_smm_seg_state_32 es; ++ struct kvm_smm_seg_state_32 cs; ++ struct kvm_smm_seg_state_32 ss; ++ ++ u32 es_sel; ++ u32 cs_sel; ++ u32 ss_sel; ++ u32 ds_sel; ++ u32 fs_sel; ++ u32 gs_sel; ++ u32 ldtr_sel; ++ u32 tr_sel; ++ ++ u32 dr7; ++ u32 dr6; ++ u32 gprs[8]; /* GPRS in the "natural" X86 order (EAX/ECX/EDX.../EDI) */ ++ u32 eip; ++ u32 eflags; ++ u32 cr3; ++ u32 cr0; +} __packed; + -+/* -+ * 64 bit KVM's emulated SMM layout -+ * Based on AMD64 layout -+ */ ++ ++static inline void __check_smram32_offsets(void) ++{ ++#define __CHECK_SMRAM32_OFFSET(field, offset) \ ++ ASSERT_STRUCT_OFFSET(struct kvm_smram_state_32, field, offset - 0xFE00) ++ ++ __CHECK_SMRAM32_OFFSET(reserved1, 0xFE00); ++ __CHECK_SMRAM32_OFFSET(smbase, 0xFEF8); ++ __CHECK_SMRAM32_OFFSET(smm_revision, 0xFEFC); ++ __CHECK_SMRAM32_OFFSET(reserved2, 0xFF00); ++ __CHECK_SMRAM32_OFFSET(cr4, 0xFF14); ++ __CHECK_SMRAM32_OFFSET(reserved3, 0xFF18); ++ __CHECK_SMRAM32_OFFSET(ds, 0xFF2C); ++ __CHECK_SMRAM32_OFFSET(fs, 0xFF38); ++ __CHECK_SMRAM32_OFFSET(gs, 0xFF44); ++ __CHECK_SMRAM32_OFFSET(idtr, 0xFF50); ++ __CHECK_SMRAM32_OFFSET(tr, 0xFF5C); ++ __CHECK_SMRAM32_OFFSET(gdtr, 0xFF6C); ++ __CHECK_SMRAM32_OFFSET(ldtr, 0xFF78); ++ __CHECK_SMRAM32_OFFSET(es, 0xFF84); ++ __CHECK_SMRAM32_OFFSET(cs, 0xFF90); ++ __CHECK_SMRAM32_OFFSET(ss, 0xFF9C); ++ __CHECK_SMRAM32_OFFSET(es_sel, 0xFFA8); ++ __CHECK_SMRAM32_OFFSET(cs_sel, 0xFFAC); ++ __CHECK_SMRAM32_OFFSET(ss_sel, 0xFFB0); ++ __CHECK_SMRAM32_OFFSET(ds_sel, 0xFFB4); ++ __CHECK_SMRAM32_OFFSET(fs_sel, 0xFFB8); ++ __CHECK_SMRAM32_OFFSET(gs_sel, 0xFFBC); ++ __CHECK_SMRAM32_OFFSET(ldtr_sel, 0xFFC0); ++ __CHECK_SMRAM32_OFFSET(tr_sel, 0xFFC4); ++ __CHECK_SMRAM32_OFFSET(dr7, 0xFFC8); ++ __CHECK_SMRAM32_OFFSET(dr6, 0xFFCC); ++ __CHECK_SMRAM32_OFFSET(gprs, 0xFFD0); ++ __CHECK_SMRAM32_OFFSET(eip, 0xFFF0); ++ __CHECK_SMRAM32_OFFSET(eflags, 0xFFF4); ++ __CHECK_SMRAM32_OFFSET(cr3, 0xFFF8); ++ __CHECK_SMRAM32_OFFSET(cr0, 0xFFFC); ++#undef __CHECK_SMRAM32_OFFSET ++} ++ ++ ++/* 64 bit KVM's emulated SMM layout. 
Based on AMD64 layout */ + +struct kvm_smm_seg_state_64 { + u16 selector; @@ -96,71 +148,133 @@ index fb09cd22cb7f..d16b377be70b 100644 +}; + +struct kvm_smram_state_64 { -+ struct kvm_smm_seg_state_64 es; /* FE00 (R/O) */ -+ struct kvm_smm_seg_state_64 cs; /* FE10 (R/O) */ -+ struct kvm_smm_seg_state_64 ss; /* FE20 (R/O) */ -+ struct kvm_smm_seg_state_64 ds; /* FE30 (R/O) */ -+ struct kvm_smm_seg_state_64 fs; /* FE40 (R/O) */ -+ struct kvm_smm_seg_state_64 gs; /* FE50 (R/O) */ -+ -+ /* gdtr has only base and limit*/ -+ struct kvm_smm_seg_state_64 gdtr; /* FE60 (R/O) */ -+ struct kvm_smm_seg_state_64 ldtr; /* FE70 (R/O) */ + -+ /* idtr has only base and limit*/ -+ struct kvm_smm_seg_state_64 idtr; /* FE80 (R/O) */ -+ struct kvm_smm_seg_state_64 tr; /* FE90 (R/O) */ ++ struct kvm_smm_seg_state_64 es; ++ struct kvm_smm_seg_state_64 cs; ++ struct kvm_smm_seg_state_64 ss; ++ struct kvm_smm_seg_state_64 ds; ++ struct kvm_smm_seg_state_64 fs; ++ struct kvm_smm_seg_state_64 gs; ++ struct kvm_smm_seg_state_64 gdtr; /* GDTR has only base and limit*/ ++ struct kvm_smm_seg_state_64 ldtr; ++ struct kvm_smm_seg_state_64 idtr; /* IDTR has only base and limit*/ ++ struct kvm_smm_seg_state_64 tr; + + /* I/O restart and auto halt restart are not implemented by KVM */ -+ u64 io_restart_rip; /* FEA0 (R/O) */ -+ u64 io_restart_rcx; /* FEA8 (R/O) */ -+ u64 io_restart_rsi; /* FEB0 (R/O) */ -+ u64 io_restart_rdi; /* FEB8 (R/O) */ -+ u32 io_restart_dword; /* FEC0 (R/O) */ -+ u32 reserved1; /* FEC4 */ -+ u8 io_instruction_restart; /* FEC8 (R/W) */ -+ u8 auto_halt_restart; /* FEC9 (R/W) */ -+ u8 reserved2[6]; /* FECA-FECF */ -+ -+ u64 efer; /* FED0 (R/O) */ ++ u64 io_restart_rip; ++ u64 io_restart_rcx; ++ u64 io_restart_rsi; ++ u64 io_restart_rdi; ++ u32 io_restart_dword; ++ u32 reserved1; ++ u8 io_inst_restart; ++ u8 auto_hlt_restart; ++ u8 reserved2[6]; ++ ++ u64 efer; + + /* -+ * Implemented on AMD only, to store current SVM guest address. -+ * svm_guest_virtual_int has unknown purpose, not implemented. ++ * Two fields below are implemented on AMD only, to store ++ * SVM guest vmcb address if the #SMI was received while in the guest mode. + */ ++ u64 svm_guest_flag; ++ u64 svm_guest_vmcb_gpa; ++ u64 svm_guest_virtual_int; /* unknown purpose, not implemented */ + -+ u64 svm_guest_flag; /* FED8 (R/O) */ -+ u64 svm_guest_vmcb_gpa; /* FEE0 (R/O) */ -+ u64 svm_guest_virtual_int; /* FEE8 (R/O) */ ++ u32 reserved3[3]; ++ u32 smm_revison; ++ u32 smbase; ++ u32 reserved4[5]; + -+ u32 reserved3[3]; /* FEF0-FEFB */ -+ u32 smm_revison; /* FEFC (R/O) */ -+ u32 smbase; /* FFF0 (R/W) */ -+ u32 reserved4[5]; /* FF04-FF17 */ ++ /* ssp and svm_* fields below are not implemented by KVM */ ++ u64 ssp; ++ u64 svm_guest_pat; ++ u64 svm_host_efer; ++ u64 svm_host_cr4; ++ u64 svm_host_cr3; ++ u64 svm_host_cr0; + -+ /* SSP and SVM fields below are not implemented by KVM */ -+ u64 ssp; /* FF18 (R/W) */ -+ u64 svm_guest_pat; /* FF20 (R/O) */ -+ u64 svm_host_efer; /* FF28 (R/O) */ -+ u64 svm_host_cr4; /* FF30 (R/O) */ -+ u64 svm_host_cr3; /* FF38 (R/O) */ -+ u64 svm_host_cr0; /* FF40 (R/O) */ ++ u64 cr4; ++ u64 cr3; ++ u64 cr0; ++ u64 dr7; ++ u64 dr6; ++ u64 rflags; ++ u64 rip; ++ u64 gprs[16]; /* GPRS in a reversed "natural" X86 order (R15/R14/../RCX/RAX.) 
*/ ++}; + -+ u64 cr4; /* FF48 (R/O) */ -+ u64 cr3; /* FF50 (R/O) */ -+ u64 cr0; /* FF58 (R/O) */ + -+ u64 dr7; /* FF60 (R/O) */ -+ u64 dr6; /* FF68 (R/O) */ ++static inline void __check_smram64_offsets(void) ++{ ++#define __CHECK_SMRAM64_OFFSET(field, offset) \ ++ ASSERT_STRUCT_OFFSET(struct kvm_smram_state_64, field, offset - 0xFE00) + -+ u64 rflags; /* FF70 (R/W) */ -+ u64 rip; /* FF78 (R/W) */ ++ __CHECK_SMRAM64_OFFSET(es, 0xFE00); ++ __CHECK_SMRAM64_OFFSET(cs, 0xFE10); ++ __CHECK_SMRAM64_OFFSET(ss, 0xFE20); ++ __CHECK_SMRAM64_OFFSET(ds, 0xFE30); ++ __CHECK_SMRAM64_OFFSET(fs, 0xFE40); ++ __CHECK_SMRAM64_OFFSET(gs, 0xFE50); ++ __CHECK_SMRAM64_OFFSET(gdtr, 0xFE60); ++ __CHECK_SMRAM64_OFFSET(ldtr, 0xFE70); ++ __CHECK_SMRAM64_OFFSET(idtr, 0xFE80); ++ __CHECK_SMRAM64_OFFSET(tr, 0xFE90); ++ __CHECK_SMRAM64_OFFSET(io_restart_rip, 0xFEA0); ++ __CHECK_SMRAM64_OFFSET(io_restart_rcx, 0xFEA8); ++ __CHECK_SMRAM64_OFFSET(io_restart_rsi, 0xFEB0); ++ __CHECK_SMRAM64_OFFSET(io_restart_rdi, 0xFEB8); ++ __CHECK_SMRAM64_OFFSET(io_restart_dword, 0xFEC0); ++ __CHECK_SMRAM64_OFFSET(reserved1, 0xFEC4); ++ __CHECK_SMRAM64_OFFSET(io_inst_restart, 0xFEC8); ++ __CHECK_SMRAM64_OFFSET(auto_hlt_restart, 0xFEC9); ++ __CHECK_SMRAM64_OFFSET(reserved2, 0xFECA); ++ __CHECK_SMRAM64_OFFSET(efer, 0xFED0); ++ __CHECK_SMRAM64_OFFSET(svm_guest_flag, 0xFED8); ++ __CHECK_SMRAM64_OFFSET(svm_guest_vmcb_gpa, 0xFEE0); ++ __CHECK_SMRAM64_OFFSET(svm_guest_virtual_int, 0xFEE8); ++ __CHECK_SMRAM64_OFFSET(reserved3, 0xFEF0); ++ __CHECK_SMRAM64_OFFSET(smm_revison, 0xFEFC); ++ __CHECK_SMRAM64_OFFSET(smbase, 0xFF00); ++ __CHECK_SMRAM64_OFFSET(reserved4, 0xFF04); ++ __CHECK_SMRAM64_OFFSET(ssp, 0xFF18); ++ __CHECK_SMRAM64_OFFSET(svm_guest_pat, 0xFF20); ++ __CHECK_SMRAM64_OFFSET(svm_host_efer, 0xFF28); ++ __CHECK_SMRAM64_OFFSET(svm_host_cr4, 0xFF30); ++ __CHECK_SMRAM64_OFFSET(svm_host_cr3, 0xFF38); ++ __CHECK_SMRAM64_OFFSET(svm_host_cr0, 0xFF40); ++ __CHECK_SMRAM64_OFFSET(cr4, 0xFF48); ++ __CHECK_SMRAM64_OFFSET(cr3, 0xFF50); ++ __CHECK_SMRAM64_OFFSET(cr0, 0xFF58); ++ __CHECK_SMRAM64_OFFSET(dr7, 0xFF60); ++ __CHECK_SMRAM64_OFFSET(dr6, 0xFF68); ++ __CHECK_SMRAM64_OFFSET(rflags, 0xFF70); ++ __CHECK_SMRAM64_OFFSET(rip, 0xFF78); ++ __CHECK_SMRAM64_OFFSET(gprs, 0xFF80); ++#undef __CHECK_SMRAM64_OFFSET ++} + -+ /* GPRS in a reversed "natural" X86 order (R15/R14/../RCX/RAX.) */ -+ u64 gprs[16]; /* FF80-FFFF (R/W) */ ++union kvm_smram { ++ struct kvm_smram_state_64 smram64; ++ struct kvm_smram_state_32 smram32; ++ u8 bytes[512]; +}; + ++void __init kvm_emulator_init(void); ++ + /* Host execution mode. 
*/ #if defined(CONFIG_X86_32) #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 922fc258c37f..07575e5eb254 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -12442,6 +12442,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit); + static int __init kvm_x86_init(void) + { + kvm_mmu_x86_module_init(); ++ kvm_emulator_init(); + return 0; + } + module_init(kvm_x86_init); diff --git a/patches/kernel/0021-KVM-x86-emulator-smm-use-smram-structs-in-the-common.patch b/patches/kernel/0021-KVM-x86-emulator-smm-use-smram-structs-in-the-common.patch new file mode 100644 index 0000000..af4b038 --- /dev/null +++ b/patches/kernel/0021-KVM-x86-emulator-smm-use-smram-structs-in-the-common.patch @@ -0,0 +1,214 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Maxim Levitsky +Date: Wed, 3 Aug 2022 18:50:06 +0300 +Subject: [PATCH] KVM: x86: emulator/smm: use smram structs in the common code + +Switch from using a raw array to 'union kvm_smram'. + +Signed-off-by: Maxim Levitsky +Signed-off-by: Thomas Lamprecht +--- + arch/x86/include/asm/kvm_host.h | 5 +++-- + arch/x86/kvm/emulate.c | 12 +++++++----- + arch/x86/kvm/kvm_emulate.h | 3 ++- + arch/x86/kvm/svm/svm.c | 8 ++++++-- + arch/x86/kvm/vmx/vmx.c | 4 ++-- + arch/x86/kvm/x86.c | 16 ++++++++-------- + 6 files changed, 28 insertions(+), 20 deletions(-) + +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h +index 867febee8fc3..fb48dd8773e1 100644 +--- a/arch/x86/include/asm/kvm_host.h ++++ b/arch/x86/include/asm/kvm_host.h +@@ -200,6 +200,7 @@ typedef enum exit_fastpath_completion fastpath_t; + + struct x86_emulate_ctxt; + struct x86_exception; ++union kvm_smram; + enum x86_intercept; + enum x86_intercept_stage; + +@@ -1463,8 +1464,8 @@ struct kvm_x86_ops { + void (*setup_mce)(struct kvm_vcpu *vcpu); + + int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection); +- int (*enter_smm)(struct kvm_vcpu *vcpu, char *smstate); +- int (*leave_smm)(struct kvm_vcpu *vcpu, const char *smstate); ++ int (*enter_smm)(struct kvm_vcpu *vcpu, union kvm_smram *smram); ++ int (*leave_smm)(struct kvm_vcpu *vcpu, const union kvm_smram *smram); + void (*enable_smi_window)(struct kvm_vcpu *vcpu); + + int (*mem_enc_op)(struct kvm *kvm, void __user *argp); +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c +index bfaf5d24bf1e..730c3e2662d6 100644 +--- a/arch/x86/kvm/emulate.c ++++ b/arch/x86/kvm/emulate.c +@@ -2567,16 +2567,18 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, + static int em_rsm(struct x86_emulate_ctxt *ctxt) + { + unsigned long cr0, cr4, efer; +- char buf[512]; ++ const union kvm_smram smram; + u64 smbase; + int ret; + ++ BUILD_BUG_ON(sizeof(smram) != 512); ++ + if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0) + return emulate_ud(ctxt); + + smbase = ctxt->ops->get_smbase(ctxt); + +- ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf)); ++ ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, (void *)&smram, sizeof(smram)); + if (ret != X86EMUL_CONTINUE) + return X86EMUL_UNHANDLEABLE; + +@@ -2626,15 +2628,15 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt) + * state (e.g. enter guest mode) before loading state from the SMM + * state-save area. 
+ */ +- if (ctxt->ops->leave_smm(ctxt, buf)) ++ if (ctxt->ops->leave_smm(ctxt, &smram)) + goto emulate_shutdown; + + #ifdef CONFIG_X86_64 + if (emulator_has_longmode(ctxt)) +- ret = rsm_load_state_64(ctxt, buf); ++ ret = rsm_load_state_64(ctxt, (const char *)&smram); + else + #endif +- ret = rsm_load_state_32(ctxt, buf); ++ ret = rsm_load_state_32(ctxt, (const char *)&smram); + + if (ret != X86EMUL_CONTINUE) + goto emulate_shutdown; +diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h +index 0b2bbcce321a..3b37b3e17379 100644 +--- a/arch/x86/kvm/kvm_emulate.h ++++ b/arch/x86/kvm/kvm_emulate.h +@@ -19,6 +19,7 @@ + struct x86_emulate_ctxt; + enum x86_intercept; + enum x86_intercept_stage; ++union kvm_smram; + + struct x86_exception { + u8 vector; +@@ -233,7 +234,7 @@ struct x86_emulate_ops { + + unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt); + void (*exiting_smm)(struct x86_emulate_ctxt *ctxt); +- int (*leave_smm)(struct x86_emulate_ctxt *ctxt, const char *smstate); ++ int (*leave_smm)(struct x86_emulate_ctxt *ctxt, const union kvm_smram *smram); + void (*triple_fault)(struct x86_emulate_ctxt *ctxt); + int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr); + }; +diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c +index 21f747eacc9a..d903120811b9 100644 +--- a/arch/x86/kvm/svm/svm.c ++++ b/arch/x86/kvm/svm/svm.c +@@ -4302,12 +4302,14 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection) + return !svm_smi_blocked(vcpu); + } + +-static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate) ++static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram) + { + struct vcpu_svm *svm = to_svm(vcpu); + struct kvm_host_map map_save; + int ret; + ++ char *smstate = (char *)smram; ++ + if (!is_guest_mode(vcpu)) + return 0; + +@@ -4349,7 +4351,7 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate) + return 0; + } + +-static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) ++static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram) + { + struct vcpu_svm *svm = to_svm(vcpu); + struct kvm_host_map map, map_save; +@@ -4357,6 +4359,8 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) + struct vmcb *vmcb12; + int ret; + ++ const char *smstate = (const char *)smram; ++ + if (!guest_cpuid_has(vcpu, X86_FEATURE_LM)) + return 0; + +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c +index 417176817d80..a45a43bcc844 100644 +--- a/arch/x86/kvm/vmx/vmx.c ++++ b/arch/x86/kvm/vmx/vmx.c +@@ -7594,7 +7594,7 @@ static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection) + return !is_smm(vcpu); + } + +-static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate) ++static int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram) + { + struct vcpu_vmx *vmx = to_vmx(vcpu); + +@@ -7608,7 +7608,7 @@ static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate) + return 0; + } + +-static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) ++static int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram) + { + struct vcpu_vmx *vmx = to_vmx(vcpu); + int ret; +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 07575e5eb254..2ebbb441880c 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -7312,9 +7312,9 @@ static void emulator_exiting_smm(struct x86_emulate_ctxt *ctxt) + } + + static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt, +- const char *smstate) ++ const union kvm_smram *smram) + { +- return 
static_call(kvm_x86_leave_smm)(emul_to_vcpu(ctxt), smstate); ++ return static_call(kvm_x86_leave_smm)(emul_to_vcpu(ctxt), smram); + } + + static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt) +@@ -9164,25 +9164,25 @@ static void enter_smm(struct kvm_vcpu *vcpu) + struct kvm_segment cs, ds; + struct desc_ptr dt; + unsigned long cr0; +- char buf[512]; ++ union kvm_smram smram; + +- memset(buf, 0, 512); ++ memset(smram.bytes, 0, sizeof(smram.bytes)); + #ifdef CONFIG_X86_64 + if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) +- enter_smm_save_state_64(vcpu, buf); ++ enter_smm_save_state_64(vcpu, (char *)&smram); + else + #endif +- enter_smm_save_state_32(vcpu, buf); ++ enter_smm_save_state_32(vcpu, (char *)&smram); + + /* + * Give enter_smm() a chance to make ISA-specific changes to the vCPU + * state (e.g. leave guest mode) after we've saved the state into the + * SMM state-save area. + */ +- static_call(kvm_x86_enter_smm)(vcpu, buf); ++ static_call(kvm_x86_enter_smm)(vcpu, &smram); + + kvm_smm_changed(vcpu, true); +- kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf)); ++ kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, &smram, sizeof(smram)); + + if (static_call(kvm_x86_get_nmi_mask)(vcpu)) + vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; diff --git a/patches/kernel/0021-KVM-x86-emulator-smm-use-smram-struct-for-32-bit-smr.patch b/patches/kernel/0022-KVM-x86-emulator-smm-use-smram-struct-for-32-bit-smr.patch similarity index 92% rename from patches/kernel/0021-KVM-x86-emulator-smm-use-smram-struct-for-32-bit-smr.patch rename to patches/kernel/0022-KVM-x86-emulator-smm-use-smram-struct-for-32-bit-smr.patch index ba324ae..6985f2d 100644 --- a/patches/kernel/0021-KVM-x86-emulator-smm-use-smram-struct-for-32-bit-smr.patch +++ b/patches/kernel/0022-KVM-x86-emulator-smm-use-smram-struct-for-32-bit-smr.patch @@ -1,6 +1,6 @@ From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 From: Maxim Levitsky -Date: Tue, 21 Jun 2022 18:08:59 +0300 +Date: Wed, 3 Aug 2022 18:50:07 +0300 Subject: [PATCH] KVM: x86: emulator/smm: use smram struct for 32 bit smram load/restore @@ -15,16 +15,16 @@ Signed-off-by: Thomas Lamprecht 2 files changed, 60 insertions(+), 96 deletions(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c -index 38d9bfa650ec..44ce8d51f18b 100644 +index 730c3e2662d6..ad5d2ab9ab84 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c -@@ -2340,25 +2340,17 @@ static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags) +@@ -2344,25 +2344,17 @@ static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags) desc->type = (flags >> 8) & 15; } -static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate, +static void rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, -+ struct kvm_smm_seg_state_32 *state, ++ const struct kvm_smm_seg_state_32 *state, + u16 selector, int n) { @@ -50,12 +50,12 @@ index 38d9bfa650ec..44ce8d51f18b 100644 } #ifdef CONFIG_X86_64 -@@ -2429,63 +2421,46 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt, +@@ -2433,63 +2425,46 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt, } static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, - const char *smstate) -+ struct kvm_smram_state_32 *smstate) ++ const struct kvm_smram_state_32 *smstate) { - struct desc_struct desc; struct desc_ptr dt; @@ -135,17 +135,17 @@ index 38d9bfa650ec..44ce8d51f18b 100644 } #ifdef CONFIG_X86_64 -@@ -2630,7 +2605,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt) - ret 
= rsm_load_state_64(ctxt, buf); +@@ -2636,7 +2611,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt) + ret = rsm_load_state_64(ctxt, (const char *)&smram); else #endif -- ret = rsm_load_state_32(ctxt, buf); -+ ret = rsm_load_state_32(ctxt, (struct kvm_smram_state_32 *)buf); +- ret = rsm_load_state_32(ctxt, (const char *)&smram); ++ ret = rsm_load_state_32(ctxt, &smram.smram32); if (ret != X86EMUL_CONTINUE) goto emulate_shutdown; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c -index 922fc258c37f..b626eb3fa376 100644 +index 2ebbb441880c..8a6b9bffc770 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9018,22 +9018,18 @@ static u32 enter_smm_get_segment_flags(struct kvm_segment *seg) @@ -258,11 +258,11 @@ index 922fc258c37f..b626eb3fa376 100644 #ifdef CONFIG_X86_64 @@ -9172,7 +9161,7 @@ static void enter_smm(struct kvm_vcpu *vcpu) - enter_smm_save_state_64(vcpu, buf); + enter_smm_save_state_64(vcpu, (char *)&smram); else #endif -- enter_smm_save_state_32(vcpu, buf); -+ enter_smm_save_state_32(vcpu, (struct kvm_smram_state_32 *)buf); +- enter_smm_save_state_32(vcpu, (char *)&smram); ++ enter_smm_save_state_32(vcpu, &smram.smram32); /* * Give enter_smm() a chance to make ISA-specific changes to the vCPU diff --git a/patches/kernel/0022-KVM-x86-emulator-smm-use-smram-struct-for-64-bit-smr.patch b/patches/kernel/0023-KVM-x86-emulator-smm-use-smram-struct-for-64-bit-smr.patch similarity index 92% rename from patches/kernel/0022-KVM-x86-emulator-smm-use-smram-struct-for-64-bit-smr.patch rename to patches/kernel/0023-KVM-x86-emulator-smm-use-smram-struct-for-64-bit-smr.patch index ce4a011..d23ff4e 100644 --- a/patches/kernel/0022-KVM-x86-emulator-smm-use-smram-struct-for-64-bit-smr.patch +++ b/patches/kernel/0023-KVM-x86-emulator-smm-use-smram-struct-for-64-bit-smr.patch @@ -1,6 +1,6 @@ From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 From: Maxim Levitsky -Date: Tue, 21 Jun 2022 18:09:00 +0300 +Date: Wed, 3 Aug 2022 18:50:08 +0300 Subject: [PATCH] KVM: x86: emulator/smm: use smram struct for 64 bit smram load/restore @@ -16,17 +16,17 @@ Signed-off-by: Thomas Lamprecht 2 files changed, 62 insertions(+), 101 deletions(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c -index 44ce8d51f18b..98c2cf169b39 100644 +index ad5d2ab9ab84..4eb35a0a33a5 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c -@@ -2354,24 +2354,16 @@ static void rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, +@@ -2358,24 +2358,16 @@ static void rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, } #ifdef CONFIG_X86_64 -static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate, - int n) +static void rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, -+ struct kvm_smm_seg_state_64 *state, ++ const struct kvm_smm_seg_state_64 *state, + int n) { struct desc_struct desc; @@ -51,12 +51,12 @@ index 44ce8d51f18b..98c2cf169b39 100644 } #endif -@@ -2465,71 +2457,49 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, +@@ -2469,71 +2461,49 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, #ifdef CONFIG_X86_64 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, - const char *smstate) -+ struct kvm_smram_state_64 *smstate) ++ const struct kvm_smram_state_64 *smstate) { - struct desc_struct desc; struct desc_ptr dt; @@ -144,17 +144,17 @@ index 44ce8d51f18b..98c2cf169b39 100644 return X86EMUL_CONTINUE; } -@@ -2602,7 +2572,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt) +@@ -2608,7 +2578,7 @@ static int em_rsm(struct x86_emulate_ctxt 
*ctxt) #ifdef CONFIG_X86_64 if (emulator_has_longmode(ctxt)) -- ret = rsm_load_state_64(ctxt, buf); -+ ret = rsm_load_state_64(ctxt, (struct kvm_smram_state_64 *)buf); +- ret = rsm_load_state_64(ctxt, (const char *)&smram); ++ ret = rsm_load_state_64(ctxt, &smram.smram64); else #endif - ret = rsm_load_state_32(ctxt, (struct kvm_smram_state_32 *)buf); + ret = rsm_load_state_32(ctxt, &smram.smram32); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c -index b626eb3fa376..f40cd45b6a01 100644 +index 8a6b9bffc770..d00b82ee6ca4 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9033,20 +9033,17 @@ static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, @@ -269,11 +269,11 @@ index b626eb3fa376..f40cd45b6a01 100644 #endif @@ -9158,7 +9149,7 @@ static void enter_smm(struct kvm_vcpu *vcpu) - memset(buf, 0, 512); + memset(smram.bytes, 0, sizeof(smram.bytes)); #ifdef CONFIG_X86_64 if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) -- enter_smm_save_state_64(vcpu, buf); -+ enter_smm_save_state_64(vcpu, (struct kvm_smram_state_64 *)buf); +- enter_smm_save_state_64(vcpu, (char *)&smram); ++ enter_smm_save_state_64(vcpu, &smram.smram64); else #endif - enter_smm_save_state_32(vcpu, (struct kvm_smram_state_32 *)buf); + enter_smm_save_state_32(vcpu, &smram.smram32); diff --git a/patches/kernel/0023-KVM-x86-SVM-use-smram-structs.patch b/patches/kernel/0024-KVM-x86-SVM-use-smram-structs.patch similarity index 58% rename from patches/kernel/0023-KVM-x86-SVM-use-smram-structs.patch rename to patches/kernel/0024-KVM-x86-SVM-use-smram-structs.patch index 891b4a2..1cc4769 100644 --- a/patches/kernel/0023-KVM-x86-SVM-use-smram-structs.patch +++ b/patches/kernel/0024-KVM-x86-SVM-use-smram-structs.patch @@ -1,6 +1,6 @@ From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 From: Maxim Levitsky -Date: Tue, 21 Jun 2022 18:09:01 +0300 +Date: Wed, 3 Aug 2022 18:50:09 +0300 Subject: [PATCH] KVM: x86: SVM: use smram structs This removes the last user of put_smstate/GET_SMSTATE so @@ -13,14 +13,14 @@ Signed-off-by: Maxim Levitsky Signed-off-by: Thomas Lamprecht --- arch/x86/include/asm/kvm_host.h | 6 ------ - arch/x86/kvm/svm/svm.c | 28 +++++++++++++++++----------- - 2 files changed, 17 insertions(+), 17 deletions(-) + arch/x86/kvm/svm/svm.c | 21 ++++++--------------- + 2 files changed, 6 insertions(+), 21 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h -index 867febee8fc3..4b443624b884 100644 +index fb48dd8773e1..0362d3fba42a 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h -@@ -1931,12 +1931,6 @@ static inline int kvm_cpu_get_apicid(int mps_cpu) +@@ -1932,12 +1932,6 @@ static inline int kvm_cpu_get_apicid(int mps_cpu) #endif } @@ -34,18 +34,15 @@ index 867febee8fc3..4b443624b884 100644 int alloc_all_memslots_rmaps(struct kvm *kvm); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c -index 21f747eacc9a..283b1ca95317 100644 +index d903120811b9..742497b1d4c3 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c -@@ -4304,6 +4304,7 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection) - - static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate) - { -+ struct kvm_smram_state_64 *smram = (struct kvm_smram_state_64 *)smstate; - struct vcpu_svm *svm = to_svm(vcpu); +@@ -4308,15 +4308,11 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram) struct kvm_host_map map_save; int ret; -@@ -4311,10 +4312,17 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate) + 
+- char *smstate = (char *)smram; +- if (!is_guest_mode(vcpu)) return 0; @@ -53,37 +50,27 @@ index 21f747eacc9a..283b1ca95317 100644 - put_smstate(u64, smstate, 0x7ed8, 1); - /* FEE0h - SVM Guest VMCB Physical Address */ - put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa); -+ /* -+ * 32 bit SMRAM format doesn't preserve EFER and SVM state. -+ * SVM should not be enabled by the userspace without marking -+ * the CPU as at least long mode capable. -+ */ -+ -+ if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM)) -+ return 1; -+ -+ smram->svm_guest_flag = 1; -+ smram->svm_guest_vmcb_gpa = svm->nested.vmcb12_gpa; ++ smram->smram64.svm_guest_flag = 1; ++ smram->smram64.svm_guest_vmcb_gpa = svm->nested.vmcb12_gpa; svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; -@@ -4351,9 +4359,9 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate) - - static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) +@@ -4355,28 +4351,23 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram) { -+ struct kvm_smram_state_64 *smram = (struct kvm_smram_state_64 *)smstate; struct vcpu_svm *svm = to_svm(vcpu); struct kvm_host_map map, map_save; - u64 saved_efer, vmcb12_gpa; struct vmcb *vmcb12; int ret; -@@ -4361,18 +4369,16 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) +- const char *smstate = (const char *)smram; +- + if (!guest_cpuid_has(vcpu, X86_FEATURE_LM)) return 0; /* Non-zero if SMI arrived while vCPU was in guest mode. */ - if (!GET_SMSTATE(u64, smstate, 0x7ed8)) -+ if (!smram->svm_guest_flag) ++ if (!smram->smram64.svm_guest_flag) return 0; if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM)) @@ -91,21 +78,21 @@ index 21f747eacc9a..283b1ca95317 100644 - saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0); - if (!(saved_efer & EFER_SVME)) -+ if (!(smram->efer & EFER_SVME)) ++ if (!(smram->smram64.efer & EFER_SVME)) return 1; - vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0); - if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL) -+ if (kvm_vcpu_map(vcpu, gpa_to_gfn(smram->svm_guest_vmcb_gpa), &map) == -EINVAL) ++ if (kvm_vcpu_map(vcpu, gpa_to_gfn(smram->smram64.svm_guest_vmcb_gpa), &map) == -EINVAL) return 1; ret = 1; -@@ -4397,7 +4403,7 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) +@@ -4401,7 +4392,7 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram) vmcb12 = map.hva; nested_load_control_from_vmcb12(svm, &vmcb12->control); - ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false); -+ ret = enter_svm_guest_mode(vcpu, smram->svm_guest_vmcb_gpa, vmcb12, false); ++ ret = enter_svm_guest_mode(vcpu, smram->smram64.svm_guest_vmcb_gpa, vmcb12, false); if (ret) goto unmap_save; diff --git a/patches/kernel/0025-KVM-x86-SVM-don-t-save-SVM-state-to-SMRAM-when-VM-is.patch b/patches/kernel/0025-KVM-x86-SVM-don-t-save-SVM-state-to-SMRAM-when-VM-is.patch new file mode 100644 index 0000000..501be1d --- /dev/null +++ b/patches/kernel/0025-KVM-x86-SVM-don-t-save-SVM-state-to-SMRAM-when-VM-is.patch @@ -0,0 +1,40 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Maxim Levitsky +Date: Wed, 3 Aug 2022 18:50:10 +0300 +Subject: [PATCH] KVM: x86: SVM: don't save SVM state to SMRAM when VM is not + long mode capable + +When the guest CPUID doesn't have support for long mode, 32 bit SMRAM +layout is used and it has no support for preserving EFER and/or SVM +state. 
+ +Note that this isn't relevant to running 32 bit guests on VM which is +long mode capable - such VM can still run 32 bit guests in compatibility +mode. + +Signed-off-by: Maxim Levitsky +Signed-off-by: Thomas Lamprecht +--- + arch/x86/kvm/svm/svm.c | 9 +++++++++ + 1 file changed, 9 insertions(+) + +diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c +index 742497b1d4c3..938b9b24f0ee 100644 +--- a/arch/x86/kvm/svm/svm.c ++++ b/arch/x86/kvm/svm/svm.c +@@ -4311,6 +4311,15 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram) + if (!is_guest_mode(vcpu)) + return 0; + ++ /* ++ * 32 bit SMRAM format doesn't preserve EFER and SVM state. ++ * SVM should not be enabled by the userspace without marking ++ * the CPU as at least long mode capable. ++ */ ++ ++ if (!guest_cpuid_has(vcpu, X86_FEATURE_LM)) ++ return 1; ++ + smram->smram64.svm_guest_flag = 1; + smram->smram64.svm_guest_vmcb_gpa = svm->nested.vmcb12_gpa; + diff --git a/patches/kernel/0024-KVM-x86-emulator-smm-preserve-interrupt-shadow-in-SM.patch b/patches/kernel/0026-KVM-x86-emulator-smm-preserve-interrupt-shadow-in-SM.patch similarity index 67% rename from patches/kernel/0024-KVM-x86-emulator-smm-preserve-interrupt-shadow-in-SM.patch rename to patches/kernel/0026-KVM-x86-emulator-smm-preserve-interrupt-shadow-in-SM.patch index 0c0d2e6..c80b51b 100644 --- a/patches/kernel/0024-KVM-x86-emulator-smm-preserve-interrupt-shadow-in-SM.patch +++ b/patches/kernel/0026-KVM-x86-emulator-smm-preserve-interrupt-shadow-in-SM.patch @@ -1,6 +1,6 @@ From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 From: Maxim Levitsky -Date: Tue, 21 Jun 2022 18:09:02 +0300 +Date: Wed, 3 Aug 2022 18:50:11 +0300 Subject: [PATCH] KVM: x86: emulator/smm: preserve interrupt shadow in SMRAM When #SMI is asserted, the CPU can be in interrupt shadow @@ -25,16 +25,16 @@ Signed-off-by: Maxim Levitsky Signed-off-by: Thomas Lamprecht --- arch/x86/kvm/emulate.c | 17 ++++++++++++++--- - arch/x86/kvm/kvm_emulate.h | 13 ++++++++++--- + arch/x86/kvm/kvm_emulate.h | 10 ++++++---- arch/x86/kvm/x86.c | 12 ++++++++++++ - 3 files changed, 36 insertions(+), 6 deletions(-) + 3 files changed, 32 insertions(+), 7 deletions(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c -index 98c2cf169b39..5614456de922 100644 +index 4eb35a0a33a5..3e6ea2951e2b 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c -@@ -2416,7 +2416,7 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, - struct kvm_smram_state_32 *smstate) +@@ -2420,7 +2420,7 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, + const struct kvm_smram_state_32 *smstate) { struct desc_ptr dt; - int i; @@ -42,7 +42,7 @@ index 98c2cf169b39..5614456de922 100644 ctxt->eflags = smstate->eflags | X86_EFLAGS_FIXED; ctxt->_eip = smstate->eip; -@@ -2451,8 +2451,16 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, +@@ -2455,8 +2455,16 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, ctxt->ops->set_smbase(ctxt, smstate->smbase); @@ -61,7 +61,7 @@ index 98c2cf169b39..5614456de922 100644 } #ifdef CONFIG_X86_64 -@@ -2501,6 +2509,9 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, +@@ -2505,6 +2513,9 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, rsm_load_seg_64(ctxt, &smstate->fs, VCPU_SREG_FS); rsm_load_seg_64(ctxt, &smstate->gs, VCPU_SREG_GS); @@ -72,10 +72,10 @@ index 98c2cf169b39..5614456de922 100644 } #endif diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h -index 
d16b377be70b..5b881a3a5ed9 100644 +index 3b37b3e17379..a64c190abf28 100644 --- a/arch/x86/kvm/kvm_emulate.h +++ b/arch/x86/kvm/kvm_emulate.h -@@ -229,6 +229,7 @@ struct x86_emulate_ops { +@@ -231,6 +231,7 @@ struct x86_emulate_ops { bool (*guest_has_rdpid)(struct x86_emulate_ctxt *ctxt); void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked); @@ -83,39 +83,52 @@ index d16b377be70b..5b881a3a5ed9 100644 unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt); void (*exiting_smm)(struct x86_emulate_ctxt *ctxt); -@@ -499,7 +500,9 @@ struct kvm_smram_state_32 { - u32 reserved1[62]; /* FE00 - FEF7 */ - u32 smbase; /* FEF8 */ - u32 smm_revision; /* FEFC */ -- u32 reserved2[5]; /* FF00-FF13 */ -+ u32 reserved2[4]; /* FF00-FF0F*/ -+ /* int_shadow is KVM extension*/ -+ u32 int_shadow; /* FF10 */ - /* CR4 is not present in Intel/AMD SMRAM image*/ - u32 cr4; /* FF14 */ - u32 reserved3[5]; /* FF18 */ -@@ -571,13 +574,17 @@ struct kvm_smram_state_64 { - struct kvm_smm_seg_state_64 idtr; /* FE80 (R/O) */ - struct kvm_smm_seg_state_64 tr; /* FE90 (R/O) */ +@@ -497,7 +498,8 @@ struct kvm_smram_state_32 { + u32 reserved1[62]; + u32 smbase; + u32 smm_revision; +- u32 reserved2[5]; ++ u32 reserved2[4]; ++ u32 int_shadow; /* KVM extension */ + u32 cr4; /* CR4 is not present in Intel/AMD SMRAM image */ + u32 reserved3[5]; -- /* I/O restart and auto halt restart are not implemented by KVM */ -+ /* -+ * I/O restart and auto halt restart are not implemented by KVM -+ * int_shadow is KVM's extension -+ */ -+ - u64 io_restart_rip; /* FEA0 (R/O) */ - u64 io_restart_rcx; /* FEA8 (R/O) */ - u64 io_restart_rsi; /* FEB0 (R/O) */ - u64 io_restart_rdi; /* FEB8 (R/O) */ - u32 io_restart_dword; /* FEC0 (R/O) */ -- u32 reserved1; /* FEC4 */ -+ u32 int_shadow; /* FEC4 (R/O) */ - u8 io_instruction_restart; /* FEC8 (R/W) */ - u8 auto_halt_restart; /* FEC9 (R/W) */ - u8 reserved2[6]; /* FECA-FECF */ +@@ -545,6 +547,7 @@ static inline void __check_smram32_offsets(void) + __CHECK_SMRAM32_OFFSET(smbase, 0xFEF8); + __CHECK_SMRAM32_OFFSET(smm_revision, 0xFEFC); + __CHECK_SMRAM32_OFFSET(reserved2, 0xFF00); ++ __CHECK_SMRAM32_OFFSET(int_shadow, 0xFF10); + __CHECK_SMRAM32_OFFSET(cr4, 0xFF14); + __CHECK_SMRAM32_OFFSET(reserved3, 0xFF18); + __CHECK_SMRAM32_OFFSET(ds, 0xFF2C); +@@ -604,7 +607,7 @@ struct kvm_smram_state_64 { + u64 io_restart_rsi; + u64 io_restart_rdi; + u32 io_restart_dword; +- u32 reserved1; ++ u32 int_shadow; + u8 io_inst_restart; + u8 auto_hlt_restart; + u8 reserved2[6]; +@@ -642,7 +645,6 @@ struct kvm_smram_state_64 { + u64 gprs[16]; /* GPRS in a reversed "natural" X86 order (R15/R14/../RCX/RAX.) */ + }; + +- + static inline void __check_smram64_offsets(void) + { + #define __CHECK_SMRAM64_OFFSET(field, offset) \ +@@ -663,7 +665,7 @@ static inline void __check_smram64_offsets(void) + __CHECK_SMRAM64_OFFSET(io_restart_rsi, 0xFEB0); + __CHECK_SMRAM64_OFFSET(io_restart_rdi, 0xFEB8); + __CHECK_SMRAM64_OFFSET(io_restart_dword, 0xFEC0); +- __CHECK_SMRAM64_OFFSET(reserved1, 0xFEC4); ++ __CHECK_SMRAM64_OFFSET(int_shadow, 0xFEC4); + __CHECK_SMRAM64_OFFSET(io_inst_restart, 0xFEC8); + __CHECK_SMRAM64_OFFSET(auto_hlt_restart, 0xFEC9); + __CHECK_SMRAM64_OFFSET(reserved2, 0xFECA); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c -index f40cd45b6a01..9afac97ea98c 100644 +index d00b82ee6ca4..4cefdd83a448 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7299,6 +7299,11 @@ static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked) -- 2.39.5