From 72d7b374b14d67e973bce476e4a75552478cc42d Mon Sep 17 00:00:00 2001
From: Ladi Prosek
Date: Wed, 11 Oct 2017 16:54:41 +0200
Subject: [PATCH] KVM: x86: introduce ISA specific smi_allowed callback

Similar to NMI, there may be ISA specific reasons why an SMI cannot be
injected into the guest. This commit adds a new smi_allowed callback to
be implemented in following commits.

Signed-off-by: Ladi Prosek
Signed-off-by: Paolo Bonzini
---
 arch/x86/include/asm/kvm_host.h | 1 +
 arch/x86/kvm/svm.c              | 6 ++++++
 arch/x86/kvm/vmx.c              | 6 ++++++
 arch/x86/kvm/x86.c              | 2 +-
 4 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 23a9a5339f3f..411ddbbaeabf 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1062,6 +1062,7 @@ struct kvm_x86_ops {
 
 	void (*setup_mce)(struct kvm_vcpu *vcpu);
 
+	int (*smi_allowed)(struct kvm_vcpu *vcpu);
 	int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
 	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);
 };
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index c4e9b99d48d8..e3c61a32249d 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5401,6 +5401,11 @@ static void svm_setup_mce(struct kvm_vcpu *vcpu)
 	vcpu->arch.mcg_cap &= 0x1ff;
 }
 
+static int svm_smi_allowed(struct kvm_vcpu *vcpu)
+{
+	return 1;
+}
+
 static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 {
 	/* TODO: Implement */
@@ -5524,6 +5529,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.update_pi_irte = svm_update_pi_irte,
 	.setup_mce = svm_setup_mce,
 
+	.smi_allowed = svm_smi_allowed,
 	.pre_enter_smm = svm_pre_enter_smm,
 	.pre_leave_smm = svm_pre_leave_smm,
 };
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1305bb65688b..156ecbaad1e6 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -11916,6 +11916,11 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu)
 			~FEATURE_CONTROL_LMCE;
 }
 
+static int vmx_smi_allowed(struct kvm_vcpu *vcpu)
+{
+	return 1;
+}
+
 static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 {
 	/* TODO: Implement */
@@ -12054,6 +12059,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 
 	.setup_mce = vmx_setup_mce,
 
+	.smi_allowed = vmx_smi_allowed,
 	.pre_enter_smm = vmx_pre_enter_smm,
 	.pre_leave_smm = vmx_pre_leave_smm,
 };
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9e85a69ccb12..693bf8d01128 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6438,7 +6438,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 		}
 
 		kvm_x86_ops->queue_exception(vcpu);
-	} else if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
+	} else if (vcpu->arch.smi_pending && !is_smm(vcpu) && kvm_x86_ops->smi_allowed(vcpu)) {
 		vcpu->arch.smi_pending = false;
 		enter_smm(vcpu);
 	} else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
-- 
2.39.5
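
Note: the svm_smi_allowed() and vmx_smi_allowed() bodies above are
deliberate no-ops; per the commit message, the real ISA specific checks
land in follow-up commits. As an illustration only (a sketch, not the
actual follow-up patch), the SVM side could gate SMI injection on the
virtual global interrupt flag, reusing the existing to_svm() and
gif_set() helpers from svm.c:

	static int svm_smi_allowed(struct kvm_vcpu *vcpu)
	{
		struct vcpu_svm *svm = to_svm(vcpu);

		/*
		 * Illustrative sketch: per the AMD APM, SMIs are held
		 * pending while GIF is clear, so a real implementation
		 * could report that condition here rather than
		 * unconditionally allowing injection.
		 */
		if (!gif_set(svm))
			return 0;

		return 1;
	}

Returning int rather than bool mirrors the existing nmi_allowed()
callback, which inject_pending_event() already consults in exactly the
same way, as the x86.c hunk shows.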