From: Thomas Lamprecht
Date: Wed, 27 Jun 2018 12:50:39 +0000 (+0200)
Subject: add KVM L1 guest escape - CVE-2018-12904 patch
X-Git-Url: https://git.proxmox.com/?p=pve-kernel.git;a=commitdiff_plain;h=241d0d30b70b7917c6b347a9ff1bbb7365a97c5f

add KVM L1 guest escape - CVE-2018-12904 patch

see: http://www.openwall.com/lists/oss-security/2018/06/27/7
---

diff --git a/patches/kernel/0010-kvm-nVMX-Enforce-cpl-0-for-VMX-instructions.patch b/patches/kernel/0010-kvm-nVMX-Enforce-cpl-0-for-VMX-instructions.patch
new file mode 100644
index 0000000..2da036a
--- /dev/null
+++ b/patches/kernel/0010-kvm-nVMX-Enforce-cpl-0-for-VMX-instructions.patch
@@ -0,0 +1,68 @@
+From 727ba748e110b4de50d142edca9d6a9b7e6111d8 Mon Sep 17 00:00:00 2001
+From: Felix Wilhelm
+Date: Mon, 11 Jun 2018 09:43:44 +0200
+Subject: kvm: nVMX: Enforce cpl=0 for VMX instructions
+
+VMX instructions executed inside a L1 VM will always trigger a VM exit
+even when executed with cpl 3. This means we must perform the
+privilege check in software.
+
+Fixes: 70f3aac964ae("kvm: nVMX: Remove superfluous VMX instruction fault checks")
+Cc: stable@vger.kernel.org
+Signed-off-by: Felix Wilhelm
+Signed-off-by: Paolo Bonzini
+Signed-off-by: Thomas Lamprecht
+---
+ arch/x86/kvm/vmx.c | 15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 709de996..4bf1f9d 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -7905,6 +7905,12 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
+ 		return 1;
+ 	}
+ 
++	/* CPL=0 must be checked manually. */
++	if (vmx_get_cpl(vcpu)) {
++		kvm_queue_exception(vcpu, UD_VECTOR);
++		return 1;
++	}
++
+ 	if (vmx->nested.vmxon) {
+ 		nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
+ 		return kvm_skip_emulated_instruction(vcpu);
+@@ -7964,6 +7970,11 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
+  */
+ static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
+ {
++	if (vmx_get_cpl(vcpu)) {
++		kvm_queue_exception(vcpu, UD_VECTOR);
++		return 0;
++	}
++
+ 	if (!to_vmx(vcpu)->nested.vmxon) {
+ 		kvm_queue_exception(vcpu, UD_VECTOR);
+ 		return 0;
+@@ -8283,7 +8294,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
+ 		if (get_vmx_mem_address(vcpu, exit_qualification,
+ 				vmx_instruction_info, true, &gva))
+ 			return 1;
+-		/* _system ok, as hardware has verified cpl=0 */
++		/* _system ok, nested_vmx_check_permission has verified cpl=0 */
+ 		kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
+ 			     &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
+ 	}
+@@ -8448,7 +8459,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
+ 	if (get_vmx_mem_address(vcpu, exit_qualification,
+ 			vmx_instruction_info, true, &vmcs_gva))
+ 		return 1;
+-	/* ok to use *_system, as hardware has verified cpl=0 */
++	/* *_system ok, nested_vmx_check_permission has verified cpl=0 */
+ 	if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
+ 			(void *)&to_vmx(vcpu)->nested.current_vmptr,
+ 			sizeof(u64), &e)) {
+-- 
+cgit v1.1
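
For readers who want to poke at the logic outside a kernel tree, here is a minimal
standalone sketch of the cpl=0 check that nested_vmx_check_permission() gains in the
patch above. It is only an illustration: struct vcpu, queue_exception() and main()
are stand-ins invented for this example, not the kernel's kvm_vcpu structures or
helper functions.

/* Illustrative sketch only -- mirrors the shape of the fix, not the kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define UD_VECTOR 6                     /* x86 invalid-opcode exception vector */

struct vcpu {
        int  cpl;                       /* current privilege level, 0..3 */
        bool vmxon;                     /* has L1 executed VMXON? */
};

static void queue_exception(struct vcpu *vcpu, int vector)
{
        (void)vcpu;
        printf("inject exception vector %d into the guest\n", vector);
}

/* Return 0 to refuse the VMX instruction, 1 to let emulation proceed. */
static int nested_vmx_check_permission(struct vcpu *vcpu)
{
        /* The fix: the VM exit fires even at cpl 3, so cpl must be checked in software. */
        if (vcpu->cpl) {
                queue_exception(vcpu, UD_VECTOR);
                return 0;
        }
        if (!vcpu->vmxon) {
                queue_exception(vcpu, UD_VECTOR);
                return 0;
        }
        return 1;
}

int main(void)
{
        struct vcpu user = { .cpl = 3, .vmxon = true };
        struct vcpu kern = { .cpl = 0, .vmxon = true };

        printf("cpl=3 allowed: %d\n", nested_vmx_check_permission(&user));
        printf("cpl=0 allowed: %d\n", nested_vmx_check_permission(&kern));
        return 0;
}

With the check in place, a cpl-3 caller gets #UD injected and the instruction is
refused, which is exactly the behaviour the CVE-2018-12904 fix restores for the
real VMX instruction handlers.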