git.proxmox.com Git - pve-kernel.git/commitdiff
add KVM L1 guest escape - CVE-2018-12904 patch
author Thomas Lamprecht <t.lamprecht@proxmox.com>
Wed, 27 Jun 2018 12:50:39 +0000 (14:50 +0200)
committer Thomas Lamprecht <t.lamprecht@proxmox.com>
Wed, 27 Jun 2018 16:01:12 +0000 (18:01 +0200)
see: http://www.openwall.com/lists/oss-security/2018/06/27/7

patches/kernel/0010-kvm-nVMX-Enforce-cpl-0-for-VMX-instructions.patch [new file with mode: 0644]

diff --git a/patches/kernel/0010-kvm-nVMX-Enforce-cpl-0-for-VMX-instructions.patch b/patches/kernel/0010-kvm-nVMX-Enforce-cpl-0-for-VMX-instructions.patch
new file mode 100644 (file)
index 0000000..2da036a
--- /dev/null
@@ -0,0 +1,68 @@
+From 727ba748e110b4de50d142edca9d6a9b7e6111d8 Mon Sep 17 00:00:00 2001
+From: Felix Wilhelm <fwilhelm@google.com>
+Date: Mon, 11 Jun 2018 09:43:44 +0200
+Subject: kvm: nVMX: Enforce cpl=0 for VMX instructions
+
+VMX instructions executed inside a L1 VM will always trigger a VM exit
+even when executed with cpl 3. This means we must perform the
+privilege check in software.
+
+Fixes: 70f3aac964ae("kvm: nVMX: Remove superfluous VMX instruction fault checks")
+Cc: stable@vger.kernel.org
+Signed-off-by: Felix Wilhelm <fwilhelm@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
+---
+ arch/x86/kvm/vmx.c | 15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 709de996..4bf1f9d 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -7905,6 +7905,12 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
+               return 1;
+       }
++      /* CPL=0 must be checked manually. */
++      if (vmx_get_cpl(vcpu)) {
++              kvm_queue_exception(vcpu, UD_VECTOR);
++              return 1;
++      }
++
+       if (vmx->nested.vmxon) {
+               nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
+               return kvm_skip_emulated_instruction(vcpu);
+@@ -7964,6 +7970,11 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
+  */
+ static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
+ {
++      if (vmx_get_cpl(vcpu)) {
++              kvm_queue_exception(vcpu, UD_VECTOR);
++              return 0;
++      }
++
+       if (!to_vmx(vcpu)->nested.vmxon) {
+               kvm_queue_exception(vcpu, UD_VECTOR);
+               return 0;
+@@ -8283,7 +8294,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
+               if (get_vmx_mem_address(vcpu, exit_qualification,
+                               vmx_instruction_info, true, &gva))
+                       return 1;
+-              /* _system ok, as hardware has verified cpl=0 */
++              /* _system ok, nested_vmx_check_permission has verified cpl=0 */
+               kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
+                            &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
+       }
+@@ -8448,7 +8459,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
+       if (get_vmx_mem_address(vcpu, exit_qualification,
+                       vmx_instruction_info, true, &vmcs_gva))
+               return 1;
+-      /* ok to use *_system, as hardware has verified cpl=0 */
++      /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
+       if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
+                                (void *)&to_vmx(vcpu)->nested.current_vmptr,
+                                sizeof(u64), &e)) {
+-- 
+
+
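For context, and not part of this commit: the vmx_get_cpl() checks added above mean a VMX instruction issued from ring 3 inside an L1 guest is now refused with #UD instead of reaching the nested-VMX handlers. A rough, hypothetical user-space sketch of how that behaviour could be exercised from inside the L1 guest follows; it assumes the L1 kernel itself has nested VMX active (otherwise the instruction is already rejected for other reasons), and the VMCS field encoding used is only illustrative.

/*
 * Hypothetical ring-3 test, run inside the L1 guest (not part of this
 * patch). VMX instructions in VMX non-root operation always cause a VM
 * exit regardless of CPL, so KVM has to reject CPL > 0 itself; with the
 * fix it injects #UD, which the guest kernel delivers as SIGILL.
 */
#include <setjmp.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>

static sigjmp_buf env;

static void trap(int sig)
{
        (void)sig;
        siglongjmp(env, 1);
}

int main(void)
{
        uint64_t field = 0x6800;   /* GUEST_CR0; the exact field is irrelevant here */
        uint64_t value = 0;

        signal(SIGILL, trap);      /* #UD -> SIGILL (behaviour with the fix) */
        signal(SIGSEGV, trap);     /* a #GP would surface as SIGSEGV */

        if (sigsetjmp(env, 1)) {
                printf("VMREAD from CPL 3 was refused\n");
                return 0;
        }

        /* Unconditionally VM-exits to the L0 hypervisor (handle_vmread). */
        asm volatile("vmread %1, %0" : "=rm"(value) : "r"(field) : "cc");

        printf("VMREAD from CPL 3 was handled, value=%#llx\n",
               (unsigned long long)value);
        return 1;
}

Without the patch, the outcome of such a test depends on the L1 kernel's nested-VMX state, which is exactly the exposure the added vmx_get_cpl() checks close off.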