KVM: x86: declare Xen HVM shared info capability and add test case
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4758afe597954b16d2303d324723322956b0fdc5..838ce5e9814b3bbdcd4c42fbe5a4f5decd138b54 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -29,6 +29,7 @@
 #include "pmu.h"
 #include "hyperv.h"
 #include "lapic.h"
+#include "xen.h"
 
 #include <linux/clocksource.h>
 #include <linux/interrupt.h>
@@ -986,12 +987,10 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 
 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
-       if (static_call(kvm_x86_get_cpl)(vcpu) != 0 ||
-           __kvm_set_xcr(vcpu, index, xcr)) {
-               kvm_inject_gp(vcpu, 0);
-               return 1;
-       }
-       return 0;
+       if (static_call(kvm_x86_get_cpl)(vcpu) == 0)
+               return __kvm_set_xcr(vcpu, index, xcr);
+
+       return 1;
 }
 EXPORT_SYMBOL_GPL(kvm_set_xcr);
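
With the kvm_inject_gp() call dropped here, kvm_set_xcr() only reports success or failure, and the vendor exit handlers become responsible for injecting the fault. A sketch of what a caller looks like after this change (modelled on the VMX/SVM handle_xsetbv() handlers, simplified rather than quoted verbatim):

    static int handle_xsetbv(struct kvm_vcpu *vcpu)
    {
            u64 new_bv = kvm_read_edx_eax(vcpu);
            u32 index = kvm_rcx_read(vcpu);

            /* kvm_set_xcr() now returns non-zero on failure */
            if (kvm_set_xcr(vcpu, index, new_bv) == 0)
                    return kvm_skip_emulated_instruction(vcpu);

            kvm_inject_gp(vcpu, 0);
            return 1;
    }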
 
@@ -1796,12 +1795,11 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
 
-bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
+static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
 {
        return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) ||
                xfer_to_guest_mode_work_pending();
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_exit_request);
 
 /*
  * The fast path for frequent and performance sensitive wrmsr emulation,
@@ -1951,15 +1949,14 @@ static s64 get_kvmclock_base_ns(void)
 }
 #endif
 
-static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
+void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs)
 {
        int version;
        int r;
        struct pvclock_wall_clock wc;
+       u32 wc_sec_hi;
        u64 wall_nsec;
 
-       kvm->arch.wall_clock = wall_clock;
-
        if (!wall_clock)
                return;
 
@@ -1988,6 +1985,12 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
 
        kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
 
+       if (sec_hi_ofs) {
+               wc_sec_hi = wall_nsec >> 32;
+               kvm_write_guest(kvm, wall_clock + sec_hi_ofs,
+                               &wc_sec_hi, sizeof(wc_sec_hi));
+       }
+
        version++;
        kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
 }
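
kvm_write_wall_clock() grows a sec_hi_ofs parameter because the Xen wallclock, unlike the legacy KVM one, publishes the high 32 bits of the seconds count; after the do_div() above, wall_nsec holds whole seconds, so wall_nsec >> 32 is that high word. Callers needing only the legacy layout pass 0 and skip the extra write. For reference, the relevant fields of Xen's shared_info look roughly like this (paraphrased from the Xen public headers; on 32-bit guests wc_sec_hi instead lives in arch.wc_sec_hi, which is why the offset is a parameter rather than a constant):

    struct shared_info {
            /* ... */
            uint32_t wc_version;    /* version counter, even when stable */
            uint32_t wc_sec;        /* seconds since the epoch (low word) */
            uint32_t wc_nsec;
            uint32_t wc_sec_hi;     /* high word of wc_sec, 64-bit layout */
            /* ... */
    };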
@@ -2607,13 +2610,15 @@ u64 get_kvmclock_ns(struct kvm *kvm)
        return ret;
 }
 
-static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
+static void kvm_setup_pvclock_page(struct kvm_vcpu *v,
+                                  struct gfn_to_hva_cache *cache,
+                                  unsigned int offset)
 {
        struct kvm_vcpu_arch *vcpu = &v->arch;
        struct pvclock_vcpu_time_info guest_hv_clock;
 
-       if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
-               &guest_hv_clock, sizeof(guest_hv_clock))))
+       if (unlikely(kvm_read_guest_offset_cached(v->kvm, cache,
+               &guest_hv_clock, offset, sizeof(guest_hv_clock))))
                return;
 
        /* This VCPU is paused, but it's legal for a guest to read another
@@ -2636,9 +2641,9 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
                ++guest_hv_clock.version;  /* first time write, random junk */
 
        vcpu->hv_clock.version = guest_hv_clock.version + 1;
-       kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
-                               &vcpu->hv_clock,
-                               sizeof(vcpu->hv_clock.version));
+       kvm_write_guest_offset_cached(v->kvm, cache,
+                                     &vcpu->hv_clock, offset,
+                                     sizeof(vcpu->hv_clock.version));
 
        smp_wmb();
 
@@ -2652,16 +2657,16 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
 
        trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
 
-       kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
-                               &vcpu->hv_clock,
-                               sizeof(vcpu->hv_clock));
+       kvm_write_guest_offset_cached(v->kvm, cache,
+                                     &vcpu->hv_clock, offset,
+                                     sizeof(vcpu->hv_clock));
 
        smp_wmb();
 
        vcpu->hv_clock.version++;
-       kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
-                               &vcpu->hv_clock,
-                               sizeof(vcpu->hv_clock.version));
+       kvm_write_guest_offset_cached(v->kvm, cache,
+                                     &vcpu->hv_clock, offset,
+                                     sizeof(vcpu->hv_clock.version));
 }
 
 static int kvm_guest_time_update(struct kvm_vcpu *v)
@@ -2748,7 +2753,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
        vcpu->hv_clock.flags = pvclock_flags;
 
        if (vcpu->pv_time_enabled)
-               kvm_setup_pvclock_page(v);
+               kvm_setup_pvclock_page(v, &vcpu->pv_time, 0);
+       if (vcpu->xen.vcpu_info_set)
+               kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_info_cache,
+                                      offsetof(struct compat_vcpu_info, time));
+       if (vcpu->xen.vcpu_time_info_set)
+               kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0);
        if (v == kvm_get_vcpu(v->kvm, 0))
                kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
        return 0;
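
kvm_guest_time_update() now publishes the same pvclock data to up to three guest locations: the legacy KVM pvclock page, the time field embedded in the Xen vcpu_info, and a standalone Xen vcpu_time_info region. The offsetof(struct compat_vcpu_info, time) in the second call reflects the vcpu_info layout, roughly as below (paraphrased; the offset of the time field happens to coincide between the compat and 64-bit layouts, which is presumably why the compat structure suffices here):

    struct compat_vcpu_info {
            uint8_t evtchn_upcall_pending;
            uint8_t evtchn_upcall_mask;
            uint16_t pad;
            uint32_t evtchn_pending_sel;
            struct compat_arch_vcpu_info arch;
            struct pvclock_vcpu_time_info time;   /* offset passed above */
    };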
@@ -2873,32 +2883,6 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        return 0;
 }
 
-static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
-{
-       struct kvm *kvm = vcpu->kvm;
-       int lm = is_long_mode(vcpu);
-       u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
-               : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
-       u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
-               : kvm->arch.xen_hvm_config.blob_size_32;
-       u32 page_num = data & ~PAGE_MASK;
-       u64 page_addr = data & PAGE_MASK;
-       u8 *page;
-
-       if (page_num >= blob_size)
-               return 1;
-
-       page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
-       if (IS_ERR(page))
-               return PTR_ERR(page);
-
-       if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) {
-               kfree(page);
-               return 1;
-       }
-       return 0;
-}
-
 static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu)
 {
        u64 mask = KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;
@@ -3032,6 +3016,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        u32 msr = msr_info->index;
        u64 data = msr_info->data;
 
+       if (msr && msr == vcpu->kvm->arch.xen_hvm_config.msr)
+               return kvm_xen_write_hypercall_page(vcpu, data);
+
        switch (msr) {
        case MSR_AMD64_NB_CFG:
        case MSR_IA32_UCODE_WRITE:
@@ -3156,13 +3143,15 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
                        return 1;
 
-               kvm_write_wall_clock(vcpu->kvm, data);
+               vcpu->kvm->arch.wall_clock = data;
+               kvm_write_wall_clock(vcpu->kvm, data, 0);
                break;
        case MSR_KVM_WALL_CLOCK:
                if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
                        return 1;
 
-               kvm_write_wall_clock(vcpu->kvm, data);
+               vcpu->kvm->arch.wall_clock = data;
+               kvm_write_wall_clock(vcpu->kvm, data, 0);
                break;
        case MSR_KVM_SYSTEM_TIME_NEW:
                if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
@@ -3307,8 +3296,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                vcpu->arch.msr_misc_features_enables = data;
                break;
        default:
-               if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
-                       return xen_hvm_config(vcpu, data);
                if (kvm_pmu_is_valid_msr(vcpu, msr))
                        return kvm_pmu_set_msr(vcpu, msr_info);
                return KVM_MSR_RET_INVALID;
@@ -3741,7 +3728,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_PIT2:
        case KVM_CAP_PIT_STATE2:
        case KVM_CAP_SET_IDENTITY_MAP_ADDR:
-       case KVM_CAP_XEN_HVM:
        case KVM_CAP_VCPU_EVENTS:
        case KVM_CAP_HYPERV:
        case KVM_CAP_HYPERV_VAPIC:
@@ -3781,6 +3767,11 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
                r = 1;
                break;
+       case KVM_CAP_XEN_HVM:
+               r = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR |
+                   KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL |
+                   KVM_XEN_HVM_CONFIG_SHARED_INFO;
+               break;
        case KVM_CAP_SYNC_REGS:
                r = KVM_SYNC_X86_VALID_FIELDS;
                break;
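
KVM_CAP_XEN_HVM thus stops being a boolean and becomes a feature bitmask, which is the "declare ... capability" part of this commit. A userspace probe might look like the following sketch (vm_fd is an assumed open VM descriptor; error handling elided):

    int xen_caps = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XEN_HVM);

    if (xen_caps > 0 && (xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO)) {
            /* kernel can host the Xen shared_info page */
    }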
@@ -4423,9 +4414,9 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
        if (dbgregs->flags)
                return -EINVAL;
 
-       if (dbgregs->dr6 & ~0xffffffffull)
+       if (!kvm_dr6_valid(dbgregs->dr6))
                return -EINVAL;
-       if (dbgregs->dr7 & ~0xffffffffull)
+       if (!kvm_dr7_valid(dbgregs->dr7))
                return -EINVAL;
 
        memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
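
Replacing the open-coded masks with kvm_dr6_valid()/kvm_dr7_valid() keeps the reserved-bit policy in one place; the helpers (in x86.h) reduce to checking that bits 63:32 are clear, roughly:

    static inline bool kvm_dr6_valid(u64 data)
    {
            /* bits 63:32 of DR6 (and likewise DR7) are reserved */
            return !(data >> 32);
    }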
@@ -5033,6 +5024,26 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
        case KVM_GET_SUPPORTED_HV_CPUID:
                r = kvm_ioctl_get_supported_hv_cpuid(vcpu, argp);
                break;
+       case KVM_XEN_VCPU_GET_ATTR: {
+               struct kvm_xen_vcpu_attr xva;
+
+               r = -EFAULT;
+               if (copy_from_user(&xva, argp, sizeof(xva)))
+                       goto out;
+               r = kvm_xen_vcpu_get_attr(vcpu, &xva);
+               if (!r && copy_to_user(argp, &xva, sizeof(xva)))
+                       r = -EFAULT;
+               break;
+       }
+       case KVM_XEN_VCPU_SET_ATTR: {
+               struct kvm_xen_vcpu_attr xva;
+
+               r = -EFAULT;
+               if (copy_from_user(&xva, argp, sizeof(xva)))
+                       goto out;
+               r = kvm_xen_vcpu_set_attr(vcpu, &xva);
+               break;
+       }
        default:
                r = -EINVAL;
        }
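
The new KVM_XEN_VCPU_GET_ATTR/KVM_XEN_VCPU_SET_ATTR ioctls carry per-vCPU Xen state such as the vcpu_info and vcpu_time_info addresses consumed by the clock code above. A hedged usage sketch (attribute and field names as added by this series' uapi headers; vcpu_fd and vcpu_info_gpa are assumptions):

    struct kvm_xen_vcpu_attr xva = {
            .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
            .u.gpa = vcpu_info_gpa,
    };

    if (ioctl(vcpu_fd, KVM_XEN_VCPU_SET_ATTR, &xva) < 0)
            err(1, "KVM_XEN_VCPU_SET_ATTR");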
@@ -5651,11 +5662,27 @@ set_pit2_out:
                r = -EFAULT;
                if (copy_from_user(&xhc, argp, sizeof(xhc)))
                        goto out;
-               r = -EINVAL;
-               if (xhc.flags)
+               r = kvm_xen_hvm_config(kvm, &xhc);
+               break;
+       }
+       case KVM_XEN_HVM_GET_ATTR: {
+               struct kvm_xen_hvm_attr xha;
+
+               r = -EFAULT;
+               if (copy_from_user(&xha, argp, sizeof(xha)))
                        goto out;
-               memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc));
-               r = 0;
+               r = kvm_xen_hvm_get_attr(kvm, &xha);
+               if (!r && copy_to_user(argp, &xha, sizeof(xha)))
+                       r = -EFAULT;
+               break;
+       }
+       case KVM_XEN_HVM_SET_ATTR: {
+               struct kvm_xen_hvm_attr xha;
+
+               r = -EFAULT;
+               if (copy_from_user(&xha, argp, sizeof(xha)))
+                       goto out;
+               r = kvm_xen_hvm_set_attr(kvm, &xha);
                break;
        }
        case KVM_SET_CLOCK: {
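
KVM_XEN_HVM_SET_ATTR is the VM-wide counterpart, used among other things to register the shared_info page itself. Another sketch under the same assumptions (shinfo_gfn is hypothetical):

    struct kvm_xen_hvm_attr xha = {
            .type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
            .u.shared_info.gfn = shinfo_gfn,
    };

    if (ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &xha) < 0)
            err(1, "KVM_XEN_HVM_SET_ATTR");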
@@ -7115,9 +7142,9 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
        if (vcpu->arch.mmu->direct_map) {
                unsigned int indirect_shadow_pages;
 
-               spin_lock(&vcpu->kvm->mmu_lock);
+               write_lock(&vcpu->kvm->mmu_lock);
                indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
-               spin_unlock(&vcpu->kvm->mmu_lock);
+               write_unlock(&vcpu->kvm->mmu_lock);
 
                if (indirect_shadow_pages)
                        kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
@@ -8016,6 +8043,7 @@ void kvm_arch_exit(void)
        kvm_mmu_module_exit();
        free_percpu(user_return_msrs);
        kmem_cache_destroy(x86_fpu_cache);
+       WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key));
 }
 
 static int __kvm_vcpu_halt(struct kvm_vcpu *vcpu, int state, int reason)
@@ -8143,6 +8171,9 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
        unsigned long nr, a0, a1, a2, a3, ret;
        int op_64_bit;
 
+       if (kvm_xen_hypercall_enabled(vcpu->kvm))
+               return kvm_xen_hypercall(vcpu);
+
        if (kvm_hv_hypercall_enabled(vcpu->kvm))
                return kvm_hv_hypercall(vcpu);
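
Xen interception is checked ahead of Hyper-V's, so a VM configured with KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL routes VMCALL/VMMCALL to the Xen handler first. The gate is built on the kvm_xen_enabled static key (the same key whose balance kvm_arch_exit() now WARNs about); its shape in this series' xen.h is roughly:

    static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
    {
            return static_branch_unlikely(&kvm_xen_enabled.key) &&
                    (kvm->arch.xen_hvm_config.flags &
                     KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
    }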
 
@@ -8957,7 +8988,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        static_call(kvm_x86_msr_filter_changed)(vcpu);
        }
 
-       if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
+       if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win ||
+           kvm_xen_has_interrupt(vcpu)) {
                ++vcpu->stat.req_event;
                kvm_apic_accept_events(vcpu);
                if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
@@ -9044,7 +9076,19 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
        }
 
-       exit_fastpath = static_call(kvm_x86_run)(vcpu);
+       for (;;) {
+               exit_fastpath = static_call(kvm_x86_run)(vcpu);
+               if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
+                       break;
+
+               if (unlikely(kvm_vcpu_exit_request(vcpu))) {
+                       exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
+                       break;
+               }
+
+               if (vcpu->arch.apicv_active)
+                       static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+       }
 
        /*
         * Do this here before restoring debug registers on the host.  And
@@ -10594,6 +10638,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
        kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1));
        kvm_mmu_uninit_vm(kvm);
        kvm_page_track_cleanup(kvm);
+       kvm_xen_destroy_vm(kvm);
        kvm_hv_destroy_vm(kvm);
 }