git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/commitdiff
KVM: lapic: reorganize restart_apic_timer
author    Paolo Bonzini <pbonzini@redhat.com>
          Thu, 29 Jun 2017 15:14:50 +0000 (17:14 +0200)
committer Paolo Bonzini <pbonzini@redhat.com>
          Thu, 29 Jun 2017 16:18:52 +0000 (18:18 +0200)
Move the code to cancel the hv timer into the caller, just before
it starts the hrtimer.  Check availability of the hv timer in
start_hv_timer.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/lapic.c
arch/x86/kvm/lapic.h
arch/x86/kvm/x86.c
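
The heart of the reorganization, condensed from the lapic.c hunks below: all
timer (re)arming now funnels through one helper, which prefers the
hypervisor-provided timer (the VMX preemption timer) and falls back to the
software hrtimer path. The code is taken from the patch itself; only the
comments are added here for orientation.

	static void restart_apic_timer(struct kvm_lapic *apic)
	{
		/* start_hv_timer() now checks kvm_x86_ops->set_hv_timer
		 * itself, so callers no longer have to. */
		if (!start_hv_timer(apic))
			start_sw_timer(apic);	/* cancels a still-armed hv
						 * timer, then falls back to
						 * the hrtimer path */
	}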

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index b6689dcae1da71aa82db18f837e6c46769540feb..a80e5a5d6f2ffe04227014a44c3e8f63a9d2b6f8 100644
@@ -1495,17 +1495,21 @@ EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
 
 static void cancel_hv_timer(struct kvm_lapic *apic)
 {
+       WARN_ON(!apic->lapic_timer.hv_timer_in_use);
        preempt_disable();
        kvm_x86_ops->cancel_hv_timer(apic->vcpu);
        apic->lapic_timer.hv_timer_in_use = false;
        preempt_enable();
 }
 
-static bool __start_hv_timer(struct kvm_lapic *apic)
+static bool start_hv_timer(struct kvm_lapic *apic)
 {
        struct kvm_timer *ktimer = &apic->lapic_timer;
        int r;
 
+       if (!kvm_x86_ops->set_hv_timer)
+               return false;
+
        if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
                return false;
 
@@ -1523,19 +1527,30 @@ static bool __start_hv_timer(struct kvm_lapic *apic)
         */
        if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
                return false;
+
+       trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, true);
        return true;
 }
 
-static bool start_hv_timer(struct kvm_lapic *apic)
+static void start_sw_timer(struct kvm_lapic *apic)
 {
-       if (!__start_hv_timer(apic)) {
-               if (apic->lapic_timer.hv_timer_in_use)
-                       cancel_hv_timer(apic);
-       }
+       struct kvm_timer *ktimer = &apic->lapic_timer;
+       if (apic->lapic_timer.hv_timer_in_use)
+               cancel_hv_timer(apic);
+       if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
+               return;
+
+       if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
+               start_sw_period(apic);
+       else if (apic_lvtt_tscdeadline(apic))
+               start_sw_tscdeadline(apic);
+       trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
+}
 
-       trace_kvm_hv_timer_state(apic->vcpu->vcpu_id,
-                       apic->lapic_timer.hv_timer_in_use);
-       return apic->lapic_timer.hv_timer_in_use;
+static void restart_apic_timer(struct kvm_lapic *apic)
+{
+       if (!start_hv_timer(apic))
+               start_sw_timer(apic);
 }
 
 void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
@@ -1549,19 +1564,14 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
 
        if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
                advance_periodic_target_expiration(apic);
-               if (!start_hv_timer(apic))
-                       start_sw_period(apic);
+               restart_apic_timer(apic);
        }
 }
 EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
 
 void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
 {
-       struct kvm_lapic *apic = vcpu->arch.apic;
-
-       WARN_ON(apic->lapic_timer.hv_timer_in_use);
-
-       start_hv_timer(apic);
+       restart_apic_timer(vcpu->arch.apic);
 }
 EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);
 
@@ -1570,33 +1580,28 @@ void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
        struct kvm_lapic *apic = vcpu->arch.apic;
 
        /* Possibly the TSC deadline timer is not enabled yet */
-       if (!apic->lapic_timer.hv_timer_in_use)
-               return;
-
-       cancel_hv_timer(apic);
+       if (apic->lapic_timer.hv_timer_in_use)
+               start_sw_timer(apic);
+}
+EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
 
-       if (atomic_read(&apic->lapic_timer.pending))
-               return;
+void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
+{
+       struct kvm_lapic *apic = vcpu->arch.apic;
 
-       if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
-               start_sw_period(apic);
-       else if (apic_lvtt_tscdeadline(apic))
-               start_sw_tscdeadline(apic);
+       WARN_ON(!apic->lapic_timer.hv_timer_in_use);
+       restart_apic_timer(apic);
 }
-EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
 
 static void start_apic_timer(struct kvm_lapic *apic)
 {
        atomic_set(&apic->lapic_timer.pending, 0);
 
-       if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
-               if (set_target_expiration(apic) &&
-                       !(kvm_x86_ops->set_hv_timer && start_hv_timer(apic)))
-                       start_sw_period(apic);
-       } else if (apic_lvtt_tscdeadline(apic)) {
-               if (!(kvm_x86_ops->set_hv_timer && start_hv_timer(apic)))
-                       start_sw_tscdeadline(apic);
-       }
+       if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
+           && !set_target_expiration(apic))
+               return;
+
+       restart_apic_timer(apic);
 }
 
 static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
@@ -1827,16 +1832,6 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu)
  * LAPIC interface
  *----------------------------------------------------------------------
  */
-u64 kvm_get_lapic_target_expiration_tsc(struct kvm_vcpu *vcpu)
-{
-       struct kvm_lapic *apic = vcpu->arch.apic;
-
-       if (!lapic_in_kernel(vcpu))
-               return 0;
-
-       return apic->lapic_timer.tscdeadline;
-}
-
 u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index bcbe811f3b97f1d8b576a24e7fefca0b4fe477dd..29caa2c3dff95716cdf0ab737cce45401651763b 100644
@@ -87,7 +87,6 @@ int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
 int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
 
-u64 kvm_get_lapic_target_expiration_tsc(struct kvm_vcpu *vcpu);
 u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu);
 void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
 
@@ -216,4 +215,5 @@ void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu);
 void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu);
 void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu);
 bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu);
+void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu);
 #endif
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a2cd0997343c485051e849551b9fc9d904177f e0..81aa9c321be3badab7eeb0588bdb97caaf249acd 100644
@@ -2841,10 +2841,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                        kvm_vcpu_write_tsc_offset(vcpu, offset);
                        vcpu->arch.tsc_catchup = 1;
                }
-               if (kvm_lapic_hv_timer_in_use(vcpu) &&
-                               kvm_x86_ops->set_hv_timer(vcpu,
-                                       kvm_get_lapic_target_expiration_tsc(vcpu)))
-                       kvm_lapic_switch_to_sw_timer(vcpu);
+
+               if (kvm_lapic_hv_timer_in_use(vcpu))
+                       kvm_lapic_restart_hv_timer(vcpu);
+
                /*
                 * On a host with synchronized TSC, there is no need to update
                 * kvmclock on vcpu->cpu migration
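
For reference, the call graph that results from this patch, reconstructed from
the hunks above (a summary sketch, not part of the commit):

	start_apic_timer()               initial programming (lapic.c)
	kvm_lapic_expired_hv_timer()     re-arm of a periodic timer after expiry
	kvm_lapic_switch_to_hv_timer()
	kvm_lapic_restart_hv_timer()     new helper, called from kvm_arch_vcpu_load()
	      |
	      v
	restart_apic_timer()
	      |-- start_hv_timer()       fails when no hv timer is available or
	      |                          the timer has already fired
	      `-- start_sw_timer()       cancels an armed hv timer, then starts
	                                 the software (hrtimer) path

	kvm_lapic_switch_to_sw_timer() calls start_sw_timer() directly.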