KVM: LAPIC: Inject timer interrupt via posted interrupt
author    Wanpeng Li <wanpengli@tencent.com>
          Sat, 6 Jul 2019 01:26:51 +0000 (09:26 +0800)
committer Paolo Bonzini <pbonzini@redhat.com>
          Sat, 20 Jul 2019 07:00:40 +0000 (09:00 +0200)
Dedicated instances are currently disturbed by unnecessary jitter because
the emulated LAPIC timers fire on the same pCPUs where the vCPUs reside.
Unlike ARM, Intel provides no hardware virtual timer for the guest, so
both programming the timer in the guest and the firing of the emulated
timer incur vmexits.  This patch avoids the vmexit when the emulated
timer fires, at least in the dedicated-instance scenario when nohz_full
is enabled.
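As an illustration of the guest-side cost (a minimal sketch, not part of
this patch; guest_arm_tsc_deadline is a hypothetical ring-0 helper),
arming the LAPIC timer in TSC-deadline mode is a WRMSR that KVM
intercepts and emulates, and the expiry of the emulated timer later
costs another exit on the host:

    #include <stdint.h>

    /* Architectural TSC-deadline MSR (IA32_TSC_DEADLINE). */
    #define MSR_IA32_TSC_DEADLINE 0x6e0

    /* Guest (ring 0) arms the LAPIC timer: the WRMSR itself is one vmexit. */
    static inline void guest_arm_tsc_deadline(uint64_t deadline)
    {
            asm volatile("wrmsr"
                         :: "c" (MSR_IA32_TSC_DEADLINE),
                            "a" ((uint32_t)deadline),
                            "d" ((uint32_t)(deadline >> 32)));
    }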

In that case, the emulated timers can be offloaded to the nearest busy
housekeeping CPUs, since APICv has been present in server processors for
several years.  The guest timer interrupt is then injected via a posted
interrupt, which the housekeeping CPU delivers once the emulated timer
fires.
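As a rough sketch of the resulting fast path (condensed from the
apic_timer_expired() change in the lapic.c hunk below, with the pending
check and tscdeadline bookkeeping trimmed; vmx_deliver_posted_interrupt()
is the pre-existing APICv delivery path, not something added here):

    /* Runs in hrtimer context on a housekeeping CPU when the timer fires. */
    static void apic_timer_expired_sketch(struct kvm_lapic *apic)
    {
            struct kvm_vcpu *vcpu = apic->vcpu;

            if (pi_inject_timer && kvm_vcpu_apicv_active(vcpu) &&
                vcpu->mode == IN_GUEST_MODE) {
                    /*
                     * Fast path: deliver LVTT through the local APIC.  With
                     * APICv this reaches vmx_deliver_posted_interrupt(),
                     * which sets the vector in the PIR and sends the
                     * notification IPI, so the guest takes the timer
                     * interrupt without a vmexit.
                     */
                    kvm_apic_local_deliver(apic, APIC_LVTT);
                    return;
            }

            /* Slow path: mark the timer pending and wake/kick the vCPU. */
            atomic_inc(&apic->lapic_timer.pending);
            kvm_set_pending_timer(vcpu);
    }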

The host should be tuned so that the vCPUs are placed on isolated
physical processors, with several surplus pCPUs left for busy
housekeeping.  If mwait/hlt/pause vmexits are disabled so that the vCPUs
stay in non-root mode, a ~3% redis performance benefit can be observed
on a Skylake server, and the number of external interrupt vmexits drops
substantially.  Without the patch:

            VM-EXIT  Samples  Samples%  Time%   Min Time  Max Time   Avg time
EXTERNAL_INTERRUPT    42916    49.43%   39.30%   0.47us   106.09us   0.71us ( +-   1.09% )

With the patch:

            VM-EXIT  Samples  Samples%  Time%   Min Time  Max Time   Avg time
EXTERNAL_INTERRUPT     6871     9.29%    2.96%   0.44us    57.88us   0.72us ( +-   4.02% )
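For such a setup, the host boots with nohz_full= (which enables
HK_FLAG_TIMER housekeeping and is what the new kvm.pi_inject_timer
parameter defaults to, per the x86.c hunk below), and the VMM disables
the mwait/hlt/pause intercepts through the pre-existing
KVM_CAP_X86_DISABLE_EXITS capability.  A minimal sketch of the latter
(disable_exits is a hypothetical helper; vm_fd and error handling are
assumed to be set up elsewhere, and the capability is typically enabled
before vCPUs are created):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int disable_exits(int vm_fd)
    {
            struct kvm_enable_cap cap = {
                    .cap  = KVM_CAP_X86_DISABLE_EXITS,
                    /* args[0] carries the mask of intercepts to turn off. */
                    .args = { KVM_X86_DISABLE_EXITS_MWAIT |
                              KVM_X86_DISABLE_EXITS_HLT |
                              KVM_X86_DISABLE_EXITS_PAUSE },
            };

            /* mwait/hlt/pause then execute natively, keeping the vCPU
             * in non-root mode. */
            return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }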

Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/lapic.c
arch/x86/kvm/lapic.h
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
include/linux/sched/isolation.h
kernel/sched/isolation.c

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 32b80ecc0ac5886cc4abdba18c126a6e988cc4a0..0aa158657f20cff83270f4251d1294030076c1bb 100644
@@ -118,6 +118,17 @@ static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
        return apic->vcpu->vcpu_id;
 }
 
+bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
+{
+       return pi_inject_timer && kvm_vcpu_apicv_active(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_can_post_timer_interrupt);
+
+static bool kvm_use_posted_timer_interrupt(struct kvm_vcpu *vcpu)
+{
+       return kvm_can_post_timer_interrupt(vcpu) && vcpu->mode == IN_GUEST_MODE;
+}
+
 static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
                u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {
        switch (map->mode) {
@@ -1421,29 +1432,6 @@ static void apic_update_lvtt(struct kvm_lapic *apic)
        }
 }
 
-static void apic_timer_expired(struct kvm_lapic *apic)
-{
-       struct kvm_vcpu *vcpu = apic->vcpu;
-       struct swait_queue_head *q = &vcpu->wq;
-       struct kvm_timer *ktimer = &apic->lapic_timer;
-
-       if (atomic_read(&apic->lapic_timer.pending))
-               return;
-
-       atomic_inc(&apic->lapic_timer.pending);
-       kvm_set_pending_timer(vcpu);
-
-       /*
-        * For x86, the atomic_inc() is serialized, thus
-        * using swait_active() is safe.
-        */
-       if (swait_active(q))
-               swake_up_one(q);
-
-       if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
-               ktimer->expired_tscdeadline = ktimer->tscdeadline;
-}
-
 /*
  * On APICv, this test will cause a busy wait
  * during a higher-priority task.
@@ -1517,7 +1505,7 @@ static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu,
        apic->lapic_timer.timer_advance_ns = timer_advance_ns;
 }
 
-void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
+static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
        u64 guest_tsc, tsc_deadline;
@@ -1525,9 +1513,6 @@ void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
        if (apic->lapic_timer.expired_tscdeadline == 0)
                return;
 
-       if (!lapic_timer_int_injected(vcpu))
-               return;
-
        tsc_deadline = apic->lapic_timer.expired_tscdeadline;
        apic->lapic_timer.expired_tscdeadline = 0;
        guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
@@ -1539,8 +1524,57 @@ void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
        if (unlikely(!apic->lapic_timer.timer_advance_adjust_done))
                adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta);
 }
+
+void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
+{
+       if (lapic_timer_int_injected(vcpu))
+               __kvm_wait_lapic_expire(vcpu);
+}
 EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);
 
+static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
+{
+       struct kvm_timer *ktimer = &apic->lapic_timer;
+
+       kvm_apic_local_deliver(apic, APIC_LVTT);
+       if (apic_lvtt_tscdeadline(apic))
+               ktimer->tscdeadline = 0;
+       if (apic_lvtt_oneshot(apic)) {
+               ktimer->tscdeadline = 0;
+               ktimer->target_expiration = 0;
+       }
+}
+
+static void apic_timer_expired(struct kvm_lapic *apic)
+{
+       struct kvm_vcpu *vcpu = apic->vcpu;
+       struct swait_queue_head *q = &vcpu->wq;
+       struct kvm_timer *ktimer = &apic->lapic_timer;
+
+       if (atomic_read(&apic->lapic_timer.pending))
+               return;
+
+       if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
+               ktimer->expired_tscdeadline = ktimer->tscdeadline;
+
+       if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
+               if (apic->lapic_timer.timer_advance_ns)
+                       __kvm_wait_lapic_expire(vcpu);
+               kvm_apic_inject_pending_timer_irqs(apic);
+               return;
+       }
+
+       atomic_inc(&apic->lapic_timer.pending);
+       kvm_set_pending_timer(vcpu);
+
+       /*
+        * For x86, the atomic_inc() is serialized, thus
+        * using swait_active() is safe.
+        */
+       if (swait_active(q))
+               swake_up_one(q);
+}
+
 static void start_sw_tscdeadline(struct kvm_lapic *apic)
 {
        struct kvm_timer *ktimer = &apic->lapic_timer;
@@ -2325,13 +2359,7 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
        struct kvm_lapic *apic = vcpu->arch.apic;
 
        if (atomic_read(&apic->lapic_timer.pending) > 0) {
-               kvm_apic_local_deliver(apic, APIC_LVTT);
-               if (apic_lvtt_tscdeadline(apic))
-                       apic->lapic_timer.tscdeadline = 0;
-               if (apic_lvtt_oneshot(apic)) {
-                       apic->lapic_timer.tscdeadline = 0;
-                       apic->lapic_timer.target_expiration = 0;
-               }
+               kvm_apic_inject_pending_timer_irqs(apic);
                atomic_set(&apic->lapic_timer.pending, 0);
        }
 }
@@ -2453,7 +2481,8 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
 {
        struct hrtimer *timer;
 
-       if (!lapic_in_kernel(vcpu))
+       if (!lapic_in_kernel(vcpu) ||
+               kvm_can_post_timer_interrupt(vcpu))
                return;
 
        timer = &vcpu->arch.apic->lapic_timer.timer;
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 36747174e4a8ba7b19d5fb58ccfae2974f8e4795..50053d2b8b7bc849b7057406376f874b774c24cd 100644
@@ -236,6 +236,7 @@ void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu);
 void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu);
 bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu);
 void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu);
+bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu);
 
 static inline enum lapic_mode kvm_apic_mode(u64 apic_base)
 {
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 84f8d49a2fd28eee08538d21f2de567df948f52c..280320f74db7b7ff87f24bb04edd809eb6b4e7eb 100644
@@ -7064,7 +7064,8 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
        u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles;
        struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer;
 
-       if (kvm_mwait_in_guest(vcpu->kvm))
+       if (kvm_mwait_in_guest(vcpu->kvm) ||
+               kvm_can_post_timer_interrupt(vcpu))
                return -EOPNOTSUPP;
 
        vmx = to_vmx(vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6ab30c5e1ae0e9c9a9b83eb69886dcee2396dead..58305cf81182dab02a0a45f5efa85990d2e239b4 100644
@@ -51,6 +51,7 @@
 #include <linux/kvm_irqfd.h>
 #include <linux/irqbypass.h>
 #include <linux/sched/stat.h>
+#include <linux/sched/isolation.h>
 #include <linux/mem_encrypt.h>
 
 #include <trace/events/kvm.h>
@@ -153,6 +154,9 @@ EXPORT_SYMBOL_GPL(enable_vmware_backdoor);
 static bool __read_mostly force_emulation_prefix = false;
 module_param(force_emulation_prefix, bool, S_IRUGO);
 
+int __read_mostly pi_inject_timer = -1;
+module_param(pi_inject_timer, bint, S_IRUGO | S_IWUSR);
+
 #define KVM_NR_SHARED_MSRS 16
 
 struct kvm_shared_msrs_global {
@@ -7058,6 +7062,8 @@ int kvm_arch_init(void *opaque)
                host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
 
        kvm_lapic_init();
+       if (pi_inject_timer == -1)
+               pi_inject_timer = housekeeping_enabled(HK_FLAG_TIMER);
 #ifdef CONFIG_X86_64
        pvclock_gtod_register_notifier(&pvclock_gtod_notifier);
 
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index e08a12892e8ba8275addbc9f9d8c98f4fa8051df..6594020c069167f4f889570960698b2d61ba6379 100644
@@ -301,6 +301,8 @@ extern unsigned int min_timer_period_us;
 
 extern bool enable_vmware_backdoor;
 
+extern int pi_inject_timer;
+
 extern struct static_key kvm_no_apic_vcpu;
 
 static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h
index b0fb1446fe04d809ab9fa0763e7d7b27ff2a2c6c..6c8512d3be88e4d1d34cbfa861fdbc3094ee925b 100644
@@ -19,6 +19,7 @@ enum hk_flags {
 DECLARE_STATIC_KEY_FALSE(housekeeping_overridden);
 extern int housekeeping_any_cpu(enum hk_flags flags);
 extern const struct cpumask *housekeeping_cpumask(enum hk_flags flags);
+extern bool housekeeping_enabled(enum hk_flags flags);
 extern void housekeeping_affine(struct task_struct *t, enum hk_flags flags);
 extern bool housekeeping_test_cpu(int cpu, enum hk_flags flags);
 extern void __init housekeeping_init(void);
@@ -35,6 +36,11 @@ static inline const struct cpumask *housekeeping_cpumask(enum hk_flags flags)
        return cpu_possible_mask;
 }
 
+static inline bool housekeeping_enabled(enum hk_flags flags)
+{
+       return false;
+}
+
 static inline void housekeeping_affine(struct task_struct *t,
                                       enum hk_flags flags) { }
 static inline void housekeeping_init(void) { }
diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c
index 123ea07a3f3b048089dc1e3e1c6ad8f422ed50ab..ccb28085b11418f539766b96d32901bf05e0a7f6 100644
@@ -14,6 +14,12 @@ EXPORT_SYMBOL_GPL(housekeeping_overridden);
 static cpumask_var_t housekeeping_mask;
 static unsigned int housekeeping_flags;
 
+bool housekeeping_enabled(enum hk_flags flags)
+{
+       return !!(housekeeping_flags & flags);
+}
+EXPORT_SYMBOL_GPL(housekeeping_enabled);
+
 int housekeeping_any_cpu(enum hk_flags flags)
 {
        if (static_branch_unlikely(&housekeeping_overridden))