KVM: arm64: Opportunistically turn off WFI trapping when using direct LPI injection
author		Marc Zyngier <maz@kernel.org>
Thu, 7 Nov 2019 16:04:12 +0000 (16:04 +0000)
committer	Marc Zyngier <maz@kernel.org>
Fri, 8 Nov 2019 11:14:36 +0000 (11:14 +0000)
Just like we do for WFE trapping, it can be useful to turn off
WFI trapping when the physical CPU is not oversubscribed (that
is, the vcpu is the only runnable process on this CPU) *and*
we're using direct injection of interrupts.

The conditions are reevaluated on each vcpu_load(), ensuring that
we don't switch to this mode on a busy system.

On a GICv4 system, this has the effect of reducing the generation
of doorbell interrupts to zero when the right conditions are
met, which is a huge improvement over the current situation
(where the doorbells are screaming if the CPU ever hits a
blocking WFI).

Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Zenghui Yu <yuzenghui@huawei.com>
Reviewed-by: Christoffer Dall <christoffer.dall@arm.com>
Link: https://lore.kernel.org/r/20191107160412.30301-3-maz@kernel.org
arch/arm/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_emulate.h
virt/kvm/arm/arm.c
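
For quick reference, the arm64 hunks below amount to the following
trap-setup logic, re-evaluated on every kvm_arch_vcpu_load(). This is
only a condensed sketch of the patch (the two HCR_TWx updates in
vcpu_set_wfx_traps() are folded into one statement here), not the
literal code:

	/* Re-evaluated on each kvm_arch_vcpu_load() */
	if (single_task_running()) {
		/* vcpu is the only runnable task on this CPU */
		vcpu->arch.hcr_el2 &= ~HCR_TWE;		/* don't trap WFE */
		if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count))
			/* direct LPI injection in use: don't trap WFI either */
			vcpu->arch.hcr_el2 &= ~HCR_TWI;
		else
			vcpu->arch.hcr_el2 |= HCR_TWI;
	} else {
		/* CPU oversubscribed: keep trapping both WFE and WFI */
		vcpu->arch.hcr_el2 |= HCR_TWE | HCR_TWI;
	}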

arch/arm/include/asm/kvm_emulate.h
index 40002416efec221345c6c693381a7d2b3956bb5c..023c01cad2b1a690c9669faf4bc2f2804f1a5821 100644 (file)
@@ -95,12 +95,12 @@ static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu)
        return (unsigned long *)&vcpu->arch.hcr;
 }
 
-static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
+static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
 {
        vcpu->arch.hcr &= ~HCR_TWE;
 }
 
-static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
+static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
 {
        vcpu->arch.hcr |= HCR_TWE;
 }
arch/arm64/include/asm/kvm_emulate.h
index 6e92f6c7b1e409e922f385c6d21b4fe9f7e4e174..5a542d801f07128ba267cae1401d749b053e38f9 100644 (file)
@@ -87,14 +87,19 @@ static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
        return (unsigned long *)&vcpu->arch.hcr_el2;
 }
 
-static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
+static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
 {
        vcpu->arch.hcr_el2 &= ~HCR_TWE;
+       if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count))
+               vcpu->arch.hcr_el2 &= ~HCR_TWI;
+       else
+               vcpu->arch.hcr_el2 |= HCR_TWI;
 }
 
-static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
+static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
 {
        vcpu->arch.hcr_el2 |= HCR_TWE;
+       vcpu->arch.hcr_el2 |= HCR_TWI;
 }
 
 static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
virt/kvm/arm/arm.c
index bd2afcf9a13f8a564e974c49a03c2597f198ffb9..dac96e355f69cbd7e1327579ca481ac8ef24a6aa 100644 (file)
@@ -386,9 +386,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        kvm_vcpu_pmu_restore_guest(vcpu);
 
        if (single_task_running())
-               vcpu_clear_wfe_traps(vcpu);
+               vcpu_clear_wfx_traps(vcpu);
        else
-               vcpu_set_wfe_traps(vcpu);
+               vcpu_set_wfx_traps(vcpu);
 
        vcpu_ptrauth_setup_lazy(vcpu);
 }