git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/commitdiff
KVM: PPC: Book3S HV: Don't use existing "prodded" flag for XIVE escalations
author: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Fri, 12 Jan 2018 02:37:13 +0000 (13:37 +1100)
committer: Paul Mackerras <paulus@ozlabs.org>
Fri, 19 Jan 2018 01:10:21 +0000 (12:10 +1100)
The prodded flag is only cleared at the beginning of H_CEDE,
so every time we have an escalation, we will cause the *next*
H_CEDE to return immediately.

Instead use a dedicated "irq_pending" flag to indicate that
a guest interrupt is pending for the VCPU. We don't reuse the
existing exception bitmap so as to avoid expensive atomic ops.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/book3s_xive.c

index 3aa5b577cd609cea0c183b1949ec11cc5f2df9b7..bfe51356af5e161cb67b006dbed1cfa5123ad0e5 100644 (file)
@@ -709,6 +709,7 @@ struct kvm_vcpu_arch {
        u8 ceded;
        u8 prodded;
        u8 doorbell_request;
+       u8 irq_pending; /* Used by XIVE to signal pending guest irqs */
        u32 last_inst;
 
        struct swait_queue_head *wqp;
index 6b958414b4e036ac1e4c97bceb61277ffab65e76..825089cf3e2359d2fc4e7860dd3dce17986041b6 100644 (file)
@@ -514,6 +514,7 @@ int main(void)
        OFFSET(VCPU_PENDING_EXC, kvm_vcpu, arch.pending_exceptions);
        OFFSET(VCPU_CEDED, kvm_vcpu, arch.ceded);
        OFFSET(VCPU_PRODDED, kvm_vcpu, arch.prodded);
+       OFFSET(VCPU_IRQ_PENDING, kvm_vcpu, arch.irq_pending);
        OFFSET(VCPU_DBELL_REQ, kvm_vcpu, arch.doorbell_request);
        OFFSET(VCPU_MMCR, kvm_vcpu, arch.mmcr);
        OFFSET(VCPU_PMC, kvm_vcpu, arch.pmc);
index 76cf48051eb3e34380255d931bf9b6f446e0f2ee..e5f81fc108e094c100b3d73fc2c702603e692e47 100644 (file)
@@ -2999,7 +2999,7 @@ static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu)
 {
        if (!xive_enabled())
                return false;
-       return vcpu->arch.xive_saved_state.pipr <
+       return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr <
                vcpu->arch.xive_saved_state.cppr;
 }
 #else
index 7daf21be33d0b9ca3d372ff219686f23784ad99f..34dbab7deb39e51fd58ecd382f14cb13d7410162 100644 (file)
@@ -1035,6 +1035,16 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
        li      r9, 1
        stw     r9, VCPU_XIVE_PUSHED(r4)
        eieio
+
+       /*
+        * We clear the irq_pending flag. There is a small chance of a
+        * race vs. the escalation interrupt happening on another
+        * processor setting it again, but the only consequence is to
+        * cause a spurious wakeup on the next H_CEDE which is not an
+        * issue.
+        */
+       li      r0,0
+       stb     r0, VCPU_IRQ_PENDING(r4)
 no_xive:
 #endif /* CONFIG_KVM_XICS */
 
index a102efeabf05a1b2aa37fcfd35f0d33867f36d0e..eef9ccafdc098fb17c706d8382d1f0e6530fd43b 100644 (file)
@@ -84,8 +84,7 @@ static irqreturn_t xive_esc_irq(int irq, void *data)
 {
        struct kvm_vcpu *vcpu = data;
 
-       /* We use the existing H_PROD mechanism to wake up the target */
-       vcpu->arch.prodded = 1;
+       vcpu->arch.irq_pending = 1;
        smp_mb();
        if (vcpu->arch.ceded)
                kvmppc_fast_vcpu_kick(vcpu);