git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
powerpc/64: interrupt soft-enable race fix
author Nicholas Piggin <npiggin@gmail.com>
Thu, 17 Jun 2021 15:51:09 +0000 (01:51 +1000)
committer Michael Ellerman <mpe@ellerman.id.au>
Thu, 24 Jun 2021 14:06:56 +0000 (00:06 +1000)
Prevent interrupt restore from allowing racing hard interrupts to go
ahead of previous soft-pending ones, by using the soft-masked restart
handler to allow a store to clear the soft-mask while knowing nothing
is soft-pending.

This probably doesn't matter much in practice, but it's a simple
demonstrator / test case to exercise the restart table logic.
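
To make the window concrete, here is a minimal sketch in plain C of the naive
check-then-store sequence (illustration only, not the kernel code: "pending"
stands in for local_paca->irq_happened and "soft_mask" for the PACA
irq_soft_mask byte):

/* Illustration of the race the patch closes; not kernel code. */
static volatile unsigned char pending;          /* set when a soft-masked interrupt is taken */
static volatile unsigned char soft_mask = 1;    /* non-zero: interrupts soft-disabled */

static void naive_soft_enable(void)
{
        if (pending == 0) {
                /*
                 * Race window: a soft-masked interrupt taken here sets
                 * "pending", yet the store below still clears the mask,
                 * so new hard interrupts can now run ahead of the one
                 * waiting to be replayed.
                 */
                soft_mask = 0;
        }
}

Hard-disabling around the check would also close the window; the patch instead
keeps the fast path free of MSR[EE] manipulation by making the sequence
restartable.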

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210617155116.2167984-11-npiggin@gmail.com
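
Building on the sketch above, this is roughly what the RESTART_TABLE(1b, 2b, 1b)
entry in the asm below provides (the fixup-on-interrupt behaviour is assumed
from the restart-table infrastructure this series adds; handle_pending() is a
hypothetical stand-in for the slow path):

static void handle_pending(void)
{
        /* stand-in for the slow path: hard-disable, replay, then unmask */
}

static void restartable_soft_enable(void)
{
        /* 1:  start of the region covered by RESTART_TABLE(1b, 2b, 1b) */
        if (pending) {                  /* lbz 9,%0(13); cmpwi 9,0; bne %l[happened] */
                handle_pending();
                return;
        }
        soft_mask = 0;                  /* stb 9,%1(13): the single unmasking store */
        /* 2:  end of the region */

        /*
         * If a soft-masked interrupt is taken with the PC between 1 and 2,
         * its exit path resumes execution at the fixup address (1), so
         * "pending" is re-read and the store never clears the mask while
         * a replay is outstanding.
         */
}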
arch/powerpc/kernel/irq.c

index 72cb45393ef29568dee0bb339842b2c7dce2b52d..8428caf3194e5d8d30b0f8c1d1aee32a1954be13 100644 (file)
@@ -217,6 +217,100 @@ static inline void replay_soft_interrupts_irqrestore(void)
 #define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
 #endif
 
+#ifdef CONFIG_CC_HAS_ASM_GOTO
+notrace void arch_local_irq_restore(unsigned long mask)
+{
+       unsigned char irq_happened;
+
+       /* Write the new soft-enabled value if it is a disable */
+       if (mask) {
+               irq_soft_mask_set(mask);
+               return;
+       }
+
+       /*
+        * After the stb, interrupts are unmasked and there are no interrupts
+        * pending replay. The restart sequence makes this atomic with
+        * respect to soft-masked interrupts. If this was just a simple code
+        * sequence, a soft-masked interrupt could become pending right after
+        * the comparison and before the stb.
+        *
+        * This allows interrupts to be unmasked without hard disabling, and
+        * also without new hard interrupts coming in ahead of pending ones.
+        */
+       asm_volatile_goto(
+"1:                                    \n"
+"              lbz     9,%0(13)        \n"
+"              cmpwi   9,0             \n"
+"              bne     %l[happened]    \n"
+"              stb     9,%1(13)        \n"
+"2:                                    \n"
+               RESTART_TABLE(1b, 2b, 1b)
+       : : "i" (offsetof(struct paca_struct, irq_happened)),
+           "i" (offsetof(struct paca_struct, irq_soft_mask))
+       : "cr0", "r9"
+       : happened);
+
+       if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+               WARN_ON_ONCE(!(mfmsr() & MSR_EE));
+
+       return;
+
+happened:
+       irq_happened = get_irq_happened();
+       if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+               WARN_ON_ONCE(!irq_happened);
+
+       if (irq_happened == PACA_IRQ_HARD_DIS) {
+               if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+                       WARN_ON_ONCE(mfmsr() & MSR_EE);
+               irq_soft_mask_set(IRQS_ENABLED);
+               local_paca->irq_happened = 0;
+               __hard_irq_enable();
+               return;
+       }
+
+       /* Have interrupts to replay, need to hard disable first */
+       if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
+               if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+                       if (!(mfmsr() & MSR_EE)) {
+                               /*
+                                * An interrupt could have come in and cleared
+                                * MSR[EE] and set IRQ_HARD_DIS, so check
+                                * IRQ_HARD_DIS again and warn if it is still
+                                * clear.
+                                */
+                               irq_happened = get_irq_happened();
+                               WARN_ON_ONCE(!(irq_happened & PACA_IRQ_HARD_DIS));
+                       }
+               }
+               __hard_irq_disable();
+               local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+       } else {
+               if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+                       if (WARN_ON_ONCE(mfmsr() & MSR_EE))
+                               __hard_irq_disable();
+               }
+       }
+
+       /*
+        * Disable preempt here, so that the below preempt_enable will
+        * perform resched if required (a replayed interrupt may set
+        * need_resched).
+        */
+       preempt_disable();
+       irq_soft_mask_set(IRQS_ALL_DISABLED);
+       trace_hardirqs_off();
+
+       replay_soft_interrupts_irqrestore();
+       local_paca->irq_happened = 0;
+
+       trace_hardirqs_on();
+       irq_soft_mask_set(IRQS_ENABLED);
+       __hard_irq_enable();
+       preempt_enable();
+}
+#else
 notrace void arch_local_irq_restore(unsigned long mask)
 {
        unsigned char irq_happened;
@@ -288,6 +382,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
        __hard_irq_enable();
        preempt_enable();
 }
+#endif
 EXPORT_SYMBOL(arch_local_irq_restore);
 
 /*