perf_counter: powerpc: add nmi_enter/nmi_exit calls
Author:     Paul Mackerras <paulus@samba.org>
AuthorDate: Thu, 9 Apr 2009 04:42:56 +0000 (14:42 +1000)
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Thu, 9 Apr 2009 05:56:08 +0000 (07:56 +0200)
Impact: fix potential deadlocks on powerpc

Now that the core is using in_nmi() (added in e30e08f6, "perf_counter:
fix NMI race in task clock"), we need the powerpc perf_counter_interrupt
to call nmi_enter() and nmi_exit() in those cases where the interrupt
happens when interrupts are soft-disabled.

If interrupts were soft-enabled, we can treat it as a regular interrupt
and do irq_enter/irq_exit around the whole routine. This lets us get rid
of the test_perf_counter_pending() call at the end of
perf_counter_interrupt, thus simplifying things a little.
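
In outline, perf_counter_interrupt() ends up following the pattern sketched
below. This is a simplified paraphrase of the diff hunks, not the exact
committed code; the counter-overflow handling in the middle is elided:

	static void perf_counter_interrupt(struct pt_regs *regs)
	{
		int nmi;

		/*
		 * regs->softe records whether interrupts were soft-enabled
		 * when the PMU exception was taken.  If they were
		 * soft-disabled, treat this as an NMI so that the core's
		 * in_nmi() test does the right thing.
		 */
		nmi = !regs->softe;
		if (nmi)
			nmi_enter();
		else
			irq_enter();

		/* ... find overflowed counters and call
		 * record_and_restart(counter, val, regs, nmi) ... */

		if (nmi)
			nmi_exit();
		else
			irq_exit();
	}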

Signed-off-by: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <18909.31952.873098.336615@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index c9d019f1907425a0349321567b6279285208c305..bd76d0fa2c3575ad75b0839b765eda106991c5e7 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -714,7 +714,7 @@ hw_perf_counter_init(struct perf_counter *counter)
  * here so there is no possibility of being interrupted.
  */
 static void record_and_restart(struct perf_counter *counter, long val,
-                              struct pt_regs *regs)
+                              struct pt_regs *regs, int nmi)
 {
        s64 prev, delta, left;
        int record = 0;
@@ -749,7 +749,7 @@ static void record_and_restart(struct perf_counter *counter, long val,
         * Finally record data if requested.
         */
        if (record)
-               perf_counter_overflow(counter, 1, regs, 0);
+               perf_counter_overflow(counter, nmi, regs, 0);
 }
 
 /*
@@ -762,6 +762,17 @@ static void perf_counter_interrupt(struct pt_regs *regs)
        struct perf_counter *counter;
        long val;
        int found = 0;
+       int nmi;
+
+       /*
+        * If interrupts were soft-disabled when this PMU interrupt
+        * occurred, treat it as an NMI.
+        */
+       nmi = !regs->softe;
+       if (nmi)
+               nmi_enter();
+       else
+               irq_enter();
 
        for (i = 0; i < cpuhw->n_counters; ++i) {
                counter = cpuhw->counter[i];
@@ -769,7 +780,7 @@ static void perf_counter_interrupt(struct pt_regs *regs)
                if ((int)val < 0) {
                        /* counter has overflowed */
                        found = 1;
-                       record_and_restart(counter, val, regs);
+                       record_and_restart(counter, val, regs, nmi);
                }
        }
 
@@ -796,18 +807,10 @@ static void perf_counter_interrupt(struct pt_regs *regs)
         */
        mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);
 
-       /*
-        * If we need a wakeup, check whether interrupts were soft-enabled
-        * when we took the interrupt.  If they were, we can wake stuff up
-        * immediately; otherwise we'll have do the wakeup when interrupts
-        * get soft-enabled.
-        */
-       if (test_perf_counter_pending() && regs->softe) {
-               irq_enter();
-               clear_perf_counter_pending();
-               perf_counter_do_pending();
+       if (nmi)
+               nmi_exit();
+       else
                irq_exit();
-       }
 }
 
 void hw_perf_counter_setup(int cpu)