git.proxmox.com Git - mirror_ubuntu-kernels.git/commitdiff
perf/x86: Fix n_pair for cancelled txn
author Peter Zijlstra <peterz@infradead.org>
Mon, 5 Oct 2020 08:09:06 +0000 (10:09 +0200)
committer Peter Zijlstra <peterz@infradead.org>
Tue, 6 Oct 2020 13:18:17 +0000 (15:18 +0200)
Kan reported that n_metric gets corrupted for cancelled transactions;
a similar issue exists for n_pair for AMD's Large Increment thing.

The problem was confirmed and confirmed fixed by Kim using:

  sudo perf stat -e "{cycles,cycles,cycles,cycles}:D" -a sleep 10 &

  # should succeed:
  sudo perf stat -e "{fp_ret_sse_avx_ops.all}:D" -a workload

  # should fail:
  sudo perf stat -e "{fp_ret_sse_avx_ops.all,fp_ret_sse_avx_ops.all,cycles}:D" -a workload

  # previously failed, now succeeds with this patch:
  sudo perf stat -e "{fp_ret_sse_avx_ops.all}:D" -a workload

Fixes: 5738891229a2 ("perf/x86/amd: Add support for Large Increment per Cycle Events")
Reported-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Kim Phillips <kim.phillips@amd.com>
Link: https://lkml.kernel.org/r/20201005082516.GG2628@hirez.programming.kicks-ass.net
arch/x86/events/core.c
arch/x86/events/perf_event.h

index cb5cfef3a9380e684f24c7d96dfbd55b9e7be185..a7248a3c4b2f43f70850ee360fc013f3e4dc4f4d 100644 (file)
@@ -1064,8 +1064,10 @@ static int collect_event(struct cpu_hw_events *cpuc, struct perf_event *event,
                return -EINVAL;
 
        cpuc->event_list[n] = event;
-       if (is_counter_pair(&event->hw))
+       if (is_counter_pair(&event->hw)) {
                cpuc->n_pair++;
+               cpuc->n_txn_pair++;
+       }
 
        return 0;
 }
@@ -2006,6 +2008,7 @@ static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
 
        perf_pmu_disable(pmu);
        __this_cpu_write(cpu_hw_events.n_txn, 0);
+       __this_cpu_write(cpu_hw_events.n_txn_pair, 0);
 }
 
 /*
@@ -2031,6 +2034,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu)
         */
        __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
        __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
+       __this_cpu_sub(cpu_hw_events.n_pair, __this_cpu_read(cpu_hw_events.n_txn_pair));
        perf_pmu_enable(pmu);
 }
 
index 345442410a4d01256bc4e2e915f9f9987a26d18b..93e56d76980f377f6c87f90017214618821a6e61 100644 (file)
@@ -235,6 +235,7 @@ struct cpu_hw_events {
                                             they've never been enabled yet */
        int                     n_txn;    /* the # last events in the below arrays;
                                             added in the current transaction */
+       int                     n_txn_pair;
        int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
        u64                     tags[X86_PMC_IDX_MAX];