perf_counter: Always schedule all software counters in
author    Paul Mackerras <paulus@samba.org>
          Mon, 12 Jan 2009 04:11:00 +0000 (15:11 +1100)
committer Paul Mackerras <paulus@samba.org>
          Mon, 12 Jan 2009 04:12:50 +0000 (15:12 +1100)
Software counters aren't subject to the limitations imposed by the
fixed number of hardware counter registers, so there is no reason not
to enable them all in __perf_counter_sched_in.  Previously we broke
out of the loop when we got to a group that wouldn't fit on the PMU;
with this change we continue through the list but schedule in only
software counters (or groups containing only software counters) from
that point on.

Signed-off-by: Paul Mackerras <paulus@samba.org>
kernel/perf_counter.c
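
In essence, the rewritten loop in __perf_counter_sched_in now behaves
as in the condensed sketch below (locking, the counter-state checks,
and the PMU disable/restore around the loop are elided; the
list_for_each_entry over ctx->counter_list is taken from the unchanged
context around the hunk):

	int can_add_hw = 1;

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		/*
		 * Software-only groups can always go on; hardware
		 * groups only while the PMU still has room.
		 */
		if (can_add_hw || is_software_only_group(counter))
			if (group_sched_in(counter, cpuctx, ctx, cpu))
				/* exclusive or over-committed: no more hw */
				can_add_hw = 0;
	}

Once one hardware group fails to fit, can_add_hw stays 0, so every
remaining hardware group is skipped while software-only groups continue
to be scheduled in.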

index 4c0dccb756adf08536957c38c1e8c33ea0e9e440..3aef3062ff78b6bddc43f51e8e9cfbba87db4995 100644
@@ -455,12 +455,37 @@ group_error:
        return -EAGAIN;
 }
 
+/*
+ * Return 1 for a software counter, 0 for a hardware counter
+ */
+static inline int is_software_counter(struct perf_counter *counter)
+{
+       return !counter->hw_event.raw && counter->hw_event.type < 0;
+}
+
+/*
+ * Return 1 for a group consisting entirely of software counters,
+ * 0 if the group contains any hardware counters.
+ */
+static int is_software_only_group(struct perf_counter *leader)
+{
+       struct perf_counter *counter;
+
+       if (!is_software_counter(leader))
+               return 0;
+       list_for_each_entry(counter, &leader->sibling_list, list_entry)
+               if (!is_software_counter(counter))
+                       return 0;
+       return 1;
+}
+
 static void
 __perf_counter_sched_in(struct perf_counter_context *ctx,
                        struct perf_cpu_context *cpuctx, int cpu)
 {
        struct perf_counter *counter;
        u64 flags;
+       int can_add_hw = 1;
 
        if (likely(!ctx->nr_counters))
                return;
@@ -477,10 +502,12 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
 
                /*
                 * If we scheduled in a group atomically and exclusively,
-                * or if this group can't go on, break out:
+                * or if this group can't go on, don't add any more
+                * hardware counters.
                 */
-               if (group_sched_in(counter, cpuctx, ctx, cpu))
-                       break;
+               if (can_add_hw || is_software_only_group(counter))
+                       if (group_sched_in(counter, cpuctx, ctx, cpu))
+                               can_add_hw = 0;
        }
        hw_perf_restore(flags);
        spin_unlock(&ctx->lock);