/*
* Future software events:
*/
- /* PERF_COUNT_PAGE_FAULTS = -3,
- PERF_COUNT_CONTEXT_SWITCHES = -4, */
+ PERF_COUNT_PAGE_FAULTS = -3,
+ PERF_COUNT_CONTEXT_SWITCHES = -4,
};
.hw_perf_counter_read = task_clock_perf_counter_read,
};
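+/*
+ * Return the current task's total number of context switches:
+ * voluntary (nvcsw) plus involuntary (nivcsw).
+ */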
+static u64 get_context_switches(void)
+{
+ struct task_struct *curr = current;
+
+ return curr->nvcsw + curr->nivcsw;
+}
+
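+/*
+ * Fold the context switches that occurred since the last update into
+ * counter->count: read the current total, rebase hw.prev_count on it
+ * and accumulate the (non-negative) delta.
+ */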
+static void context_switches_perf_counter_update(struct perf_counter *counter)
+{
+ u64 prev, now;
+ s64 delta;
+
+ prev = atomic64_read(&counter->hw.prev_count);
+ now = get_context_switches();
+
+ atomic64_set(&counter->hw.prev_count, now);
+
+ delta = now - prev;
+ if (WARN_ON_ONCE(delta < 0))
+ delta = 0;
+
+ atomic64_add(delta, &counter->count);
+}
+
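+/* Reading the counter is just an update to the latest total. */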
+static void context_switches_perf_counter_read(struct perf_counter *counter)
+{
+ context_switches_perf_counter_update(counter);
+}
+
+static void context_switches_perf_counter_enable(struct perf_counter *counter)
+{
+ /*
+ * curr->nvcsw + curr->nivcsw is already a per-task value,
+ * so we don't have to clear it on switch-in.
+ */
+}
+
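+/*
+ * Disable does a final update, so switches that happened while
+ * the counter was enabled are not lost.
+ */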
+static void context_switches_perf_counter_disable(struct perf_counter *counter)
+{
+ context_switches_perf_counter_update(counter);
+}
+
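+/* Software-counter ops for PERF_COUNT_CONTEXT_SWITCHES: */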
+static const struct hw_perf_counter_ops perf_ops_context_switches = {
+ .hw_perf_counter_enable = context_switches_perf_counter_enable,
+ .hw_perf_counter_disable = context_switches_perf_counter_disable,
+ .hw_perf_counter_read = context_switches_perf_counter_read,
+};
+
static const struct hw_perf_counter_ops *
sw_perf_counter_init(struct perf_counter *counter)
{
case PERF_COUNT_TASK_CLOCK:
hw_ops = &perf_ops_task_clock;
break;
+ case PERF_COUNT_CONTEXT_SWITCHES:
+ hw_ops = &perf_ops_context_switches;
+ break;
default:
break;
}
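Illustration only, not part of the patch: the per-task counters that
get_context_switches() sums are also exported to user space through
/proc/[pid]/status (as voluntary_ctxt_switches and
nonvoluntary_ctxt_switches), so the new counter's semantics can be
sanity-checked with a small stand-alone program:

#include <stdio.h>

static unsigned long long self_context_switches(void)
{
	FILE *f = fopen("/proc/self/status", "r");
	char line[256];
	unsigned long long vol = 0, nonvol = 0, v;

	if (!f)
		return 0;

	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "voluntary_ctxt_switches: %llu", &v) == 1)
			vol = v;
		else if (sscanf(line, "nonvoluntary_ctxt_switches: %llu", &v) == 1)
			nonvol = v;
	}
	fclose(f);

	/* mirrors curr->nvcsw + curr->nivcsw in get_context_switches() */
	return vol + nonvol;
}

int main(void)
{
	printf("context switches so far: %llu\n", self_context_switches());
	return 0;
}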