#include <math.h>
#include "stat.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
/* Welford's online algorithm: running mean and sum of squared deltas. */
void update_stats(struct stats *stats, u64 val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta*(val - stats->mean);
}
double avg_stats(struct stats *stats)
{
	return stats->mean;
}
/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                   n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
double stddev_stats(struct stats *stats)
{
	double variance, variance_mean;

	if (stats->n < 2)
		return 0.0;

	variance = stats->M2 / (stats->n - 1);
	variance_mean = variance / stats->n;

	return sqrt(variance_mean);
}
double rel_stddev_stats(double stddev, double avg)
{
	double pct = 0.0;

	if (avg)
		pct = 100.0 * stddev/avg;

	return pct;
}
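/*
 * Illustrative example (not part of perf itself): feeding the samples
 * 48, 50 and 52 through update_stats() gives mean = 50 and M2 = 8, so
 * stddev_stats() returns sqrt((8 / 2) / 3) ~= 1.15 (the std dev of the
 * mean, per the comment above) and rel_stddev_stats() reports ~2.31%:
 *
 *	struct stats s;
 *
 *	init_stats(&s);
 *	update_stats(&s, 48);
 *	update_stats(&s, 50);
 *	update_stats(&s, 52);
 *	printf("%.2f +- %.2f%%\n", avg_stats(&s),
 *	       rel_stddev_stats(stddev_stats(&s), avg_stats(&s)));
 */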
bool __perf_evsel_stat__is(struct perf_evsel *evsel,
			   enum perf_stat_evsel_id id)
{
	struct perf_stat_evsel *ps = evsel->priv;

	return ps->id == id;
}
#define ID(id, name) [PERF_STAT_EVSEL_ID__##id] = #name
static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
	ID(NONE,		x),
	ID(CYCLES_IN_TX,	cpu/cycles-t/),
	ID(TRANSACTION_START,	cpu/tx-start/),
	ID(ELISION_START,	cpu/el-start/),
	ID(CYCLES_IN_TX_CP,	cpu/cycles-ct/),
	ID(TOPDOWN_TOTAL_SLOTS, topdown-total-slots),
	ID(TOPDOWN_SLOTS_ISSUED, topdown-slots-issued),
	ID(TOPDOWN_SLOTS_RETIRED, topdown-slots-retired),
	ID(TOPDOWN_FETCH_BUBBLES, topdown-fetch-bubbles),
	ID(TOPDOWN_RECOVERY_BUBBLES, topdown-recovery-bubbles),
};
#undef ID
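/*
 * The ID() macro relies on token pasting and stringification, e.g.
 * ID(CYCLES_IN_TX, cpu/cycles-t/) expands to
 * [PERF_STAT_EVSEL_ID__CYCLES_IN_TX] = "cpu/cycles-t/", which is why
 * the event names above can be written unquoted.
 */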
void perf_stat_evsel_id_init(struct perf_evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->priv;
	int i;

	/* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */

	for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
		if (!strcmp(perf_evsel__name(evsel), id_str[i])) {
			ps->id = i;
			break;
		}
	}
}
static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
{
	int i;
	struct perf_stat_evsel *ps = evsel->priv;

	for (i = 0; i < 3; i++)
		init_stats(&ps->res_stats[i]);

	perf_stat_evsel_id_init(evsel);
}
static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
{
	evsel->priv = zalloc(sizeof(struct perf_stat_evsel));
	if (evsel->priv == NULL)
		return -ENOMEM;
	perf_evsel__reset_stat_priv(evsel);
	return 0;
}
static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
{
	zfree(&evsel->priv);
}
static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
					     int ncpus, int nthreads)
{
	struct perf_counts *counts;

	counts = perf_counts__new(ncpus, nthreads);
	if (counts)
		evsel->prev_raw_counts = counts;

	return counts ? 0 : -ENOMEM;
}
static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
{
	perf_counts__delete(evsel->prev_raw_counts);
	evsel->prev_raw_counts = NULL;
}
static int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
{
	int ncpus = perf_evsel__nr_cpus(evsel);
	int nthreads = thread_map__nr(evsel->threads);

	if (perf_evsel__alloc_stat_priv(evsel) < 0 ||
	    perf_evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
	    (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))
		return -ENOMEM;

	return 0;
}
int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (perf_evsel__alloc_stats(evsel, alloc_raw))
			goto out_free;
	}

	return 0;

out_free:
	perf_evlist__free_stats(evlist);
	return -1;
}
void perf_evlist__free_stats(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		perf_evsel__free_stat_priv(evsel);
		perf_evsel__free_counts(evsel);
		perf_evsel__free_prev_raw_counts(evsel);
	}
}
void perf_evlist__reset_stats(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		perf_evsel__reset_stat_priv(evsel);
		perf_evsel__reset_counts(evsel);
	}
}
static void zero_per_pkg(struct perf_evsel *counter)
{
	if (counter->per_pkg_mask)
		memset(counter->per_pkg_mask, 0, MAX_NR_CPUS);
}
static int check_per_pkg(struct perf_evsel *counter,
			 struct perf_counts_values *vals, int cpu, bool *skip)
{
	unsigned long *mask = counter->per_pkg_mask;
	struct cpu_map *cpus = perf_evsel__cpus(counter);
	int s;

	*skip = false;

	if (!counter->per_pkg)
		return 0;

	if (cpu_map__empty(cpus))
		return 0;

	if (!mask) {
		mask = zalloc(MAX_NR_CPUS);
		if (!mask)
			return -ENOMEM;

		counter->per_pkg_mask = mask;
	}

	/*
	 * we do not consider an event that has not run as a good
	 * instance to mark a package as used (skip=1). Otherwise
	 * we may run into a situation where the first CPU in a package
	 * is not running anything, yet the second is, and this function
	 * would mark the package as used after the first CPU and would
	 * not read the values from the second CPU.
	 */
	if (!(vals->run && vals->ena))
		return 0;

	s = cpu_map__get_socket(cpus, cpu, NULL);
	if (s < 0)
		return -1;

	*skip = test_and_set_bit(s, mask) == 1;
	return 0;
}
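/*
 * Illustrative scenario (not from the source): with a per_pkg (uncore)
 * event on a two-socket machine, the first CPU on each socket that has
 * actually run the event sets its socket's bit in per_pkg_mask and is
 * counted; every later CPU on the same socket finds the bit already set
 * and gets *skip = true, so each package-wide value is counted once.
 */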
static int
process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel,
		       int cpu, int thread,
		       struct perf_counts_values *count)
{
	struct perf_counts_values *aggr = &evsel->counts->aggr;
	static struct perf_counts_values zero;
	bool skip = false;

	if (check_per_pkg(evsel, count, cpu, &skip)) {
		pr_err("failed to read per-pkg counter\n");
		return -1;
	}

	if (skip)
		count = &zero;

	switch (config->aggr_mode) {
	case AGGR_THREAD:
	case AGGR_CORE:
	case AGGR_SOCKET:
	case AGGR_NONE:
		if (!evsel->snapshot)
			perf_evsel__compute_deltas(evsel, cpu, thread, count);
		perf_counts_values__scale(count, config->scale, NULL);
		if (config->aggr_mode == AGGR_NONE)
			perf_stat__update_shadow_stats(evsel, count->values, cpu);
		break;
	case AGGR_GLOBAL:
		aggr->val += count->val;
		if (config->scale) {
			aggr->ena += count->ena;
			aggr->run += count->run;
		}
		break;
	default:
		break;
	}

	return 0;
}
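/*
 * Note on scaling (an aside, not from this file): when events are
 * multiplexed, ena (time enabled) exceeds run (time running), and
 * perf_counts_values__scale() extrapolates val by ena/run, e.g. a raw
 * count of 1000 with run at half of ena is reported as roughly 2000.
 */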
static int process_counter_maps(struct perf_stat_config *config,
				struct perf_evsel *counter)
{
	int nthreads = thread_map__nr(counter->threads);
	int ncpus = perf_evsel__nr_cpus(counter);
	int cpu, thread;

	if (counter->system_wide)
		nthreads = 1;

	for (thread = 0; thread < nthreads; thread++) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			if (process_counter_values(config, counter, cpu, thread,
						   perf_counts(counter->counts, cpu, thread)))
				return -1;
		}
	}

	return 0;
}
int perf_stat_process_counter(struct perf_stat_config *config,
			      struct perf_evsel *counter)
{
	struct perf_counts_values *aggr = &counter->counts->aggr;
	struct perf_stat_evsel *ps = counter->priv;
	u64 *count = counter->counts->aggr.values;
	double val;
	int i, ret;

	aggr->val = aggr->ena = aggr->run = 0;

	/*
	 * We calculate counter's data every interval,
	 * and the display code shows ps->res_stats
	 * avg value. We need to zero the stats for
	 * interval mode, otherwise overall avg running
	 * averages will be shown for each interval.
	 */
	if (config->interval)
		init_stats(ps->res_stats);

	if (counter->per_pkg)
		zero_per_pkg(counter);

	ret = process_counter_maps(config, counter);
	if (ret)
		return ret;

	if (config->aggr_mode != AGGR_GLOBAL)
		return 0;

	if (!counter->snapshot)
		perf_evsel__compute_deltas(counter, -1, -1, aggr);
	perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled);

	for (i = 0; i < 3; i++)
		update_stats(&ps->res_stats[i], count[i]);

	if (verbose) {
		fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			perf_evsel__name(counter), count[0], count[1], count[2]);
	}

	/*
	 * Save the full runtime - to allow normalization during printout:
	 */
	val = counter->scale * *count;
	perf_stat__update_shadow_stats(counter, &val, 0);

	return 0;
}
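/*
 * Layout note (from the counts header, not this file): aggr.values[]
 * aliases the val/ena/run members of struct perf_counts_values, so
 * res_stats[0..2] above accumulate the aggregated value, enabled time
 * and running time across rounds.
 */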
int perf_event__process_stat_event(struct perf_tool *tool __maybe_unused,
				   union perf_event *event,
				   struct perf_session *session)
{
	struct perf_counts_values count;
	struct stat_event *st = &event->stat;
	struct perf_evsel *counter;

	count.val = st->val;
	count.ena = st->ena;
	count.run = st->run;

	counter = perf_evlist__id2evsel(session->evlist, st->id);
	if (!counter) {
		pr_err("Failed to resolve counter for stat event.\n");
		return -EINVAL;
	}

	*perf_counts(counter->counts, st->cpu, st->thread) = count;
	counter->supported = true;
	return 0;
}
size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp)
{
	struct stat_event *st = (struct stat_event *)event;
	size_t ret;

	ret  = fprintf(fp, "\n... id %" PRIu64 ", cpu %d, thread %d\n",
		       st->id, st->cpu, st->thread);
	ret += fprintf(fp, "... value %" PRIu64 ", enabled %" PRIu64 ", running %" PRIu64 "\n",
		       st->val, st->ena, st->run);

	return ret;
}
size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
{
	struct stat_round_event *rd = (struct stat_round_event *)event;
	size_t ret;

	ret = fprintf(fp, "\n... time %" PRIu64 ", type %s\n", rd->time,
		      rd->type == PERF_STAT_ROUND_TYPE__FINAL ? "FINAL" : "INTERVAL");

	return ret;
}
size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
{
	struct perf_stat_config sc;
	size_t ret;

	perf_event__read_stat_config(&sc, &event->stat_config);

	ret  = fprintf(fp, "\n");
	ret += fprintf(fp, "... aggr_mode %d\n", sc.aggr_mode);
	ret += fprintf(fp, "... scale %d\n", sc.scale);
	ret += fprintf(fp, "... interval %u\n", sc.interval);

	return ret;
}