]> git.proxmox.com Git - mirror_ubuntu-kernels.git/blame - tools/perf/util/evsel.c
Merge branch 'pm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspe...
[mirror_ubuntu-kernels.git] / tools / perf / util / evsel.c
CommitLineData
f8a95309
ACM
1/*
2 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
3 *
4 * Parts came from builtin-{top,stat,record}.c, see those files for further
5 * copyright notes.
6 *
7 * Released under the GPL v2. (and only v2, not any later version)
8 */
9
69aad6f1 10#include "evsel.h"
70082dd9 11#include "evlist.h"
69aad6f1 12#include "util.h"
86bd5e86 13#include "cpumap.h"
fd78260b 14#include "thread_map.h"
69aad6f1 15
c52b12ed
ACM
16#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
17
56722381
ACM
18int __perf_evsel__sample_size(u64 sample_type)
19{
20 u64 mask = sample_type & PERF_SAMPLE_MASK;
21 int size = 0;
22 int i;
23
24 for (i = 0; i < 64; i++) {
25 if (mask & (1ULL << i))
26 size++;
27 }
28
29 size *= sizeof(u64);
30
31 return size;
32}
33
ef1d1af2
ACM
34void perf_evsel__init(struct perf_evsel *evsel,
35 struct perf_event_attr *attr, int idx)
36{
37 evsel->idx = idx;
38 evsel->attr = *attr;
39 INIT_LIST_HEAD(&evsel->node);
40}
41
23a2f3ab 42struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
69aad6f1
ACM
43{
44 struct perf_evsel *evsel = zalloc(sizeof(*evsel));
45
ef1d1af2
ACM
46 if (evsel != NULL)
47 perf_evsel__init(evsel, attr, idx);
69aad6f1
ACM
48
49 return evsel;
50}
51
52int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
53{
4af4c955 54 int cpu, thread;
69aad6f1 55 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
4af4c955
DA
56
57 if (evsel->fd) {
58 for (cpu = 0; cpu < ncpus; cpu++) {
59 for (thread = 0; thread < nthreads; thread++) {
60 FD(evsel, cpu, thread) = -1;
61 }
62 }
63 }
64
69aad6f1
ACM
65 return evsel->fd != NULL ? 0 : -ENOMEM;
66}
67
70db7533
ACM
68int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
69{
a91e5431
ACM
70 evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
71 if (evsel->sample_id == NULL)
72 return -ENOMEM;
73
74 evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
75 if (evsel->id == NULL) {
76 xyarray__delete(evsel->sample_id);
77 evsel->sample_id = NULL;
78 return -ENOMEM;
79 }
80
81 return 0;
70db7533
ACM
82}
83
c52b12ed
ACM
84int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
85{
86 evsel->counts = zalloc((sizeof(*evsel->counts) +
87 (ncpus * sizeof(struct perf_counts_values))));
88 return evsel->counts != NULL ? 0 : -ENOMEM;
89}
90
69aad6f1
ACM
91void perf_evsel__free_fd(struct perf_evsel *evsel)
92{
93 xyarray__delete(evsel->fd);
94 evsel->fd = NULL;
95}
96
70db7533
ACM
97void perf_evsel__free_id(struct perf_evsel *evsel)
98{
a91e5431
ACM
99 xyarray__delete(evsel->sample_id);
100 evsel->sample_id = NULL;
101 free(evsel->id);
70db7533
ACM
102 evsel->id = NULL;
103}
104
c52b12ed
ACM
105void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
106{
107 int cpu, thread;
108
109 for (cpu = 0; cpu < ncpus; cpu++)
110 for (thread = 0; thread < nthreads; ++thread) {
111 close(FD(evsel, cpu, thread));
112 FD(evsel, cpu, thread) = -1;
113 }
114}
115
ef1d1af2 116void perf_evsel__exit(struct perf_evsel *evsel)
69aad6f1
ACM
117{
118 assert(list_empty(&evsel->node));
119 xyarray__delete(evsel->fd);
a91e5431
ACM
120 xyarray__delete(evsel->sample_id);
121 free(evsel->id);
ef1d1af2
ACM
122}
123
124void perf_evsel__delete(struct perf_evsel *evsel)
125{
126 perf_evsel__exit(evsel);
023695d9 127 close_cgroup(evsel->cgrp);
f0c55bcf 128 free(evsel->name);
69aad6f1
ACM
129 free(evsel);
130}
c52b12ed
ACM
131
132int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
133 int cpu, int thread, bool scale)
134{
135 struct perf_counts_values count;
136 size_t nv = scale ? 3 : 1;
137
138 if (FD(evsel, cpu, thread) < 0)
139 return -EINVAL;
140
4eed11d5
ACM
141 if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
142 return -ENOMEM;
143
c52b12ed
ACM
144 if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
145 return -errno;
146
147 if (scale) {
148 if (count.run == 0)
149 count.val = 0;
150 else if (count.run < count.ena)
151 count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
152 } else
153 count.ena = count.run = 0;
154
155 evsel->counts->cpu[cpu] = count;
156 return 0;
157}
158
159int __perf_evsel__read(struct perf_evsel *evsel,
160 int ncpus, int nthreads, bool scale)
161{
162 size_t nv = scale ? 3 : 1;
163 int cpu, thread;
164 struct perf_counts_values *aggr = &evsel->counts->aggr, count;
165
52bcd994 166 aggr->val = aggr->ena = aggr->run = 0;
c52b12ed
ACM
167
168 for (cpu = 0; cpu < ncpus; cpu++) {
169 for (thread = 0; thread < nthreads; thread++) {
170 if (FD(evsel, cpu, thread) < 0)
171 continue;
172
173 if (readn(FD(evsel, cpu, thread),
174 &count, nv * sizeof(u64)) < 0)
175 return -errno;
176
177 aggr->val += count.val;
178 if (scale) {
179 aggr->ena += count.ena;
180 aggr->run += count.run;
181 }
182 }
183 }
184
185 evsel->counts->scaled = 0;
186 if (scale) {
187 if (aggr->run == 0) {
188 evsel->counts->scaled = -1;
189 aggr->val = 0;
190 return 0;
191 }
192
193 if (aggr->run < aggr->ena) {
194 evsel->counts->scaled = 1;
195 aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
196 }
197 } else
198 aggr->ena = aggr->run = 0;
199
200 return 0;
201}
48290609 202
/*
 * Open the event on every cpu/thread combination in the given maps,
 * optionally grouping all fds opened for one cpu under the first fd
 * opened on that cpu (the group leader).
 *
 * Returns 0 on success. On failure every fd opened so far is closed
 * again and -1 is returned.
 */
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads, bool group)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1;

	/* Lazily allocate the fd array sized for these maps. */
	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -1;

	if (evsel->cgrp) {
		/* In cgroup mode the pid argument carries the cgroup fd. */
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		int group_fd = -1;

		for (thread = 0; thread < threads->nr; thread++) {

			if (!evsel->cgrp)
				pid = threads->map[thread];

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0)
				goto out_close;

			/* First fd opened on this cpu leads the group. */
			if (group && group_fd == -1)
				group_fd = FD(evsel, cpu, thread);
		}
	}

	return 0;

out_close:
	/*
	 * Unwind in reverse: first the fds opened on the current,
	 * partially-done cpu row (thread holds the failing index), then
	 * every full row below it.
	 */
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return -1;
}
251
0252208e
ACM
/*
 * One-entry dummy maps used when a caller passes NULL for the cpu or
 * thread map: the single -1 entry is handed straight through to
 * sys_perf_event_open() as the cpu/pid argument. The wrapper structs
 * reserve storage for that one entry right after the map header.
 */
static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr = 1,
	.cpus = { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr = 1,
	.threads = { -1, },
};
267
f08199d3 268int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
5d2cd909 269 struct thread_map *threads, bool group)
48290609 270{
0252208e
ACM
271 if (cpus == NULL) {
272 /* Work around old compiler warnings about strict aliasing */
273 cpus = &empty_cpu_map.map;
48290609
ACM
274 }
275
0252208e
ACM
276 if (threads == NULL)
277 threads = &empty_thread_map.map;
48290609 278
5d2cd909 279 return __perf_evsel__open(evsel, cpus, threads, group);
48290609
ACM
280}
281
f08199d3 282int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
5d2cd909 283 struct cpu_map *cpus, bool group)
48290609 284{
5d2cd909 285 return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group);
0252208e 286}
48290609 287
f08199d3 288int perf_evsel__open_per_thread(struct perf_evsel *evsel,
5d2cd909 289 struct thread_map *threads, bool group)
0252208e 290{
5d2cd909 291 return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group);
48290609 292}
70082dd9 293
8115d60c
ACM
/*
 * Parse the sample_id_all trailer of a non-sample event. These id
 * fields are appended at the *end* of the record in the reverse of
 * sample order, so walk backwards from the last u64 of the event.
 * The order of the if-blocks below therefore must not change.
 */
static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
				       struct perf_sample *sample)
{
	const u64 *array = event->sample.array;

	/* Point at the last u64 of the record. */
	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		/* cpu is a u32 in the low half of this u64 slot. */
		u32 *p = (u32 *)array;
		sample->cpu = *p;
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		/* pid and tid share one u64 slot as two u32s. */
		u32 *p = (u32 *)array;
		sample->pid = p[0];
		sample->tid = p[1];
	}

	return 0;
}
331
98e1da90
FW
332static bool sample_overlap(const union perf_event *event,
333 const void *offset, u64 size)
334{
335 const void *base = event;
336
337 if (offset + size > base + event->header.size)
338 return true;
339
340 return false;
341}
342
8115d60c 343int perf_event__parse_sample(const union perf_event *event, u64 type,
a2854124
FW
344 int sample_size, bool sample_id_all,
345 struct perf_sample *data)
d0dd74e8
ACM
346{
347 const u64 *array;
348
349 data->cpu = data->pid = data->tid = -1;
350 data->stream_id = data->id = data->time = -1ULL;
351
352 if (event->header.type != PERF_RECORD_SAMPLE) {
353 if (!sample_id_all)
354 return 0;
8115d60c 355 return perf_event__parse_id_sample(event, type, data);
d0dd74e8
ACM
356 }
357
358 array = event->sample.array;
359
a2854124
FW
360 if (sample_size + sizeof(event->header) > event->header.size)
361 return -EFAULT;
362
d0dd74e8
ACM
363 if (type & PERF_SAMPLE_IP) {
364 data->ip = event->ip.ip;
365 array++;
366 }
367
368 if (type & PERF_SAMPLE_TID) {
369 u32 *p = (u32 *)array;
370 data->pid = p[0];
371 data->tid = p[1];
372 array++;
373 }
374
375 if (type & PERF_SAMPLE_TIME) {
376 data->time = *array;
377 array++;
378 }
379
380 if (type & PERF_SAMPLE_ADDR) {
381 data->addr = *array;
382 array++;
383 }
384
385 data->id = -1ULL;
386 if (type & PERF_SAMPLE_ID) {
387 data->id = *array;
388 array++;
389 }
390
391 if (type & PERF_SAMPLE_STREAM_ID) {
392 data->stream_id = *array;
393 array++;
394 }
395
396 if (type & PERF_SAMPLE_CPU) {
397 u32 *p = (u32 *)array;
398 data->cpu = *p;
399 array++;
400 }
401
402 if (type & PERF_SAMPLE_PERIOD) {
403 data->period = *array;
404 array++;
405 }
406
407 if (type & PERF_SAMPLE_READ) {
408 fprintf(stderr, "PERF_SAMPLE_READ is unsuported for now\n");
409 return -1;
410 }
411
412 if (type & PERF_SAMPLE_CALLCHAIN) {
98e1da90
FW
413 if (sample_overlap(event, array, sizeof(data->callchain->nr)))
414 return -EFAULT;
415
d0dd74e8 416 data->callchain = (struct ip_callchain *)array;
98e1da90
FW
417
418 if (sample_overlap(event, array, data->callchain->nr))
419 return -EFAULT;
420
d0dd74e8
ACM
421 array += 1 + data->callchain->nr;
422 }
423
424 if (type & PERF_SAMPLE_RAW) {
425 u32 *p = (u32 *)array;
98e1da90
FW
426
427 if (sample_overlap(event, array, sizeof(u32)))
428 return -EFAULT;
429
d0dd74e8
ACM
430 data->raw_size = *p;
431 p++;
98e1da90
FW
432
433 if (sample_overlap(event, p, data->raw_size))
434 return -EFAULT;
435
d0dd74e8
ACM
436 data->raw_data = p;
437 }
438
439 return 0;
440}