/* tools/perf/util/evsel.c */
#include "evsel.h"
#include "evlist.h"
#include "../perf.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"

#include <unistd.h>
#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

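/*
 * Accessors for the xyarrays below: FD() yields the perf_event_open()
 * fd for a (cpu, thread) pair, SID() the matching perf_sample_id slot.
 */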
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->id, x, y)

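/* Initialize an already-allocated evsel with its attributes and index. */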
void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx = idx;
	evsel->attr = *attr;
	INIT_LIST_HEAD(&evsel->node);
}

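/* Allocate and initialize a new evsel; returns NULL if zalloc() fails. */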
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}

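/*
 * The allocators below size storage to the cpu and thread maps the
 * event will be opened on: fds and sample ids live in ncpus x nthreads
 * xyarrays, counts in one perf_counts_values per cpu plus an aggregate.
 */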
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
	return evsel->fd != NULL ? 0 : -ENOMEM;
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	return evsel->id != NULL ? 0 : -ENOMEM;
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->id);
	evsel->id = NULL;
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

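/* Undo perf_evlist__mmap(): unmap every per-cpu ring buffer. */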
void perf_evlist__munmap(struct perf_evlist *evlist, int ncpus)
{
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
}

int perf_evlist__alloc_mmap(struct perf_evlist *evlist, int ncpus)
{
	evlist->mmap = zalloc(ncpus * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->id);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	free(evsel);
}

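/*
 * Read the counter for one (cpu, thread). With scale == true the event
 * is expected to have been opened with PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_TOTAL_TIME_RUNNING, so three u64s are read and the value
 * is scaled by ena/run to compensate for time the event was descheduled
 * due to counter multiplexing.
 */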
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}

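/*
 * Sum the counter over all (cpu, thread) pairs into counts->aggr,
 * scaling the total as above. counts->scaled records whether scaling
 * was applied (1), unnecessary (0) or impossible because the counter
 * never ran (-1).
 */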
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	/* Reset the whole aggregate, ena/run included, so repeated reads
	 * don't accumulate stale enabled/running totals. */
	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}

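/*
 * Usage sketch (illustration only, not code from this file; assumes an
 * evsel already opened via one of the helpers below and <inttypes.h>):
 *
 *	if (__perf_evsel__read_on_cpu(evsel, 0, 0, true) == 0)
 *		printf("cpu0: %" PRIu64 "\n", evsel->counts->cpu[0].val);
 */

/*
 * Open the event on every (cpu, thread) pair, optionally grouping the
 * fds under the first one opened per cpu. On failure, every fd opened
 * so far is closed again.
 */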
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads, bool group, bool inherit)
{
	int cpu, thread;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -1;

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		int group_fd = -1;

		evsel->attr.inherit = (cpus->map[cpu] < 0) && inherit;

		for (thread = 0; thread < threads->nr; thread++) {
			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     threads->map[thread],
								     cpus->map[cpu],
								     group_fd, 0);
			if (FD(evsel, cpu, thread) < 0)
				goto out_close;

			if (group && group_fd == -1)
				group_fd = FD(evsel, cpu, thread);
		}
	}

	return 0;

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return -1;
}

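/*
 * Dummy single-entry maps holding -1, substituted when the caller
 * passes NULL so that perf_event_open() gets its "no specific
 * cpu/thread" argument.
 */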
static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads, bool group, bool inherit)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads, group, inherit);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus, bool group, bool inherit)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group, inherit);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads, bool group, bool inherit)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group, inherit);
}

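/* mmap one ring buffer for @cpu over @fd and add it to the pollfd set. */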
static int __perf_evlist__mmap(struct perf_evlist *evlist, int cpu, int prot,
			       int mask, int fd)
{
	evlist->mmap[cpu].prev = 0;
	evlist->mmap[cpu].mask = mask;
	evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[cpu].base == MAP_FAILED)
		return -1;

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

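/*
 * Pull the event ID out of a PERF_FORMAT_ID read on @fd and hash the
 * perf_sample_id for this (cpu, thread) so samples can be mapped back
 * to their evsel later.
 */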
static int perf_evlist__id_hash(struct perf_evlist *evlist, struct perf_evsel *evsel,
				int cpu, int thread, int fd)
{
	struct perf_sample_id *sid;
	u64 read_data[4] = { 0, };
	int hash, id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	sid = SID(evsel, cpu, thread);
	sid->id = read_data[id_idx];
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
	return 0;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @cpus - cpu map being monitored
 * @threads - threads map being monitored
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 */
int perf_evlist__mmap(struct perf_evlist *evlist, struct cpu_map *cpus,
		      struct thread_map *threads, int pages, bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	int mask = pages * page_size - 1, cpu;
	struct perf_evsel *first_evsel, *evsel;
	int thread, prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);

	if (evlist->mmap == NULL &&
	    perf_evlist__alloc_mmap(evlist, cpus->nr) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL &&
	    perf_evlist__alloc_pollfd(evlist, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;
	first_evsel = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->id == NULL &&
		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
			return -ENOMEM;

		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				int fd = FD(evsel, cpu, thread);

				if (evsel->idx || thread) {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT,
						  FD(first_evsel, cpu, 0)) != 0)
						goto out_unmap;
				} else if (__perf_evlist__mmap(evlist, cpu, prot, mask, fd) < 0)
					goto out_unmap;

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_hash(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}

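/*
 * Non-sample events carry the sample_id_all fields appended at the end
 * of the record, in reverse sample order, hence the back-to-front walk.
 */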
static int event__parse_id_sample(const event_t *event, u64 type,
				  struct perf_sample *sample)
{
	const u64 *array = event->sample.array;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		sample->cpu = *p;
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		sample->pid = p[0];
		sample->tid = p[1];
	}

	return 0;
}

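/*
 * Decode a PERF_RECORD_SAMPLE front to back: one u64 slot per
 * PERF_SAMPLE_* bit set in @type, in the order the kernel wrote them.
 */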
int event__parse_sample(const event_t *event, u64 type, bool sample_id_all,
			struct perf_sample *data)
{
	const u64 *array;

	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!sample_id_all)
			return 0;
		return event__parse_id_sample(event, type, data);
	}

	array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		data->pid = p[0];
		data->tid = p[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		data->cpu = *p;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		data->callchain = (struct ip_callchain *)array;
		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		u32 *p = (u32 *)array;
		data->raw_size = *p;
		p++;
		data->raw_data = p;
	}

	return 0;
}