tools/perf/util/evsel.c

/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <byteswap.h>
#include "asm/bug.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "../../include/linux/perf_event.h"

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))

int __perf_evsel__sample_size(u64 sample_type)
{
        u64 mask = sample_type & PERF_SAMPLE_MASK;
        int size = 0;
        int i;

        for (i = 0; i < 64; i++) {
                if (mask & (1ULL << i))
                        size++;
        }

        size *= sizeof(u64);

        return size;
}

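/*
 * Worked example: for sample_type == PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME, three bits of PERF_SAMPLE_MASK are set, so the
 * fixed-size portion of each sample is 3 * sizeof(u64) == 24 bytes.
 */
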
void hists__init(struct hists *hists)
{
        memset(hists, 0, sizeof(*hists));
        hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
        hists->entries_in = &hists->entries_in_array[0];
        hists->entries_collapsed = RB_ROOT;
        hists->entries = RB_ROOT;
        pthread_mutex_init(&hists->lock, NULL);
}

void perf_evsel__init(struct perf_evsel *evsel,
                      struct perf_event_attr *attr, int idx)
{
        evsel->idx = idx;
        evsel->attr = *attr;
        INIT_LIST_HEAD(&evsel->node);
        hists__init(&evsel->hists);
}

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
        struct perf_evsel *evsel = zalloc(sizeof(*evsel));

        if (evsel != NULL)
                perf_evsel__init(evsel, attr, idx);

        return evsel;
}

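/*
 * A minimal usage sketch (hypothetical caller, not from this file):
 *
 *     struct perf_event_attr attr = {
 *             .type   = PERF_TYPE_HARDWARE,
 *             .config = PERF_COUNT_HW_CPU_CYCLES,
 *     };
 *     struct perf_evsel *evsel = perf_evsel__new(&attr, 0);
 *
 * zalloc() may fail, in which case perf_evsel__new() returns NULL, so
 * callers are expected to check the result.
 */
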
static const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
        "cycles",
        "instructions",
        "cache-references",
        "cache-misses",
        "branches",
        "branch-misses",
        "bus-cycles",
        "stalled-cycles-frontend",
        "stalled-cycles-backend",
        "ref-cycles",
};

const char *__perf_evsel__hw_name(u64 config)
{
        if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
                return perf_evsel__hw_names[config];

        return "unknown-hardware";
}

static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
        int colon = 0, r = 0;
        struct perf_event_attr *attr = &evsel->attr;
        bool exclude_guest_default = false;

#define MOD_PRINT(context, mod) do {                                    \
                if (!attr->exclude_##context) {                         \
                        if (!colon) colon = ++r;                        \
                        r += scnprintf(bf + r, size - r, "%c", mod);    \
                } } while(0)

        if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
                MOD_PRINT(kernel, 'k');
                MOD_PRINT(user, 'u');
                MOD_PRINT(hv, 'h');
                exclude_guest_default = true;
        }

        if (attr->precise_ip) {
                if (!colon)
                        colon = ++r;
                r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
                exclude_guest_default = true;
        }

        if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
                MOD_PRINT(host, 'H');
                MOD_PRINT(guest, 'G');
        }
#undef MOD_PRINT
        if (colon)
                bf[colon - 1] = ':';
        return r;
}

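/*
 * Worked example: for an attr with exclude_user and exclude_hv set
 * (kernel-only counting) and precise_ip == 2, this appends ":kpp", so
 * a hardware name such as "cycles" renders as "cycles:kpp".
 */
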
static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
        int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
        return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

static const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
        "cpu-clock",
        "task-clock",
        "page-faults",
        "context-switches",
        "CPU-migrations",
        "minor-faults",
        "major-faults",
        "alignment-faults",
        "emulation-faults",
};

const char *__perf_evsel__sw_name(u64 config)
{
        if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
                return perf_evsel__sw_names[config];
        return "unknown-software";
}

static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
        int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
        return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
                                [PERF_EVSEL__MAX_ALIASES] = {
        { "L1-dcache", "l1-d",     "l1d",              "L1-data",        },
        { "L1-icache", "l1-i",     "l1i",              "L1-instruction", },
        { "LLC",       "L2",                                             },
        { "dTLB",      "d-tlb",    "Data-TLB",                           },
        { "iTLB",      "i-tlb",    "Instruction-TLB",                    },
        { "branch",    "branches", "bpu", "btb", "bpc",                  },
        { "node",                                                        },
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
                                   [PERF_EVSEL__MAX_ALIASES] = {
        { "load",     "loads",      "read",                               },
        { "store",    "stores",     "write",                              },
        { "prefetch", "prefetches", "speculative-read", "speculative-load", },
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
                                       [PERF_EVSEL__MAX_ALIASES] = {
        { "refs",   "Reference", "ops", "access", },
        { "misses", "miss",                       },
};

#define C(x)            PERF_COUNT_HW_CACHE_##x
#define CACHE_READ      (1 << C(OP_READ))
#define CACHE_WRITE     (1 << C(OP_WRITE))
#define CACHE_PREFETCH  (1 << C(OP_PREFETCH))
#define COP(x)          (1 << x)

/*
 * Cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
        [C(L1D)]  = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
        [C(L1I)]  = (CACHE_READ | CACHE_PREFETCH),
        [C(LL)]   = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
        [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
        [C(ITLB)] = (CACHE_READ),
        [C(BPU)]  = (CACHE_READ),
        [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
        if (perf_evsel__hw_cache_stat[type] & COP(op))
                return true;    /* valid */
        else
                return false;   /* invalid */
}

int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
                                            char *bf, size_t size)
{
        if (result) {
                return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
                                 perf_evsel__hw_cache_op[op][0],
                                 perf_evsel__hw_cache_result[result][0]);
        }

        return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
                         perf_evsel__hw_cache_op[op][1]);
}

int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
        u8 op, result, type = (config >> 0) & 0xff;
        const char *err = "unknown-ext-hardware-cache-type";

        if (type >= PERF_COUNT_HW_CACHE_MAX)
                goto out_err;

        op = (config >> 8) & 0xff;
        err = "unknown-ext-hardware-cache-op";
        if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
                goto out_err;

        result = (config >> 16) & 0xff;
        err = "unknown-ext-hardware-cache-result";
        if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                goto out_err;

        err = "invalid-cache";
        if (!perf_evsel__is_cache_op_valid(type, op))
                goto out_err;

        return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
        return scnprintf(bf, size, "%s", err);
}

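/*
 * Worked example of the config bit layout (type in bits 0-7, op in
 * bits 8-15, result in bits 16-23):
 *
 *     C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16)
 *
 * names as "L1-dcache-load-misses"; with RESULT_ACCESS (0) instead, the
 * two-part branch above is taken and the name is "L1-dcache-loads".
 */
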
static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
        int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
        return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__tracepoint_name(struct perf_evsel *evsel, char *bf, size_t size)
{
        return scnprintf(bf, size, "%s", evsel->name ?: "unknown tracepoint");
}

int perf_evsel__name(struct perf_evsel *evsel, char *bf, size_t size)
{
        int ret;

        switch (evsel->attr.type) {
        case PERF_TYPE_RAW:
                ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
                break;

        case PERF_TYPE_HARDWARE:
                ret = perf_evsel__hw_name(evsel, bf, size);
                break;

        case PERF_TYPE_HW_CACHE:
                ret = perf_evsel__hw_cache_name(evsel, bf, size);
                break;

        case PERF_TYPE_SOFTWARE:
                ret = perf_evsel__sw_name(evsel, bf, size);
                break;

        case PERF_TYPE_TRACEPOINT:
                ret = perf_evsel__tracepoint_name(evsel, bf, size);
                break;

        default:
                ret = scnprintf(bf, size, "%s", "unknown attr type");
                break;
        }

        return ret;
}

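/*
 * A minimal usage sketch (hypothetical caller): an evsel whose attr has
 * .type = PERF_TYPE_HARDWARE and .config = PERF_COUNT_HW_INSTRUCTIONS
 * renders as "instructions":
 *
 *     char bf[64];
 *     perf_evsel__name(evsel, bf, sizeof(bf));
 *
 * As with scnprintf(), the return value is the number of characters
 * actually written to bf.
 */
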
void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
                        struct perf_evsel *first)
{
        struct perf_event_attr *attr = &evsel->attr;
        int track = !evsel->idx; /* only the first counter needs these */

        attr->disabled = 1;
        attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
        attr->inherit = !opts->no_inherit;
        attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
                            PERF_FORMAT_TOTAL_TIME_RUNNING |
                            PERF_FORMAT_ID;

        attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

        /*
         * We default some events to a sample period of 1, but keep
         * it a weak assumption that the user can override.
         */
        if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
                                     opts->user_interval != ULLONG_MAX)) {
                if (opts->freq) {
                        attr->sample_type |= PERF_SAMPLE_PERIOD;
                        attr->freq = 1;
                        attr->sample_freq = opts->freq;
                } else {
                        attr->sample_period = opts->default_interval;
                }
        }

        if (opts->no_samples)
                attr->sample_freq = 0;

        if (opts->inherit_stat)
                attr->inherit_stat = 1;

        if (opts->sample_address) {
                attr->sample_type |= PERF_SAMPLE_ADDR;
                attr->mmap_data = track;
        }

        if (opts->call_graph)
                attr->sample_type |= PERF_SAMPLE_CALLCHAIN;

        if (perf_target__has_cpu(&opts->target))
                attr->sample_type |= PERF_SAMPLE_CPU;

        if (opts->period)
                attr->sample_type |= PERF_SAMPLE_PERIOD;

        if (!opts->sample_id_all_missing &&
            (opts->sample_time || !opts->no_inherit ||
             perf_target__has_cpu(&opts->target)))
                attr->sample_type |= PERF_SAMPLE_TIME;

        if (opts->raw_samples) {
                attr->sample_type |= PERF_SAMPLE_TIME;
                attr->sample_type |= PERF_SAMPLE_RAW;
                attr->sample_type |= PERF_SAMPLE_CPU;
        }

        if (opts->no_delay) {
                attr->watermark = 0;
                attr->wakeup_events = 1;
        }
        if (opts->branch_stack) {
                attr->sample_type |= PERF_SAMPLE_BRANCH_STACK;
                attr->branch_sample_type = opts->branch_stack;
        }

        attr->mmap = track;
        attr->comm = track;

        if (perf_target__none(&opts->target) &&
            (!opts->group || evsel == first)) {
                attr->enable_on_exec = 1;
        }
}

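/*
 * Worked example (assuming frequency-based sampling was requested and
 * inheritance left enabled): attr->sample_period starts out as zero, so
 * the weak-default block above sets attr->freq and adds
 * PERF_SAMPLE_PERIOD, and the !opts->no_inherit test adds
 * PERF_SAMPLE_TIME, leaving sample_type as IP | TID | TIME | PERIOD.
 */
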
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        int cpu, thread;
        evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

        if (evsel->fd) {
                for (cpu = 0; cpu < ncpus; cpu++) {
                        for (thread = 0; thread < nthreads; thread++) {
                                FD(evsel, cpu, thread) = -1;
                        }
                }
        }

        return evsel->fd != NULL ? 0 : -ENOMEM;
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
        if (evsel->sample_id == NULL)
                return -ENOMEM;

        evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
        if (evsel->id == NULL) {
                xyarray__delete(evsel->sample_id);
                evsel->sample_id = NULL;
                return -ENOMEM;
        }

        return 0;
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
        evsel->counts = zalloc((sizeof(*evsel->counts) +
                                (ncpus * sizeof(struct perf_counts_values))));
        return evsel->counts != NULL ? 0 : -ENOMEM;
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
        xyarray__delete(evsel->fd);
        evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
        xyarray__delete(evsel->sample_id);
        evsel->sample_id = NULL;
        free(evsel->id);
        evsel->id = NULL;
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        int cpu, thread;

        for (cpu = 0; cpu < ncpus; cpu++)
                for (thread = 0; thread < nthreads; ++thread) {
                        close(FD(evsel, cpu, thread));
                        FD(evsel, cpu, thread) = -1;
                }
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
        assert(list_empty(&evsel->node));
        xyarray__delete(evsel->fd);
        xyarray__delete(evsel->sample_id);
        free(evsel->id);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
        perf_evsel__exit(evsel);
        close_cgroup(evsel->cgrp);
        free(evsel->name);
        free(evsel);
}

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
                              int cpu, int thread, bool scale)
{
        struct perf_counts_values count;
        size_t nv = scale ? 3 : 1;

        if (FD(evsel, cpu, thread) < 0)
                return -EINVAL;

        if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
                return -ENOMEM;

        if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
                return -errno;

        if (scale) {
                if (count.run == 0)
                        count.val = 0;
                else if (count.run < count.ena)
                        count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
        } else
                count.ena = count.run = 0;

        evsel->counts->cpu[cpu] = count;
        return 0;
}

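/*
 * Worked example of the scaling above: if the event was multiplexed and
 * ran for half the time it was enabled (count.run == count.ena / 2), a
 * raw value of 1000 scales to (u64)(1000.0 * ena / run + 0.5) == 2000,
 * an estimate of what a full-time count would have been.
 */
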
int __perf_evsel__read(struct perf_evsel *evsel,
                       int ncpus, int nthreads, bool scale)
{
        size_t nv = scale ? 3 : 1;
        int cpu, thread;
        struct perf_counts_values *aggr = &evsel->counts->aggr, count;

        aggr->val = aggr->ena = aggr->run = 0;

        for (cpu = 0; cpu < ncpus; cpu++) {
                for (thread = 0; thread < nthreads; thread++) {
                        if (FD(evsel, cpu, thread) < 0)
                                continue;

                        if (readn(FD(evsel, cpu, thread),
                                  &count, nv * sizeof(u64)) < 0)
                                return -errno;

                        aggr->val += count.val;
                        if (scale) {
                                aggr->ena += count.ena;
                                aggr->run += count.run;
                        }
                }
        }

        evsel->counts->scaled = 0;
        if (scale) {
                if (aggr->run == 0) {
                        evsel->counts->scaled = -1;
                        aggr->val = 0;
                        return 0;
                }

                if (aggr->run < aggr->ena) {
                        evsel->counts->scaled = 1;
                        aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
                }
        } else
                aggr->ena = aggr->run = 0;

        return 0;
}

static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                              struct thread_map *threads, bool group,
                              struct xyarray *group_fds)
{
        int cpu, thread;
        unsigned long flags = 0;
        int pid = -1, err;

        if (evsel->fd == NULL &&
            perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
                return -ENOMEM;

        if (evsel->cgrp) {
                flags = PERF_FLAG_PID_CGROUP;
                pid = evsel->cgrp->fd;
        }

        for (cpu = 0; cpu < cpus->nr; cpu++) {
                int group_fd = group_fds ? GROUP_FD(group_fds, cpu) : -1;

                for (thread = 0; thread < threads->nr; thread++) {

                        if (!evsel->cgrp)
                                pid = threads->map[thread];

                        FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
                                                                     pid,
                                                                     cpus->map[cpu],
                                                                     group_fd, flags);
                        if (FD(evsel, cpu, thread) < 0) {
                                err = -errno;
                                goto out_close;
                        }

                        if (group && group_fd == -1)
                                group_fd = FD(evsel, cpu, thread);
                }
        }

        return 0;

out_close:
        do {
                while (--thread >= 0) {
                        close(FD(evsel, cpu, thread));
                        FD(evsel, cpu, thread) = -1;
                }
                thread = threads->nr;
        } while (--cpu >= 0);
        return err;
}

void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        if (evsel->fd == NULL)
                return;

        perf_evsel__close_fd(evsel, ncpus, nthreads);
        perf_evsel__free_fd(evsel);
        evsel->fd = NULL;
}

static struct {
        struct cpu_map map;
        int cpus[1];
} empty_cpu_map = {
        .map.nr = 1,
        .cpus = { -1, },
};

static struct {
        struct thread_map map;
        int threads[1];
} empty_thread_map = {
        .map.nr = 1,
        .threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                     struct thread_map *threads, bool group,
                     struct xyarray *group_fd)
{
        if (cpus == NULL) {
                /* Work around old compiler warnings about strict aliasing */
                cpus = &empty_cpu_map.map;
        }

        if (threads == NULL)
                threads = &empty_thread_map.map;

        return __perf_evsel__open(evsel, cpus, threads, group, group_fd);
}

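/*
 * A minimal lifecycle sketch (hypothetical caller): passing a NULL cpu
 * map falls back to the single-entry "-1" map above, so the event
 * follows the threads instead of being pinned to a CPU:
 *
 *     if (perf_evsel__open(evsel, NULL, threads, false, NULL) < 0)
 *             return -1;
 *     ...
 *     perf_evsel__close(evsel, 1, threads->nr);
 *
 * On failure a negative errno is returned and any fds opened so far
 * have already been closed by the out_close path in __perf_evsel__open().
 */
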
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
                             struct cpu_map *cpus, bool group,
                             struct xyarray *group_fd)
{
        return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group,
                                  group_fd);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
                                struct thread_map *threads, bool group,
                                struct xyarray *group_fd)
{
        return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group,
                                  group_fd);
}

static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
                                       struct perf_sample *sample,
                                       bool swapped)
{
        const u64 *array = event->sample.array;
        union u64_swap u;

        array += ((event->header.size -
                   sizeof(event->header)) / sizeof(u64)) - 1;

        if (type & PERF_SAMPLE_CPU) {
                u.val64 = *array;
                if (swapped) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                }

                sample->cpu = u.val32[0];
                array--;
        }

        if (type & PERF_SAMPLE_STREAM_ID) {
                sample->stream_id = *array;
                array--;
        }

        if (type & PERF_SAMPLE_ID) {
                sample->id = *array;
                array--;
        }

        if (type & PERF_SAMPLE_TIME) {
                sample->time = *array;
                array--;
        }

        if (type & PERF_SAMPLE_TID) {
                u.val64 = *array;
                if (swapped) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                        u.val32[1] = bswap_32(u.val32[1]);
                }

                sample->pid = u.val32[0];
                sample->tid = u.val32[1];
        }

        return 0;
}

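/*
 * Note on the backwards walk above: for non-sample records the
 * sample_id_all fields are appended at the tail of the record in
 * TID, TIME, ID, STREAM_ID, CPU order, so parsing starts at the last
 * u64 and steps back toward the header.
 */
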
static bool sample_overlap(const union perf_event *event,
                           const void *offset, u64 size)
{
        const void *base = event;

        if (offset + size > base + event->header.size)
                return true;

        return false;
}

int perf_event__parse_sample(const union perf_event *event, u64 type,
                             int sample_size, bool sample_id_all,
                             struct perf_sample *data, bool swapped)
{
        const u64 *array;

        /*
         * used for cross-endian analysis. See git commit 65014ab3
         * for why this goofiness is needed.
         */
        union u64_swap u;

        memset(data, 0, sizeof(*data));
        data->cpu = data->pid = data->tid = -1;
        data->stream_id = data->id = data->time = -1ULL;
        data->period = 1;

        if (event->header.type != PERF_RECORD_SAMPLE) {
                if (!sample_id_all)
                        return 0;
                return perf_event__parse_id_sample(event, type, data, swapped);
        }

        array = event->sample.array;

        if (sample_size + sizeof(event->header) > event->header.size)
                return -EFAULT;

        if (type & PERF_SAMPLE_IP) {
                data->ip = event->ip.ip;
                array++;
        }

        if (type & PERF_SAMPLE_TID) {
                u.val64 = *array;
                if (swapped) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                        u.val32[1] = bswap_32(u.val32[1]);
                }

                data->pid = u.val32[0];
                data->tid = u.val32[1];
                array++;
        }

        if (type & PERF_SAMPLE_TIME) {
                data->time = *array;
                array++;
        }

        data->addr = 0;
        if (type & PERF_SAMPLE_ADDR) {
                data->addr = *array;
                array++;
        }

        data->id = -1ULL;
        if (type & PERF_SAMPLE_ID) {
                data->id = *array;
                array++;
        }

        if (type & PERF_SAMPLE_STREAM_ID) {
                data->stream_id = *array;
                array++;
        }

        if (type & PERF_SAMPLE_CPU) {

                u.val64 = *array;
                if (swapped) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                }

                data->cpu = u.val32[0];
                array++;
        }

        if (type & PERF_SAMPLE_PERIOD) {
                data->period = *array;
                array++;
        }

        if (type & PERF_SAMPLE_READ) {
                fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
                return -1;
        }

        if (type & PERF_SAMPLE_CALLCHAIN) {
                if (sample_overlap(event, array, sizeof(data->callchain->nr)))
                        return -EFAULT;

                data->callchain = (struct ip_callchain *)array;

                if (sample_overlap(event, array, data->callchain->nr))
                        return -EFAULT;

                array += 1 + data->callchain->nr;
        }

        if (type & PERF_SAMPLE_RAW) {
                const u64 *pdata;

                u.val64 = *array;
                if (WARN_ONCE(swapped,
                              "Endianness of raw data not corrected!\n")) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                        u.val32[1] = bswap_32(u.val32[1]);
                }

                if (sample_overlap(event, array, sizeof(u32)))
                        return -EFAULT;

                data->raw_size = u.val32[0];
                pdata = (void *) array + sizeof(u32);

                if (sample_overlap(event, pdata, data->raw_size))
                        return -EFAULT;

                data->raw_data = (void *) pdata;

                array = (void *)array + data->raw_size + sizeof(u32);
        }

        if (type & PERF_SAMPLE_BRANCH_STACK) {
                u64 sz;

                data->branch_stack = (struct branch_stack *)array;
                array++; /* nr */

                sz = data->branch_stack->nr * sizeof(struct branch_entry);
                sz /= sizeof(u64);
                array += sz;
        }
        return 0;
}

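/*
 * Worked layout example: with type == PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME | PERF_SAMPLE_PERIOD, the u64 array following the
 * header is { ip, pid/tid packed as two u32s, time, period }, and
 * perf_event__parse_sample() consumes it in exactly that order;
 * perf_event__synthesize_sample() below writes the same layout back.
 */
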
int perf_event__synthesize_sample(union perf_event *event, u64 type,
                                  const struct perf_sample *sample,
                                  bool swapped)
{
        u64 *array;

        /*
         * used for cross-endian analysis. See git commit 65014ab3
         * for why this goofiness is needed.
         */
        union u64_swap u;

        array = event->sample.array;

        if (type & PERF_SAMPLE_IP) {
                event->ip.ip = sample->ip;
                array++;
        }

        if (type & PERF_SAMPLE_TID) {
                u.val32[0] = sample->pid;
                u.val32[1] = sample->tid;
                if (swapped) {
                        /*
                         * Inverse of what is done in perf_event__parse_sample
                         */
                        u.val32[0] = bswap_32(u.val32[0]);
                        u.val32[1] = bswap_32(u.val32[1]);
                        u.val64 = bswap_64(u.val64);
                }

                *array = u.val64;
                array++;
        }

        if (type & PERF_SAMPLE_TIME) {
                *array = sample->time;
                array++;
        }

        if (type & PERF_SAMPLE_ADDR) {
                *array = sample->addr;
                array++;
        }

        if (type & PERF_SAMPLE_ID) {
                *array = sample->id;
                array++;
        }

        if (type & PERF_SAMPLE_STREAM_ID) {
                *array = sample->stream_id;
                array++;
        }

        if (type & PERF_SAMPLE_CPU) {
                u.val32[0] = sample->cpu;
                if (swapped) {
                        /*
                         * Inverse of what is done in perf_event__parse_sample
                         */
                        u.val32[0] = bswap_32(u.val32[0]);
                        u.val64 = bswap_64(u.val64);
                }
                *array = u.val64;
                array++;
        }

        if (type & PERF_SAMPLE_PERIOD) {
                *array = sample->period;
                array++;
        }

        return 0;
}