]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - tools/perf/util/header.c
perf evlist: Remove needless util.h from evlist.h
[mirror_ubuntu-jammy-kernel.git] / tools / perf / util / header.c
CommitLineData
b2441318 1// SPDX-License-Identifier: GPL-2.0
a43783ae 2#include <errno.h>
fd20e811 3#include <inttypes.h>
a067558e 4#include "string2.h"
391e4206 5#include <sys/param.h>
7c6a1c65 6#include <sys/types.h>
ba21594c 7#include <byteswap.h>
7c6a1c65
PZ
8#include <unistd.h>
9#include <stdio.h>
10#include <stdlib.h>
0353631a 11#include <linux/compiler.h>
8671dab9 12#include <linux/list.h>
ba21594c 13#include <linux/kernel.h>
b1e5a9be 14#include <linux/bitops.h>
fc6a1726 15#include <linux/string.h>
a4d8c985 16#include <linux/stringify.h>
7f7c536f 17#include <linux/zalloc.h>
7a8ef4c4 18#include <sys/stat.h>
fbe96f29 19#include <sys/utsname.h>
6011518d 20#include <linux/time64.h>
e2091ced 21#include <dirent.h>
606f972b 22#include <bpf/libbpf.h>
9c3516d1 23#include <perf/cpumap.h>
7c6a1c65 24
361c99a6 25#include "evlist.h"
a91e5431 26#include "evsel.h"
7c6a1c65 27#include "header.h"
98521b38 28#include "memswap.h"
03456a15
FW
29#include "../perf.h"
30#include "trace-event.h"
301a0b02 31#include "session.h"
8671dab9 32#include "symbol.h"
4778d2e4 33#include "debug.h"
fbe96f29 34#include "cpumap.h"
50a9667c 35#include "pmu.h"
7dbf4dcf 36#include "vdso.h"
a1ae5655 37#include "strbuf.h"
ebb296c2 38#include "build-id.h"
cc9784bd 39#include "data.h"
720e98b5
JO
40#include <api/fs/fs.h>
41#include "asm/bug.h"
e9def1b2 42#include "tool.h"
6011518d 43#include "time-utils.h"
e2091ced 44#include "units.h"
2da39f1c 45#include "util.h"
5135d5ef 46#include "cputopo.h"
606f972b 47#include "bpf-event.h"
7c6a1c65 48
3052ba56 49#include <linux/ctype.h>
3d689ed6 50
73323f54
SE
51/*
52 * magic2 = "PERFILE2"
53 * must be a numerical value to let the endianness
54 * determine the memory layout. That way we are able
55 * to detect endianness when reading the perf.data file
56 * back.
57 *
58 * we check for legacy (PERFFILE) format.
59 */
60static const char *__perf_magic1 = "PERFFILE";
61static const u64 __perf_magic2 = 0x32454c4946524550ULL;
62static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
7c6a1c65 63
73323f54 64#define PERF_MAGIC __perf_magic2
7c6a1c65 65
d25ed5d9
SA
66const char perf_version_string[] = PERF_VERSION;
67
7c6a1c65 68struct perf_file_attr {
cdd6c482 69 struct perf_event_attr attr;
7c6a1c65
PZ
70 struct perf_file_section ids;
71};
72
ccebbeb6
DCC
73struct feat_fd {
74 struct perf_header *ph;
75 int fd;
0b3d3410 76 void *buf; /* Either buf != NULL or fd >= 0 */
62552457
DCC
77 ssize_t offset;
78 size_t size;
32dcd021 79 struct evsel *events;
ccebbeb6
DCC
80};
81
1c0b04d1 82void perf_header__set_feat(struct perf_header *header, int feat)
8d06367f 83{
1c0b04d1 84 set_bit(feat, header->adds_features);
8d06367f
ACM
85}
86
1c0b04d1 87void perf_header__clear_feat(struct perf_header *header, int feat)
baa2f6ce 88{
1c0b04d1 89 clear_bit(feat, header->adds_features);
baa2f6ce
ACM
90}
91
1c0b04d1 92bool perf_header__has_feat(const struct perf_header *header, int feat)
8d06367f 93{
1c0b04d1 94 return test_bit(feat, header->adds_features);
8d06367f
ACM
95}
96
0b3d3410 97static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
7c6a1c65 98{
0b3d3410 99 ssize_t ret = writen(ff->fd, buf, size);
7c6a1c65 100
3b8f51a6
DCC
101 if (ret != (ssize_t)size)
102 return ret < 0 ? (int)ret : -1;
0b3d3410
DCC
103 return 0;
104}
105
106static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size)
107{
108 /* struct perf_event_header::size is u16 */
109 const size_t max_size = 0xffff - sizeof(struct perf_event_header);
110 size_t new_size = ff->size;
111 void *addr;
112
113 if (size + ff->offset > max_size)
114 return -E2BIG;
115
116 while (size > (new_size - ff->offset))
117 new_size <<= 1;
118 new_size = min(max_size, new_size);
119
120 if (ff->size < new_size) {
121 addr = realloc(ff->buf, new_size);
122 if (!addr)
123 return -ENOMEM;
124 ff->buf = addr;
125 ff->size = new_size;
126 }
127
128 memcpy(ff->buf + ff->offset, buf, size);
129 ff->offset += size;
3726cc75
ACM
130
131 return 0;
7c6a1c65
PZ
132}
133
0b3d3410
DCC
134/* Return: 0 if succeded, -ERR if failed. */
135int do_write(struct feat_fd *ff, const void *buf, size_t size)
136{
137 if (!ff->buf)
138 return __do_write_fd(ff, buf, size);
139 return __do_write_buf(ff, buf, size);
140}
141
e2091ced
JO
142/* Return: 0 if succeded, -ERR if failed. */
143static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
144{
145 u64 *p = (u64 *) set;
146 int i, ret;
147
148 ret = do_write(ff, &size, sizeof(size));
149 if (ret < 0)
150 return ret;
151
152 for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
153 ret = do_write(ff, p + i, sizeof(*p));
154 if (ret < 0)
155 return ret;
156 }
157
158 return 0;
159}
160
2ff5365d 161/* Return: 0 if succeded, -ERR if failed. */
ccebbeb6
DCC
162int write_padded(struct feat_fd *ff, const void *bf,
163 size_t count, size_t count_aligned)
f92cb24c
ACM
164{
165 static const char zero_buf[NAME_ALIGN];
ccebbeb6 166 int err = do_write(ff, bf, count);
f92cb24c
ACM
167
168 if (!err)
ccebbeb6 169 err = do_write(ff, zero_buf, count_aligned - count);
f92cb24c
ACM
170
171 return err;
172}
173
2bb00d2f
KL
174#define string_size(str) \
175 (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
176
2ff5365d 177/* Return: 0 if succeded, -ERR if failed. */
ccebbeb6 178static int do_write_string(struct feat_fd *ff, const char *str)
fbe96f29
SE
179{
180 u32 len, olen;
181 int ret;
182
183 olen = strlen(str) + 1;
9ac3e487 184 len = PERF_ALIGN(olen, NAME_ALIGN);
fbe96f29
SE
185
186 /* write len, incl. \0 */
ccebbeb6 187 ret = do_write(ff, &len, sizeof(len));
fbe96f29
SE
188 if (ret < 0)
189 return ret;
190
ccebbeb6 191 return write_padded(ff, str, olen, len);
fbe96f29
SE
192}
193
0b3d3410 194static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
6200e494 195{
48e5fcea 196 ssize_t ret = readn(ff->fd, addr, size);
6200e494
DCC
197
198 if (ret != size)
199 return ret < 0 ? (int)ret : -1;
200 return 0;
201}
202
0b3d3410
DCC
203static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
204{
205 if (size > (ssize_t)ff->size - ff->offset)
206 return -1;
207
208 memcpy(addr, ff->buf + ff->offset, size);
209 ff->offset += size;
210
211 return 0;
212
213}
214
215static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
216{
217 if (!ff->buf)
218 return __do_read_fd(ff, addr, size);
219 return __do_read_buf(ff, addr, size);
220}
221
48e5fcea 222static int do_read_u32(struct feat_fd *ff, u32 *addr)
6200e494
DCC
223{
224 int ret;
225
48e5fcea 226 ret = __do_read(ff, addr, sizeof(*addr));
6200e494
DCC
227 if (ret)
228 return ret;
229
48e5fcea 230 if (ff->ph->needs_swap)
6200e494
DCC
231 *addr = bswap_32(*addr);
232 return 0;
233}
234
48e5fcea 235static int do_read_u64(struct feat_fd *ff, u64 *addr)
6200e494
DCC
236{
237 int ret;
238
48e5fcea 239 ret = __do_read(ff, addr, sizeof(*addr));
6200e494
DCC
240 if (ret)
241 return ret;
242
48e5fcea 243 if (ff->ph->needs_swap)
6200e494
DCC
244 *addr = bswap_64(*addr);
245 return 0;
246}
247
48e5fcea 248static char *do_read_string(struct feat_fd *ff)
fbe96f29 249{
fbe96f29
SE
250 u32 len;
251 char *buf;
252
48e5fcea 253 if (do_read_u32(ff, &len))
fbe96f29
SE
254 return NULL;
255
fbe96f29
SE
256 buf = malloc(len);
257 if (!buf)
258 return NULL;
259
48e5fcea 260 if (!__do_read(ff, buf, len)) {
fbe96f29
SE
261 /*
262 * strings are padded by zeroes
263 * thus the actual strlen of buf
264 * may be less than len
265 */
266 return buf;
267 }
268
269 free(buf);
270 return NULL;
271}
272
e2091ced
JO
273/* Return: 0 if succeded, -ERR if failed. */
274static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
275{
276 unsigned long *set;
277 u64 size, *p;
278 int i, ret;
279
280 ret = do_read_u64(ff, &size);
281 if (ret)
282 return ret;
283
284 set = bitmap_alloc(size);
285 if (!set)
286 return -ENOMEM;
287
e2091ced
JO
288 p = (u64 *) set;
289
290 for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
291 ret = do_read_u64(ff, p + i);
292 if (ret < 0) {
293 free(set);
294 return ret;
295 }
296 }
297
298 *pset = set;
299 *psize = size;
300 return 0;
301}
302
ccebbeb6 303static int write_tracing_data(struct feat_fd *ff,
63503dba 304 struct evlist *evlist)
fbe96f29 305{
0b3d3410
DCC
306 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
307 return -1;
308
ce9036a6 309 return read_tracing_data(ff->fd, &evlist->core.entries);
fbe96f29
SE
310}
311
ccebbeb6 312static int write_build_id(struct feat_fd *ff,
63503dba 313 struct evlist *evlist __maybe_unused)
fbe96f29
SE
314{
315 struct perf_session *session;
316 int err;
317
ccebbeb6 318 session = container_of(ff->ph, struct perf_session, header);
fbe96f29 319
e20960c0
RR
320 if (!perf_session__read_build_ids(session, true))
321 return -1;
322
0b3d3410
DCC
323 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
324 return -1;
325
ccebbeb6 326 err = perf_session__write_buildid_table(session, ff);
fbe96f29
SE
327 if (err < 0) {
328 pr_debug("failed to write buildid table\n");
329 return err;
330 }
73c5d224 331 perf_session__cache_build_ids(session);
fbe96f29
SE
332
333 return 0;
334}
335
ccebbeb6 336static int write_hostname(struct feat_fd *ff,
63503dba 337 struct evlist *evlist __maybe_unused)
fbe96f29
SE
338{
339 struct utsname uts;
340 int ret;
341
342 ret = uname(&uts);
343 if (ret < 0)
344 return -1;
345
ccebbeb6 346 return do_write_string(ff, uts.nodename);
fbe96f29
SE
347}
348
ccebbeb6 349static int write_osrelease(struct feat_fd *ff,
63503dba 350 struct evlist *evlist __maybe_unused)
fbe96f29
SE
351{
352 struct utsname uts;
353 int ret;
354
355 ret = uname(&uts);
356 if (ret < 0)
357 return -1;
358
ccebbeb6 359 return do_write_string(ff, uts.release);
fbe96f29
SE
360}
361
ccebbeb6 362static int write_arch(struct feat_fd *ff,
63503dba 363 struct evlist *evlist __maybe_unused)
fbe96f29
SE
364{
365 struct utsname uts;
366 int ret;
367
368 ret = uname(&uts);
369 if (ret < 0)
370 return -1;
371
ccebbeb6 372 return do_write_string(ff, uts.machine);
fbe96f29
SE
373}
374
ccebbeb6 375static int write_version(struct feat_fd *ff,
63503dba 376 struct evlist *evlist __maybe_unused)
fbe96f29 377{
ccebbeb6 378 return do_write_string(ff, perf_version_string);
fbe96f29
SE
379}
380
/*
 * Scan /proc/cpuinfo for the line starting with 'cpuinfo_proc' (the
 * arch-specific model-name key), normalize its value (strip the key,
 * trailing newline, and runs of whitespace) and write it as a string.
 */
static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
{
	FILE *file;
	char *buf = NULL;
	char *s, *p;
	const char *search = cpuinfo_proc;
	size_t len = 0;
	int ret = -1;

	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret) {
		ret = -1;
		goto done;
	}

	s = buf;

	/* skip over the "key: " prefix when present */
	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))
		s = p + 2;
	p = strchr(s, '\n');
	if (p)
		*p = '\0';

	/* squash extra space characters (branding string) */
	p = s;
	while (*p) {
		if (isspace(*p)) {
			char *r = p + 1;
			char *q = skip_spaces(r);
			*p = ' ';
			if (q != (p+1))
				while ((*r++ = *q++));
		}
		p++;
	}
	ret = do_write_string(ff, s);
done:
	free(buf);
	fclose(file);
	return ret;
}
435
ccebbeb6 436static int write_cpudesc(struct feat_fd *ff,
63503dba 437 struct evlist *evlist __maybe_unused)
493c3031 438{
493c3031
WN
439 const char *cpuinfo_procs[] = CPUINFO_PROC;
440 unsigned int i;
441
442 for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
443 int ret;
ccebbeb6 444 ret = __write_cpudesc(ff, cpuinfo_procs[i]);
493c3031
WN
445 if (ret >= 0)
446 return ret;
447 }
448 return -1;
449}
450
451
ccebbeb6 452static int write_nrcpus(struct feat_fd *ff,
63503dba 453 struct evlist *evlist __maybe_unused)
fbe96f29
SE
454{
455 long nr;
456 u32 nrc, nra;
457 int ret;
458
da8a58b5 459 nrc = cpu__max_present_cpu();
fbe96f29
SE
460
461 nr = sysconf(_SC_NPROCESSORS_ONLN);
462 if (nr < 0)
463 return -1;
464
465 nra = (u32)(nr & UINT_MAX);
466
ccebbeb6 467 ret = do_write(ff, &nrc, sizeof(nrc));
fbe96f29
SE
468 if (ret < 0)
469 return ret;
470
ccebbeb6 471 return do_write(ff, &nra, sizeof(nra));
fbe96f29
SE
472}
473
ccebbeb6 474static int write_event_desc(struct feat_fd *ff,
63503dba 475 struct evlist *evlist)
fbe96f29 476{
32dcd021 477 struct evsel *evsel;
74ba9e11 478 u32 nre, nri, sz;
fbe96f29
SE
479 int ret;
480
6484d2f9 481 nre = evlist->core.nr_entries;
fbe96f29
SE
482
483 /*
484 * write number of events
485 */
ccebbeb6 486 ret = do_write(ff, &nre, sizeof(nre));
fbe96f29
SE
487 if (ret < 0)
488 return ret;
489
490 /*
491 * size of perf_event_attr struct
492 */
1fc632ce 493 sz = (u32)sizeof(evsel->core.attr);
ccebbeb6 494 ret = do_write(ff, &sz, sizeof(sz));
fbe96f29
SE
495 if (ret < 0)
496 return ret;
497
e5cadb93 498 evlist__for_each_entry(evlist, evsel) {
1fc632ce 499 ret = do_write(ff, &evsel->core.attr, sz);
fbe96f29
SE
500 if (ret < 0)
501 return ret;
502 /*
503 * write number of unique id per event
504 * there is one id per instance of an event
505 *
506 * copy into an nri to be independent of the
507 * type of ids,
508 */
6606f873 509 nri = evsel->ids;
ccebbeb6 510 ret = do_write(ff, &nri, sizeof(nri));
fbe96f29
SE
511 if (ret < 0)
512 return ret;
513
514 /*
515 * write event string as passed on cmdline
516 */
ccebbeb6 517 ret = do_write_string(ff, perf_evsel__name(evsel));
fbe96f29
SE
518 if (ret < 0)
519 return ret;
520 /*
521 * write unique ids for this event
522 */
ccebbeb6 523 ret = do_write(ff, evsel->id, evsel->ids * sizeof(u64));
fbe96f29
SE
524 if (ret < 0)
525 return ret;
526 }
527 return 0;
528}
529
ccebbeb6 530static int write_cmdline(struct feat_fd *ff,
63503dba 531 struct evlist *evlist __maybe_unused)
fbe96f29 532{
94816add
AK
533 char pbuf[MAXPATHLEN], *buf;
534 int i, ret, n;
fbe96f29 535
55f77128 536 /* actual path to perf binary */
94816add 537 buf = perf_exe(pbuf, MAXPATHLEN);
fbe96f29
SE
538
539 /* account for binary path */
b6998692 540 n = perf_env.nr_cmdline + 1;
fbe96f29 541
ccebbeb6 542 ret = do_write(ff, &n, sizeof(n));
fbe96f29
SE
543 if (ret < 0)
544 return ret;
545
ccebbeb6 546 ret = do_write_string(ff, buf);
fbe96f29
SE
547 if (ret < 0)
548 return ret;
549
b6998692 550 for (i = 0 ; i < perf_env.nr_cmdline; i++) {
ccebbeb6 551 ret = do_write_string(ff, perf_env.cmdline_argv[i]);
fbe96f29
SE
552 if (ret < 0)
553 return ret;
554 }
555 return 0;
556}
557
fbe96f29 558
ccebbeb6 559static int write_cpu_topology(struct feat_fd *ff,
63503dba 560 struct evlist *evlist __maybe_unused)
fbe96f29 561{
5135d5ef 562 struct cpu_topology *tp;
fbe96f29 563 u32 i;
aa36ddd7 564 int ret, j;
fbe96f29 565
5135d5ef 566 tp = cpu_topology__new();
fbe96f29
SE
567 if (!tp)
568 return -1;
569
ccebbeb6 570 ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib));
fbe96f29
SE
571 if (ret < 0)
572 goto done;
573
574 for (i = 0; i < tp->core_sib; i++) {
ccebbeb6 575 ret = do_write_string(ff, tp->core_siblings[i]);
fbe96f29
SE
576 if (ret < 0)
577 goto done;
578 }
ccebbeb6 579 ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib));
fbe96f29
SE
580 if (ret < 0)
581 goto done;
582
583 for (i = 0; i < tp->thread_sib; i++) {
ccebbeb6 584 ret = do_write_string(ff, tp->thread_siblings[i]);
fbe96f29
SE
585 if (ret < 0)
586 break;
587 }
2bb00d2f 588
aa36ddd7
ACM
589 ret = perf_env__read_cpu_topology_map(&perf_env);
590 if (ret < 0)
591 goto done;
592
593 for (j = 0; j < perf_env.nr_cpus_avail; j++) {
ccebbeb6 594 ret = do_write(ff, &perf_env.cpu[j].core_id,
aa36ddd7 595 sizeof(perf_env.cpu[j].core_id));
2bb00d2f
KL
596 if (ret < 0)
597 return ret;
ccebbeb6 598 ret = do_write(ff, &perf_env.cpu[j].socket_id,
aa36ddd7 599 sizeof(perf_env.cpu[j].socket_id));
2bb00d2f
KL
600 if (ret < 0)
601 return ret;
602 }
acae8b36
KL
603
604 if (!tp->die_sib)
605 goto done;
606
607 ret = do_write(ff, &tp->die_sib, sizeof(tp->die_sib));
608 if (ret < 0)
609 goto done;
610
611 for (i = 0; i < tp->die_sib; i++) {
612 ret = do_write_string(ff, tp->die_siblings[i]);
613 if (ret < 0)
614 goto done;
615 }
616
617 for (j = 0; j < perf_env.nr_cpus_avail; j++) {
618 ret = do_write(ff, &perf_env.cpu[j].die_id,
619 sizeof(perf_env.cpu[j].die_id));
620 if (ret < 0)
621 return ret;
622 }
623
fbe96f29 624done:
5135d5ef 625 cpu_topology__delete(tp);
fbe96f29
SE
626 return ret;
627}
628
629
630
ccebbeb6 631static int write_total_mem(struct feat_fd *ff,
63503dba 632 struct evlist *evlist __maybe_unused)
fbe96f29
SE
633{
634 char *buf = NULL;
635 FILE *fp;
636 size_t len = 0;
637 int ret = -1, n;
638 uint64_t mem;
639
640 fp = fopen("/proc/meminfo", "r");
641 if (!fp)
642 return -1;
643
644 while (getline(&buf, &len, fp) > 0) {
645 ret = strncmp(buf, "MemTotal:", 9);
646 if (!ret)
647 break;
648 }
649 if (!ret) {
650 n = sscanf(buf, "%*s %"PRIu64, &mem);
651 if (n == 1)
ccebbeb6 652 ret = do_write(ff, &mem, sizeof(mem));
ed307758
WN
653 } else
654 ret = -1;
fbe96f29
SE
655 free(buf);
656 fclose(fp);
657 return ret;
658}
659
ccebbeb6 660static int write_numa_topology(struct feat_fd *ff,
63503dba 661 struct evlist *evlist __maybe_unused)
fbe96f29 662{
48e6c5ac 663 struct numa_topology *tp;
fbe96f29 664 int ret = -1;
48e6c5ac 665 u32 i;
fbe96f29 666
48e6c5ac
JO
667 tp = numa_topology__new();
668 if (!tp)
669 return -ENOMEM;
fbe96f29 670
48e6c5ac
JO
671 ret = do_write(ff, &tp->nr, sizeof(u32));
672 if (ret < 0)
673 goto err;
fbe96f29 674
48e6c5ac
JO
675 for (i = 0; i < tp->nr; i++) {
676 struct numa_topology_node *n = &tp->nodes[i];
fbe96f29 677
48e6c5ac
JO
678 ret = do_write(ff, &n->node, sizeof(u32));
679 if (ret < 0)
680 goto err;
fbe96f29 681
48e6c5ac
JO
682 ret = do_write(ff, &n->mem_total, sizeof(u64));
683 if (ret)
684 goto err;
fbe96f29 685
48e6c5ac
JO
686 ret = do_write(ff, &n->mem_free, sizeof(u64));
687 if (ret)
688 goto err;
fbe96f29 689
48e6c5ac 690 ret = do_write_string(ff, n->cpus);
fbe96f29 691 if (ret < 0)
48e6c5ac 692 goto err;
fbe96f29 693 }
48e6c5ac
JO
694
695 ret = 0;
696
697err:
698 numa_topology__delete(tp);
fbe96f29
SE
699 return ret;
700}
701
50a9667c
RR
702/*
703 * File format:
704 *
705 * struct pmu_mappings {
706 * u32 pmu_num;
707 * struct pmu_map {
708 * u32 type;
709 * char name[];
710 * }[pmu_num];
711 * };
712 */
713
ccebbeb6 714static int write_pmu_mappings(struct feat_fd *ff,
63503dba 715 struct evlist *evlist __maybe_unused)
50a9667c
RR
716{
717 struct perf_pmu *pmu = NULL;
a02c395c 718 u32 pmu_num = 0;
5323f60c 719 int ret;
50a9667c 720
a02c395c
DCC
721 /*
722 * Do a first pass to count number of pmu to avoid lseek so this
723 * works in pipe mode as well.
724 */
725 while ((pmu = perf_pmu__scan(pmu))) {
726 if (!pmu->name)
727 continue;
728 pmu_num++;
729 }
730
ccebbeb6 731 ret = do_write(ff, &pmu_num, sizeof(pmu_num));
5323f60c
NK
732 if (ret < 0)
733 return ret;
50a9667c
RR
734
735 while ((pmu = perf_pmu__scan(pmu))) {
736 if (!pmu->name)
737 continue;
5323f60c 738
ccebbeb6 739 ret = do_write(ff, &pmu->type, sizeof(pmu->type));
5323f60c
NK
740 if (ret < 0)
741 return ret;
742
ccebbeb6 743 ret = do_write_string(ff, pmu->name);
5323f60c
NK
744 if (ret < 0)
745 return ret;
50a9667c
RR
746 }
747
50a9667c
RR
748 return 0;
749}
750
a8bb559b
NK
751/*
752 * File format:
753 *
754 * struct group_descs {
755 * u32 nr_groups;
756 * struct group_desc {
757 * char name[];
758 * u32 leader_idx;
759 * u32 nr_members;
760 * }[nr_groups];
761 * };
762 */
ccebbeb6 763static int write_group_desc(struct feat_fd *ff,
63503dba 764 struct evlist *evlist)
a8bb559b
NK
765{
766 u32 nr_groups = evlist->nr_groups;
32dcd021 767 struct evsel *evsel;
a8bb559b
NK
768 int ret;
769
ccebbeb6 770 ret = do_write(ff, &nr_groups, sizeof(nr_groups));
a8bb559b
NK
771 if (ret < 0)
772 return ret;
773
e5cadb93 774 evlist__for_each_entry(evlist, evsel) {
a8bb559b 775 if (perf_evsel__is_group_leader(evsel) &&
5643b1a5 776 evsel->core.nr_members > 1) {
a8bb559b
NK
777 const char *name = evsel->group_name ?: "{anon_group}";
778 u32 leader_idx = evsel->idx;
5643b1a5 779 u32 nr_members = evsel->core.nr_members;
a8bb559b 780
ccebbeb6 781 ret = do_write_string(ff, name);
a8bb559b
NK
782 if (ret < 0)
783 return ret;
784
ccebbeb6 785 ret = do_write(ff, &leader_idx, sizeof(leader_idx));
a8bb559b
NK
786 if (ret < 0)
787 return ret;
788
ccebbeb6 789 ret = do_write(ff, &nr_members, sizeof(nr_members));
a8bb559b
NK
790 if (ret < 0)
791 return ret;
792 }
793 }
794 return 0;
795}
796
f4a0742b
KL
797/*
798 * Return the CPU id as a raw string.
799 *
800 * Each architecture should provide a more precise id string that
801 * can be use to match the architecture's "mapfile".
802 */
803char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
804{
805 return NULL;
806}
807
808/* Return zero when the cpuid from the mapfile.csv matches the
809 * cpuid string generated on this platform.
810 * Otherwise return non-zero.
811 */
812int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
813{
814 regex_t re;
815 regmatch_t pmatch[1];
816 int match;
817
818 if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
819 /* Warn unable to generate match particular string. */
820 pr_info("Invalid regular expression %s\n", mapcpuid);
821 return 1;
822 }
823
824 match = !regexec(&re, cpuid, 1, pmatch, 0);
825 regfree(&re);
826 if (match) {
827 size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);
828
829 /* Verify the entire string matched. */
830 if (match_len == strlen(cpuid))
831 return 0;
832 }
833 return 1;
834}
835
fbe96f29
SE
836/*
837 * default get_cpuid(): nothing gets recorded
7a759cd8 838 * actual implementation must be in arch/$(SRCARCH)/util/header.c
fbe96f29 839 */
11d8f870 840int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
fbe96f29
SE
841{
842 return -1;
843}
844
ccebbeb6 845static int write_cpuid(struct feat_fd *ff,
63503dba 846 struct evlist *evlist __maybe_unused)
fbe96f29
SE
847{
848 char buffer[64];
849 int ret;
850
851 ret = get_cpuid(buffer, sizeof(buffer));
a9aeb87b
JO
852 if (ret)
853 return -1;
fbe96f29 854
ccebbeb6 855 return do_write_string(ff, buffer);
fbe96f29
SE
856}
857
ccebbeb6 858static int write_branch_stack(struct feat_fd *ff __maybe_unused,
63503dba 859 struct evlist *evlist __maybe_unused)
330aa675
SE
860{
861 return 0;
862}
863
ccebbeb6 864static int write_auxtrace(struct feat_fd *ff,
63503dba 865 struct evlist *evlist __maybe_unused)
4025ea40 866{
99fa2984
AH
867 struct perf_session *session;
868 int err;
869
0b3d3410
DCC
870 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
871 return -1;
872
ccebbeb6 873 session = container_of(ff->ph, struct perf_session, header);
99fa2984 874
ccebbeb6 875 err = auxtrace_index__write(ff->fd, &session->auxtrace_index);
99fa2984
AH
876 if (err < 0)
877 pr_err("Failed to write auxtrace index\n");
878 return err;
4025ea40
AH
879}
880
cf790516 881static int write_clockid(struct feat_fd *ff,
63503dba 882 struct evlist *evlist __maybe_unused)
cf790516
AB
883{
884 return do_write(ff, &ff->ph->env.clockid_res_ns,
885 sizeof(ff->ph->env.clockid_res_ns));
886}
887
258031c0 888static int write_dir_format(struct feat_fd *ff,
63503dba 889 struct evlist *evlist __maybe_unused)
258031c0
JO
890{
891 struct perf_session *session;
892 struct perf_data *data;
893
894 session = container_of(ff->ph, struct perf_session, header);
895 data = session->data;
896
897 if (WARN_ON(!perf_data__is_dir(data)))
898 return -1;
899
900 return do_write(ff, &data->dir.version, sizeof(data->dir.version));
901}
902
606f972b
SL
903#ifdef HAVE_LIBBPF_SUPPORT
904static int write_bpf_prog_info(struct feat_fd *ff,
63503dba 905 struct evlist *evlist __maybe_unused)
606f972b
SL
906{
907 struct perf_env *env = &ff->ph->env;
908 struct rb_root *root;
909 struct rb_node *next;
910 int ret;
911
912 down_read(&env->bpf_progs.lock);
913
914 ret = do_write(ff, &env->bpf_progs.infos_cnt,
915 sizeof(env->bpf_progs.infos_cnt));
916 if (ret < 0)
917 goto out;
918
919 root = &env->bpf_progs.infos;
920 next = rb_first(root);
921 while (next) {
922 struct bpf_prog_info_node *node;
923 size_t len;
924
925 node = rb_entry(next, struct bpf_prog_info_node, rb_node);
926 next = rb_next(&node->rb_node);
927 len = sizeof(struct bpf_prog_info_linear) +
928 node->info_linear->data_len;
929
930 /* before writing to file, translate address to offset */
931 bpf_program__bpil_addr_to_offs(node->info_linear);
932 ret = do_write(ff, node->info_linear, len);
933 /*
934 * translate back to address even when do_write() fails,
935 * so that this function never changes the data.
936 */
937 bpf_program__bpil_offs_to_addr(node->info_linear);
938 if (ret < 0)
939 goto out;
940 }
941out:
942 up_read(&env->bpf_progs.lock);
943 return ret;
944}
945#else // HAVE_LIBBPF_SUPPORT
946static int write_bpf_prog_info(struct feat_fd *ff __maybe_unused,
63503dba 947 struct evlist *evlist __maybe_unused)
606f972b
SL
948{
949 return 0;
950}
951#endif // HAVE_LIBBPF_SUPPORT
952
a70a1123 953static int write_bpf_btf(struct feat_fd *ff,
63503dba 954 struct evlist *evlist __maybe_unused)
a70a1123
SL
955{
956 struct perf_env *env = &ff->ph->env;
957 struct rb_root *root;
958 struct rb_node *next;
959 int ret;
960
961 down_read(&env->bpf_progs.lock);
962
963 ret = do_write(ff, &env->bpf_progs.btfs_cnt,
964 sizeof(env->bpf_progs.btfs_cnt));
965
966 if (ret < 0)
967 goto out;
968
969 root = &env->bpf_progs.btfs;
970 next = rb_first(root);
971 while (next) {
972 struct btf_node *node;
973
974 node = rb_entry(next, struct btf_node, rb_node);
975 next = rb_next(&node->rb_node);
976 ret = do_write(ff, &node->id,
977 sizeof(u32) * 2 + node->data_size);
978 if (ret < 0)
979 goto out;
980 }
981out:
982 up_read(&env->bpf_progs.lock);
983 return ret;
984}
985
720e98b5
JO
986static int cpu_cache_level__sort(const void *a, const void *b)
987{
988 struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
989 struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
990
991 return cache_a->level - cache_b->level;
992}
993
994static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
995{
996 if (a->level != b->level)
997 return false;
998
999 if (a->line_size != b->line_size)
1000 return false;
1001
1002 if (a->sets != b->sets)
1003 return false;
1004
1005 if (a->ways != b->ways)
1006 return false;
1007
1008 if (strcmp(a->type, b->type))
1009 return false;
1010
1011 if (strcmp(a->size, b->size))
1012 return false;
1013
1014 if (strcmp(a->map, b->map))
1015 return false;
1016
1017 return true;
1018}
1019
1020static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
1021{
1022 char path[PATH_MAX], file[PATH_MAX];
1023 struct stat st;
1024 size_t len;
1025
1026 scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
1027 scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);
1028
1029 if (stat(file, &st))
1030 return 1;
1031
1032 scnprintf(file, PATH_MAX, "%s/level", path);
1033 if (sysfs__read_int(file, (int *) &cache->level))
1034 return -1;
1035
1036 scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
1037 if (sysfs__read_int(file, (int *) &cache->line_size))
1038 return -1;
1039
1040 scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
1041 if (sysfs__read_int(file, (int *) &cache->sets))
1042 return -1;
1043
1044 scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
1045 if (sysfs__read_int(file, (int *) &cache->ways))
1046 return -1;
1047
1048 scnprintf(file, PATH_MAX, "%s/type", path);
1049 if (sysfs__read_str(file, &cache->type, &len))
1050 return -1;
1051
1052 cache->type[len] = 0;
13c230ab 1053 cache->type = strim(cache->type);
720e98b5
JO
1054
1055 scnprintf(file, PATH_MAX, "%s/size", path);
1056 if (sysfs__read_str(file, &cache->size, &len)) {
d8f9da24 1057 zfree(&cache->type);
720e98b5
JO
1058 return -1;
1059 }
1060
1061 cache->size[len] = 0;
13c230ab 1062 cache->size = strim(cache->size);
720e98b5
JO
1063
1064 scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
1065 if (sysfs__read_str(file, &cache->map, &len)) {
d8f9da24
ACM
1066 zfree(&cache->map);
1067 zfree(&cache->type);
720e98b5
JO
1068 return -1;
1069 }
1070
1071 cache->map[len] = 0;
13c230ab 1072 cache->map = strim(cache->map);
720e98b5
JO
1073 return 0;
1074}
1075
1076static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
1077{
1078 fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
1079}
1080
1081static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
1082{
1083 u32 i, cnt = 0;
1084 long ncpus;
1085 u32 nr, cpu;
1086 u16 level;
1087
1088 ncpus = sysconf(_SC_NPROCESSORS_CONF);
1089 if (ncpus < 0)
1090 return -1;
1091
1092 nr = (u32)(ncpus & UINT_MAX);
1093
1094 for (cpu = 0; cpu < nr; cpu++) {
1095 for (level = 0; level < 10; level++) {
1096 struct cpu_cache_level c;
1097 int err;
1098
1099 err = cpu_cache_level__read(&c, cpu, level);
1100 if (err < 0)
1101 return err;
1102
1103 if (err == 1)
1104 break;
1105
1106 for (i = 0; i < cnt; i++) {
1107 if (cpu_cache_level__cmp(&c, &caches[i]))
1108 break;
1109 }
1110
1111 if (i == cnt)
1112 caches[cnt++] = c;
1113 else
1114 cpu_cache_level__free(&c);
1115
1116 if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
1117 goto out;
1118 }
1119 }
1120 out:
1121 *cntp = cnt;
1122 return 0;
1123}
1124
9f94c7f9 1125#define MAX_CACHES (MAX_NR_CPUS * 4)
720e98b5 1126
ccebbeb6 1127static int write_cache(struct feat_fd *ff,
63503dba 1128 struct evlist *evlist __maybe_unused)
720e98b5
JO
1129{
1130 struct cpu_cache_level caches[MAX_CACHES];
1131 u32 cnt = 0, i, version = 1;
1132 int ret;
1133
1134 ret = build_caches(caches, MAX_CACHES, &cnt);
1135 if (ret)
1136 goto out;
1137
1138 qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);
1139
ccebbeb6 1140 ret = do_write(ff, &version, sizeof(u32));
720e98b5
JO
1141 if (ret < 0)
1142 goto out;
1143
ccebbeb6 1144 ret = do_write(ff, &cnt, sizeof(u32));
720e98b5
JO
1145 if (ret < 0)
1146 goto out;
1147
1148 for (i = 0; i < cnt; i++) {
1149 struct cpu_cache_level *c = &caches[i];
1150
1151 #define _W(v) \
ccebbeb6 1152 ret = do_write(ff, &c->v, sizeof(u32)); \
720e98b5
JO
1153 if (ret < 0) \
1154 goto out;
1155
1156 _W(level)
1157 _W(line_size)
1158 _W(sets)
1159 _W(ways)
1160 #undef _W
1161
1162 #define _W(v) \
ccebbeb6 1163 ret = do_write_string(ff, (const char *) c->v); \
720e98b5
JO
1164 if (ret < 0) \
1165 goto out;
1166
1167 _W(type)
1168 _W(size)
1169 _W(map)
1170 #undef _W
1171 }
1172
1173out:
1174 for (i = 0; i < cnt; i++)
1175 cpu_cache_level__free(&caches[i]);
1176 return ret;
1177}
1178
ccebbeb6 1179static int write_stat(struct feat_fd *ff __maybe_unused,
63503dba 1180 struct evlist *evlist __maybe_unused)
ffa517ad
JO
1181{
1182 return 0;
1183}
1184
6011518d 1185static int write_sample_time(struct feat_fd *ff,
63503dba 1186 struct evlist *evlist)
6011518d
JY
1187{
1188 int ret;
1189
1190 ret = do_write(ff, &evlist->first_sample_time,
1191 sizeof(evlist->first_sample_time));
1192 if (ret < 0)
1193 return ret;
1194
1195 return do_write(ff, &evlist->last_sample_time,
1196 sizeof(evlist->last_sample_time));
1197}
1198
e2091ced
JO
1199
1200static int memory_node__read(struct memory_node *n, unsigned long idx)
1201{
1202 unsigned int phys, size = 0;
1203 char path[PATH_MAX];
1204 struct dirent *ent;
1205 DIR *dir;
1206
1207#define for_each_memory(mem, dir) \
1208 while ((ent = readdir(dir))) \
1209 if (strcmp(ent->d_name, ".") && \
1210 strcmp(ent->d_name, "..") && \
1211 sscanf(ent->d_name, "memory%u", &mem) == 1)
1212
1213 scnprintf(path, PATH_MAX,
1214 "%s/devices/system/node/node%lu",
1215 sysfs__mountpoint(), idx);
1216
1217 dir = opendir(path);
1218 if (!dir) {
1219 pr_warning("failed: cant' open memory sysfs data\n");
1220 return -1;
1221 }
1222
1223 for_each_memory(phys, dir) {
1224 size = max(phys, size);
1225 }
1226
1227 size++;
1228
1229 n->set = bitmap_alloc(size);
1230 if (!n->set) {
1231 closedir(dir);
1232 return -ENOMEM;
1233 }
1234
e2091ced
JO
1235 n->node = idx;
1236 n->size = size;
1237
1238 rewinddir(dir);
1239
1240 for_each_memory(phys, dir) {
1241 set_bit(phys, n->set);
1242 }
1243
1244 closedir(dir);
1245 return 0;
1246}
1247
1248static int memory_node__sort(const void *a, const void *b)
1249{
1250 const struct memory_node *na = a;
1251 const struct memory_node *nb = b;
1252
1253 return na->node - nb->node;
1254}
1255
1256static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp)
1257{
1258 char path[PATH_MAX];
1259 struct dirent *ent;
1260 DIR *dir;
1261 u64 cnt = 0;
1262 int ret = 0;
1263
1264 scnprintf(path, PATH_MAX, "%s/devices/system/node/",
1265 sysfs__mountpoint());
1266
1267 dir = opendir(path);
1268 if (!dir) {
4f75f1cb
TR
1269 pr_debug2("%s: could't read %s, does this arch have topology information?\n",
1270 __func__, path);
e2091ced
JO
1271 return -1;
1272 }
1273
1274 while (!ret && (ent = readdir(dir))) {
1275 unsigned int idx;
1276 int r;
1277
1278 if (!strcmp(ent->d_name, ".") ||
1279 !strcmp(ent->d_name, ".."))
1280 continue;
1281
1282 r = sscanf(ent->d_name, "node%u", &idx);
1283 if (r != 1)
1284 continue;
1285
1286 if (WARN_ONCE(cnt >= size,
1287 "failed to write MEM_TOPOLOGY, way too many nodes\n"))
1288 return -1;
1289
1290 ret = memory_node__read(&nodes[cnt++], idx);
1291 }
1292
1293 *cntp = cnt;
1294 closedir(dir);
1295
1296 if (!ret)
1297 qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort);
1298
1299 return ret;
1300}
1301
1302#define MAX_MEMORY_NODES 2000
1303
1304/*
1305 * The MEM_TOPOLOGY holds physical memory map for every
1306 * node in system. The format of data is as follows:
1307 *
1308 * 0 - version | for future changes
1309 * 8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
1310 * 16 - count | number of nodes
1311 *
1312 * For each node we store map of physical indexes for
1313 * each node:
1314 *
1315 * 32 - node id | node index
1316 * 40 - size | size of bitmap
1317 * 48 - bitmap | bitmap of memory indexes that belongs to node
1318 */
1319static int write_mem_topology(struct feat_fd *ff __maybe_unused,
63503dba 1320 struct evlist *evlist __maybe_unused)
e2091ced
JO
1321{
1322 static struct memory_node nodes[MAX_MEMORY_NODES];
1323 u64 bsize, version = 1, i, nr;
1324 int ret;
1325
1326 ret = sysfs__read_xll("devices/system/memory/block_size_bytes",
1327 (unsigned long long *) &bsize);
1328 if (ret)
1329 return ret;
1330
1331 ret = build_mem_topology(&nodes[0], MAX_MEMORY_NODES, &nr);
1332 if (ret)
1333 return ret;
1334
1335 ret = do_write(ff, &version, sizeof(version));
1336 if (ret < 0)
1337 goto out;
1338
1339 ret = do_write(ff, &bsize, sizeof(bsize));
1340 if (ret < 0)
1341 goto out;
1342
1343 ret = do_write(ff, &nr, sizeof(nr));
1344 if (ret < 0)
1345 goto out;
1346
1347 for (i = 0; i < nr; i++) {
1348 struct memory_node *n = &nodes[i];
1349
1350 #define _W(v) \
1351 ret = do_write(ff, &n->v, sizeof(n->v)); \
1352 if (ret < 0) \
1353 goto out;
1354
1355 _W(node)
1356 _W(size)
1357
1358 #undef _W
1359
1360 ret = do_write_bitmap(ff, n->set, n->size);
1361 if (ret < 0)
1362 goto out;
1363 }
1364
1365out:
1366 return ret;
1367}
1368
42e1fd80 1369static int write_compressed(struct feat_fd *ff __maybe_unused,
63503dba 1370 struct evlist *evlist __maybe_unused)
42e1fd80
AB
1371{
1372 int ret;
1373
1374 ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver));
1375 if (ret)
1376 return ret;
1377
1378 ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type));
1379 if (ret)
1380 return ret;
1381
1382 ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level));
1383 if (ret)
1384 return ret;
1385
1386 ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio));
1387 if (ret)
1388 return ret;
1389
1390 return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len));
1391}
1392
cfc65420 1393static void print_hostname(struct feat_fd *ff, FILE *fp)
fbe96f29 1394{
cfc65420 1395 fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
fbe96f29
SE
1396}
1397
cfc65420 1398static void print_osrelease(struct feat_fd *ff, FILE *fp)
fbe96f29 1399{
cfc65420 1400 fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
fbe96f29
SE
1401}
1402
cfc65420 1403static void print_arch(struct feat_fd *ff, FILE *fp)
fbe96f29 1404{
cfc65420 1405 fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
fbe96f29
SE
1406}
1407
cfc65420 1408static void print_cpudesc(struct feat_fd *ff, FILE *fp)
fbe96f29 1409{
cfc65420 1410 fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
fbe96f29
SE
1411}
1412
cfc65420 1413static void print_nrcpus(struct feat_fd *ff, FILE *fp)
fbe96f29 1414{
cfc65420
DCC
1415 fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
1416 fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
fbe96f29
SE
1417}
1418
cfc65420 1419static void print_version(struct feat_fd *ff, FILE *fp)
fbe96f29 1420{
cfc65420 1421 fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
fbe96f29
SE
1422}
1423
cfc65420 1424static void print_cmdline(struct feat_fd *ff, FILE *fp)
fbe96f29 1425{
7e94cfcc 1426 int nr, i;
fbe96f29 1427
cfc65420 1428 nr = ff->ph->env.nr_cmdline;
fbe96f29
SE
1429
1430 fprintf(fp, "# cmdline : ");
1431
f92da712
AB
1432 for (i = 0; i < nr; i++) {
1433 char *argv_i = strdup(ff->ph->env.cmdline_argv[i]);
1434 if (!argv_i) {
1435 fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
1436 } else {
1437 char *mem = argv_i;
1438 do {
1439 char *quote = strchr(argv_i, '\'');
1440 if (!quote)
1441 break;
1442 *quote++ = '\0';
1443 fprintf(fp, "%s\\\'", argv_i);
1444 argv_i = quote;
1445 } while (1);
1446 fprintf(fp, "%s ", argv_i);
1447 free(mem);
1448 }
1449 }
fbe96f29
SE
1450 fputc('\n', fp);
1451}
1452
cfc65420 1453static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
fbe96f29 1454{
cfc65420
DCC
1455 struct perf_header *ph = ff->ph;
1456 int cpu_nr = ph->env.nr_cpus_avail;
7e94cfcc 1457 int nr, i;
fbe96f29
SE
1458 char *str;
1459
7e94cfcc
NK
1460 nr = ph->env.nr_sibling_cores;
1461 str = ph->env.sibling_cores;
fbe96f29
SE
1462
1463 for (i = 0; i < nr; i++) {
e05a8997 1464 fprintf(fp, "# sibling sockets : %s\n", str);
7e94cfcc 1465 str += strlen(str) + 1;
fbe96f29
SE
1466 }
1467
acae8b36
KL
1468 if (ph->env.nr_sibling_dies) {
1469 nr = ph->env.nr_sibling_dies;
1470 str = ph->env.sibling_dies;
1471
1472 for (i = 0; i < nr; i++) {
1473 fprintf(fp, "# sibling dies : %s\n", str);
1474 str += strlen(str) + 1;
1475 }
1476 }
1477
7e94cfcc
NK
1478 nr = ph->env.nr_sibling_threads;
1479 str = ph->env.sibling_threads;
fbe96f29
SE
1480
1481 for (i = 0; i < nr; i++) {
fbe96f29 1482 fprintf(fp, "# sibling threads : %s\n", str);
7e94cfcc 1483 str += strlen(str) + 1;
fbe96f29 1484 }
2bb00d2f 1485
acae8b36
KL
1486 if (ph->env.nr_sibling_dies) {
1487 if (ph->env.cpu != NULL) {
1488 for (i = 0; i < cpu_nr; i++)
1489 fprintf(fp, "# CPU %d: Core ID %d, "
1490 "Die ID %d, Socket ID %d\n",
1491 i, ph->env.cpu[i].core_id,
1492 ph->env.cpu[i].die_id,
1493 ph->env.cpu[i].socket_id);
1494 } else
1495 fprintf(fp, "# Core ID, Die ID and Socket ID "
1496 "information is not available\n");
1497 } else {
1498 if (ph->env.cpu != NULL) {
1499 for (i = 0; i < cpu_nr; i++)
1500 fprintf(fp, "# CPU %d: Core ID %d, "
1501 "Socket ID %d\n",
1502 i, ph->env.cpu[i].core_id,
1503 ph->env.cpu[i].socket_id);
1504 } else
1505 fprintf(fp, "# Core ID and Socket ID "
1506 "information is not available\n");
1507 }
fbe96f29
SE
1508}
1509
cf790516
AB
1510static void print_clockid(struct feat_fd *ff, FILE *fp)
1511{
1512 fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n",
1513 ff->ph->env.clockid_res_ns * 1000);
1514}
1515
258031c0
JO
1516static void print_dir_format(struct feat_fd *ff, FILE *fp)
1517{
1518 struct perf_session *session;
1519 struct perf_data *data;
1520
1521 session = container_of(ff->ph, struct perf_session, header);
1522 data = session->data;
1523
1524 fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
1525}
1526
606f972b
SL
1527static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
1528{
1529 struct perf_env *env = &ff->ph->env;
1530 struct rb_root *root;
1531 struct rb_node *next;
1532
1533 down_read(&env->bpf_progs.lock);
1534
1535 root = &env->bpf_progs.infos;
1536 next = rb_first(root);
1537
1538 while (next) {
1539 struct bpf_prog_info_node *node;
1540
1541 node = rb_entry(next, struct bpf_prog_info_node, rb_node);
1542 next = rb_next(&node->rb_node);
f8dfeae0
SL
1543
1544 bpf_event__print_bpf_prog_info(&node->info_linear->info,
1545 env, fp);
606f972b
SL
1546 }
1547
1548 up_read(&env->bpf_progs.lock);
1549}
1550
a70a1123
SL
1551static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
1552{
1553 struct perf_env *env = &ff->ph->env;
1554 struct rb_root *root;
1555 struct rb_node *next;
1556
1557 down_read(&env->bpf_progs.lock);
1558
1559 root = &env->bpf_progs.btfs;
1560 next = rb_first(root);
1561
1562 while (next) {
1563 struct btf_node *node;
1564
1565 node = rb_entry(next, struct btf_node, rb_node);
1566 next = rb_next(&node->rb_node);
1567 fprintf(fp, "# btf info of id %u\n", node->id);
1568 }
1569
1570 up_read(&env->bpf_progs.lock);
1571}
1572
32dcd021 1573static void free_event_desc(struct evsel *events)
fbe96f29 1574{
32dcd021 1575 struct evsel *evsel;
4e1b9c67
RR
1576
1577 if (!events)
1578 return;
1579
1fc632ce 1580 for (evsel = events; evsel->core.attr.size; evsel++) {
74cf249d
ACM
1581 zfree(&evsel->name);
1582 zfree(&evsel->id);
4e1b9c67
RR
1583 }
1584
1585 free(events);
1586}
1587
32dcd021 1588static struct evsel *read_event_desc(struct feat_fd *ff)
4e1b9c67 1589{
32dcd021 1590 struct evsel *evsel, *events = NULL;
4e1b9c67 1591 u64 *id;
fbe96f29 1592 void *buf = NULL;
62db9068 1593 u32 nre, sz, nr, i, j;
62db9068 1594 size_t msz;
fbe96f29
SE
1595
1596 /* number of events */
48e5fcea 1597 if (do_read_u32(ff, &nre))
fbe96f29
SE
1598 goto error;
1599
48e5fcea 1600 if (do_read_u32(ff, &sz))
fbe96f29
SE
1601 goto error;
1602
62db9068 1603 /* buffer to hold on file attr struct */
fbe96f29
SE
1604 buf = malloc(sz);
1605 if (!buf)
1606 goto error;
1607
1fc632ce 1608 /* the last event terminates with evsel->core.attr.size == 0: */
4e1b9c67
RR
1609 events = calloc(nre + 1, sizeof(*events));
1610 if (!events)
1611 goto error;
1612
1fc632ce 1613 msz = sizeof(evsel->core.attr);
9fafd98f 1614 if (sz < msz)
fbe96f29
SE
1615 msz = sz;
1616
4e1b9c67
RR
1617 for (i = 0, evsel = events; i < nre; evsel++, i++) {
1618 evsel->idx = i;
fbe96f29 1619
62db9068
SE
1620 /*
1621 * must read entire on-file attr struct to
1622 * sync up with layout.
1623 */
48e5fcea 1624 if (__do_read(ff, buf, sz))
fbe96f29
SE
1625 goto error;
1626
48e5fcea 1627 if (ff->ph->needs_swap)
fbe96f29
SE
1628 perf_event__attr_swap(buf);
1629
1fc632ce 1630 memcpy(&evsel->core.attr, buf, msz);
fbe96f29 1631
48e5fcea 1632 if (do_read_u32(ff, &nr))
fbe96f29
SE
1633 goto error;
1634
48e5fcea 1635 if (ff->ph->needs_swap)
0807d2d8 1636 evsel->needs_swap = true;
fbe96f29 1637
48e5fcea 1638 evsel->name = do_read_string(ff);
6200e494
DCC
1639 if (!evsel->name)
1640 goto error;
4e1b9c67
RR
1641
1642 if (!nr)
1643 continue;
1644
1645 id = calloc(nr, sizeof(*id));
1646 if (!id)
1647 goto error;
1648 evsel->ids = nr;
1649 evsel->id = id;
1650
1651 for (j = 0 ; j < nr; j++) {
48e5fcea 1652 if (do_read_u64(ff, id))
4e1b9c67 1653 goto error;
4e1b9c67
RR
1654 id++;
1655 }
1656 }
1657out:
04662523 1658 free(buf);
4e1b9c67
RR
1659 return events;
1660error:
4cc97614 1661 free_event_desc(events);
4e1b9c67
RR
1662 events = NULL;
1663 goto out;
1664}
1665
2c5e8c52 1666static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
0353631a 1667 void *priv __maybe_unused)
2c5e8c52
PZ
1668{
1669 return fprintf(fp, ", %s = %s", name, val);
1670}
1671
cfc65420 1672static void print_event_desc(struct feat_fd *ff, FILE *fp)
4e1b9c67 1673{
32dcd021 1674 struct evsel *evsel, *events;
4e1b9c67
RR
1675 u32 j;
1676 u64 *id;
1677
f9ebdccf
DCC
1678 if (ff->events)
1679 events = ff->events;
1680 else
1681 events = read_event_desc(ff);
1682
4e1b9c67
RR
1683 if (!events) {
1684 fprintf(fp, "# event desc: not available or unable to read\n");
1685 return;
1686 }
1687
1fc632ce 1688 for (evsel = events; evsel->core.attr.size; evsel++) {
4e1b9c67 1689 fprintf(fp, "# event : name = %s, ", evsel->name);
fbe96f29 1690
4e1b9c67 1691 if (evsel->ids) {
fbe96f29 1692 fprintf(fp, ", id = {");
4e1b9c67
RR
1693 for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
1694 if (j)
1695 fputc(',', fp);
1696 fprintf(fp, " %"PRIu64, *id);
1697 }
fbe96f29 1698 fprintf(fp, " }");
4e1b9c67 1699 }
814c8c38 1700
1fc632ce 1701 perf_event_attr__fprintf(fp, &evsel->core.attr, __desc_attr__fprintf, NULL);
4e1b9c67 1702
fbe96f29
SE
1703 fputc('\n', fp);
1704 }
4e1b9c67
RR
1705
1706 free_event_desc(events);
f9ebdccf 1707 ff->events = NULL;
fbe96f29
SE
1708}
1709
cfc65420 1710static void print_total_mem(struct feat_fd *ff, FILE *fp)
fbe96f29 1711{
cfc65420 1712 fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
fbe96f29
SE
1713}
1714
cfc65420 1715static void print_numa_topology(struct feat_fd *ff, FILE *fp)
fbe96f29 1716{
c60da22a
JO
1717 int i;
1718 struct numa_node *n;
fbe96f29 1719
cfc65420
DCC
1720 for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
1721 n = &ff->ph->env.numa_nodes[i];
fbe96f29 1722
fbe96f29
SE
1723 fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
1724 " free = %"PRIu64" kB\n",
c60da22a 1725 n->node, n->mem_total, n->mem_free);
1234471e 1726
c60da22a
JO
1727 fprintf(fp, "# node%u cpu list : ", n->node);
1728 cpu_map__fprintf(n->map, fp);
fbe96f29 1729 }
fbe96f29
SE
1730}
1731
cfc65420 1732static void print_cpuid(struct feat_fd *ff, FILE *fp)
fbe96f29 1733{
cfc65420 1734 fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
fbe96f29
SE
1735}
1736
cfc65420 1737static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
330aa675
SE
1738{
1739 fprintf(fp, "# contains samples with branch stack\n");
1740}
1741
cfc65420 1742static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
4025ea40
AH
1743{
1744 fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
1745}
1746
cfc65420 1747static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
ffa517ad
JO
1748{
1749 fprintf(fp, "# contains stat data\n");
1750}
1751
cfc65420 1752static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused)
720e98b5
JO
1753{
1754 int i;
1755
1756 fprintf(fp, "# CPU cache info:\n");
cfc65420 1757 for (i = 0; i < ff->ph->env.caches_cnt; i++) {
720e98b5 1758 fprintf(fp, "# ");
cfc65420 1759 cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
720e98b5
JO
1760 }
1761}
1762
42e1fd80
AB
1763static void print_compressed(struct feat_fd *ff, FILE *fp)
1764{
1765 fprintf(fp, "# compressed : %s, level = %d, ratio = %d\n",
1766 ff->ph->env.comp_type == PERF_COMP_ZSTD ? "Zstd" : "Unknown",
1767 ff->ph->env.comp_level, ff->ph->env.comp_ratio);
1768}
1769
cfc65420 1770static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
50a9667c
RR
1771{
1772 const char *delimiter = "# pmu mappings: ";
7e94cfcc 1773 char *str, *tmp;
50a9667c
RR
1774 u32 pmu_num;
1775 u32 type;
1776
cfc65420 1777 pmu_num = ff->ph->env.nr_pmu_mappings;
50a9667c
RR
1778 if (!pmu_num) {
1779 fprintf(fp, "# pmu mappings: not available\n");
1780 return;
1781 }
1782
cfc65420 1783 str = ff->ph->env.pmu_mappings;
7e94cfcc 1784
50a9667c 1785 while (pmu_num) {
7e94cfcc
NK
1786 type = strtoul(str, &tmp, 0);
1787 if (*tmp != ':')
1788 goto error;
1789
1790 str = tmp + 1;
1791 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
be4a2ded 1792
50a9667c 1793 delimiter = ", ";
7e94cfcc
NK
1794 str += strlen(str) + 1;
1795 pmu_num--;
50a9667c
RR
1796 }
1797
1798 fprintf(fp, "\n");
1799
1800 if (!pmu_num)
1801 return;
1802error:
1803 fprintf(fp, "# pmu mappings: unable to read\n");
1804}
1805
cfc65420 1806static void print_group_desc(struct feat_fd *ff, FILE *fp)
a8bb559b
NK
1807{
1808 struct perf_session *session;
32dcd021 1809 struct evsel *evsel;
a8bb559b
NK
1810 u32 nr = 0;
1811
cfc65420 1812 session = container_of(ff->ph, struct perf_session, header);
a8bb559b 1813
e5cadb93 1814 evlist__for_each_entry(session->evlist, evsel) {
a8bb559b 1815 if (perf_evsel__is_group_leader(evsel) &&
5643b1a5 1816 evsel->core.nr_members > 1) {
a8bb559b
NK
1817 fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
1818 perf_evsel__name(evsel));
1819
5643b1a5 1820 nr = evsel->core.nr_members - 1;
a8bb559b
NK
1821 } else if (nr) {
1822 fprintf(fp, ",%s", perf_evsel__name(evsel));
1823
1824 if (--nr == 0)
1825 fprintf(fp, "}\n");
1826 }
1827 }
1828}
1829
6011518d
JY
1830static void print_sample_time(struct feat_fd *ff, FILE *fp)
1831{
1832 struct perf_session *session;
1833 char time_buf[32];
1834 double d;
1835
1836 session = container_of(ff->ph, struct perf_session, header);
1837
1838 timestamp__scnprintf_usec(session->evlist->first_sample_time,
1839 time_buf, sizeof(time_buf));
1840 fprintf(fp, "# time of first sample : %s\n", time_buf);
1841
1842 timestamp__scnprintf_usec(session->evlist->last_sample_time,
1843 time_buf, sizeof(time_buf));
1844 fprintf(fp, "# time of last sample : %s\n", time_buf);
1845
1846 d = (double)(session->evlist->last_sample_time -
1847 session->evlist->first_sample_time) / NSEC_PER_MSEC;
1848
1849 fprintf(fp, "# sample duration : %10.3f ms\n", d);
1850}
1851
e2091ced
JO
1852static void memory_node__fprintf(struct memory_node *n,
1853 unsigned long long bsize, FILE *fp)
1854{
1855 char buf_map[100], buf_size[50];
1856 unsigned long long size;
1857
1858 size = bsize * bitmap_weight(n->set, n->size);
1859 unit_number__scnprintf(buf_size, 50, size);
1860
1861 bitmap_scnprintf(n->set, n->size, buf_map, 100);
1862 fprintf(fp, "# %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map);
1863}
1864
1865static void print_mem_topology(struct feat_fd *ff, FILE *fp)
1866{
1867 struct memory_node *nodes;
1868 int i, nr;
1869
1870 nodes = ff->ph->env.memory_nodes;
1871 nr = ff->ph->env.nr_memory_nodes;
1872
1873 fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n",
1874 nr, ff->ph->env.memory_bsize);
1875
1876 for (i = 0; i < nr; i++) {
1877 memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp);
1878 }
1879}
1880
08d95bd2
RR
1881static int __event_process_build_id(struct build_id_event *bev,
1882 char *filename,
1883 struct perf_session *session)
1884{
1885 int err = -1;
08d95bd2 1886 struct machine *machine;
1f121b03 1887 u16 cpumode;
08d95bd2
RR
1888 struct dso *dso;
1889 enum dso_kernel_type dso_type;
1890
1891 machine = perf_session__findnew_machine(session, bev->pid);
1892 if (!machine)
1893 goto out;
1894
1f121b03 1895 cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
08d95bd2 1896
1f121b03 1897 switch (cpumode) {
08d95bd2
RR
1898 case PERF_RECORD_MISC_KERNEL:
1899 dso_type = DSO_TYPE_KERNEL;
08d95bd2
RR
1900 break;
1901 case PERF_RECORD_MISC_GUEST_KERNEL:
1902 dso_type = DSO_TYPE_GUEST_KERNEL;
08d95bd2
RR
1903 break;
1904 case PERF_RECORD_MISC_USER:
1905 case PERF_RECORD_MISC_GUEST_USER:
1906 dso_type = DSO_TYPE_USER;
08d95bd2
RR
1907 break;
1908 default:
1909 goto out;
1910 }
1911
aa7cc2ae 1912 dso = machine__findnew_dso(machine, filename);
08d95bd2 1913 if (dso != NULL) {
b5d8bbe8 1914 char sbuild_id[SBUILD_ID_SIZE];
08d95bd2
RR
1915
1916 dso__set_build_id(dso, &bev->build_id);
1917
1deec1bd
NK
1918 if (dso_type != DSO_TYPE_USER) {
1919 struct kmod_path m = { .name = NULL, };
1920
1921 if (!kmod_path__parse_name(&m, filename) && m.kmod)
6b335e8f 1922 dso__set_module_info(dso, &m, machine);
1deec1bd
NK
1923 else
1924 dso->kernel = dso_type;
1925
1926 free(m.name);
1927 }
08d95bd2
RR
1928
1929 build_id__sprintf(dso->build_id, sizeof(dso->build_id),
1930 sbuild_id);
1931 pr_debug("build id event received for %s: %s\n",
1932 dso->long_name, sbuild_id);
d3a7c489 1933 dso__put(dso);
08d95bd2
RR
1934 }
1935
1936 err = 0;
1937out:
1938 return err;
1939}
1940
1941static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
1942 int input, u64 offset, u64 size)
1943{
1944 struct perf_session *session = container_of(header, struct perf_session, header);
1945 struct {
1946 struct perf_event_header header;
9ac3e487 1947 u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
08d95bd2
RR
1948 char filename[0];
1949 } old_bev;
1950 struct build_id_event bev;
1951 char filename[PATH_MAX];
1952 u64 limit = offset + size;
1953
1954 while (offset < limit) {
1955 ssize_t len;
1956
5323f60c 1957 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
08d95bd2
RR
1958 return -1;
1959
1960 if (header->needs_swap)
1961 perf_event_header__bswap(&old_bev.header);
1962
1963 len = old_bev.header.size - sizeof(old_bev);
5323f60c 1964 if (readn(input, filename, len) != len)
08d95bd2
RR
1965 return -1;
1966
1967 bev.header = old_bev.header;
1968
1969 /*
1970 * As the pid is the missing value, we need to fill
1971 * it properly. The header.misc value give us nice hint.
1972 */
1973 bev.pid = HOST_KERNEL_ID;
1974 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
1975 bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
1976 bev.pid = DEFAULT_GUEST_KERNEL_ID;
1977
1978 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
1979 __event_process_build_id(&bev, filename, session);
1980
1981 offset += bev.header.size;
1982 }
1983
1984 return 0;
1985}
1986
1987static int perf_header__read_build_ids(struct perf_header *header,
1988 int input, u64 offset, u64 size)
1989{
1990 struct perf_session *session = container_of(header, struct perf_session, header);
1991 struct build_id_event bev;
1992 char filename[PATH_MAX];
1993 u64 limit = offset + size, orig_offset = offset;
1994 int err = -1;
1995
1996 while (offset < limit) {
1997 ssize_t len;
1998
5323f60c 1999 if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
08d95bd2
RR
2000 goto out;
2001
2002 if (header->needs_swap)
2003 perf_event_header__bswap(&bev.header);
2004
2005 len = bev.header.size - sizeof(bev);
5323f60c 2006 if (readn(input, filename, len) != len)
08d95bd2
RR
2007 goto out;
2008 /*
2009 * The a1645ce1 changeset:
2010 *
2011 * "perf: 'perf kvm' tool for monitoring guest performance from host"
2012 *
2013 * Added a field to struct build_id_event that broke the file
2014 * format.
2015 *
2016 * Since the kernel build-id is the first entry, process the
2017 * table using the old format if the well known
2018 * '[kernel.kallsyms]' string for the kernel build-id has the
2019 * first 4 characters chopped off (where the pid_t sits).
2020 */
2021 if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
2022 if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
2023 return -1;
2024 return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
2025 }
2026
2027 __event_process_build_id(&bev, filename, session);
2028
2029 offset += bev.header.size;
2030 }
2031 err = 0;
2032out:
2033 return err;
2034}
2035
dfaa1580
DCC
2036/* Macro for features that simply need to read and store a string. */
2037#define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
62552457 2038static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
dfaa1580 2039{\
48e5fcea 2040 ff->ph->env.__feat_env = do_read_string(ff); \
1a222754 2041 return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
dfaa1580
DCC
2042}
2043
2044FEAT_PROCESS_STR_FUN(hostname, hostname);
2045FEAT_PROCESS_STR_FUN(osrelease, os_release);
2046FEAT_PROCESS_STR_FUN(version, version);
2047FEAT_PROCESS_STR_FUN(arch, arch);
2048FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc);
2049FEAT_PROCESS_STR_FUN(cpuid, cpuid);
2050
62552457 2051static int process_tracing_data(struct feat_fd *ff, void *data)
f1c67db7 2052{
1a222754
DCC
2053 ssize_t ret = trace_report(ff->fd, data, false);
2054
3dce2ce3 2055 return ret < 0 ? -1 : 0;
f1c67db7
RR
2056}
2057
62552457 2058static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
f1c67db7 2059{
62552457 2060 if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
f1c67db7
RR
2061 pr_debug("Failed to read buildids, continuing...\n");
2062 return 0;
2063}
2064
62552457 2065static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
a1ae5655 2066{
6200e494
DCC
2067 int ret;
2068 u32 nr_cpus_avail, nr_cpus_online;
a1ae5655 2069
48e5fcea 2070 ret = do_read_u32(ff, &nr_cpus_avail);
6200e494
DCC
2071 if (ret)
2072 return ret;
a1ae5655 2073
48e5fcea 2074 ret = do_read_u32(ff, &nr_cpus_online);
6200e494
DCC
2075 if (ret)
2076 return ret;
1a222754
DCC
2077 ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail;
2078 ff->ph->env.nr_cpus_online = (int)nr_cpus_online;
a1ae5655
NK
2079 return 0;
2080}
2081
62552457 2082static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
a1ae5655 2083{
6200e494
DCC
2084 u64 total_mem;
2085 int ret;
a1ae5655 2086
48e5fcea 2087 ret = do_read_u64(ff, &total_mem);
6200e494 2088 if (ret)
a1ae5655 2089 return -1;
1a222754 2090 ff->ph->env.total_mem = (unsigned long long)total_mem;
a1ae5655
NK
2091 return 0;
2092}
2093
32dcd021 2094static struct evsel *
63503dba 2095perf_evlist__find_by_index(struct evlist *evlist, int idx)
7c2f7afd 2096{
32dcd021 2097 struct evsel *evsel;
7c2f7afd 2098
e5cadb93 2099 evlist__for_each_entry(evlist, evsel) {
7c2f7afd
RR
2100 if (evsel->idx == idx)
2101 return evsel;
2102 }
2103
2104 return NULL;
2105}
2106
2107static void
63503dba 2108perf_evlist__set_event_name(struct evlist *evlist,
32dcd021 2109 struct evsel *event)
7c2f7afd 2110{
32dcd021 2111 struct evsel *evsel;
7c2f7afd
RR
2112
2113 if (!event->name)
2114 return;
2115
2116 evsel = perf_evlist__find_by_index(evlist, event->idx);
2117 if (!evsel)
2118 return;
2119
2120 if (evsel->name)
2121 return;
2122
2123 evsel->name = strdup(event->name);
2124}
2125
2126static int
62552457 2127process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
7c2f7afd 2128{
3d7eb86b 2129 struct perf_session *session;
32dcd021 2130 struct evsel *evsel, *events = read_event_desc(ff);
7c2f7afd
RR
2131
2132 if (!events)
2133 return 0;
2134
1a222754 2135 session = container_of(ff->ph, struct perf_session, header);
f9ebdccf 2136
8ceb41d7 2137 if (session->data->is_pipe) {
f9ebdccf
DCC
2138 /* Save events for reading later by print_event_desc,
2139 * since they can't be read again in pipe mode. */
2140 ff->events = events;
2141 }
2142
1fc632ce 2143 for (evsel = events; evsel->core.attr.size; evsel++)
7c2f7afd
RR
2144 perf_evlist__set_event_name(session->evlist, evsel);
2145
8ceb41d7 2146 if (!session->data->is_pipe)
f9ebdccf 2147 free_event_desc(events);
7c2f7afd
RR
2148
2149 return 0;
2150}
2151
62552457 2152static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
a1ae5655 2153{
768dd3f3
JO
2154 char *str, *cmdline = NULL, **argv = NULL;
2155 u32 nr, i, len = 0;
a1ae5655 2156
48e5fcea 2157 if (do_read_u32(ff, &nr))
a1ae5655
NK
2158 return -1;
2159
1a222754 2160 ff->ph->env.nr_cmdline = nr;
768dd3f3 2161
62552457 2162 cmdline = zalloc(ff->size + nr + 1);
768dd3f3
JO
2163 if (!cmdline)
2164 return -1;
2165
2166 argv = zalloc(sizeof(char *) * (nr + 1));
2167 if (!argv)
2168 goto error;
a1ae5655
NK
2169
2170 for (i = 0; i < nr; i++) {
48e5fcea 2171 str = do_read_string(ff);
a1ae5655
NK
2172 if (!str)
2173 goto error;
2174
768dd3f3
JO
2175 argv[i] = cmdline + len;
2176 memcpy(argv[i], str, strlen(str) + 1);
2177 len += strlen(str) + 1;
a1ae5655
NK
2178 free(str);
2179 }
1a222754
DCC
2180 ff->ph->env.cmdline = cmdline;
2181 ff->ph->env.cmdline_argv = (const char **) argv;
a1ae5655
NK
2182 return 0;
2183
2184error:
768dd3f3
JO
2185 free(argv);
2186 free(cmdline);
a1ae5655
NK
2187 return -1;
2188}
2189
62552457 2190static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
a1ae5655 2191{
a1ae5655
NK
2192 u32 nr, i;
2193 char *str;
2194 struct strbuf sb;
1a222754 2195 int cpu_nr = ff->ph->env.nr_cpus_avail;
2bb00d2f 2196 u64 size = 0;
1a222754 2197 struct perf_header *ph = ff->ph;
01766229 2198 bool do_core_id_test = true;
2bb00d2f
KL
2199
2200 ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
2201 if (!ph->env.cpu)
2202 return -1;
a1ae5655 2203
48e5fcea 2204 if (do_read_u32(ff, &nr))
2bb00d2f 2205 goto free_cpu;
a1ae5655 2206
a1ae5655 2207 ph->env.nr_sibling_cores = nr;
2bb00d2f 2208 size += sizeof(u32);
642aadaa
MH
2209 if (strbuf_init(&sb, 128) < 0)
2210 goto free_cpu;
a1ae5655
NK
2211
2212 for (i = 0; i < nr; i++) {
48e5fcea 2213 str = do_read_string(ff);
a1ae5655
NK
2214 if (!str)
2215 goto error;
2216
2217 /* include a NULL character at the end */
642aadaa
MH
2218 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2219 goto error;
2bb00d2f 2220 size += string_size(str);
a1ae5655
NK
2221 free(str);
2222 }
2223 ph->env.sibling_cores = strbuf_detach(&sb, NULL);
2224
48e5fcea 2225 if (do_read_u32(ff, &nr))
a1ae5655
NK
2226 return -1;
2227
a1ae5655 2228 ph->env.nr_sibling_threads = nr;
2bb00d2f 2229 size += sizeof(u32);
a1ae5655
NK
2230
2231 for (i = 0; i < nr; i++) {
48e5fcea 2232 str = do_read_string(ff);
a1ae5655
NK
2233 if (!str)
2234 goto error;
2235
2236 /* include a NULL character at the end */
642aadaa
MH
2237 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2238 goto error;
2bb00d2f 2239 size += string_size(str);
a1ae5655
NK
2240 free(str);
2241 }
2242 ph->env.sibling_threads = strbuf_detach(&sb, NULL);
2bb00d2f
KL
2243
2244 /*
2245 * The header may be from old perf,
2246 * which doesn't include core id and socket id information.
2247 */
62552457 2248 if (ff->size <= size) {
2bb00d2f
KL
2249 zfree(&ph->env.cpu);
2250 return 0;
2251 }
2252
01766229
TR
2253 /* On s390 the socket_id number is not related to the numbers of cpus.
2254 * The socket_id number might be higher than the numbers of cpus.
2255 * This depends on the configuration.
0a4d8fb2 2256 * AArch64 is the same.
01766229 2257 */
0a4d8fb2
TX
2258 if (ph->env.arch && (!strncmp(ph->env.arch, "s390", 4)
2259 || !strncmp(ph->env.arch, "aarch64", 7)))
01766229
TR
2260 do_core_id_test = false;
2261
2bb00d2f 2262 for (i = 0; i < (u32)cpu_nr; i++) {
48e5fcea 2263 if (do_read_u32(ff, &nr))
2bb00d2f
KL
2264 goto free_cpu;
2265
2bb00d2f 2266 ph->env.cpu[i].core_id = nr;
acae8b36 2267 size += sizeof(u32);
2bb00d2f 2268
48e5fcea 2269 if (do_read_u32(ff, &nr))
2bb00d2f
KL
2270 goto free_cpu;
2271
01766229 2272 if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
2bb00d2f
KL
2273 pr_debug("socket_id number is too big."
2274 "You may need to upgrade the perf tool.\n");
2275 goto free_cpu;
2276 }
2277
2278 ph->env.cpu[i].socket_id = nr;
acae8b36
KL
2279 size += sizeof(u32);
2280 }
2281
2282 /*
2283 * The header may be from old perf,
2284 * which doesn't include die information.
2285 */
2286 if (ff->size <= size)
2287 return 0;
2288
2289 if (do_read_u32(ff, &nr))
2290 return -1;
2291
2292 ph->env.nr_sibling_dies = nr;
2293 size += sizeof(u32);
2294
2295 for (i = 0; i < nr; i++) {
2296 str = do_read_string(ff);
2297 if (!str)
2298 goto error;
2299
2300 /* include a NULL character at the end */
2301 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2302 goto error;
2303 size += string_size(str);
2304 free(str);
2305 }
2306 ph->env.sibling_dies = strbuf_detach(&sb, NULL);
2307
2308 for (i = 0; i < (u32)cpu_nr; i++) {
2309 if (do_read_u32(ff, &nr))
2310 goto free_cpu;
2311
2312 ph->env.cpu[i].die_id = nr;
2bb00d2f
KL
2313 }
2314
a1ae5655
NK
2315 return 0;
2316
2317error:
2318 strbuf_release(&sb);
2bb00d2f
KL
2319free_cpu:
2320 zfree(&ph->env.cpu);
a1ae5655
NK
2321 return -1;
2322}
2323
62552457 2324static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
a1ae5655 2325{
c60da22a 2326 struct numa_node *nodes, *n;
c60da22a 2327 u32 nr, i;
a1ae5655 2328 char *str;
a1ae5655
NK
2329
2330 /* nr nodes */
48e5fcea 2331 if (do_read_u32(ff, &nr))
642aadaa 2332 return -1;
a1ae5655 2333
c60da22a
JO
2334 nodes = zalloc(sizeof(*nodes) * nr);
2335 if (!nodes)
2336 return -ENOMEM;
a1ae5655
NK
2337
2338 for (i = 0; i < nr; i++) {
c60da22a
JO
2339 n = &nodes[i];
2340
a1ae5655 2341 /* node number */
48e5fcea 2342 if (do_read_u32(ff, &n->node))
a1ae5655
NK
2343 goto error;
2344
48e5fcea 2345 if (do_read_u64(ff, &n->mem_total))
a1ae5655
NK
2346 goto error;
2347
48e5fcea 2348 if (do_read_u64(ff, &n->mem_free))
a1ae5655
NK
2349 goto error;
2350
48e5fcea 2351 str = do_read_string(ff);
a1ae5655
NK
2352 if (!str)
2353 goto error;
2354
9c3516d1 2355 n->map = perf_cpu_map__new(str);
c60da22a 2356 if (!n->map)
642aadaa 2357 goto error;
c60da22a 2358
a1ae5655
NK
2359 free(str);
2360 }
1a222754
DCC
2361 ff->ph->env.nr_numa_nodes = nr;
2362 ff->ph->env.numa_nodes = nodes;
a1ae5655
NK
2363 return 0;
2364
2365error:
c60da22a 2366 free(nodes);
a1ae5655
NK
2367 return -1;
2368}
2369
62552457 2370static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
a1ae5655 2371{
a1ae5655
NK
2372 char *name;
2373 u32 pmu_num;
2374 u32 type;
2375 struct strbuf sb;
2376
48e5fcea 2377 if (do_read_u32(ff, &pmu_num))
a1ae5655
NK
2378 return -1;
2379
a1ae5655
NK
2380 if (!pmu_num) {
2381 pr_debug("pmu mappings not available\n");
2382 return 0;
2383 }
2384
1a222754 2385 ff->ph->env.nr_pmu_mappings = pmu_num;
642aadaa
MH
2386 if (strbuf_init(&sb, 128) < 0)
2387 return -1;
a1ae5655
NK
2388
2389 while (pmu_num) {
48e5fcea 2390 if (do_read_u32(ff, &type))
a1ae5655 2391 goto error;
a1ae5655 2392
48e5fcea 2393 name = do_read_string(ff);
a1ae5655
NK
2394 if (!name)
2395 goto error;
2396
642aadaa
MH
2397 if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
2398 goto error;
a1ae5655 2399 /* include a NULL character at the end */
642aadaa
MH
2400 if (strbuf_add(&sb, "", 1) < 0)
2401 goto error;
a1ae5655 2402
e0838e02 2403 if (!strcmp(name, "msr"))
1a222754 2404 ff->ph->env.msr_pmu_type = type;
e0838e02 2405
a1ae5655
NK
2406 free(name);
2407 pmu_num--;
2408 }
1a222754 2409 ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
a1ae5655
NK
2410 return 0;
2411
2412error:
2413 strbuf_release(&sb);
2414 return -1;
2415}
2416
62552457 2417static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
a8bb559b
NK
2418{
2419 size_t ret = -1;
2420 u32 i, nr, nr_groups;
2421 struct perf_session *session;
32dcd021 2422 struct evsel *evsel, *leader = NULL;
a8bb559b
NK
2423 struct group_desc {
2424 char *name;
2425 u32 leader_idx;
2426 u32 nr_members;
2427 } *desc;
2428
48e5fcea 2429 if (do_read_u32(ff, &nr_groups))
a8bb559b
NK
2430 return -1;
2431
1a222754 2432 ff->ph->env.nr_groups = nr_groups;
a8bb559b
NK
2433 if (!nr_groups) {
2434 pr_debug("group desc not available\n");
2435 return 0;
2436 }
2437
2438 desc = calloc(nr_groups, sizeof(*desc));
2439 if (!desc)
2440 return -1;
2441
2442 for (i = 0; i < nr_groups; i++) {
48e5fcea 2443 desc[i].name = do_read_string(ff);
a8bb559b
NK
2444 if (!desc[i].name)
2445 goto out_free;
2446
48e5fcea 2447 if (do_read_u32(ff, &desc[i].leader_idx))
a8bb559b
NK
2448 goto out_free;
2449
48e5fcea 2450 if (do_read_u32(ff, &desc[i].nr_members))
a8bb559b 2451 goto out_free;
a8bb559b
NK
2452 }
2453
2454 /*
2455 * Rebuild group relationship based on the group_desc
2456 */
1a222754 2457 session = container_of(ff->ph, struct perf_session, header);
a8bb559b
NK
2458 session->evlist->nr_groups = nr_groups;
2459
2460 i = nr = 0;
e5cadb93 2461 evlist__for_each_entry(session->evlist, evsel) {
a8bb559b
NK
2462 if (evsel->idx == (int) desc[i].leader_idx) {
2463 evsel->leader = evsel;
2464 /* {anon_group} is a dummy name */
210e812f 2465 if (strcmp(desc[i].name, "{anon_group}")) {
a8bb559b 2466 evsel->group_name = desc[i].name;
210e812f
NK
2467 desc[i].name = NULL;
2468 }
5643b1a5 2469 evsel->core.nr_members = desc[i].nr_members;
a8bb559b
NK
2470
2471 if (i >= nr_groups || nr > 0) {
2472 pr_debug("invalid group desc\n");
2473 goto out_free;
2474 }
2475
2476 leader = evsel;
5643b1a5 2477 nr = evsel->core.nr_members - 1;
a8bb559b
NK
2478 i++;
2479 } else if (nr) {
2480 /* This is a group member */
2481 evsel->leader = leader;
2482
2483 nr--;
2484 }
2485 }
2486
2487 if (i != nr_groups || nr != 0) {
2488 pr_debug("invalid group desc\n");
2489 goto out_free;
2490 }
2491
2492 ret = 0;
2493out_free:
50a2740b 2494 for (i = 0; i < nr_groups; i++)
74cf249d 2495 zfree(&desc[i].name);
a8bb559b
NK
2496 free(desc);
2497
2498 return ret;
2499}
2500
62552457 2501static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
99fa2984
AH
2502{
2503 struct perf_session *session;
2504 int err;
2505
1a222754 2506 session = container_of(ff->ph, struct perf_session, header);
99fa2984 2507
62552457 2508 err = auxtrace_index__process(ff->fd, ff->size, session,
1a222754 2509 ff->ph->needs_swap);
99fa2984
AH
2510 if (err < 0)
2511 pr_err("Failed to process auxtrace index\n");
2512 return err;
2513}
2514
62552457 2515static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
720e98b5
JO
2516{
2517 struct cpu_cache_level *caches;
2518 u32 cnt, i, version;
2519
48e5fcea 2520 if (do_read_u32(ff, &version))
720e98b5
JO
2521 return -1;
2522
720e98b5
JO
2523 if (version != 1)
2524 return -1;
2525
48e5fcea 2526 if (do_read_u32(ff, &cnt))
720e98b5
JO
2527 return -1;
2528
720e98b5
JO
2529 caches = zalloc(sizeof(*caches) * cnt);
2530 if (!caches)
2531 return -1;
2532
2533 for (i = 0; i < cnt; i++) {
2534 struct cpu_cache_level c;
2535
2536 #define _R(v) \
48e5fcea 2537 if (do_read_u32(ff, &c.v))\
720e98b5 2538 goto out_free_caches; \
720e98b5
JO
2539
2540 _R(level)
2541 _R(line_size)
2542 _R(sets)
2543 _R(ways)
2544 #undef _R
2545
1a222754 2546 #define _R(v) \
48e5fcea 2547 c.v = do_read_string(ff); \
1a222754 2548 if (!c.v) \
720e98b5
JO
2549 goto out_free_caches;
2550
2551 _R(type)
2552 _R(size)
2553 _R(map)
2554 #undef _R
2555
2556 caches[i] = c;
2557 }
2558
1a222754
DCC
2559 ff->ph->env.caches = caches;
2560 ff->ph->env.caches_cnt = cnt;
720e98b5
JO
2561 return 0;
2562out_free_caches:
2563 free(caches);
2564 return -1;
2565}
2566
6011518d
JY
2567static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
2568{
2569 struct perf_session *session;
2570 u64 first_sample_time, last_sample_time;
2571 int ret;
2572
2573 session = container_of(ff->ph, struct perf_session, header);
2574
2575 ret = do_read_u64(ff, &first_sample_time);
2576 if (ret)
2577 return -1;
2578
2579 ret = do_read_u64(ff, &last_sample_time);
2580 if (ret)
2581 return -1;
2582
2583 session->evlist->first_sample_time = first_sample_time;
2584 session->evlist->last_sample_time = last_sample_time;
2585 return 0;
2586}
2587
e2091ced
JO
2588static int process_mem_topology(struct feat_fd *ff,
2589 void *data __maybe_unused)
2590{
2591 struct memory_node *nodes;
2592 u64 version, i, nr, bsize;
2593 int ret = -1;
2594
2595 if (do_read_u64(ff, &version))
2596 return -1;
2597
2598 if (version != 1)
2599 return -1;
2600
2601 if (do_read_u64(ff, &bsize))
2602 return -1;
2603
2604 if (do_read_u64(ff, &nr))
2605 return -1;
2606
2607 nodes = zalloc(sizeof(*nodes) * nr);
2608 if (!nodes)
2609 return -1;
2610
2611 for (i = 0; i < nr; i++) {
2612 struct memory_node n;
2613
2614 #define _R(v) \
2615 if (do_read_u64(ff, &n.v)) \
2616 goto out; \
2617
2618 _R(node)
2619 _R(size)
2620
2621 #undef _R
2622
2623 if (do_read_bitmap(ff, &n.set, &n.size))
2624 goto out;
2625
2626 nodes[i] = n;
2627 }
2628
2629 ff->ph->env.memory_bsize = bsize;
2630 ff->ph->env.memory_nodes = nodes;
2631 ff->ph->env.nr_memory_nodes = nr;
2632 ret = 0;
2633
2634out:
2635 if (ret)
2636 free(nodes);
2637 return ret;
2638}
2639
cf790516
AB
2640static int process_clockid(struct feat_fd *ff,
2641 void *data __maybe_unused)
2642{
2643 if (do_read_u64(ff, &ff->ph->env.clockid_res_ns))
2644 return -1;
2645
2646 return 0;
2647}
2648
258031c0
JO
2649static int process_dir_format(struct feat_fd *ff,
2650 void *_data __maybe_unused)
2651{
2652 struct perf_session *session;
2653 struct perf_data *data;
2654
2655 session = container_of(ff->ph, struct perf_session, header);
2656 data = session->data;
2657
2658 if (WARN_ON(!perf_data__is_dir(data)))
2659 return -1;
2660
2661 return do_read_u64(ff, &data->dir.version);
2662}
2663
606f972b
SL
2664#ifdef HAVE_LIBBPF_SUPPORT
2665static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
2666{
2667 struct bpf_prog_info_linear *info_linear;
2668 struct bpf_prog_info_node *info_node;
2669 struct perf_env *env = &ff->ph->env;
2670 u32 count, i;
2671 int err = -1;
2672
2673 if (ff->ph->needs_swap) {
2674 pr_warning("interpreting bpf_prog_info from systems with endianity is not yet supported\n");
2675 return 0;
2676 }
2677
2678 if (do_read_u32(ff, &count))
2679 return -1;
2680
2681 down_write(&env->bpf_progs.lock);
2682
2683 for (i = 0; i < count; ++i) {
2684 u32 info_len, data_len;
2685
2686 info_linear = NULL;
2687 info_node = NULL;
2688 if (do_read_u32(ff, &info_len))
2689 goto out;
2690 if (do_read_u32(ff, &data_len))
2691 goto out;
2692
2693 if (info_len > sizeof(struct bpf_prog_info)) {
2694 pr_warning("detected invalid bpf_prog_info\n");
2695 goto out;
2696 }
2697
2698 info_linear = malloc(sizeof(struct bpf_prog_info_linear) +
2699 data_len);
2700 if (!info_linear)
2701 goto out;
2702 info_linear->info_len = sizeof(struct bpf_prog_info);
2703 info_linear->data_len = data_len;
2704 if (do_read_u64(ff, (u64 *)(&info_linear->arrays)))
2705 goto out;
2706 if (__do_read(ff, &info_linear->info, info_len))
2707 goto out;
2708 if (info_len < sizeof(struct bpf_prog_info))
2709 memset(((void *)(&info_linear->info)) + info_len, 0,
2710 sizeof(struct bpf_prog_info) - info_len);
2711
2712 if (__do_read(ff, info_linear->data, data_len))
2713 goto out;
2714
2715 info_node = malloc(sizeof(struct bpf_prog_info_node));
2716 if (!info_node)
2717 goto out;
2718
2719 /* after reading from file, translate offset to address */
2720 bpf_program__bpil_offs_to_addr(info_linear);
2721 info_node->info_linear = info_linear;
2722 perf_env__insert_bpf_prog_info(env, info_node);
2723 }
2724
14c9b31a 2725 up_write(&env->bpf_progs.lock);
606f972b
SL
2726 return 0;
2727out:
2728 free(info_linear);
2729 free(info_node);
2730 up_write(&env->bpf_progs.lock);
2731 return err;
2732}
2733#else // HAVE_LIBBPF_SUPPORT
2734static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data __maybe_unused)
2735{
2736 return 0;
2737}
2738#endif // HAVE_LIBBPF_SUPPORT
2739
a70a1123
SL
2740static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
2741{
2742 struct perf_env *env = &ff->ph->env;
14c9b31a 2743 struct btf_node *node = NULL;
a70a1123 2744 u32 count, i;
14c9b31a 2745 int err = -1;
a70a1123
SL
2746
2747 if (ff->ph->needs_swap) {
2748 pr_warning("interpreting btf from systems with endianity is not yet supported\n");
2749 return 0;
2750 }
2751
2752 if (do_read_u32(ff, &count))
2753 return -1;
2754
2755 down_write(&env->bpf_progs.lock);
2756
2757 for (i = 0; i < count; ++i) {
a70a1123
SL
2758 u32 id, data_size;
2759
2760 if (do_read_u32(ff, &id))
14c9b31a 2761 goto out;
a70a1123 2762 if (do_read_u32(ff, &data_size))
14c9b31a 2763 goto out;
a70a1123
SL
2764
2765 node = malloc(sizeof(struct btf_node) + data_size);
2766 if (!node)
14c9b31a 2767 goto out;
a70a1123
SL
2768
2769 node->id = id;
2770 node->data_size = data_size;
2771
14c9b31a
GS
2772 if (__do_read(ff, node->data, data_size))
2773 goto out;
a70a1123
SL
2774
2775 perf_env__insert_btf(env, node);
14c9b31a 2776 node = NULL;
a70a1123
SL
2777 }
2778
14c9b31a
GS
2779 err = 0;
2780out:
a70a1123 2781 up_write(&env->bpf_progs.lock);
14c9b31a
GS
2782 free(node);
2783 return err;
a70a1123
SL
2784}
2785
42e1fd80
AB
2786static int process_compressed(struct feat_fd *ff,
2787 void *data __maybe_unused)
2788{
2789 if (do_read_u32(ff, &(ff->ph->env.comp_ver)))
2790 return -1;
2791
2792 if (do_read_u32(ff, &(ff->ph->env.comp_type)))
2793 return -1;
2794
2795 if (do_read_u32(ff, &(ff->ph->env.comp_level)))
2796 return -1;
2797
2798 if (do_read_u32(ff, &(ff->ph->env.comp_ratio)))
2799 return -1;
2800
2801 if (do_read_u32(ff, &(ff->ph->env.comp_mmap_len)))
2802 return -1;
2803
2804 return 0;
2805}
2806
fbe96f29 2807struct feature_ops {
63503dba 2808 int (*write)(struct feat_fd *ff, struct evlist *evlist);
cfc65420 2809 void (*print)(struct feat_fd *ff, FILE *fp);
62552457 2810 int (*process)(struct feat_fd *ff, void *data);
fbe96f29
SE
2811 const char *name;
2812 bool full_only;
a4d8c985 2813 bool synthesize;
fbe96f29
SE
2814};
2815
a4d8c985
DCC
2816#define FEAT_OPR(n, func, __full_only) \
2817 [HEADER_##n] = { \
2818 .name = __stringify(n), \
2819 .write = write_##func, \
2820 .print = print_##func, \
2821 .full_only = __full_only, \
2822 .process = process_##func, \
2823 .synthesize = true \
2824 }
2825
2826#define FEAT_OPN(n, func, __full_only) \
2827 [HEADER_##n] = { \
2828 .name = __stringify(n), \
2829 .write = write_##func, \
2830 .print = print_##func, \
2831 .full_only = __full_only, \
2832 .process = process_##func \
2833 }
8cdfa78a
RR
2834
2835/* feature_ops not implemented: */
2eeaaa09
SE
2836#define print_tracing_data NULL
2837#define print_build_id NULL
fbe96f29 2838
a4d8c985
DCC
2839#define process_branch_stack NULL
2840#define process_stat NULL
2841
2842
fbe96f29 2843static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
a4d8c985
DCC
2844 FEAT_OPN(TRACING_DATA, tracing_data, false),
2845 FEAT_OPN(BUILD_ID, build_id, false),
2846 FEAT_OPR(HOSTNAME, hostname, false),
2847 FEAT_OPR(OSRELEASE, osrelease, false),
2848 FEAT_OPR(VERSION, version, false),
2849 FEAT_OPR(ARCH, arch, false),
2850 FEAT_OPR(NRCPUS, nrcpus, false),
2851 FEAT_OPR(CPUDESC, cpudesc, false),
2852 FEAT_OPR(CPUID, cpuid, false),
2853 FEAT_OPR(TOTAL_MEM, total_mem, false),
2854 FEAT_OPR(EVENT_DESC, event_desc, false),
2855 FEAT_OPR(CMDLINE, cmdline, false),
2856 FEAT_OPR(CPU_TOPOLOGY, cpu_topology, true),
2857 FEAT_OPR(NUMA_TOPOLOGY, numa_topology, true),
2858 FEAT_OPN(BRANCH_STACK, branch_stack, false),
2859 FEAT_OPR(PMU_MAPPINGS, pmu_mappings, false),
e8fedff1 2860 FEAT_OPR(GROUP_DESC, group_desc, false),
a4d8c985
DCC
2861 FEAT_OPN(AUXTRACE, auxtrace, false),
2862 FEAT_OPN(STAT, stat, false),
2863 FEAT_OPN(CACHE, cache, true),
6011518d 2864 FEAT_OPR(SAMPLE_TIME, sample_time, false),
e2091ced 2865 FEAT_OPR(MEM_TOPOLOGY, mem_topology, true),
258031c0 2866 FEAT_OPR(CLOCKID, clockid, false),
606f972b 2867 FEAT_OPN(DIR_FORMAT, dir_format, false),
a70a1123
SL
2868 FEAT_OPR(BPF_PROG_INFO, bpf_prog_info, false),
2869 FEAT_OPR(BPF_BTF, bpf_btf, false),
42e1fd80 2870 FEAT_OPR(COMPRESSED, compressed, false),
fbe96f29
SE
2871};
2872
2873struct header_print_data {
2874 FILE *fp;
2875 bool full; /* extended list of headers */
2876};
2877
2878static int perf_file_section__fprintf_info(struct perf_file_section *section,
2879 struct perf_header *ph,
2880 int feat, int fd, void *data)
2881{
2882 struct header_print_data *hd = data;
cfc65420 2883 struct feat_fd ff;
fbe96f29
SE
2884
2885 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2886 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2887 "%d, continuing...\n", section->offset, feat);
2888 return 0;
2889 }
b1e5a9be 2890 if (feat >= HEADER_LAST_FEATURE) {
fbe96f29 2891 pr_warning("unknown feature %d\n", feat);
f7a8a133 2892 return 0;
fbe96f29
SE
2893 }
2894 if (!feat_ops[feat].print)
2895 return 0;
2896
cfc65420
DCC
2897 ff = (struct feat_fd) {
2898 .fd = fd,
2899 .ph = ph,
2900 };
2901
fbe96f29 2902 if (!feat_ops[feat].full_only || hd->full)
cfc65420 2903 feat_ops[feat].print(&ff, hd->fp);
fbe96f29
SE
2904 else
2905 fprintf(hd->fp, "# %s info available, use -I to display\n",
2906 feat_ops[feat].name);
2907
2908 return 0;
2909}
2910
2911int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
2912{
2913 struct header_print_data hd;
2914 struct perf_header *header = &session->header;
8ceb41d7 2915 int fd = perf_data__fd(session->data);
f45f5615 2916 struct stat st;
0afcf29b 2917 time_t stctime;
aabae165 2918 int ret, bit;
f45f5615 2919
fbe96f29
SE
2920 hd.fp = fp;
2921 hd.full = full;
2922
f45f5615
JO
2923 ret = fstat(fd, &st);
2924 if (ret == -1)
2925 return -1;
2926
0afcf29b
ACM
2927 stctime = st.st_ctime;
2928 fprintf(fp, "# captured on : %s", ctime(&stctime));
e971a5a8
JO
2929
2930 fprintf(fp, "# header version : %u\n", header->version);
2931 fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset);
2932 fprintf(fp, "# data size : %" PRIu64 "\n", header->data_size);
2933 fprintf(fp, "# feat offset : %" PRIu64 "\n", header->feat_offset);
f45f5615 2934
fbe96f29
SE
2935 perf_header__process_sections(header, fd, &hd,
2936 perf_file_section__fprintf_info);
aabae165 2937
8ceb41d7 2938 if (session->data->is_pipe)
c9d1c934
DCC
2939 return 0;
2940
aabae165
JO
2941 fprintf(fp, "# missing features: ");
2942 for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
2943 if (bit)
2944 fprintf(fp, "%s ", feat_ops[bit].name);
2945 }
2946
2947 fprintf(fp, "\n");
fbe96f29
SE
2948 return 0;
2949}
2950
ccebbeb6 2951static int do_write_feat(struct feat_fd *ff, int type,
fbe96f29 2952 struct perf_file_section **p,
63503dba 2953 struct evlist *evlist)
fbe96f29
SE
2954{
2955 int err;
2956 int ret = 0;
2957
ccebbeb6 2958 if (perf_header__has_feat(ff->ph, type)) {
b1e5a9be
RR
2959 if (!feat_ops[type].write)
2960 return -1;
fbe96f29 2961
0b3d3410
DCC
2962 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
2963 return -1;
2964
ccebbeb6 2965 (*p)->offset = lseek(ff->fd, 0, SEEK_CUR);
fbe96f29 2966
ccebbeb6 2967 err = feat_ops[type].write(ff, evlist);
fbe96f29 2968 if (err < 0) {
0c2aff4c 2969 pr_debug("failed to write feature %s\n", feat_ops[type].name);
fbe96f29
SE
2970
2971 /* undo anything written */
ccebbeb6 2972 lseek(ff->fd, (*p)->offset, SEEK_SET);
fbe96f29
SE
2973
2974 return -1;
2975 }
ccebbeb6 2976 (*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
fbe96f29
SE
2977 (*p)++;
2978 }
2979 return ret;
2980}
2981
1c0b04d1 2982static int perf_header__adds_write(struct perf_header *header,
63503dba 2983 struct evlist *evlist, int fd)
2ba08250 2984{
9e827dd0 2985 int nr_sections;
ccebbeb6 2986 struct feat_fd ff;
fbe96f29 2987 struct perf_file_section *feat_sec, *p;
9e827dd0
FW
2988 int sec_size;
2989 u64 sec_start;
b1e5a9be 2990 int feat;
fbe96f29 2991 int err;
9e827dd0 2992
ccebbeb6
DCC
2993 ff = (struct feat_fd){
2994 .fd = fd,
2995 .ph = header,
2996 };
2997
1c0b04d1 2998 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
9e827dd0 2999 if (!nr_sections)
d5eed904 3000 return 0;
9e827dd0 3001
91b98804 3002 feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
d5eed904
ACM
3003 if (feat_sec == NULL)
3004 return -ENOMEM;
9e827dd0
FW
3005
3006 sec_size = sizeof(*feat_sec) * nr_sections;
3007
8d541e97 3008 sec_start = header->feat_offset;
f887f301 3009 lseek(fd, sec_start + sec_size, SEEK_SET);
2ba08250 3010
b1e5a9be 3011 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
ccebbeb6 3012 if (do_write_feat(&ff, feat, &p, evlist))
b1e5a9be
RR
3013 perf_header__clear_feat(header, feat);
3014 }
9e827dd0 3015
f887f301 3016 lseek(fd, sec_start, SEEK_SET);
fbe96f29
SE
3017 /*
3018 * may write more than needed due to dropped feature, but
adba1634 3019 * this is okay, reader will skip the missing entries
fbe96f29 3020 */
ccebbeb6 3021 err = do_write(&ff, feat_sec, sec_size);
d5eed904
ACM
3022 if (err < 0)
3023 pr_debug("failed to write feature section\n");
9e827dd0 3024 free(feat_sec);
d5eed904 3025 return err;
9e827dd0 3026}
2ba08250 3027
8dc58101
TZ
3028int perf_header__write_pipe(int fd)
3029{
3030 struct perf_pipe_file_header f_header;
ccebbeb6 3031 struct feat_fd ff;
8dc58101
TZ
3032 int err;
3033
ccebbeb6
DCC
3034 ff = (struct feat_fd){ .fd = fd };
3035
8dc58101
TZ
3036 f_header = (struct perf_pipe_file_header){
3037 .magic = PERF_MAGIC,
3038 .size = sizeof(f_header),
3039 };
3040
ccebbeb6 3041 err = do_write(&ff, &f_header, sizeof(f_header));
8dc58101
TZ
3042 if (err < 0) {
3043 pr_debug("failed to write perf pipe header\n");
3044 return err;
3045 }
3046
3047 return 0;
3048}
3049
a91e5431 3050int perf_session__write_header(struct perf_session *session,
63503dba 3051 struct evlist *evlist,
a91e5431 3052 int fd, bool at_exit)
7c6a1c65
PZ
3053{
3054 struct perf_file_header f_header;
3055 struct perf_file_attr f_attr;
1c0b04d1 3056 struct perf_header *header = &session->header;
32dcd021 3057 struct evsel *evsel;
ccebbeb6 3058 struct feat_fd ff;
944d62ba 3059 u64 attr_offset;
a91e5431 3060 int err;
7c6a1c65 3061
ccebbeb6 3062 ff = (struct feat_fd){ .fd = fd};
7c6a1c65
PZ
3063 lseek(fd, sizeof(f_header), SEEK_SET);
3064
e5cadb93 3065 evlist__for_each_entry(session->evlist, evsel) {
6606f873 3066 evsel->id_offset = lseek(fd, 0, SEEK_CUR);
ccebbeb6 3067 err = do_write(&ff, evsel->id, evsel->ids * sizeof(u64));
d5eed904
ACM
3068 if (err < 0) {
3069 pr_debug("failed to write perf header\n");
3070 return err;
3071 }
7c6a1c65
PZ
3072 }
3073
ccebbeb6 3074 attr_offset = lseek(ff.fd, 0, SEEK_CUR);
7c6a1c65 3075
e5cadb93 3076 evlist__for_each_entry(evlist, evsel) {
7c6a1c65 3077 f_attr = (struct perf_file_attr){
1fc632ce 3078 .attr = evsel->core.attr,
7c6a1c65 3079 .ids = {
6606f873
RR
3080 .offset = evsel->id_offset,
3081 .size = evsel->ids * sizeof(u64),
7c6a1c65
PZ
3082 }
3083 };
ccebbeb6 3084 err = do_write(&ff, &f_attr, sizeof(f_attr));
d5eed904
ACM
3085 if (err < 0) {
3086 pr_debug("failed to write perf header attribute\n");
3087 return err;
3088 }
7c6a1c65
PZ
3089 }
3090
d645c442
AH
3091 if (!header->data_offset)
3092 header->data_offset = lseek(fd, 0, SEEK_CUR);
8d541e97 3093 header->feat_offset = header->data_offset + header->data_size;
7c6a1c65 3094
d5eed904 3095 if (at_exit) {
1c0b04d1 3096 err = perf_header__adds_write(header, evlist, fd);
d5eed904
ACM
3097 if (err < 0)
3098 return err;
3099 }
9e827dd0 3100
7c6a1c65
PZ
3101 f_header = (struct perf_file_header){
3102 .magic = PERF_MAGIC,
3103 .size = sizeof(f_header),
3104 .attr_size = sizeof(f_attr),
3105 .attrs = {
944d62ba 3106 .offset = attr_offset,
6484d2f9 3107 .size = evlist->core.nr_entries * sizeof(f_attr),
7c6a1c65
PZ
3108 },
3109 .data = {
1c0b04d1
ACM
3110 .offset = header->data_offset,
3111 .size = header->data_size,
7c6a1c65 3112 },
44b3c578 3113 /* event_types is ignored, store zeros */
7c6a1c65
PZ
3114 };
3115
1c0b04d1 3116 memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
2ba08250 3117
7c6a1c65 3118 lseek(fd, 0, SEEK_SET);
ccebbeb6 3119 err = do_write(&ff, &f_header, sizeof(f_header));
d5eed904
ACM
3120 if (err < 0) {
3121 pr_debug("failed to write perf header\n");
3122 return err;
3123 }
1c0b04d1 3124 lseek(fd, header->data_offset + header->data_size, SEEK_SET);
7c6a1c65 3125
d5eed904 3126 return 0;
7c6a1c65
PZ
3127}
3128
1c0b04d1 3129static int perf_header__getbuffer64(struct perf_header *header,
ba21594c
ACM
3130 int fd, void *buf, size_t size)
3131{
1e7972cc 3132 if (readn(fd, buf, size) <= 0)
ba21594c
ACM
3133 return -1;
3134
1c0b04d1 3135 if (header->needs_swap)
ba21594c
ACM
3136 mem_bswap_64(buf, size);
3137
3138 return 0;
3139}
3140
1c0b04d1 3141int perf_header__process_sections(struct perf_header *header, int fd,
fbe96f29 3142 void *data,
1c0b04d1 3143 int (*process)(struct perf_file_section *section,
b1e5a9be
RR
3144 struct perf_header *ph,
3145 int feat, int fd, void *data))
2ba08250 3146{
b1e5a9be 3147 struct perf_file_section *feat_sec, *sec;
9e827dd0
FW
3148 int nr_sections;
3149 int sec_size;
b1e5a9be
RR
3150 int feat;
3151 int err;
9e827dd0 3152
1c0b04d1 3153 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
9e827dd0 3154 if (!nr_sections)
37562eac 3155 return 0;
9e827dd0 3156
91b98804 3157 feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
9e827dd0 3158 if (!feat_sec)
37562eac 3159 return -1;
9e827dd0
FW
3160
3161 sec_size = sizeof(*feat_sec) * nr_sections;
3162
8d541e97 3163 lseek(fd, header->feat_offset, SEEK_SET);
9e827dd0 3164
b1e5a9be
RR
3165 err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
3166 if (err < 0)
769885f3 3167 goto out_free;
9e827dd0 3168
b1e5a9be
RR
3169 for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
3170 err = process(sec++, header, feat, fd, data);
3171 if (err < 0)
3172 goto out_free;
2ba08250 3173 }
b1e5a9be 3174 err = 0;
769885f3 3175out_free:
37562eac
ACM
3176 free(feat_sec);
3177 return err;
769885f3 3178}
4778d2e4 3179
114382a0
SE
3180static const int attr_file_abi_sizes[] = {
3181 [0] = PERF_ATTR_SIZE_VER0,
3182 [1] = PERF_ATTR_SIZE_VER1,
239cc478 3183 [2] = PERF_ATTR_SIZE_VER2,
0f6a3015 3184 [3] = PERF_ATTR_SIZE_VER3,
6a21c0b5 3185 [4] = PERF_ATTR_SIZE_VER4,
114382a0
SE
3186 0,
3187};
3188
3189/*
3190 * In the legacy file format, the magic number is not used to encode endianness.
3191 * hdr_sz was used to encode endianness. But given that hdr_sz can vary based
3192 * on ABI revisions, we need to try all combinations for all endianness to
3193 * detect the endianness.
3194 */
3195static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
73323f54 3196{
114382a0
SE
3197 uint64_t ref_size, attr_size;
3198 int i;
73323f54 3199
114382a0
SE
3200 for (i = 0 ; attr_file_abi_sizes[i]; i++) {
3201 ref_size = attr_file_abi_sizes[i]
3202 + sizeof(struct perf_file_section);
3203 if (hdr_sz != ref_size) {
3204 attr_size = bswap_64(hdr_sz);
3205 if (attr_size != ref_size)
3206 continue;
73323f54 3207
114382a0
SE
3208 ph->needs_swap = true;
3209 }
3210 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
3211 i,
3212 ph->needs_swap);
3213 return 0;
3214 }
3215 /* could not determine endianness */
3216 return -1;
3217}
73323f54 3218
114382a0
SE
3219#define PERF_PIPE_HDR_VER0 16
3220
3221static const size_t attr_pipe_abi_sizes[] = {
3222 [0] = PERF_PIPE_HDR_VER0,
3223 0,
3224};
3225
3226/*
3227 * In the legacy pipe format, there is an implicit assumption that endiannesss
3228 * between host recording the samples, and host parsing the samples is the
3229 * same. This is not always the case given that the pipe output may always be
3230 * redirected into a file and analyzed on a different machine with possibly a
3231 * different endianness and perf_event ABI revsions in the perf tool itself.
3232 */
3233static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
3234{
3235 u64 attr_size;
3236 int i;
3237
3238 for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
3239 if (hdr_sz != attr_pipe_abi_sizes[i]) {
3240 attr_size = bswap_64(hdr_sz);
3241 if (attr_size != hdr_sz)
3242 continue;
73323f54
SE
3243
3244 ph->needs_swap = true;
3245 }
114382a0 3246 pr_debug("Pipe ABI%d perf.data file detected\n", i);
73323f54
SE
3247 return 0;
3248 }
114382a0
SE
3249 return -1;
3250}
3251
e84ba4e2
FT
3252bool is_perf_magic(u64 magic)
3253{
3254 if (!memcmp(&magic, __perf_magic1, sizeof(magic))
3255 || magic == __perf_magic2
3256 || magic == __perf_magic2_sw)
3257 return true;
3258
3259 return false;
3260}
3261
114382a0
SE
3262static int check_magic_endian(u64 magic, uint64_t hdr_sz,
3263 bool is_pipe, struct perf_header *ph)
3264{
3265 int ret;
3266
3267 /* check for legacy format */
3268 ret = memcmp(&magic, __perf_magic1, sizeof(magic));
3269 if (ret == 0) {
2a08c3ec 3270 ph->version = PERF_HEADER_VERSION_1;
114382a0
SE
3271 pr_debug("legacy perf.data format\n");
3272 if (is_pipe)
3273 return try_all_pipe_abis(hdr_sz, ph);
3274
3275 return try_all_file_abis(hdr_sz, ph);
3276 }
3277 /*
3278 * the new magic number serves two purposes:
3279 * - unique number to identify actual perf.data files
3280 * - encode endianness of file
3281 */
f7913971 3282 ph->version = PERF_HEADER_VERSION_2;
73323f54 3283
114382a0
SE
3284 /* check magic number with one endianness */
3285 if (magic == __perf_magic2)
73323f54
SE
3286 return 0;
3287
114382a0
SE
3288 /* check magic number with opposite endianness */
3289 if (magic != __perf_magic2_sw)
73323f54
SE
3290 return -1;
3291
3292 ph->needs_swap = true;
3293
3294 return 0;
3295}
3296
1c0b04d1 3297int perf_file_header__read(struct perf_file_header *header,
37562eac
ACM
3298 struct perf_header *ph, int fd)
3299{
727ebd54 3300 ssize_t ret;
73323f54 3301
37562eac 3302 lseek(fd, 0, SEEK_SET);
37562eac 3303
73323f54
SE
3304 ret = readn(fd, header, sizeof(*header));
3305 if (ret <= 0)
37562eac
ACM
3306 return -1;
3307
114382a0
SE
3308 if (check_magic_endian(header->magic,
3309 header->attr_size, false, ph) < 0) {
3310 pr_debug("magic/endian check failed\n");
73323f54 3311 return -1;
114382a0 3312 }
ba21594c 3313
73323f54 3314 if (ph->needs_swap) {
1c0b04d1 3315 mem_bswap_64(header, offsetof(struct perf_file_header,
73323f54 3316 adds_features));
ba21594c
ACM
3317 }
3318
1c0b04d1 3319 if (header->size != sizeof(*header)) {
37562eac 3320 /* Support the previous format */
1c0b04d1
ACM
3321 if (header->size == offsetof(typeof(*header), adds_features))
3322 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
37562eac
ACM
3323 else
3324 return -1;
d327fa43 3325 } else if (ph->needs_swap) {
d327fa43
DA
3326 /*
3327 * feature bitmap is declared as an array of unsigned longs --
3328 * not good since its size can differ between the host that
3329 * generated the data file and the host analyzing the file.
3330 *
3331 * We need to handle endianness, but we don't know the size of
3332 * the unsigned long where the file was generated. Take a best
3333 * guess at determining it: try 64-bit swap first (ie., file
3334 * created on a 64-bit host), and check if the hostname feature
3335 * bit is set (this feature bit is forced on as of fbe96f2).
3336 * If the bit is not, undo the 64-bit swap and try a 32-bit
3337 * swap. If the hostname bit is still not set (e.g., older data
3338 * file), punt and fallback to the original behavior --
3339 * clearing all feature bits and setting buildid.
3340 */
80c0120a
DA
3341 mem_bswap_64(&header->adds_features,
3342 BITS_TO_U64(HEADER_FEAT_BITS));
d327fa43
DA
3343
3344 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
80c0120a
DA
3345 /* unswap as u64 */
3346 mem_bswap_64(&header->adds_features,
3347 BITS_TO_U64(HEADER_FEAT_BITS));
3348
3349 /* unswap as u32 */
3350 mem_bswap_32(&header->adds_features,
3351 BITS_TO_U32(HEADER_FEAT_BITS));
d327fa43
DA
3352 }
3353
3354 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
3355 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
3356 set_bit(HEADER_BUILD_ID, header->adds_features);
3357 }
4778d2e4 3358 }
9e827dd0 3359
1c0b04d1 3360 memcpy(&ph->adds_features, &header->adds_features,
ba21594c 3361 sizeof(ph->adds_features));
37562eac 3362
1c0b04d1
ACM
3363 ph->data_offset = header->data.offset;
3364 ph->data_size = header->data.size;
8d541e97 3365 ph->feat_offset = header->data.offset + header->data.size;
37562eac
ACM
3366 return 0;
3367}
3368
1c0b04d1 3369static int perf_file_section__process(struct perf_file_section *section,
ba21594c 3370 struct perf_header *ph,
da378962 3371 int feat, int fd, void *data)
37562eac 3372{
62552457 3373 struct feat_fd fdd = {
1a222754
DCC
3374 .fd = fd,
3375 .ph = ph,
62552457
DCC
3376 .size = section->size,
3377 .offset = section->offset,
1a222754
DCC
3378 };
3379
1c0b04d1 3380 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
9486aa38 3381 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
1c0b04d1 3382 "%d, continuing...\n", section->offset, feat);
37562eac
ACM
3383 return 0;
3384 }
3385
b1e5a9be
RR
3386 if (feat >= HEADER_LAST_FEATURE) {
3387 pr_debug("unknown feature %d, continuing...\n", feat);
3388 return 0;
3389 }
3390
f1c67db7
RR
3391 if (!feat_ops[feat].process)
3392 return 0;
37562eac 3393
62552457 3394 return feat_ops[feat].process(&fdd, data);
37562eac 3395}
2ba08250 3396
1c0b04d1 3397static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
454c407e
TZ
3398 struct perf_header *ph, int fd,
3399 bool repipe)
7c6a1c65 3400{
ccebbeb6
DCC
3401 struct feat_fd ff = {
3402 .fd = STDOUT_FILENO,
3403 .ph = ph,
3404 };
727ebd54 3405 ssize_t ret;
73323f54
SE
3406
3407 ret = readn(fd, header, sizeof(*header));
3408 if (ret <= 0)
3409 return -1;
3410
114382a0
SE
3411 if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
3412 pr_debug("endian/magic failed\n");
8dc58101 3413 return -1;
114382a0
SE
3414 }
3415
3416 if (ph->needs_swap)
3417 header->size = bswap_64(header->size);
8dc58101 3418
ccebbeb6 3419 if (repipe && do_write(&ff, header, sizeof(*header)) < 0)
454c407e
TZ
3420 return -1;
3421
8dc58101
TZ
3422 return 0;
3423}
3424
d4339569 3425static int perf_header__read_pipe(struct perf_session *session)
8dc58101 3426{
1c0b04d1 3427 struct perf_header *header = &session->header;
8dc58101
TZ
3428 struct perf_pipe_file_header f_header;
3429
cc9784bd 3430 if (perf_file_header__read_pipe(&f_header, header,
8ceb41d7 3431 perf_data__fd(session->data),
454c407e 3432 session->repipe) < 0) {
8dc58101
TZ
3433 pr_debug("incompatible file format\n");
3434 return -EINVAL;
3435 }
3436
8dc58101
TZ
3437 return 0;
3438}
3439
69996df4
SE
3440static int read_attr(int fd, struct perf_header *ph,
3441 struct perf_file_attr *f_attr)
3442{
3443 struct perf_event_attr *attr = &f_attr->attr;
3444 size_t sz, left;
3445 size_t our_sz = sizeof(f_attr->attr);
727ebd54 3446 ssize_t ret;
69996df4
SE
3447
3448 memset(f_attr, 0, sizeof(*f_attr));
3449
3450 /* read minimal guaranteed structure */
3451 ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
3452 if (ret <= 0) {
3453 pr_debug("cannot read %d bytes of header attr\n",
3454 PERF_ATTR_SIZE_VER0);
3455 return -1;
3456 }
3457
3458 /* on file perf_event_attr size */
3459 sz = attr->size;
114382a0 3460
69996df4
SE
3461 if (ph->needs_swap)
3462 sz = bswap_32(sz);
3463
3464 if (sz == 0) {
3465 /* assume ABI0 */
3466 sz = PERF_ATTR_SIZE_VER0;
3467 } else if (sz > our_sz) {
3468 pr_debug("file uses a more recent and unsupported ABI"
3469 " (%zu bytes extra)\n", sz - our_sz);
3470 return -1;
3471 }
3472 /* what we have not yet read and that we know about */
3473 left = sz - PERF_ATTR_SIZE_VER0;
3474 if (left) {
3475 void *ptr = attr;
3476 ptr += PERF_ATTR_SIZE_VER0;
3477
3478 ret = readn(fd, ptr, left);
3479 }
3480 /* read perf_file_section, ids are read in caller */
3481 ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
3482
3483 return ret <= 0 ? -1 : 0;
3484}
3485
32dcd021 3486static int perf_evsel__prepare_tracepoint_event(struct evsel *evsel,
096177a8 3487 struct tep_handle *pevent)
cb9dd49e 3488{
97fbf3f0 3489 struct tep_event *event;
cb9dd49e
ACM
3490 char bf[128];
3491
831394bd
NK
3492 /* already prepared */
3493 if (evsel->tp_format)
3494 return 0;
3495
3dce2ce3
NK
3496 if (pevent == NULL) {
3497 pr_debug("broken or missing trace data\n");
3498 return -1;
3499 }
3500
1fc632ce 3501 event = tep_find_event(pevent, evsel->core.attr.config);
a7619aef 3502 if (event == NULL) {
1fc632ce 3503 pr_debug("cannot find event format for %d\n", (int)evsel->core.attr.config);
cb9dd49e 3504 return -1;
a7619aef 3505 }
cb9dd49e 3506
831394bd
NK
3507 if (!evsel->name) {
3508 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
3509 evsel->name = strdup(bf);
3510 if (evsel->name == NULL)
3511 return -1;
3512 }
cb9dd49e 3513
fcf65bf1 3514 evsel->tp_format = event;
cb9dd49e
ACM
3515 return 0;
3516}
3517
63503dba 3518static int perf_evlist__prepare_tracepoint_events(struct evlist *evlist,
096177a8 3519 struct tep_handle *pevent)
cb9dd49e 3520{
32dcd021 3521 struct evsel *pos;
cb9dd49e 3522
e5cadb93 3523 evlist__for_each_entry(evlist, pos) {
1fc632ce 3524 if (pos->core.attr.type == PERF_TYPE_TRACEPOINT &&
831394bd 3525 perf_evsel__prepare_tracepoint_event(pos, pevent))
cb9dd49e
ACM
3526 return -1;
3527 }
3528
3529 return 0;
3530}
3531
/*
 * Read the header of a (seekable or pipe) perf.data file into
 * session->header, create session->evlist and populate it with one evsel
 * per on-disk attr record plus its sample ids, then process the feature
 * sections and bind tracepoint evsels to their trace-event formats.
 *
 * Returns 0 on success, -EINVAL on malformed files, -errno on read
 * failures, -ENOMEM on allocation failure (the partially built evlist
 * is destroyed in that case).
 */
int perf_session__read_header(struct perf_session *session)
{
	struct perf_data *data = session->data;
	struct perf_header *header = &session->header;
	struct perf_file_header f_header;
	struct perf_file_attr f_attr;
	u64 f_id;
	int nr_attrs, nr_ids, i, j;
	int fd = perf_data__fd(data);

	session->evlist = evlist__new();
	if (session->evlist == NULL)
		return -ENOMEM;

	session->evlist->env = &header->env;
	session->machines.host.env = &header->env;
	/* Pipe streams have a reduced header and no seekable sections. */
	if (perf_data__is_pipe(data))
		return perf_header__read_pipe(session);

	if (perf_file_header__read(&f_header, header, fd) < 0)
		return -EINVAL;

	/*
	 * Sanity check that perf.data was written cleanly; data size is
	 * initialized to 0 and updated only if the on_exit function is run.
	 * If data size is still 0 then the file contains only partial
	 * information.  Just warn user and process it as much as it can.
	 */
	if (f_header.data.size == 0) {
		pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
			   "Was the 'perf record' command properly terminated?\n",
			   data->file.path);
	}

	/* attr_size == 0 would divide by zero below; treat as corrupt. */
	if (f_header.attr_size == 0) {
		pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
		       "Was the 'perf record' command properly terminated?\n",
		       data->file.path);
		return -EINVAL;
	}

	nr_attrs = f_header.attrs.size / f_header.attr_size;
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct evsel *evsel;
		off_t tmp;

		if (read_attr(fd, header, &f_attr) < 0)
			goto out_errno;

		if (header->needs_swap) {
			f_attr.ids.size   = bswap_64(f_attr.ids.size);
			f_attr.ids.offset = bswap_64(f_attr.ids.offset);
			perf_event__attr_swap(&f_attr.attr);
		}

		/*
		 * Remember the position just past this attr record: the id
		 * table lives elsewhere in the file, so we seek away to read
		 * it and must come back here for the next attr.
		 */
		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = evsel__new(&f_attr.attr);

		if (evsel == NULL)
			goto out_delete_evlist;

		evsel->needs_swap = header->needs_swap;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at evlist__delete().
		 */
		evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
		}

		/* Restore the position for the next attr record. */
		lseek(fd, tmp, SEEK_SET);
	}

	perf_header__process_sections(header, fd, &session->tevent,
				      perf_file_section__process);

	if (perf_evlist__prepare_tracepoint_events(session->evlist,
						   session->tevent.pevent))
		goto out_delete_evlist;

	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}
0d3a5c88 3639
45694aa7 3640int perf_event__synthesize_attr(struct perf_tool *tool,
f4d83436 3641 struct perf_event_attr *attr, u32 ids, u64 *id,
743eb868 3642 perf_event__handler_t process)
2c46dbb5 3643{
8115d60c 3644 union perf_event *ev;
2c46dbb5
TZ
3645 size_t size;
3646 int err;
3647
3648 size = sizeof(struct perf_event_attr);
9ac3e487 3649 size = PERF_ALIGN(size, sizeof(u64));
2c46dbb5
TZ
3650 size += sizeof(struct perf_event_header);
3651 size += ids * sizeof(u64);
3652
20f9781f 3653 ev = zalloc(size);
2c46dbb5 3654
ce47dc56
CS
3655 if (ev == NULL)
3656 return -ENOMEM;
3657
2c46dbb5
TZ
3658 ev->attr.attr = *attr;
3659 memcpy(ev->attr.id, id, ids * sizeof(u64));
3660
3661 ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
f4d83436 3662 ev->attr.header.size = (u16)size;
2c46dbb5 3663
f4d83436
RR
3664 if (ev->attr.header.size == size)
3665 err = process(tool, ev, NULL, NULL);
3666 else
3667 err = -E2BIG;
2c46dbb5
TZ
3668
3669 free(ev);
3670
3671 return err;
3672}
3673
e9def1b2
DCC
/*
 * Synthesize one PERF_RECORD_HEADER_FEATURE event per feature bit set in
 * the session header, each written through the feature's ->write() into
 * a growable buffer, then deliver a final HEADER_LAST_FEATURE marker.
 *
 * Returns 0 on success, -ENOMEM if the staging buffer cannot be
 * allocated, or the first non-zero value returned by @process.
 */
int perf_event__synthesize_features(struct perf_tool *tool,
				    struct perf_session *session,
				    struct evlist *evlist,
				    perf_event__handler_t process)
{
	struct perf_header *header = &session->header;
	struct feat_fd ff;
	struct feature_event *fe;
	size_t sz, sz_hdr;
	int feat, ret;

	sz_hdr = sizeof(fe->header);
	sz = sizeof(union perf_event);
	/* get a nice alignment */
	sz = PERF_ALIGN(sz, page_size);

	memset(&ff, 0, sizeof(ff));

	ff.buf = malloc(sz);
	if (!ff.buf)
		return -ENOMEM;

	ff.size = sz - sz_hdr;
	ff.ph = &session->header;

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (!feat_ops[feat].synthesize) {
			pr_debug("No record header feature for header :%d\n", feat);
			continue;
		}

		/* Reserve room for the feature_event header before payload. */
		ff.offset = sizeof(*fe);

		ret = feat_ops[feat].write(&ff, evlist);
		/* A write that produced no payload is skipped, not fatal. */
		if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
			pr_debug("Error writing feature\n");
			continue;
		}
		/* ff.buf may have changed due to realloc in do_write() */
		fe = ff.buf;
		memset(fe, 0, sizeof(*fe));

		fe->feat_id = feat;
		fe->header.type = PERF_RECORD_HEADER_FEATURE;
		fe->header.size = ff.offset;

		ret = process(tool, ff.buf, NULL, NULL);
		if (ret) {
			free(ff.buf);
			return ret;
		}
	}

	/* Send HEADER_LAST_FEATURE mark. */
	fe = ff.buf;
	fe->feat_id = HEADER_LAST_FEATURE;
	fe->header.type = PERF_RECORD_HEADER_FEATURE;
	fe->header.size = sizeof(*fe);

	ret = process(tool, ff.buf, NULL, NULL);

	free(ff.buf);
	return ret;
}
3738
89f1688a
JO
/*
 * Handle an in-stream PERF_RECORD_HEADER_FEATURE event: validate the
 * record/feature ids, run the feature's ->process() on the embedded
 * payload and, when the tool requested it, print the feature via its
 * ->print() callback.
 *
 * Returns 0 on success or when the record is benignly skipped, -1 on an
 * invalid feature id or a failing ->process().
 */
int perf_event__process_feature(struct perf_session *session,
				union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct feat_fd ff = { .fd = 0 };
	struct feature_event *fe = (struct feature_event *)event;
	int type = fe->header.type;
	u64 feat = fe->feat_id;

	if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
		pr_warning("invalid record type %d in pipe-mode\n", type);
		return 0;
	}
	if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
		pr_warning("invalid record type %d in pipe-mode\n", type);
		return -1;
	}

	if (!feat_ops[feat].process)
		return 0;

	/* The payload follows the feature_event header in the same record. */
	ff.buf  = (void *)fe->data;
	ff.size = event->header.size - sizeof(*fe);
	ff.ph = &session->header;

	if (feat_ops[feat].process(&ff, NULL))
		return -1;

	if (!feat_ops[feat].print || !tool->show_feat_hdr)
		return 0;

	/* full_only features are printed only when -I (full info) was given. */
	if (!feat_ops[feat].full_only ||
	    tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
		feat_ops[feat].print(&ff, stdout);
	} else {
		fprintf(stdout, "# %s info available, use -I to display\n",
			feat_ops[feat].name);
	}

	return 0;
}
3780
a6e52817
JO
3781static struct event_update_event *
3782event_update_event__new(size_t size, u64 type, u64 id)
3783{
3784 struct event_update_event *ev;
3785
3786 size += sizeof(*ev);
3787 size = PERF_ALIGN(size, sizeof(u64));
3788
3789 ev = zalloc(size);
3790 if (ev) {
3791 ev->header.type = PERF_RECORD_EVENT_UPDATE;
3792 ev->header.size = (u16)size;
3793 ev->type = type;
3794 ev->id = id;
3795 }
3796 return ev;
3797}
3798
3799int
3800perf_event__synthesize_event_update_unit(struct perf_tool *tool,
32dcd021 3801 struct evsel *evsel,
a6e52817
JO
3802 perf_event__handler_t process)
3803{
3804 struct event_update_event *ev;
3805 size_t size = strlen(evsel->unit);
3806 int err;
3807
3808 ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
3809 if (ev == NULL)
3810 return -ENOMEM;
3811
75725880 3812 strlcpy(ev->data, evsel->unit, size + 1);
a6e52817
JO
3813 err = process(tool, (union perf_event *)ev, NULL, NULL);
3814 free(ev);
3815 return err;
3816}
3817
daeecbc0
JO
3818int
3819perf_event__synthesize_event_update_scale(struct perf_tool *tool,
32dcd021 3820 struct evsel *evsel,
daeecbc0
JO
3821 perf_event__handler_t process)
3822{
3823 struct event_update_event *ev;
3824 struct event_update_event_scale *ev_data;
3825 int err;
3826
3827 ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
3828 if (ev == NULL)
3829 return -ENOMEM;
3830
3831 ev_data = (struct event_update_event_scale *) ev->data;
3832 ev_data->scale = evsel->scale;
3833 err = process(tool, (union perf_event*) ev, NULL, NULL);
3834 free(ev);
3835 return err;
3836}
3837
802c9048
JO
3838int
3839perf_event__synthesize_event_update_name(struct perf_tool *tool,
32dcd021 3840 struct evsel *evsel,
802c9048
JO
3841 perf_event__handler_t process)
3842{
3843 struct event_update_event *ev;
3844 size_t len = strlen(evsel->name);
3845 int err;
3846
3847 ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
3848 if (ev == NULL)
3849 return -ENOMEM;
3850
5192bde7 3851 strlcpy(ev->data, evsel->name, len + 1);
802c9048
JO
3852 err = process(tool, (union perf_event*) ev, NULL, NULL);
3853 free(ev);
3854 return err;
3855}
daeecbc0 3856
86ebb09f
JO
3857int
3858perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
32dcd021 3859 struct evsel *evsel,
86ebb09f
JO
3860 perf_event__handler_t process)
3861{
3862 size_t size = sizeof(struct event_update_event);
3863 struct event_update_event *ev;
3864 int max, err;
3865 u16 type;
3866
fe1f61b3 3867 if (!evsel->core.own_cpus)
86ebb09f
JO
3868 return 0;
3869
fe1f61b3 3870 ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max);
86ebb09f
JO
3871 if (!ev)
3872 return -ENOMEM;
3873
3874 ev->header.type = PERF_RECORD_EVENT_UPDATE;
3875 ev->header.size = (u16)size;
3876 ev->type = PERF_EVENT_UPDATE__CPUS;
3877 ev->id = evsel->id[0];
3878
3879 cpu_map_data__synthesize((struct cpu_map_data *) ev->data,
fe1f61b3 3880 evsel->core.own_cpus,
86ebb09f
JO
3881 type, max);
3882
3883 err = process(tool, (union perf_event*) ev, NULL, NULL);
3884 free(ev);
3885 return err;
3886}
3887
c853f939
JO
3888size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
3889{
3890 struct event_update_event *ev = &event->event_update;
3891 struct event_update_event_scale *ev_scale;
3892 struct event_update_event_cpus *ev_cpus;
f854839b 3893 struct perf_cpu_map *map;
c853f939
JO
3894 size_t ret;
3895
3896 ret = fprintf(fp, "\n... id: %" PRIu64 "\n", ev->id);
3897
3898 switch (ev->type) {
3899 case PERF_EVENT_UPDATE__SCALE:
3900 ev_scale = (struct event_update_event_scale *) ev->data;
3901 ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
3902 break;
3903 case PERF_EVENT_UPDATE__UNIT:
3904 ret += fprintf(fp, "... unit: %s\n", ev->data);
3905 break;
3906 case PERF_EVENT_UPDATE__NAME:
3907 ret += fprintf(fp, "... name: %s\n", ev->data);
3908 break;
3909 case PERF_EVENT_UPDATE__CPUS:
3910 ev_cpus = (struct event_update_event_cpus *) ev->data;
3911 ret += fprintf(fp, "... ");
3912
3913 map = cpu_map__new_data(&ev_cpus->cpus);
3914 if (map)
3915 ret += cpu_map__fprintf(map, fp);
3916 else
3917 ret += fprintf(fp, "failed to get cpus\n");
3918 break;
3919 default:
3920 ret += fprintf(fp, "... unknown type\n");
3921 break;
3922 }
3923
3924 return ret;
3925}
86ebb09f 3926
45694aa7 3927int perf_event__synthesize_attrs(struct perf_tool *tool,
63503dba 3928 struct evlist *evlist,
318ec184 3929 perf_event__handler_t process)
2c46dbb5 3930{
32dcd021 3931 struct evsel *evsel;
a91e5431 3932 int err = 0;
2c46dbb5 3933
318ec184 3934 evlist__for_each_entry(evlist, evsel) {
1fc632ce 3935 err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->ids,
6606f873 3936 evsel->id, process);
2c46dbb5
TZ
3937 if (err) {
3938 pr_debug("failed to create perf header attribute\n");
3939 return err;
3940 }
3941 }
3942
3943 return err;
3944}
3945
32dcd021 3946static bool has_unit(struct evsel *counter)
bfd8f72c
AK
3947{
3948 return counter->unit && *counter->unit;
3949}
3950
/* True when the counter's scaling factor is not the identity (1). */
static bool has_scale(struct evsel *counter)
{
	return counter->scale != 1;
}
3955
3956int perf_event__synthesize_extra_attr(struct perf_tool *tool,
63503dba 3957 struct evlist *evsel_list,
bfd8f72c
AK
3958 perf_event__handler_t process,
3959 bool is_pipe)
3960{
32dcd021 3961 struct evsel *counter;
bfd8f72c
AK
3962 int err;
3963
3964 /*
3965 * Synthesize other events stuff not carried within
3966 * attr event - unit, scale, name
3967 */
3968 evlist__for_each_entry(evsel_list, counter) {
3969 if (!counter->supported)
3970 continue;
3971
3972 /*
3973 * Synthesize unit and scale only if it's defined.
3974 */
3975 if (has_unit(counter)) {
3976 err = perf_event__synthesize_event_update_unit(tool, counter, process);
3977 if (err < 0) {
3978 pr_err("Couldn't synthesize evsel unit.\n");
3979 return err;
3980 }
3981 }
3982
3983 if (has_scale(counter)) {
3984 err = perf_event__synthesize_event_update_scale(tool, counter, process);
3985 if (err < 0) {
3986 pr_err("Couldn't synthesize evsel counter.\n");
3987 return err;
3988 }
3989 }
3990
fe1f61b3 3991 if (counter->core.own_cpus) {
bfd8f72c
AK
3992 err = perf_event__synthesize_event_update_cpus(tool, counter, process);
3993 if (err < 0) {
3994 pr_err("Couldn't synthesize evsel cpus.\n");
3995 return err;
3996 }
3997 }
3998
3999 /*
4000 * Name is needed only for pipe output,
4001 * perf.data carries event names.
4002 */
4003 if (is_pipe) {
4004 err = perf_event__synthesize_event_update_name(tool, counter, process);
4005 if (err < 0) {
4006 pr_err("Couldn't synthesize evsel name.\n");
4007 return err;
4008 }
4009 }
4010 }
4011 return 0;
4012}
4013
47c3d109
AH
4014int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
4015 union perf_event *event,
63503dba 4016 struct evlist **pevlist)
2c46dbb5 4017{
f4d83436 4018 u32 i, ids, n_ids;
32dcd021 4019 struct evsel *evsel;
63503dba 4020 struct evlist *evlist = *pevlist;
2c46dbb5 4021
10d0f086 4022 if (evlist == NULL) {
0f98b11c 4023 *pevlist = evlist = evlist__new();
10d0f086 4024 if (evlist == NULL)
a91e5431
ACM
4025 return -ENOMEM;
4026 }
4027
365c3ae7 4028 evsel = evsel__new(&event->attr.attr);
a91e5431 4029 if (evsel == NULL)
2c46dbb5
TZ
4030 return -ENOMEM;
4031
a1cf3a75 4032 evlist__add(evlist, evsel);
a91e5431 4033
8115d60c
ACM
4034 ids = event->header.size;
4035 ids -= (void *)&event->attr.id - (void *)event;
2c46dbb5 4036 n_ids = ids / sizeof(u64);
a91e5431
ACM
4037 /*
4038 * We don't have the cpu and thread maps on the header, so
4039 * for allocating the perf_sample_id table we fake 1 cpu and
4040 * hattr->ids threads.
4041 */
4042 if (perf_evsel__alloc_id(evsel, 1, n_ids))
4043 return -ENOMEM;
2c46dbb5
TZ
4044
4045 for (i = 0; i < n_ids; i++) {
10d0f086 4046 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
2c46dbb5
TZ
4047 }
4048
2c46dbb5
TZ
4049 return 0;
4050}
cd19a035 4051
ffe77725
JO
4052int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
4053 union perf_event *event,
63503dba 4054 struct evlist **pevlist)
ffe77725
JO
4055{
4056 struct event_update_event *ev = &event->event_update;
daeecbc0 4057 struct event_update_event_scale *ev_scale;
86ebb09f 4058 struct event_update_event_cpus *ev_cpus;
63503dba 4059 struct evlist *evlist;
32dcd021 4060 struct evsel *evsel;
f854839b 4061 struct perf_cpu_map *map;
ffe77725
JO
4062
4063 if (!pevlist || *pevlist == NULL)
4064 return -EINVAL;
4065
4066 evlist = *pevlist;
4067
4068 evsel = perf_evlist__id2evsel(evlist, ev->id);
4069 if (evsel == NULL)
4070 return -EINVAL;
4071
a6e52817
JO
4072 switch (ev->type) {
4073 case PERF_EVENT_UPDATE__UNIT:
4074 evsel->unit = strdup(ev->data);
daeecbc0 4075 break;
802c9048
JO
4076 case PERF_EVENT_UPDATE__NAME:
4077 evsel->name = strdup(ev->data);
4078 break;
daeecbc0
JO
4079 case PERF_EVENT_UPDATE__SCALE:
4080 ev_scale = (struct event_update_event_scale *) ev->data;
4081 evsel->scale = ev_scale->scale;
8434a2ec 4082 break;
86ebb09f
JO
4083 case PERF_EVENT_UPDATE__CPUS:
4084 ev_cpus = (struct event_update_event_cpus *) ev->data;
4085
4086 map = cpu_map__new_data(&ev_cpus->cpus);
4087 if (map)
fe1f61b3 4088 evsel->core.own_cpus = map;
86ebb09f
JO
4089 else
4090 pr_err("failed to get event_update cpus\n");
a6e52817
JO
4091 default:
4092 break;
4093 }
4094
ffe77725
JO
4095 return 0;
4096}
4097
/*
 * Synthesize a PERF_RECORD_HEADER_TRACING_DATA event announcing the
 * (u64-aligned) tracing data size on the pipe @fd, then stream the
 * tracing data itself plus alignment padding after it.
 *
 * Returns the aligned data size written, or -1 on failure.
 */
int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
					struct evlist *evlist,
					perf_event__handler_t process)
{
	union perf_event ev;
	struct tracing_data *tdata;
	ssize_t size = 0, aligned_size = 0, padding;
	struct feat_fd ff;
	int err __maybe_unused = 0;

	/*
	 * We are going to store the size of the data followed
	 * by the data contents. Since the fd descriptor is a pipe,
	 * we cannot seek back to store the size of the data once
	 * we know it. Instead we:
	 *
	 * - write the tracing data to the temp file
	 * - get/write the data size to pipe
	 * - write the tracing data from the temp file
	 *   to the pipe
	 */
	tdata = tracing_data_get(&evlist->core.entries, fd, true);
	if (!tdata)
		return -1;

	memset(&ev, 0, sizeof(ev));

	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
	size = tdata->size;
	/* Consumers expect the payload padded out to a u64 boundary. */
	aligned_size = PERF_ALIGN(size, sizeof(u64));
	padding = aligned_size - size;
	ev.tracing_data.header.size = sizeof(ev.tracing_data);
	ev.tracing_data.size = aligned_size;

	process(tool, &ev, NULL, NULL);

	/*
	 * The put function will copy all the tracing data
	 * stored in temp file to the pipe.
	 */
	tracing_data_put(tdata);

	/* Emit the alignment padding after the copied tracing data. */
	ff = (struct feat_fd){ .fd = fd };
	if (write_padded(&ff, NULL, 0, padding))
		return -1;

	return aligned_size;
}
4146
89f1688a
JO
/*
 * Handle an in-stream PERF_RECORD_HEADER_TRACING_DATA event: parse the
 * tracing data that follows the record, consume (and optionally repipe)
 * the u64 alignment padding, and bind tracepoint evsels to the freshly
 * parsed formats.
 *
 * Returns the number of bytes consumed (data + padding), -1 on failure.
 */
int perf_event__process_tracing_data(struct perf_session *session,
				     union perf_event *event)
{
	ssize_t size_read, padding, size = event->tracing_data.size;
	int fd = perf_data__fd(session->data);
	off_t offset = lseek(fd, 0, SEEK_CUR);
	char buf[BUFSIZ];

	/* setup for reading amidst mmap */
	lseek(fd, offset + sizeof(struct tracing_data_event),
	      SEEK_SET);

	size_read = trace_report(fd, &session->tevent,
				 session->repipe);
	/* The producer padded the payload to a u64 boundary. */
	padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;

	if (readn(fd, buf, padding) < 0) {
		pr_err("%s: reading input file", __func__);
		return -1;
	}
	if (session->repipe) {
		int retw = write(STDOUT_FILENO, buf, padding);
		if (retw <= 0 || retw != padding) {
			pr_err("%s: repiping tracing data padding", __func__);
			return -1;
		}
	}

	/* Sanity check against the size announced in the record. */
	if (size_read + padding != size) {
		pr_err("%s: tracing data size mismatch", __func__);
		return -1;
	}

	perf_evlist__prepare_tracepoint_events(session->evlist,
					       session->tevent.pevent);

	return size_read + padding;
}
c7929e47 4185
45694aa7 4186int perf_event__synthesize_build_id(struct perf_tool *tool,
d20deb64 4187 struct dso *pos, u16 misc,
8115d60c 4188 perf_event__handler_t process,
743eb868 4189 struct machine *machine)
c7929e47 4190{
8115d60c 4191 union perf_event ev;
c7929e47
TZ
4192 size_t len;
4193 int err = 0;
4194
4195 if (!pos->hit)
4196 return err;
4197
4198 memset(&ev, 0, sizeof(ev));
4199
4200 len = pos->long_name_len + 1;
9ac3e487 4201 len = PERF_ALIGN(len, NAME_ALIGN);
c7929e47
TZ
4202 memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
4203 ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
4204 ev.build_id.header.misc = misc;
23346f21 4205 ev.build_id.pid = machine->pid;
c7929e47
TZ
4206 ev.build_id.header.size = sizeof(ev.build_id) + len;
4207 memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
4208
45694aa7 4209 err = process(tool, &ev, NULL, machine);
c7929e47
TZ
4210
4211 return err;
4212}
4213
89f1688a
JO
/*
 * Handle an in-stream PERF_RECORD_HEADER_BUILD_ID event: forward the
 * embedded build-id record and its filename to __event_process_build_id()
 * for registration against the session.  Always reports success.
 */
int perf_event__process_build_id(struct perf_session *session,
				 union perf_event *event)
{
	__event_process_build_id(&event->build_id,
				 event->build_id.filename,
				 session);
	return 0;
}