tools/perf/util/header.c
1 #define _FILE_OFFSET_BITS 64
2
3 #include <sys/types.h>
4 #include <byteswap.h>
5 #include <unistd.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <linux/list.h>
9 #include <linux/kernel.h>
10
11 #include "evlist.h"
12 #include "evsel.h"
13 #include "util.h"
14 #include "header.h"
15 #include "../perf.h"
16 #include "trace-event.h"
17 #include "session.h"
18 #include "symbol.h"
19 #include "debug.h"
20
21 static bool no_buildid_cache = false;
22
23 static int event_count;
24 static struct perf_trace_event_type *events;
25
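/*
 * Runtime table of user-defined event types (id -> name).  It is grown one
 * entry at a time with realloc() by perf_header__push_event() and later
 * written out verbatim as the event_types section of the perf.data header,
 * or synthesized as PERF_RECORD_HEADER_EVENT_TYPE events in pipe mode.
 */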
26 int perf_header__push_event(u64 id, const char *name)
27 {
28 if (strlen(name) >= MAX_EVENT_NAME)
29 pr_warning("Event %s will be truncated\n", name);
30
31 if (!events) {
32 events = malloc(sizeof(struct perf_trace_event_type));
33 if (events == NULL)
34 return -ENOMEM;
35 } else {
36 struct perf_trace_event_type *nevents;
37
38 nevents = realloc(events, (event_count + 1) * sizeof(*events));
39 if (nevents == NULL)
40 return -ENOMEM;
41 events = nevents;
42 }
43 memset(&events[event_count], 0, sizeof(struct perf_trace_event_type));
44 events[event_count].event_id = id;
45 strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1);
46 event_count++;
47 return 0;
48 }
49
50 char *perf_header__find_event(u64 id)
51 {
52 int i;
53 for (i = 0 ; i < event_count; i++) {
54 if (events[i].event_id == id)
55 return events[i].name;
56 }
57 return NULL;
58 }
59
60 static const char *__perf_magic = "PERFFILE";
61
62 #define PERF_MAGIC (*(u64 *)__perf_magic)
63
64 struct perf_file_attr {
65 struct perf_event_attr attr;
66 struct perf_file_section ids;
67 };
68
69 void perf_header__set_feat(struct perf_header *header, int feat)
70 {
71 set_bit(feat, header->adds_features);
72 }
73
74 void perf_header__clear_feat(struct perf_header *header, int feat)
75 {
76 clear_bit(feat, header->adds_features);
77 }
78
79 bool perf_header__has_feat(const struct perf_header *header, int feat)
80 {
81 return test_bit(feat, header->adds_features);
82 }
83
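/*
 * write() may store fewer bytes than requested, so keep writing until the
 * whole buffer has been consumed or an error occurs.  Returns 0 or -errno.
 */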
84 static int do_write(int fd, const void *buf, size_t size)
85 {
86 while (size) {
87 int ret = write(fd, buf, size);
88
89 if (ret < 0)
90 return -errno;
91
92 size -= ret;
93 buf += ret;
94 }
95
96 return 0;
97 }
98
99 #define NAME_ALIGN 64
100
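/*
 * Write 'count' bytes from 'bf' and then pad with zeroes up to
 * 'count_aligned' bytes, so that variable-length strings (e.g. DSO names)
 * always occupy a NAME_ALIGN-rounded slot in the file.
 */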
101 static int write_padded(int fd, const void *bf, size_t count,
102 size_t count_aligned)
103 {
104 static const char zero_buf[NAME_ALIGN];
105 int err = do_write(fd, bf, count);
106
107 if (!err)
108 err = do_write(fd, zero_buf, count_aligned - count);
109
110 return err;
111 }
112
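/*
 * Iterate over the DSOs on 'head' that carry a build-id.  The dangling
 * 'else' lets the macro be followed by a normal statement or block, e.g.:
 *
 *	dsos__for_each_with_build_id(pos, head) {
 *		...
 *	}
 */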
113 #define dsos__for_each_with_build_id(pos, head) \
114 list_for_each_entry(pos, head, node) \
115 if (!pos->has_build_id) \
116 continue; \
117 else
118
119 static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
120 u16 misc, int fd)
121 {
122 struct dso *pos;
123
124 dsos__for_each_with_build_id(pos, head) {
125 int err;
126 struct build_id_event b;
127 size_t len;
128
129 if (!pos->hit)
130 continue;
131 len = pos->long_name_len + 1;
132 len = ALIGN(len, NAME_ALIGN);
133 memset(&b, 0, sizeof(b));
134 memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
135 b.pid = pid;
136 b.header.misc = misc;
137 b.header.size = sizeof(b) + len;
138 err = do_write(fd, &b, sizeof(b));
139 if (err < 0)
140 return err;
141 err = write_padded(fd, pos->long_name,
142 pos->long_name_len + 1, len);
143 if (err < 0)
144 return err;
145 }
146
147 return 0;
148 }
149
150 static int machine__write_buildid_table(struct machine *machine, int fd)
151 {
152 int err;
153 u16 kmisc = PERF_RECORD_MISC_KERNEL,
154 umisc = PERF_RECORD_MISC_USER;
155
156 if (!machine__is_host(machine)) {
157 kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
158 umisc = PERF_RECORD_MISC_GUEST_USER;
159 }
160
161 err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
162 kmisc, fd);
163 if (err == 0)
164 err = __dsos__write_buildid_table(&machine->user_dsos,
165 machine->pid, umisc, fd);
166 return err;
167 }
168
169 static int dsos__write_buildid_table(struct perf_header *header, int fd)
170 {
171 struct perf_session *session = container_of(header,
172 struct perf_session, header);
173 struct rb_node *nd;
174 int err = machine__write_buildid_table(&session->host_machine, fd);
175
176 if (err)
177 return err;
178
179 for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
180 struct machine *pos = rb_entry(nd, struct machine, rb_node);
181 err = machine__write_buildid_table(pos, fd);
182 if (err)
183 break;
184 }
185 return err;
186 }
187
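/*
 * Add one object to the build-id cache under 'debugdir'.  Roughly, for a
 * build-id "abcdef..." of, say, /usr/bin/foo this creates:
 *
 *	<debugdir>/usr/bin/foo/abcdef...	(hard link or copy of the file)
 *	<debugdir>/.build-id/ab/cdef...		-> ../../usr/bin/foo/abcdef...
 *
 * kallsyms is special-cased: the name is used verbatim and the contents of
 * /proc/kallsyms are copied instead of linked.
 */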
188 int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
189 const char *name, bool is_kallsyms)
190 {
191 const size_t size = PATH_MAX;
192 char *realname, *filename = zalloc(size),
193 *linkname = zalloc(size), *targetname;
194 int len, err = -1;
195
196 if (is_kallsyms) {
197 if (symbol_conf.kptr_restrict) {
198 pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
199 return 0;
200 }
201 realname = (char *)name;
202 } else
203 realname = realpath(name, NULL);
204
205 if (realname == NULL || filename == NULL || linkname == NULL)
206 goto out_free;
207
208 len = snprintf(filename, size, "%s%s%s",
209 debugdir, is_kallsyms ? "/" : "", realname);
210 if (mkdir_p(filename, 0755))
211 goto out_free;
212
213 snprintf(filename + len, size - len, "/%s", sbuild_id);
214
215 if (access(filename, F_OK)) {
216 if (is_kallsyms) {
217 if (copyfile("/proc/kallsyms", filename))
218 goto out_free;
219 } else if (link(realname, filename) && copyfile(name, filename))
220 goto out_free;
221 }
222
223 len = snprintf(linkname, size, "%s/.build-id/%.2s",
224 debugdir, sbuild_id);
225
226 if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
227 goto out_free;
228
229 snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
230 targetname = filename + strlen(debugdir) - 5;
231 memcpy(targetname, "../..", 5);
232
233 if (symlink(targetname, linkname) == 0)
234 err = 0;
235 out_free:
236 if (!is_kallsyms)
237 free(realname);
238 free(filename);
239 free(linkname);
240 return err;
241 }
242
243 static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
244 const char *name, const char *debugdir,
245 bool is_kallsyms)
246 {
247 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
248
249 build_id__sprintf(build_id, build_id_size, sbuild_id);
250
251 return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);
252 }
253
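/*
 * Remove one entry from the build-id cache: unlink the .build-id/xx/yyy...
 * symlink and then the cached object it pointed to.
 */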
254 int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
255 {
256 const size_t size = PATH_MAX;
257 char *filename = zalloc(size),
258 *linkname = zalloc(size);
259 int err = -1;
260
261 if (filename == NULL || linkname == NULL)
262 goto out_free;
263
264 snprintf(linkname, size, "%s/.build-id/%.2s/%s",
265 debugdir, sbuild_id, sbuild_id + 2);
266
267 if (access(linkname, F_OK))
268 goto out_free;
269
270 if (readlink(linkname, filename, size - 1) < 0)
271 goto out_free;
272
273 if (unlink(linkname))
274 goto out_free;
275
276 /*
277 * Since the link is relative, we must make it absolute:
278 */
279 snprintf(linkname, size, "%s/.build-id/%.2s/%s",
280 debugdir, sbuild_id, filename);
281
282 if (unlink(linkname))
283 goto out_free;
284
285 err = 0;
286 out_free:
287 free(filename);
288 free(linkname);
289 return err;
290 }
291
292 static int dso__cache_build_id(struct dso *dso, const char *debugdir)
293 {
294 bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
295
296 return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
297 dso->long_name, debugdir, is_kallsyms);
298 }
299
300 static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
301 {
302 struct dso *pos;
303 int err = 0;
304
305 dsos__for_each_with_build_id(pos, head)
306 if (dso__cache_build_id(pos, debugdir))
307 err = -1;
308
309 return err;
310 }
311
312 static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
313 {
314 int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
315 ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
316 return ret;
317 }
318
319 static int perf_session__cache_build_ids(struct perf_session *session)
320 {
321 struct rb_node *nd;
322 int ret;
323 char debugdir[PATH_MAX];
324
325 snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);
326
327 if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
328 return -1;
329
330 ret = machine__cache_build_ids(&session->host_machine, debugdir);
331
332 for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
333 struct machine *pos = rb_entry(nd, struct machine, rb_node);
334 ret |= machine__cache_build_ids(pos, debugdir);
335 }
336 return ret ? -1 : 0;
337 }
338
339 static bool machine__read_build_ids(struct machine *machine, bool with_hits)
340 {
341 bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);
342 ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);
343 return ret;
344 }
345
346 static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
347 {
348 struct rb_node *nd;
349 bool ret = machine__read_build_ids(&session->host_machine, with_hits);
350
351 for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
352 struct machine *pos = rb_entry(nd, struct machine, rb_node);
353 ret |= machine__read_build_ids(pos, with_hits);
354 }
355
356 return ret;
357 }
358
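/*
 * Write the optional feature sections (trace info, build-ids, ...) that
 * follow the data area.  Space for the perf_file_section table is reserved
 * first, each enabled feature is written behind it, and finally the table
 * itself is filled in at sec_start with the offset and size of every
 * section.  The build-id feature is cleared if no build-ids were collected.
 */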
359 static int perf_header__adds_write(struct perf_header *header,
360 struct perf_evlist *evlist, int fd)
361 {
362 int nr_sections;
363 struct perf_session *session;
364 struct perf_file_section *feat_sec;
365 int sec_size;
366 u64 sec_start;
367 int idx = 0, err;
368
369 session = container_of(header, struct perf_session, header);
370
371 if (perf_header__has_feat(header, HEADER_BUILD_ID) &&
372     !perf_session__read_build_ids(session, true))
373 perf_header__clear_feat(header, HEADER_BUILD_ID);
374
375 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
376 if (!nr_sections)
377 return 0;
378
379 feat_sec = calloc(nr_sections, sizeof(*feat_sec));
380 if (feat_sec == NULL)
381 return -ENOMEM;
382
383 sec_size = sizeof(*feat_sec) * nr_sections;
384
385 sec_start = header->data_offset + header->data_size;
386 lseek(fd, sec_start + sec_size, SEEK_SET);
387
388 if (perf_header__has_feat(header, HEADER_TRACE_INFO)) {
389 struct perf_file_section *trace_sec;
390
391 trace_sec = &feat_sec[idx++];
392
393 /* Write trace info */
394 trace_sec->offset = lseek(fd, 0, SEEK_CUR);
395 read_tracing_data(fd, &evlist->entries);
396 trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset;
397 }
398
399 if (perf_header__has_feat(header, HEADER_BUILD_ID)) {
400 struct perf_file_section *buildid_sec;
401
402 buildid_sec = &feat_sec[idx++];
403
404 /* Write build-ids */
405 buildid_sec->offset = lseek(fd, 0, SEEK_CUR);
406 err = dsos__write_buildid_table(header, fd);
407 if (err < 0) {
408 pr_debug("failed to write buildid table\n");
409 goto out_free;
410 }
411 buildid_sec->size = lseek(fd, 0, SEEK_CUR) -
412 buildid_sec->offset;
413 if (!no_buildid_cache)
414 perf_session__cache_build_ids(session);
415 }
416
417 lseek(fd, sec_start, SEEK_SET);
418 err = do_write(fd, feat_sec, sec_size);
419 if (err < 0)
420 pr_debug("failed to write feature section\n");
421 out_free:
422 free(feat_sec);
423 return err;
424 }
425
426 int perf_header__write_pipe(int fd)
427 {
428 struct perf_pipe_file_header f_header;
429 int err;
430
431 f_header = (struct perf_pipe_file_header){
432 .magic = PERF_MAGIC,
433 .size = sizeof(f_header),
434 };
435
436 err = do_write(fd, &f_header, sizeof(f_header));
437 if (err < 0) {
438 pr_debug("failed to write perf pipe header\n");
439 return err;
440 }
441
442 return 0;
443 }
444
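/*
 * Write the on-disk perf.data header.  The resulting file layout is roughly:
 *
 *	struct perf_file_header			(rewritten last, at offset 0)
 *	sample ids, one u64 array per event
 *	struct perf_file_attr[nr_entries]	<- header->attr_offset
 *	struct perf_trace_event_type[]		<- header->event_offset
 *	data (samples, written by the caller)	<- header->data_offset
 *	feature sections			(only when at_exit is set)
 *
 * When 'evlist' differs from session->evlist, the ids of the corresponding
 * entries in session->evlist are appended to each attribute as well.
 */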
445 int perf_session__write_header(struct perf_session *session,
446 struct perf_evlist *evlist,
447 int fd, bool at_exit)
448 {
449 struct perf_file_header f_header;
450 struct perf_file_attr f_attr;
451 struct perf_header *header = &session->header;
452 struct perf_evsel *attr, *pair = NULL;
453 int err;
454
455 lseek(fd, sizeof(f_header), SEEK_SET);
456
457 if (session->evlist != evlist)
458 pair = list_entry(session->evlist->entries.next, struct perf_evsel, node);
459
460 list_for_each_entry(attr, &evlist->entries, node) {
461 attr->id_offset = lseek(fd, 0, SEEK_CUR);
462 err = do_write(fd, attr->id, attr->ids * sizeof(u64));
463 if (err < 0) {
464 out_err_write:
465 pr_debug("failed to write perf header\n");
466 return err;
467 }
468 if (session->evlist != evlist) {
469 err = do_write(fd, pair->id, pair->ids * sizeof(u64));
470 if (err < 0)
471 goto out_err_write;
472 attr->ids += pair->ids;
473 pair = list_entry(pair->node.next, struct perf_evsel, node);
474 }
475 }
476
477 header->attr_offset = lseek(fd, 0, SEEK_CUR);
478
479 list_for_each_entry(attr, &evlist->entries, node) {
480 f_attr = (struct perf_file_attr){
481 .attr = attr->attr,
482 .ids = {
483 .offset = attr->id_offset,
484 .size = attr->ids * sizeof(u64),
485 }
486 };
487 err = do_write(fd, &f_attr, sizeof(f_attr));
488 if (err < 0) {
489 pr_debug("failed to write perf header attribute\n");
490 return err;
491 }
492 }
493
494 header->event_offset = lseek(fd, 0, SEEK_CUR);
495 header->event_size = event_count * sizeof(struct perf_trace_event_type);
496 if (events) {
497 err = do_write(fd, events, header->event_size);
498 if (err < 0) {
499 pr_debug("failed to write perf header events\n");
500 return err;
501 }
502 }
503
504 header->data_offset = lseek(fd, 0, SEEK_CUR);
505
506 if (at_exit) {
507 err = perf_header__adds_write(header, evlist, fd);
508 if (err < 0)
509 return err;
510 }
511
512 f_header = (struct perf_file_header){
513 .magic = PERF_MAGIC,
514 .size = sizeof(f_header),
515 .attr_size = sizeof(f_attr),
516 .attrs = {
517 .offset = header->attr_offset,
518 .size = evlist->nr_entries * sizeof(f_attr),
519 },
520 .data = {
521 .offset = header->data_offset,
522 .size = header->data_size,
523 },
524 .event_types = {
525 .offset = header->event_offset,
526 .size = header->event_size,
527 },
528 };
529
530 memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
531
532 lseek(fd, 0, SEEK_SET);
533 err = do_write(fd, &f_header, sizeof(f_header));
534 if (err < 0) {
535 pr_debug("failed to write perf header\n");
536 return err;
537 }
538 lseek(fd, header->data_offset + header->data_size, SEEK_SET);
539
540 header->frozen = 1;
541 return 0;
542 }
543
544 static int perf_header__getbuffer64(struct perf_header *header,
545 int fd, void *buf, size_t size)
546 {
547 if (readn(fd, buf, size) <= 0)
548 return -1;
549
550 if (header->needs_swap)
551 mem_bswap_64(buf, size);
552
553 return 0;
554 }
555
556 int perf_header__process_sections(struct perf_header *header, int fd,
557 int (*process)(struct perf_file_section *section,
558 struct perf_header *ph,
559 int feat, int fd))
560 {
561 struct perf_file_section *feat_sec;
562 int nr_sections;
563 int sec_size;
564 int idx = 0;
565 int err = -1, feat = 1;
566
567 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
568 if (!nr_sections)
569 return 0;
570
571 feat_sec = calloc(nr_sections, sizeof(*feat_sec));
572 if (!feat_sec)
573 return -1;
574
575 sec_size = sizeof(*feat_sec) * nr_sections;
576
577 lseek(fd, header->data_offset + header->data_size, SEEK_SET);
578
579 if (perf_header__getbuffer64(header, fd, feat_sec, sec_size))
580 goto out_free;
581
582 err = 0;
583 while (idx < nr_sections && feat < HEADER_LAST_FEATURE) {
584 if (perf_header__has_feat(header, feat)) {
585 struct perf_file_section *sec = &feat_sec[idx++];
586
587 err = process(sec, header, feat, fd);
588 if (err < 0)
589 break;
590 }
591 ++feat;
592 }
593 out_free:
594 free(feat_sec);
595 return err;
596 }
597
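/*
 * Read and sanity-check the file header.  Endianness is detected by testing
 * whether attr_size matches sizeof(struct perf_file_attr) natively or only
 * after byte-swapping; in the latter case needs_swap is set and the fixed
 * part of the header is swapped in place.
 */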
598 int perf_file_header__read(struct perf_file_header *header,
599 struct perf_header *ph, int fd)
600 {
601 lseek(fd, 0, SEEK_SET);
602
603 if (readn(fd, header, sizeof(*header)) <= 0 ||
604 memcmp(&header->magic, __perf_magic, sizeof(header->magic)))
605 return -1;
606
607 if (header->attr_size != sizeof(struct perf_file_attr)) {
608 u64 attr_size = bswap_64(header->attr_size);
609
610 if (attr_size != sizeof(struct perf_file_attr))
611 return -1;
612
613 mem_bswap_64(header, offsetof(struct perf_file_header,
614 adds_features));
615 ph->needs_swap = true;
616 }
617
618 if (header->size != sizeof(*header)) {
619 /* Support the previous format */
620 if (header->size == offsetof(typeof(*header), adds_features))
621 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
622 else
623 return -1;
624 }
625
626 memcpy(&ph->adds_features, &header->adds_features,
627 sizeof(ph->adds_features));
628 /*
629 * FIXME: hack that assumes that if we need to swap, the perf.data file
630 * may be coming from an arch with a different word-size, ergo a different
631 * DECLARE_BITMAP format; investigate more later, but for now it's mostly
632 * safe to assume that we have a build-id section. Trace files probably
633 * have several other issues in this realm anyway...
634 */
635 if (ph->needs_swap) {
636 memset(&ph->adds_features, 0, sizeof(ph->adds_features));
637 perf_header__set_feat(ph, HEADER_BUILD_ID);
638 }
639
640 ph->event_offset = header->event_types.offset;
641 ph->event_size = header->event_types.size;
642 ph->data_offset = header->data.offset;
643 ph->data_size = header->data.size;
644 return 0;
645 }
646
647 static int __event_process_build_id(struct build_id_event *bev,
648 char *filename,
649 struct perf_session *session)
650 {
651 int err = -1;
652 struct list_head *head;
653 struct machine *machine;
654 u16 misc;
655 struct dso *dso;
656 enum dso_kernel_type dso_type;
657
658 machine = perf_session__findnew_machine(session, bev->pid);
659 if (!machine)
660 goto out;
661
662 misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
663
664 switch (misc) {
665 case PERF_RECORD_MISC_KERNEL:
666 dso_type = DSO_TYPE_KERNEL;
667 head = &machine->kernel_dsos;
668 break;
669 case PERF_RECORD_MISC_GUEST_KERNEL:
670 dso_type = DSO_TYPE_GUEST_KERNEL;
671 head = &machine->kernel_dsos;
672 break;
673 case PERF_RECORD_MISC_USER:
674 case PERF_RECORD_MISC_GUEST_USER:
675 dso_type = DSO_TYPE_USER;
676 head = &machine->user_dsos;
677 break;
678 default:
679 goto out;
680 }
681
682 dso = __dsos__findnew(head, filename);
683 if (dso != NULL) {
684 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
685
686 dso__set_build_id(dso, &bev->build_id);
687
688 if (filename[0] == '[')
689 dso->kernel = dso_type;
690
691 build_id__sprintf(dso->build_id, sizeof(dso->build_id),
692 sbuild_id);
693 pr_debug("build id event received for %s: %s\n",
694 dso->long_name, sbuild_id);
695 }
696
697 err = 0;
698 out:
699 return err;
700 }
701
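/*
 * Parse a build-id table written before the a1645ce1 change, i.e. with a
 * struct build_id_event that lacked the pid field.  The pid is
 * reconstructed from header.misc (host vs. guest).
 */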
702 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
703 int input, u64 offset, u64 size)
704 {
705 struct perf_session *session = container_of(header, struct perf_session, header);
706 struct {
707 struct perf_event_header header;
708 u8 build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))];
709 char filename[0];
710 } old_bev;
711 struct build_id_event bev;
712 char filename[PATH_MAX];
713 u64 limit = offset + size;
714
715 while (offset < limit) {
716 ssize_t len;
717
718 if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
719 return -1;
720
721 if (header->needs_swap)
722 perf_event_header__bswap(&old_bev.header);
723
724 len = old_bev.header.size - sizeof(old_bev);
725 if (read(input, filename, len) != len)
726 return -1;
727
728 bev.header = old_bev.header;
729
730 /*
731 * As the pid is the missing value, we need to fill
732 * it in properly. The header.misc value gives us a nice hint.
733 */
734 bev.pid = HOST_KERNEL_ID;
735 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
736 bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
737 bev.pid = DEFAULT_GUEST_KERNEL_ID;
738
739 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
740 __event_process_build_id(&bev, filename, session);
741
742 offset += bev.header.size;
743 }
744
745 return 0;
746 }
747
748 static int perf_header__read_build_ids(struct perf_header *header,
749 int input, u64 offset, u64 size)
750 {
751 struct perf_session *session = container_of(header, struct perf_session, header);
752 struct build_id_event bev;
753 char filename[PATH_MAX];
754 u64 limit = offset + size, orig_offset = offset;
755 int err = -1;
756
757 while (offset < limit) {
758 ssize_t len;
759
760 if (read(input, &bev, sizeof(bev)) != sizeof(bev))
761 goto out;
762
763 if (header->needs_swap)
764 perf_event_header__bswap(&bev.header);
765
766 len = bev.header.size - sizeof(bev);
767 if (read(input, filename, len) != len)
768 goto out;
769 /*
770 * The a1645ce1 changeset:
771 *
772 * "perf: 'perf kvm' tool for monitoring guest performance from host"
773 *
774 * Added a field to struct build_id_event that broke the file
775 * format.
776 *
777 * Since the kernel build-id is the first entry, process the
778 * table using the old format if the well known
779 * '[kernel.kallsyms]' string for the kernel build-id has the
780 * first 4 characters chopped off (where the pid_t sits).
781 */
782 if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
783 if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
784 return -1;
785 return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
786 }
787
788 __event_process_build_id(&bev, filename, session);
789
790 offset += bev.header.size;
791 }
792 err = 0;
793 out:
794 return err;
795 }
796
797 static int perf_file_section__process(struct perf_file_section *section,
798 struct perf_header *ph,
799 int feat, int fd)
800 {
801 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
802 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
803 "%d, continuing...\n", section->offset, feat);
804 return 0;
805 }
806
807 switch (feat) {
808 case HEADER_TRACE_INFO:
809 trace_report(fd, false);
810 break;
811
812 case HEADER_BUILD_ID:
813 if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
814 pr_debug("Failed to read buildids, continuing...\n");
815 break;
816 default:
817 pr_debug("unknown feature %d, continuing...\n", feat);
818 }
819
820 return 0;
821 }
822
823 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
824 struct perf_header *ph, int fd,
825 bool repipe)
826 {
827 if (readn(fd, header, sizeof(*header)) <= 0 ||
828 memcmp(&header->magic, __perf_magic, sizeof(header->magic)))
829 return -1;
830
831 if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
832 return -1;
833
834 if (header->size != sizeof(*header)) {
835 u64 size = bswap_64(header->size);
836
837 if (size != sizeof(*header))
838 return -1;
839
840 ph->needs_swap = true;
841 }
842
843 return 0;
844 }
845
846 static int perf_header__read_pipe(struct perf_session *session, int fd)
847 {
848 struct perf_header *header = &session->header;
849 struct perf_pipe_file_header f_header;
850
851 if (perf_file_header__read_pipe(&f_header, header, fd,
852 session->repipe) < 0) {
853 pr_debug("incompatible file format\n");
854 return -EINVAL;
855 }
856
857 session->fd = fd;
858
859 return 0;
860 }
861
862 int perf_session__read_header(struct perf_session *session, int fd)
863 {
864 struct perf_header *header = &session->header;
865 struct perf_file_header f_header;
866 struct perf_file_attr f_attr;
867 u64 f_id;
868 int nr_attrs, nr_ids, i, j;
869
870 session->evlist = perf_evlist__new(NULL, NULL);
871 if (session->evlist == NULL)
872 return -ENOMEM;
873
874 if (session->fd_pipe)
875 return perf_header__read_pipe(session, fd);
876
877 if (perf_file_header__read(&f_header, header, fd) < 0) {
878 pr_debug("incompatible file format\n");
879 return -EINVAL;
880 }
881
882 nr_attrs = f_header.attrs.size / sizeof(f_attr);
883 lseek(fd, f_header.attrs.offset, SEEK_SET);
884
885 for (i = 0; i < nr_attrs; i++) {
886 struct perf_evsel *evsel;
887 off_t tmp;
888
889 if (readn(fd, &f_attr, sizeof(f_attr)) <= 0)
890 goto out_errno;
891
892 if (header->needs_swap)
893 perf_event__attr_swap(&f_attr.attr);
894
895 tmp = lseek(fd, 0, SEEK_CUR);
896 evsel = perf_evsel__new(&f_attr.attr, i);
897
898 if (evsel == NULL)
899 goto out_delete_evlist;
900 /*
901 * Do it before so that if perf_evsel__alloc_id fails, this
902 * entry gets purged too at perf_evlist__delete().
903 */
904 perf_evlist__add(session->evlist, evsel);
905
906 nr_ids = f_attr.ids.size / sizeof(u64);
907 /*
908 * We don't have the cpu and thread maps on the header, so
909 * for allocating the perf_sample_id table we fake 1 cpu and
910 * hattr->ids threads.
911 */
912 if (perf_evsel__alloc_id(evsel, 1, nr_ids))
913 goto out_delete_evlist;
914
915 lseek(fd, f_attr.ids.offset, SEEK_SET);
916
917 for (j = 0; j < nr_ids; j++) {
918 if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
919 goto out_errno;
920
921 perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
922 }
923
924 lseek(fd, tmp, SEEK_SET);
925 }
926
927 if (f_header.event_types.size) {
928 lseek(fd, f_header.event_types.offset, SEEK_SET);
929 events = malloc(f_header.event_types.size);
930 if (events == NULL)
931 return -ENOMEM;
932 if (perf_header__getbuffer64(header, fd, events,
933 f_header.event_types.size))
934 goto out_errno;
935 event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);
936 }
937
938 perf_header__process_sections(header, fd, perf_file_section__process);
939
940 lseek(fd, header->data_offset, SEEK_SET);
941
942 header->frozen = 1;
943 return 0;
944 out_errno:
945 return -errno;
946
947 out_delete_evlist:
948 perf_evlist__delete(session->evlist);
949 session->evlist = NULL;
950 return -ENOMEM;
951 }
952
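/*
 * Build a PERF_RECORD_HEADER_ATTR event carrying a perf_event_attr plus its
 * sample ids, so that the attribute can travel in-band (e.g. over a pipe)
 * instead of in the file header.
 */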
953 int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
954 perf_event__handler_t process,
955 struct perf_session *session)
956 {
957 union perf_event *ev;
958 size_t size;
959 int err;
960
961 size = sizeof(struct perf_event_attr);
962 size = ALIGN(size, sizeof(u64));
963 size += sizeof(struct perf_event_header);
964 size += ids * sizeof(u64);
965
966 ev = malloc(size);
967
968 if (ev == NULL)
969 return -ENOMEM;
970
971 ev->attr.attr = *attr;
972 memcpy(ev->attr.id, id, ids * sizeof(u64));
973
974 ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
975 ev->attr.header.size = size;
976
977 err = process(ev, NULL, session);
978
979 free(ev);
980
981 return err;
982 }
983
984 int perf_session__synthesize_attrs(struct perf_session *session,
985 perf_event__handler_t process)
986 {
987 struct perf_evsel *attr;
988 int err = 0;
989
990 list_for_each_entry(attr, &session->evlist->entries, node) {
991 err = perf_event__synthesize_attr(&attr->attr, attr->ids,
992 attr->id, process, session);
993 if (err) {
994 pr_debug("failed to create perf header attribute\n");
995 return err;
996 }
997 }
998
999 return err;
1000 }
1001
1002 int perf_event__process_attr(union perf_event *event,
1003 struct perf_session *session)
1004 {
1005 unsigned int i, ids, n_ids;
1006 struct perf_evsel *evsel;
1007
1008 if (session->evlist == NULL) {
1009 session->evlist = perf_evlist__new(NULL, NULL);
1010 if (session->evlist == NULL)
1011 return -ENOMEM;
1012 }
1013
1014 evsel = perf_evsel__new(&event->attr.attr,
1015 session->evlist->nr_entries);
1016 if (evsel == NULL)
1017 return -ENOMEM;
1018
1019 perf_evlist__add(session->evlist, evsel);
1020
1021 ids = event->header.size;
1022 ids -= (void *)&event->attr.id - (void *)event;
1023 n_ids = ids / sizeof(u64);
1024 /*
1025 * We don't have the cpu and thread maps on the header, so
1026 * for allocating the perf_sample_id table we fake 1 cpu and
1027 * hattr->ids threads.
1028 */
1029 if (perf_evsel__alloc_id(evsel, 1, n_ids))
1030 return -ENOMEM;
1031
1032 for (i = 0; i < n_ids; i++) {
1033 perf_evlist__id_add(session->evlist, evsel, 0, i,
1034 event->attr.id[i]);
1035 }
1036
1037 perf_session__update_sample_type(session);
1038
1039 return 0;
1040 }
1041
1042 int perf_event__synthesize_event_type(u64 event_id, char *name,
1043 perf_event__handler_t process,
1044 struct perf_session *session)
1045 {
1046 union perf_event ev;
1047 size_t size = 0;
1048 int err = 0;
1049
1050 memset(&ev, 0, sizeof(ev));
1051
1052 ev.event_type.event_type.event_id = event_id;
1053 memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME);
1054 strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1);
1055
1056 ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE;
1057 size = strlen(name);
1058 size = ALIGN(size, sizeof(u64));
1059 ev.event_type.header.size = sizeof(ev.event_type) -
1060 (sizeof(ev.event_type.event_type.name) - size);
1061
1062 err = process(&ev, NULL, session);
1063
1064 return err;
1065 }
1066
1067 int perf_event__synthesize_event_types(perf_event__handler_t process,
1068 struct perf_session *session)
1069 {
1070 struct perf_trace_event_type *type;
1071 int i, err = 0;
1072
1073 for (i = 0; i < event_count; i++) {
1074 type = &events[i];
1075
1076 err = perf_event__synthesize_event_type(type->event_id,
1077 type->name, process,
1078 session);
1079 if (err) {
1080 pr_debug("failed to create perf header event type\n");
1081 return err;
1082 }
1083 }
1084
1085 return err;
1086 }
1087
1088 int perf_event__process_event_type(union perf_event *event,
1089 struct perf_session *session __unused)
1090 {
1091 if (perf_header__push_event(event->event_type.event_type.event_id,
1092 event->event_type.event_type.name) < 0)
1093 return -ENOMEM;
1094
1095 return 0;
1096 }
1097
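/*
 * Emit a PERF_RECORD_HEADER_TRACING_DATA event followed by the tracing data
 * itself, padded to a u64 boundary.  Returns the padded size so the caller
 * can account for the bytes written behind the event.
 */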
1098 int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist,
1099 perf_event__handler_t process,
1100 struct perf_session *session __unused)
1101 {
1102 union perf_event ev;
1103 ssize_t size = 0, aligned_size = 0, padding;
1104 int err __used = 0;
1105
1106 memset(&ev, 0, sizeof(ev));
1107
1108 ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
1109 size = read_tracing_data_size(fd, &evlist->entries);
1110 if (size <= 0)
1111 return size;
1112 aligned_size = ALIGN(size, sizeof(u64));
1113 padding = aligned_size - size;
1114 ev.tracing_data.header.size = sizeof(ev.tracing_data);
1115 ev.tracing_data.size = aligned_size;
1116
1117 process(&ev, NULL, session);
1118
1119 err = read_tracing_data(fd, &evlist->entries);
1120 write_padded(fd, NULL, 0, padding);
1121
1122 return aligned_size;
1123 }
1124
1125 int perf_event__process_tracing_data(union perf_event *event,
1126 struct perf_session *session)
1127 {
1128 ssize_t size_read, padding, size = event->tracing_data.size;
1129 off_t offset = lseek(session->fd, 0, SEEK_CUR);
1130 char buf[BUFSIZ];
1131
1132 /* setup for reading amidst mmap */
1133 lseek(session->fd, offset + sizeof(struct tracing_data_event),
1134 SEEK_SET);
1135
1136 size_read = trace_report(session->fd, session->repipe);
1137
1138 padding = ALIGN(size_read, sizeof(u64)) - size_read;
1139
1140 if (read(session->fd, buf, padding) < 0)
1141 die("reading input file");
1142 if (session->repipe) {
1143 int retw = write(STDOUT_FILENO, buf, padding);
1144 if (retw != padding)
1145 die("repiping tracing data padding");
1146 }
1147
1148 if (size_read + padding != size)
1149 die("tracing data size mismatch");
1150
1151 return size_read + padding;
1152 }
1153
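/*
 * Emit a PERF_RECORD_HEADER_BUILD_ID event for a DSO that was hit,
 * mirroring one row of the HEADER_BUILD_ID feature section: the build-id,
 * the machine pid and the NAME_ALIGN-padded long name.
 */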
1154 int perf_event__synthesize_build_id(struct dso *pos, u16 misc,
1155 perf_event__handler_t process,
1156 struct machine *machine,
1157 struct perf_session *session)
1158 {
1159 union perf_event ev;
1160 size_t len;
1161 int err = 0;
1162
1163 if (!pos->hit)
1164 return err;
1165
1166 memset(&ev, 0, sizeof(ev));
1167
1168 len = pos->long_name_len + 1;
1169 len = ALIGN(len, NAME_ALIGN);
1170 memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
1171 ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
1172 ev.build_id.header.misc = misc;
1173 ev.build_id.pid = machine->pid;
1174 ev.build_id.header.size = sizeof(ev.build_id) + len;
1175 memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
1176
1177 err = process(&ev, NULL, session);
1178
1179 return err;
1180 }
1181
1182 int perf_event__process_build_id(union perf_event *event,
1183 struct perf_session *session)
1184 {
1185 __event_process_build_id(&event->build_id,
1186 event->build_id.filename,
1187 session);
1188 return 0;
1189 }
1190
1191 void disable_buildid_cache(void)
1192 {
1193 no_buildid_cache = true;
1194 }