// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <traceevent/event-parse.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "arch/common.h"

#ifdef HAVE_ZSTD_SUPPORT
static int perf_session__process_compressed_event(struct perf_session *session,
						  union perf_event *event, u64 file_offset)
{
	void *src;
	size_t decomp_size, src_size;
	u64 decomp_last_rem = 0;
	size_t decomp_len = session->header.env.comp_mmap_len;
	struct decomp *decomp, *decomp_last = session->decomp_last;

	decomp = mmap(NULL, sizeof(struct decomp) + decomp_len, PROT_READ|PROT_WRITE,
		      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	if (decomp == MAP_FAILED) {
		pr_err("Couldn't allocate memory for decompression\n");
		return -1;
	}

	decomp->file_pos = file_offset;
	decomp->head = 0;

	if (decomp_last) {
		decomp_last_rem = decomp_last->size - decomp_last->head;
		memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
		decomp->size = decomp_last_rem;
	}

	src = (void *)event + sizeof(struct compressed_event);
	src_size = event->pack.header.size - sizeof(struct compressed_event);

	decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
				&(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
	if (!decomp_size) {
		munmap(decomp, sizeof(struct decomp) + decomp_len);
		pr_err("Couldn't decompress data\n");
		return -1;
	}

	decomp->size += decomp_size;

	if (session->decomp == NULL) {
		session->decomp = decomp;
		session->decomp_last = decomp;
	} else {
		session->decomp_last->next = decomp;
		session->decomp_last = decomp;
	}

	pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);

	return 0;
}
#else /* !HAVE_ZSTD_SUPPORT */
#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
#endif
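
/*
 * Illustrative sketch (not compiled): how a chained decomp buffer built
 * above is typically drained.  Each block holds a contiguous run of
 * perf_event records; 'head' tracks how far delivery has advanced, and
 * the tail of a partially consumed record is carried into the next block
 * by the memcpy() above.  The helper name is hypothetical; the real
 * consumer is __perf_session__process_decomp_events().
 */
#if 0
static void example__drain_decomp(struct perf_session *session,
				  struct decomp *decomp)
{
	while (decomp->head + sizeof(struct perf_event_header) <= decomp->size) {
		union perf_event *event = (void *)&decomp->data[decomp->head];

		if (decomp->head + event->header.size > decomp->size)
			break;	/* incomplete record: wait for the next block */
		perf_session__deliver_event(session, event, session->tool,
					    decomp->file_pos);
		decomp->head += event->header.size;
	}
}
#endif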

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset);

static int perf_session__open(struct perf_session *session)
{
	struct perf_data *data = session->data;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		return -1;
	}

	if (perf_data__is_pipe(data))
		return 0;

	if (perf_header__has_feat(&session->header, HEADER_STAT))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type\n");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all\n");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format\n");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->attr.comm_exec)
			return true;
	}

	return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct perf_session *session = container_of(oe, struct perf_session,
						    ordered_events);

	return perf_session__deliver_event(session, event->event,
					   session->tool, event->file_offset);
}

struct perf_session *perf_session__new(struct perf_data *data,
				       bool repipe, struct perf_tool *tool)
{
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	session->tool = tool;
	INIT_LIST_HEAD(&session->auxtrace_index);
	machines__init(&session->machines);
	ordered_events__init(&session->ordered_events,
			     ordered_events__deliver_event, NULL);

	perf_env__init(&session->header.env);
	if (data) {
		if (perf_data__open(data))
			goto out_delete;

		session->data = data;

		if (perf_data__is_read(data)) {
			if (perf_session__open(session) < 0)
				goto out_delete;

			/*
			 * Set session attributes that are present in perf.data
			 * but not in pipe mode.
			 */
			if (!data->is_pipe) {
				perf_session__set_id_hdr_size(session);
				perf_session__set_comm_exec(session);
			}

			perf_evlist__init_trace_event_sample_raw(session->evlist);

			/* Open the directory data. */
			if (data->is_dir && perf_data__open_dir(data))
				goto out_delete;
		}
	} else {
		session->machines.host.env = &perf_env;
	}

	session->machines.host.single_address_space =
		perf_env__single_address_space(session->machines.host.env);

	if (!data || perf_data__is_write(data)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

	/*
	 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
	 * processed, so perf_evlist__sample_id_all is not meaningful here.
	 */
	if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_delete:
	perf_session__delete(session);
 out:
	return NULL;
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

static void perf_session__release_decomp_events(struct perf_session *session)
{
	struct decomp *next, *decomp;
	size_t decomp_len;
	next = session->decomp;
	decomp_len = session->header.env.comp_mmap_len;
	do {
		decomp = next;
		if (decomp == NULL)
			break;
		next = decomp->next;
		munmap(decomp, decomp_len + sizeof(struct decomp));
	} while (1);
}

void perf_session__delete(struct perf_session *session)
{
	if (session == NULL)
		return;
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_threads(session);
	perf_session__release_decomp_events(session);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	if (session->data)
		perf_data__close(session->data);
	free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_session *session
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct perf_evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct perf_evlist **pevlist
						 __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_event_update(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct perf_evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct ordered_events *oe __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct ordered_events *oe);

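/* Read and discard n bytes from fd: a pipe cannot lseek() past an unhandled payload. */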
static int skipn(int fd, off_t n)
{
	char buf[4096];
	ssize_t ret;

	while (n > 0) {
		ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
		if (ret <= 0)
			return ret;
		n -= ret;
	}

	return 0;
}

static s64 process_event_auxtrace_stub(struct perf_session *session,
				       union perf_event *event)
{
	dump_printf(": unhandled!\n");
	if (perf_data__is_pipe(session->data))
		skipn(perf_data__fd(session->data), event->auxtrace.size);
	return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_thread_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
			       union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_cpu_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
				   union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_stat_config(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
			     union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
				   union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat_round(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
						       union perf_event *event __maybe_unused,
						       u64 file_offset __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->namespaces == NULL)
		tool->namespaces = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->lost_samples == NULL)
		tool->lost_samples = perf_event__process_lost_samples;
	if (tool->aux == NULL)
		tool->aux = perf_event__process_aux;
	if (tool->itrace_start == NULL)
		tool->itrace_start = perf_event__process_itrace_start;
	if (tool->context_switch == NULL)
		tool->context_switch = perf_event__process_switch;
	if (tool->ksymbol == NULL)
		tool->ksymbol = perf_event__process_ksymbol;
	if (tool->bpf_event == NULL)
		tool->bpf_event = perf_event__process_bpf_event;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_update == NULL)
		tool->event_update = process_event_synth_event_update_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_event_op2_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
	if (tool->id_index == NULL)
		tool->id_index = process_event_op2_stub;
	if (tool->auxtrace_info == NULL)
		tool->auxtrace_info = process_event_op2_stub;
	if (tool->auxtrace == NULL)
		tool->auxtrace = process_event_auxtrace_stub;
	if (tool->auxtrace_error == NULL)
		tool->auxtrace_error = process_event_op2_stub;
	if (tool->thread_map == NULL)
		tool->thread_map = process_event_thread_map_stub;
	if (tool->cpu_map == NULL)
		tool->cpu_map = process_event_cpu_map_stub;
	if (tool->stat_config == NULL)
		tool->stat_config = process_event_stat_config_stub;
	if (tool->stat == NULL)
		tool->stat = process_stat_stub;
	if (tool->stat_round == NULL)
		tool->stat_round = process_stat_round_stub;
	if (tool->time_conv == NULL)
		tool->time_conv = process_event_op2_stub;
	if (tool->feature == NULL)
		tool->feature = process_event_op2_stub;
	if (tool->compressed == NULL)
		tool->compressed = perf_session__process_compressed_event;
}
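
/*
 * Usage sketch (hypothetical consumer, not compiled): a tool fills in
 * only the callbacks it cares about and lets perf_tool__fill_defaults()
 * stub out the rest before any event is processed.
 */
#if 0
static int my_sample(struct perf_tool *tool, union perf_event *event,
		     struct perf_sample *sample, struct perf_evsel *evsel,
		     struct machine *machine)
{
	/* consume one PERF_RECORD_SAMPLE */
	return 0;
}

static struct perf_tool tool = {
	.sample		= my_sample,
	.ordered_events	= true,	/* also selects process_finished_round() */
};

/* ... then call perf_tool__fill_defaults(&tool); before processing ... */
#endif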

static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				   bool sample_id_all)
{
	event->mmap2.pid = bswap_32(event->mmap2.pid);
	event->mmap2.tid = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj = bswap_32(event->mmap2.maj);
	event->mmap2.min = bswap_32(event->mmap2.min);
	event->mmap2.ino = bswap_64(event->mmap2.ino);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
	event->aux.aux_offset = bswap_64(event->aux.aux_offset);
	event->aux.aux_size = bswap_64(event->aux.aux_size);
	event->aux.flags = bswap_64(event->aux.flags);

	if (sample_id_all)
		swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
					  bool sample_id_all)
{
	event->itrace_start.pid = bswap_32(event->itrace_start.pid);
	event->itrace_start.tid = bswap_32(event->itrace_start.tid);

	if (sample_id_all)
		swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
	if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
		event->context_switch.next_prev_pid =
				bswap_32(event->context_switch.next_prev_pid);
		event->context_switch.next_prev_tid =
				bswap_32(event->context_switch.next_prev_tid);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time = bswap_64(event->throttle.time);
	event->throttle.id = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

static void perf_event__namespaces_swap(union perf_event *event,
					bool sample_id_all)
{
	u64 i;

	event->namespaces.pid = bswap_32(event->namespaces.pid);
	event->namespaces.tid = bswap_32(event->namespaces.tid);
	event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);

	for (i = 0; i < event->namespaces.nr_namespaces; i++) {
		struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

		ns->dev = bswap_64(ns->dev);
		ns->ino = bswap_64(ns->ino);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->namespaces.link_info[i]);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through endian village. ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix: carry the perf_event_attr
 * bitfield flags in a separate FEAT_ section of the data file. Though
 * this seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}
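
/*
 * Worked example: revbyte() mirrors the bit order within one byte, so
 * bit 0 swaps with bit 7, bit 1 with bit 6, and so on.  For instance
 * revbyte(0x05) == 0xa0 (0b00000101 -> 0b10100000) and revbyte(0xa0)
 * == 0x05; the function is its own inverse, which is why a single
 * per-byte pass in swap_bitfield() is enough to undo the layout
 * difference described above.
 */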

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);

#define bswap_safe(f, n)					\
	(attr->size > (offsetof(struct perf_event_attr, f) +	\
		       sizeof(attr->f) * (n)))
#define bswap_field(f, sz)			\
do {						\
	if (bswap_safe(f, 0))			\
		attr->f = bswap_##sz(attr->f);	\
} while(0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

	bswap_field_64(config);
	bswap_field_64(sample_period);
	bswap_field_64(sample_type);
	bswap_field_64(read_format);
	bswap_field_32(wakeup_events);
	bswap_field_32(bp_type);
	bswap_field_64(bp_addr);
	bswap_field_64(bp_len);
	bswap_field_64(branch_sample_type);
	bswap_field_64(sample_regs_user);
	bswap_field_32(sample_stack_user);
	bswap_field_32(aux_watermark);
	bswap_field_16(sample_max_stack);

	/*
	 * After read_format are bitfields. Check read_format because
	 * we are unable to use offsetof on a bitfield.
	 */
	if (bswap_safe(read_format, 1))
		swap_bitfield((u8 *) (&attr->read_format + 1),
			      sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}
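
/*
 * Example of the bswap_safe() guard above: perf_event_attr has grown
 * over time, so a perf.data file written by an older perf may carry
 * e.g. attr->size == PERF_ATTR_SIZE_VER0 (64 bytes).  For a field such
 * as sample_max_stack that lies beyond the recorded size, bswap_safe()
 * evaluates to false and bswap_field_16() leaves it alone rather than
 * byte-swapping storage that belongs to the next record.
 */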

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->event_update.type = bswap_64(event->event_update.type);
	event->event_update.id = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
					   bool sample_id_all __maybe_unused)
{
	size_t size;

	event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

	size = event->header.size;
	size -= (void *)&event->auxtrace_info.priv - (void *)event;
	mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	event->auxtrace.size = bswap_64(event->auxtrace.size);
	event->auxtrace.offset = bswap_64(event->auxtrace.offset);
	event->auxtrace.reference = bswap_64(event->auxtrace.reference);
	event->auxtrace.idx = bswap_32(event->auxtrace.idx);
	event->auxtrace.tid = bswap_32(event->auxtrace.tid);
	event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
					    bool sample_id_all __maybe_unused)
{
	event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
	event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
	event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
	event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
	event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
	event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt);
	event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
	if (event->auxtrace_error.fmt)
		event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
}

static void perf_event__thread_map_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	unsigned i;

	event->thread_map.nr = bswap_64(event->thread_map.nr);

	for (i = 0; i < event->thread_map.nr; i++)
		event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
				     bool sample_id_all __maybe_unused)
{
	struct cpu_map_data *data = &event->cpu_map.data;
	struct cpu_map_entries *cpus;
	struct cpu_map_mask *mask;
	unsigned i;

	data->type = bswap_64(data->type);

	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		cpus = (struct cpu_map_entries *)data->data;

		cpus->nr = bswap_16(cpus->nr);

		for (i = 0; i < cpus->nr; i++)
			cpus->cpu[i] = bswap_16(cpus->cpu[i]);
		break;
	case PERF_CPU_MAP__MASK:
		mask = (struct cpu_map_mask *) data->data;

		mask->nr = bswap_16(mask->nr);
		mask->long_size = bswap_16(mask->long_size);

		switch (mask->long_size) {
		case 4: mem_bswap_32(&mask->mask, mask->nr); break;
		case 8: mem_bswap_64(&mask->mask, mask->nr); break;
		default:
			pr_err("cpu_map swap: unsupported long size\n");
		}
		break;
	default:
		break;
	}
}

static void perf_event__stat_config_swap(union perf_event *event,
					 bool sample_id_all __maybe_unused)
{
	u64 size;

	size = event->stat_config.nr * sizeof(event->stat_config.data[0]);
	size += 1; /* nr item itself */
	mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
				  bool sample_id_all __maybe_unused)
{
	event->stat.id = bswap_64(event->stat.id);
	event->stat.thread = bswap_32(event->stat.thread);
	event->stat.cpu = bswap_32(event->stat.cpu);
	event->stat.val = bswap_64(event->stat.val);
	event->stat.ena = bswap_64(event->stat.ena);
	event->stat.run = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->stat_round.type = bswap_64(event->stat_round.type);
	event->stat_round.time = bswap_64(event->stat_round.time);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP] = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2] = perf_event__mmap2_swap,
	[PERF_RECORD_COMM] = perf_event__comm_swap,
	[PERF_RECORD_FORK] = perf_event__task_swap,
	[PERF_RECORD_EXIT] = perf_event__task_swap,
	[PERF_RECORD_LOST] = perf_event__all64_swap,
	[PERF_RECORD_READ] = perf_event__read_swap,
	[PERF_RECORD_THROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE] = perf_event__all64_swap,
	[PERF_RECORD_AUX] = perf_event__aux_swap,
	[PERF_RECORD_ITRACE_START] = perf_event__itrace_start_swap,
	[PERF_RECORD_LOST_SAMPLES] = perf_event__all64_swap,
	[PERF_RECORD_SWITCH] = perf_event__switch_swap,
	[PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap,
	[PERF_RECORD_NAMESPACES] = perf_event__namespaces_swap,
	[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID] = NULL,
	[PERF_RECORD_ID_INDEX] = perf_event__all64_swap,
	[PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap,
	[PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap,
	[PERF_RECORD_AUXTRACE_ERROR] = perf_event__auxtrace_error_swap,
	[PERF_RECORD_THREAD_MAP] = perf_event__thread_map_swap,
	[PERF_RECORD_CPU_MAP] = perf_event__cpu_map_swap,
	[PERF_RECORD_STAT_CONFIG] = perf_event__stat_config_swap,
	[PERF_RECORD_STAT] = perf_event__stat_swap,
	[PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap,
	[PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap,
	[PERF_RECORD_TIME_CONV] = perf_event__all64_swap,
	[PERF_RECORD_HEADER_MAX] = NULL,
};

/*
 * When perf record finishes a pass over every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct ordered_events *oe)
{
	if (dump_trace)
		fprintf(stdout, "\n");
	return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
			      u64 timestamp, u64 file_offset)
{
	return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
	struct ip_callchain *callchain = sample->callchain;
	struct branch_stack *lbr_stack = sample->branch_stack;
	u64 kernel_callchain_nr = callchain->nr;
	unsigned int i;

	for (i = 0; i < kernel_callchain_nr; i++) {
		if (callchain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
		u64 total_nr;
		/*
		 * LBR callstack can only get user call chain,
		 * i is kernel call chain number,
		 * 1 is PERF_CONTEXT_USER.
		 *
		 * The user call chain is stored in LBR registers.
		 * LBR are pair registers. The caller is stored
		 * in "from" register, while the callee is stored
		 * in "to" register.
		 * For example, there is a call stack
		 * "A"->"B"->"C"->"D".
		 * The LBR registers will record it like
1009 * "C"->"D", "B"->"C", "A"->"B".
1010 * So only the first "to" register and all "from"
1011 * registers are needed to construct the whole stack.
1012 */
1013 total_nr = i + 1 + lbr_stack->nr + 1;
1014 kernel_callchain_nr = i + 1;
1015
1016 printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);
1017
1018 for (i = 0; i < kernel_callchain_nr; i++)
1019 printf("..... %2d: %016" PRIx64 "\n",
1020 i, callchain->ips[i]);
1021
1022 printf("..... %2d: %016" PRIx64 "\n",
1023 (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
1024 for (i = 0; i < lbr_stack->nr; i++)
1025 printf("..... %2d: %016" PRIx64 "\n",
1026 (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
1027 }
1028 }
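
/*
 * Sketch (not compiled) of the reconstruction described in the comment
 * above: for a user call stack A->B->C->D recorded by the LBR as
 * "C"->"D", "B"->"C", "A"->"B", the innermost user frame is
 * entries[0].to ("D") and each remaining frame is entries[i - 1].from
 * walked outwards ("C", "B", "A").  The helper name is hypothetical.
 */
#if 0
static u64 example__lbr_callstack_ip(struct branch_stack *lbr_stack, u64 idx)
{
	return idx == 0 ? lbr_stack->entries[0].to
			: lbr_stack->entries[idx - 1].from;
}
#endif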

static void callchain__printf(struct perf_evsel *evsel,
			      struct perf_sample *sample)
{
	unsigned int i;
	struct ip_callchain *callchain = sample->callchain;

	if (perf_evsel__has_branch_callstack(evsel))
		callchain__lbr_callstack_printf(sample);

	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

	for (i = 0; i < callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++) {
		struct branch_entry *e = &sample->branch_stack->entries[i];

		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
			i, e->from, e->to,
			(unsigned short)e->flags.cycles,
			e->flags.mispred ? "M" : " ",
			e->flags.predicted ? "P" : " ",
			e->flags.abort ? "A" : " ",
			e->flags.in_tx ? "T" : " ",
			(unsigned)e->flags.reserved);
	}
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static const char *regs_abi[] = {
	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
		return "unknown";

	return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
	u64 mask = regs->mask;

	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
	       type,
	       mask,
	       regs_dump_abi(regs));

	regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs)
		regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
	struct regs_dump *intr_regs = &sample->intr_regs;

	if (intr_regs->regs)
		regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_evlist__print_tstamp(struct perf_evlist *evlist,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if (sample_type & PERF_SAMPLE_CPU)
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
		       sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct perf_evlist *evlist, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);
	if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
		evlist->trace_event_sample_raw(evlist, event, sample);

	if (sample)
		perf_evlist__print_tstamp(evlist, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->attr.sample_type;

	if (evsel__has_callchain(evsel))
		callchain__printf(evsel, sample);

	if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel))
		branch_stack__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		regs_intr__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
		printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->attr.read_format);
}

static void dump_read(struct perf_evsel *evsel, union perf_event *event)
{
	struct read_event *read_event = &event->read;
	u64 read_format;

	if (!dump_trace)
		return;

	printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid,
	       evsel ? perf_evsel__name(evsel) : "FAIL",
	       event->read.value);

	if (!evsel)
		return;

	read_format = evsel->attr.read_format;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("... time enabled : %" PRIu64 "\n", read_event->time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("... time running : %" PRIu64 "\n", read_event->time_running);

	if (read_format & PERF_FORMAT_ID)
		printf("... id : %" PRIu64 "\n", read_event->id);
}

static struct machine *machines__find_for_cpumode(struct machines *machines,
						  union perf_event *event,
						  struct perf_sample *sample)
{
	struct machine *machine;

	if (perf_guest &&
	    ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = machines__find(machines, pid);
		if (!machine)
			machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &machines->host;
}

static int deliver_sample_value(struct perf_evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);

	if (sid) {
		sample->id = v->id;
		sample->period = v->value - sid->period;
		sid->period = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++evlist->stats.nr_unknown_id;
		return 0;
	}

	/*
	 * There's no reason to deliver a sample
	 * for a zero period, bail out.
	 */
	if (!sample->period)
		return 0;

	return tool->sample(tool, event, sample, sid->evsel, machine);
}
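
/*
 * Worked example for deliver_sample_value() above: with PERF_SAMPLE_READ
 * the kernel reports running totals per id, so if the previous sample
 * for this id carried value 1500 and the current one carries 2300, the
 * period attributed to this sample is 2300 - 1500 = 800 and sid->period
 * advances to 2300, ready for the next delta.
 */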

static int deliver_sample_group(struct perf_evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(evlist, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}

static int
perf_evlist__deliver_sample(struct perf_evlist *evlist,
			    struct perf_tool *tool,
			    union perf_event *event,
			    struct perf_sample *sample,
			    struct perf_evsel *evsel,
			    struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->attr.sample_type;
	u64 read_format = evsel->attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(evlist, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(evlist, tool, event, sample,
					    &sample->read.one, machine);
}

static int machines__deliver_event(struct machines *machines,
				   struct perf_evlist *evlist,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool, u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(evlist, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(evlist, sample->id);

	machine = machines__find_for_cpumode(machines, event, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		if (evsel == NULL) {
			++evlist->stats.nr_unknown_id;
			return 0;
		}
		dump_sample(evsel, event, sample);
		if (machine == NULL) {
			++evlist->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
			++evlist->stats.nr_proc_map_timeout;
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_NAMESPACES:
		return tool->namespaces(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			evlist->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_LOST_SAMPLES:
		if (tool->lost_samples == perf_event__process_lost_samples)
			evlist->stats.total_lost_samples += event->lost_samples.lost;
		return tool->lost_samples(tool, event, sample, machine);
	case PERF_RECORD_READ:
		dump_read(evsel, event);
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	case PERF_RECORD_AUX:
		if (tool->aux == perf_event__process_aux) {
			if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
				evlist->stats.total_aux_lost += 1;
			if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
				evlist->stats.total_aux_partial += 1;
		}
		return tool->aux(tool, event, sample, machine);
	case PERF_RECORD_ITRACE_START:
		return tool->itrace_start(tool, event, sample, machine);
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		return tool->context_switch(tool, event, sample, machine);
	case PERF_RECORD_KSYMBOL:
		return tool->ksymbol(tool, event, sample, machine);
	case PERF_RECORD_BPF_EVENT:
		return tool->bpf_event(tool, event, sample, machine);
	default:
		++evlist->stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (ret) {
		pr_err("Can't parse sample, err = %d\n", ret);
		return ret;
	}

	ret = auxtrace__process_event(session, event, &sample, tool);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return 0;

	return machines__deliver_event(&session->machines, session->evlist,
				       event, &sample, tool, file_offset);
}

static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
					    u64 file_offset)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct perf_sample sample = { .time = 0, };
	int fd = perf_data__fd(session->data);
	int err;

	if (event->header.type != PERF_RECORD_COMPRESSED ||
	    tool->compressed == perf_session__process_compressed_event_stub)
		dump_event(session->evlist, event, file_offset, &sample);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0) {
			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
		return err;
	case PERF_RECORD_EVENT_UPDATE:
		return tool->event_update(tool, event, &session->evlist);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		/*
		 * Deprecated, but we need to handle it for the sake
		 * of old data files created in pipe mode.
		 */
		return 0;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(session, event);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(session, event);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, oe);
	case PERF_RECORD_ID_INDEX:
		return tool->id_index(session, event);
	case PERF_RECORD_AUXTRACE_INFO:
		return tool->auxtrace_info(session, event);
	case PERF_RECORD_AUXTRACE:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset + event->header.size, SEEK_SET);
		return tool->auxtrace(session, event);
	case PERF_RECORD_AUXTRACE_ERROR:
		perf_session__auxtrace_error_inc(session, event);
		return tool->auxtrace_error(session, event);
	case PERF_RECORD_THREAD_MAP:
		return tool->thread_map(session, event);
	case PERF_RECORD_CPU_MAP:
		return tool->cpu_map(session, event);
	case PERF_RECORD_STAT_CONFIG:
		return tool->stat_config(session, event);
	case PERF_RECORD_STAT:
		return tool->stat(session, event);
	case PERF_RECORD_STAT_ROUND:
		return tool->stat_round(session, event);
	case PERF_RECORD_TIME_CONV:
		session->time_conv = event->time_conv;
		return tool->time_conv(session, event);
	case PERF_RECORD_HEADER_FEATURE:
		return tool->feature(session, event);
	case PERF_RECORD_COMPRESSED:
		err = tool->compressed(session, event, file_offset);
		if (err)
			dump_event(session->evlist, event, file_offset, &sample);
		return err;
	default:
		return -EINVAL;
	}
}

int perf_session__deliver_synth_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, 0);

	return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

int perf_session__peek_event(struct perf_session *session, off_t file_offset,
			     void *buf, size_t buf_sz,
			     union perf_event **event_ptr,
			     struct perf_sample *sample)
{
	union perf_event *event;
	size_t hdr_sz, rest;
	int fd;

	if (session->one_mmap && !session->header.needs_swap) {
		event = file_offset - session->one_mmap_offset +
			session->one_mmap_addr;
		goto out_parse_sample;
	}

	if (perf_data__is_pipe(session->data))
		return -1;

	fd = perf_data__fd(session->data);
	hdr_sz = sizeof(struct perf_event_header);

	if (buf_sz < hdr_sz)
		return -1;

	if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
	    readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
		return -1;

	event = (union perf_event *)buf;

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (event->header.size < hdr_sz || event->header.size > buf_sz)
		return -1;

	buf += hdr_sz;
	rest = event->header.size - hdr_sz;

	if (readn(fd, buf, rest) != (ssize_t)rest)
		return -1;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

out_parse_sample:

	if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
	    perf_evlist__parse_sample(session->evlist, event, sample))
		return -1;

	*event_ptr = event;

	return 0;
}
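
/*
 * Typical use (sketch, not compiled; 'session' and 'file_offset' stand
 * for caller context): consumers such as the auxtrace decoders re-read
 * an already-seen record by file offset without disturbing the main
 * processing stream.  PERF_SAMPLE_MAX_SIZE bounds any on-disk record.
 */
#if 0
	char buf[PERF_SAMPLE_MAX_SIZE];
	union perf_event *event;

	if (perf_session__peek_event(session, file_offset, buf, sizeof(buf),
				     &event, NULL))
		return -1;
#endif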

static s64 perf_session__process_event(struct perf_session *session,
				       union perf_event *event, u64 file_offset)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, file_offset);

	if (tool->ordered_events) {
		u64 timestamp = -1ULL;

		ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
		if (ret && ret != -1)
			return ret;

		ret = perf_session__queue_event(session, event, timestamp, file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session__deliver_event(session, event, tool, file_offset);
}

void perf_event_header__bswap(struct perf_event_header *hdr)
{
	hdr->type = bswap_32(hdr->type);
	hdr->misc = bswap_16(hdr->misc);
	hdr->size = bswap_16(hdr->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, -1, pid);
}

/*
 * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
 * So here a single thread is created for that, but actually there is a separate
 * idle task per cpu, so there should be one 'struct thread' per cpu, but there
 * is only one. That causes problems for some tools, requiring workarounds. For
 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
 */
int perf_session__register_idle_thread(struct perf_session *session)
{
	struct thread *thread;
	int err = 0;

	thread = machine__findnew_thread(&session->machines.host, 0, 0);
	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
		pr_err("problem inserting idle task.\n");
		err = -1;
	}

	if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
		pr_err("problem inserting idle task.\n");
		err = -1;
	}

	/* machine__findnew_thread() got the thread, so put it */
	thread__put(thread);
	return err;
}

static void
perf_session__warn_order(const struct perf_session *session)
{
	const struct ordered_events *oe = &session->ordered_events;
	struct perf_evsel *evsel;
	bool should_warn = true;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->attr.write_backward)
			should_warn = false;
	}

	if (!should_warn)
		return;
	if (oe->nr_unordered_events != 0)
		ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
}

static void perf_session__warn_about_errors(const struct perf_session *session)
{
	const struct events_stats *stats = &session->evlist->stats;

	if (session->tool->lost == perf_event__process_lost &&
	    stats->nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    stats->nr_events[0],
			    stats->nr_events[PERF_RECORD_LOST]);
	}

	if (session->tool->lost_samples == perf_event__process_lost_samples) {
		double drop_rate;

		drop_rate = (double)stats->total_lost_samples /
			    (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
		if (drop_rate > 0.05) {
			ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
				    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
				    drop_rate * 100.0);
		}
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_lost != 0) {
		ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
			    stats->total_aux_lost,
			    stats->nr_events[PERF_RECORD_AUX]);
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_partial != 0) {
		bool vmm_exclusive = false;

		(void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
				       &vmm_exclusive);

		ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
			    "Are you running a KVM guest in the background?%s\n\n",
			    stats->total_aux_partial,
			    stats->nr_events[PERF_RECORD_AUX],
			    vmm_exclusive ?
			    "\nReloading kvm_intel module with vmm_exclusive=0\n"
			    "will reduce the gaps to only guest's timeslices." :
			    "");
	}

	if (stats->nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_unknown_events);
	}

	if (stats->nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    stats->nr_unknown_id);
	}

	if (stats->nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_invalid_chains,
			    stats->nr_events[PERF_RECORD_SAMPLE]);
	}

	if (stats->nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    stats->nr_unprocessable_samples);
	}

	perf_session__warn_order(session);

	events_stats__auxtrace_error_warn(stats);

1793 if (stats->nr_proc_map_timeout != 0) {
1794 ui__warning("%d map information files for pre-existing threads were\n"
1795 "not processed, if there are samples for addresses they\n"
1796 "will not be resolved, you may find out which are these\n"
1797 "threads by running with -v and redirecting the output\n"
1798 "to a file.\n"
1799 "The time limit to process proc map is too short?\n"
1800 "Increase it by --proc-map-timeout\n",
1801 stats->nr_proc_map_timeout);
1802 }
1803 }
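
/*
 * Editor's note: a worked example of the lost-samples check above,
 * with made-up numbers. 9500 processed samples plus 500 lost give
 * drop_rate = 500 / 10000 = 0.05, which is not above the threshold,
 * so nothing is printed; 9000 plus 1000 lost give 0.10 and produce
 * "Processed 10000 samples and lost 10.00%!".
 */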
1804
1805 static int perf_session__flush_thread_stack(struct thread *thread,
1806 void *p __maybe_unused)
1807 {
1808 return thread_stack__flush(thread);
1809 }
1810
1811 static int perf_session__flush_thread_stacks(struct perf_session *session)
1812 {
1813 return machines__for_each_thread(&session->machines,
1814 perf_session__flush_thread_stack,
1815 NULL);
1816 }
1817
1818 volatile int session_done;
1819
1820 static int __perf_session__process_decomp_events(struct perf_session *session);
1821
1822 static int __perf_session__process_pipe_events(struct perf_session *session)
1823 {
1824 struct ordered_events *oe = &session->ordered_events;
1825 struct perf_tool *tool = session->tool;
1826 int fd = perf_data__fd(session->data);
1827 union perf_event *event;
1828 uint32_t size, cur_size = 0;
1829 void *buf = NULL;
1830 s64 skip = 0;
1831 u64 head;
1832 ssize_t err;
1833 void *p;
1834
1835 perf_tool__fill_defaults(tool);
1836
1837 head = 0;
1838 cur_size = sizeof(union perf_event);
1839
1840 buf = malloc(cur_size);
1841 if (!buf)
1842 return -errno;
1843 ordered_events__set_copy_on_queue(oe, true);
1844 more:
1845 event = buf;
1846 err = readn(fd, event, sizeof(struct perf_event_header));
1847 if (err <= 0) {
1848 if (err == 0)
1849 goto done;
1850
1851 pr_err("failed to read event header\n");
1852 goto out_err;
1853 }
1854
1855 if (session->header.needs_swap)
1856 perf_event_header__bswap(&event->header);
1857
1858 size = event->header.size;
1859 if (size < sizeof(struct perf_event_header)) {
1860 pr_err("bad event header size\n");
1861 goto out_err;
1862 }
1863
1864 if (size > cur_size) {
1865 void *new = realloc(buf, size);
1866 if (!new) {
1867 pr_err("failed to allocate memory to read event\n");
1868 goto out_err;
1869 }
1870 buf = new;
1871 cur_size = size;
1872 event = buf;
1873 }
1874 p = event;
1875 p += sizeof(struct perf_event_header);
1876
1877 if (size - sizeof(struct perf_event_header)) {
1878 err = readn(fd, p, size - sizeof(struct perf_event_header));
1879 if (err <= 0) {
1880 if (err == 0) {
1881 pr_err("unexpected end of event stream\n");
1882 goto done;
1883 }
1884
1885 pr_err("failed to read event data\n");
1886 goto out_err;
1887 }
1888 }
1889
1890 if ((skip = perf_session__process_event(session, event, head)) < 0) {
1891 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1892 head, event->header.size, event->header.type);
1893 err = -EINVAL;
1894 goto out_err;
1895 }
1896
1897 head += size;
1898
1899 if (skip > 0)
1900 head += skip;
1901
1902 err = __perf_session__process_decomp_events(session);
1903 if (err)
1904 goto out_err;
1905
1906 if (!session_done())
1907 goto more;
1908 done:
1909 /* do the final flush for ordered samples */
1910 err = ordered_events__flush(oe, OE_FLUSH__FINAL);
1911 if (err)
1912 goto out_err;
1913 err = auxtrace__flush_events(session, tool);
1914 if (err)
1915 goto out_err;
1916 err = perf_session__flush_thread_stacks(session);
1917 out_err:
1918 free(buf);
1919 if (!tool->no_warn)
1920 perf_session__warn_about_errors(session);
1921 ordered_events__free(&session->ordered_events);
1922 auxtrace__free_events(session);
1923 return err;
1924 }
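
/*
 * Editor's sketch (not part of the original file): the read loop
 * above follows the usual "header first, then grow" pattern for
 * length-prefixed records on a non-seekable stream, roughly:
 *
 *	struct perf_event_header hdr;
 *
 *	readn(fd, &hdr, sizeof(hdr));         // fixed-size prefix
 *	if (hdr.size > cur_size)
 *		buf = realloc(buf, hdr.size);  // grow to record size
 *	readn(fd, buf + sizeof(hdr), hdr.size - sizeof(hdr));
 *
 * Byte swapping and error handling are omitted in this sketch; the
 * real loop also checks session_done() between records.
 */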
1925
1926 static union perf_event *
1927 fetch_mmaped_event(struct perf_session *session,
1928 u64 head, size_t mmap_size, char *buf)
1929 {
1930 union perf_event *event;
1931
1932 /*
1933 * Ensure enough of the mapped buffer remains to read the
1934 * event header, which holds the event's total size.
1935 */
1936 if (head + sizeof(event->header) > mmap_size)
1937 return NULL;
1938
1939 event = (union perf_event *)(buf + head);
1940
1941 if (session->header.needs_swap)
1942 perf_event_header__bswap(&event->header);
1943
1944 if (head + event->header.size > mmap_size) {
1945 /* We're not fetching the event so swap back again */
1946 if (session->header.needs_swap)
1947 perf_event_header__bswap(&event->header);
1948 return NULL;
1949 }
1950
1951 return event;
1952 }
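
/*
 * Editor's note: on a cross-endian file the header is byte-swapped
 * in place, which is why reader__process_events() below maps the
 * buffer PROT_WRITE|MAP_PRIVATE when needs_swap is set. If the full
 * event then turns out not to fit in the window, the swap is undone
 * so the retry after remapping sees the header in its on-disk byte
 * order and does not swap it twice.
 */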
1953
1954 static int __perf_session__process_decomp_events(struct perf_session *session)
1955 {
1956 s64 skip;
1957 u64 size, file_pos = 0;
1958 struct decomp *decomp = session->decomp_last;
1959
1960 if (!decomp)
1961 return 0;
1962
1963 while (decomp->head < decomp->size && !session_done()) {
1964 union perf_event *event = fetch_mmaped_event(session, decomp->head, decomp->size, decomp->data);
1965
1966 if (!event)
1967 break;
1968
1969 size = event->header.size;
1970
1971 if (size < sizeof(struct perf_event_header) ||
1972 (skip = perf_session__process_event(session, event, file_pos)) < 0) {
1973 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1974 decomp->file_pos + decomp->head, event->header.size, event->header.type);
1975 return -EINVAL;
1976 }
1977
1978 if (skip)
1979 size += skip;
1980
1981 decomp->head += size;
1982 }
1983
1984 return 0;
1985 }
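
/*
 * Editor's note: events pulled out of the decompressed buffer have
 * no offset of their own inside perf.data, so they are delivered
 * with file_pos = 0; for diagnostics the error path instead reports
 * decomp->file_pos + decomp->head, i.e. the offset of the compressed
 * record plus the position within its decompressed payload.
 */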
1986
1987 /*
1988 * On 64-bit we can mmap the whole data file in one go; no need for
1989 * small mmap slices. On 32-bit we use 32MB slices.
1990 */
1991 #if BITS_PER_LONG == 64
1992 #define MMAP_SIZE ULLONG_MAX
1993 #define NUM_MMAPS 1
1994 #else
1995 #define MMAP_SIZE (32 * 1024 * 1024ULL)
1996 #define NUM_MMAPS 128
1997 #endif
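
/*
 * Editor's note: on 32-bit this keeps up to NUM_MMAPS * MMAP_SIZE =
 * 128 * 32MB = 4GB of the file mapped at a time, presumably so that
 * events still queued for ordering, which can point into older
 * slices, stay valid. Slots are reused round-robin; the advance in
 * reader__process_events(),
 *
 *	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
 *
 * relies on NUM_MMAPS being a power of two so the mask wraps cleanly
 * (e.g. (127 + 1) & 127 == 0).
 */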
1998
1999 struct reader;
2000
2001 typedef s64 (*reader_cb_t)(struct perf_session *session,
2002 union perf_event *event,
2003 u64 file_offset);
2004
2005 struct reader {
2006 int fd;
2007 u64 data_size;
2008 u64 data_offset;
2009 reader_cb_t process;
2010 };
2011
2012 static int
2013 reader__process_events(struct reader *rd, struct perf_session *session,
2014 struct ui_progress *prog)
2015 {
2016 u64 data_size = rd->data_size;
2017 u64 head, page_offset, file_offset, file_pos, size;
2018 int err = 0, mmap_prot, mmap_flags, map_idx = 0;
2019 size_t mmap_size;
2020 char *buf, *mmaps[NUM_MMAPS];
2021 union perf_event *event;
2022 s64 skip;
2023
2024 page_offset = page_size * (rd->data_offset / page_size);
2025 file_offset = page_offset;
2026 head = rd->data_offset - page_offset;
2027
2028 ui_progress__init_size(prog, data_size, "Processing events...");
2029
2030 data_size += rd->data_offset;
2031
2032 mmap_size = MMAP_SIZE;
2033 if (mmap_size > data_size) {
2034 mmap_size = data_size;
2035 session->one_mmap = true;
2036 }
2037
2038 memset(mmaps, 0, sizeof(mmaps));
2039
2040 mmap_prot = PROT_READ;
2041 mmap_flags = MAP_SHARED;
2042
2043 if (session->header.needs_swap) {
2044 mmap_prot |= PROT_WRITE;
2045 mmap_flags = MAP_PRIVATE;
2046 }
2047 remap:
2048 buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
2049 file_offset);
2050 if (buf == MAP_FAILED) {
2051 pr_err("failed to mmap file\n");
2052 err = -errno;
2053 goto out;
2054 }
2055 mmaps[map_idx] = buf;
2056 map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
2057 file_pos = file_offset + head;
2058 if (session->one_mmap) {
2059 session->one_mmap_addr = buf;
2060 session->one_mmap_offset = file_offset;
2061 }
2062
2063 more:
2064 event = fetch_mmaped_event(session, head, mmap_size, buf);
2065 if (!event) {
2066 if (mmaps[map_idx]) {
2067 munmap(mmaps[map_idx], mmap_size);
2068 mmaps[map_idx] = NULL;
2069 }
2070
2071 page_offset = page_size * (head / page_size);
2072 file_offset += page_offset;
2073 head -= page_offset;
2074 goto remap;
2075 }
2076
2077 size = event->header.size;
2078
2079 skip = -EINVAL;
2080
2081 if (size < sizeof(struct perf_event_header) ||
2082 (skip = rd->process(session, event, file_pos)) < 0) {
2083 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
2084 file_offset + head, event->header.size,
2085 event->header.type, strerror(-skip));
2086 err = skip;
2087 goto out;
2088 }
2089
2090 if (skip)
2091 size += skip;
2092
2093 head += size;
2094 file_pos += size;
2095
2096 err = __perf_session__process_decomp_events(session);
2097 if (err)
2098 goto out;
2099
2100 ui_progress__update(prog, size);
2101
2102 if (session_done())
2103 goto out;
2104
2105 if (file_pos < data_size)
2106 goto more;
2107
2108 out:
2109 return err;
2110 }
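
/*
 * Editor's note: a worked example of the remap arithmetic above,
 * with an assumed page_size of 4096. If fetching fails at
 * head = 33550000, then
 *
 *	page_offset = 4096 * (33550000 / 4096) = 33546240
 *	file_offset += 33546240
 *	head = 33550000 - 33546240 = 3760
 *
 * so the window is remapped page-aligned just before the partial
 * event and parsing resumes at the small in-window offset.
 */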
2111
2112 static s64 process_simple(struct perf_session *session,
2113 union perf_event *event,
2114 u64 file_offset)
2115 {
2116 return perf_session__process_event(session, event, file_offset);
2117 }
2118
2119 static int __perf_session__process_events(struct perf_session *session)
2120 {
2121 struct reader rd = {
2122 .fd = perf_data__fd(session->data),
2123 .data_size = session->header.data_size,
2124 .data_offset = session->header.data_offset,
2125 .process = process_simple,
2126 };
2127 struct ordered_events *oe = &session->ordered_events;
2128 struct perf_tool *tool = session->tool;
2129 struct ui_progress prog;
2130 int err;
2131
2132 perf_tool__fill_defaults(tool);
2133
2134 if (rd.data_size == 0)
2135 return -1;
2136
2137 ui_progress__init_size(&prog, rd.data_size, "Processing events...");
2138
2139 err = reader__process_events(&rd, session, &prog);
2140 if (err)
2141 goto out_err;
2142 /* do the final flush for ordered samples */
2143 err = ordered_events__flush(oe, OE_FLUSH__FINAL);
2144 if (err)
2145 goto out_err;
2146 err = auxtrace__flush_events(session, tool);
2147 if (err)
2148 goto out_err;
2149 err = perf_session__flush_thread_stacks(session);
2150 out_err:
2151 ui_progress__finish();
2152 if (!tool->no_warn)
2153 perf_session__warn_about_errors(session);
2154 /*
2155 * We may be switching perf.data output; make ordered_events
2156 * reusable.
2157 */
2158 ordered_events__reinit(&session->ordered_events);
2159 auxtrace__free_events(session);
2160 session->one_mmap = false;
2161 return err;
2162 }
2163
2164 int perf_session__process_events(struct perf_session *session)
2165 {
2166 if (perf_session__register_idle_thread(session) < 0)
2167 return -ENOMEM;
2168
2169 if (perf_data__is_pipe(session->data))
2170 return __perf_session__process_pipe_events(session);
2171
2172 return __perf_session__process_events(session);
2173 }
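
/*
 * Editor's sketch of a typical caller (hypothetical and condensed;
 * 'tool' is the caller's struct perf_tool, error handling is
 * omitted, and the struct perf_data field names are assumed from
 * this era of the tree):
 *
 *	struct perf_data data = {
 *		.path = "perf.data",
 *		.mode = PERF_DATA_MODE_READ,
 *	};
 *	struct perf_session *s = perf_session__new(&data, false, &tool);
 *
 *	if (s) {
 *		perf_session__process_events(s);
 *		perf_session__delete(s);
 *	}
 *
 * Whether the input is a pipe or a seekable file is transparent to
 * the caller; the split happens in the function above.
 */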
2174
2175 bool perf_session__has_traces(struct perf_session *session, const char *msg)
2176 {
2177 struct perf_evsel *evsel;
2178
2179 evlist__for_each_entry(session->evlist, evsel) {
2180 if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
2181 return true;
2182 }
2183
2184 pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
2185 return false;
2186 }
2187
2188 int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
2189 {
2190 char *bracket;
2191 struct ref_reloc_sym *ref;
2192 struct kmap *kmap;
2193
2194 ref = zalloc(sizeof(struct ref_reloc_sym));
2195 if (ref == NULL)
2196 return -ENOMEM;
2197
2198 ref->name = strdup(symbol_name);
2199 if (ref->name == NULL) {
2200 free(ref);
2201 return -ENOMEM;
2202 }
2203
2204 bracket = strchr(ref->name, ']');
2205 if (bracket)
2206 *bracket = '\0';
2207
2208 ref->addr = addr;
2209
2210 kmap = map__kmap(map);
2211 if (kmap)
2212 kmap->ref_reloc_sym = ref;
2213
2214 return 0;
2215 }
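
/*
 * Editor's note: the reference relocation symbol gives a way to
 * detect a relocated kernel (e.g. under KASLR) by comparing a known
 * symbol's recorded address against its current kallsyms address;
 * "_text" or "_stext" are the usual candidates. The strchr() above
 * truncates the stored copy at the first ']', if one is present.
 */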
2216
2217 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
2218 {
2219 return machines__fprintf_dsos(&session->machines, fp);
2220 }
2221
2222 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
2223 bool (skip)(struct dso *dso, int parm), int parm)
2224 {
2225 return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
2226 }
2227
2228 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
2229 {
2230 size_t ret;
2231 const char *msg = "";
2232
2233 if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
2234 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
2235
2236 ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
2237
2238 ret += events_stats__fprintf(&session->evlist->stats, fp);
2239 return ret;
2240 }
2241
2242 size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
2243 {
2244 /*
2245 * FIXME: Here we have to actually print all the machines in this
2246 * session, not just the host...
2247 */
2248 return machine__fprintf(&session->machines.host, fp);
2249 }
2250
2251 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
2252 unsigned int type)
2253 {
2254 struct perf_evsel *pos;
2255
2256 evlist__for_each_entry(session->evlist, pos) {
2257 if (pos->attr.type == type)
2258 return pos;
2259 }
2260 return NULL;
2261 }
2262
2263 int perf_session__cpu_bitmap(struct perf_session *session,
2264 const char *cpu_list, unsigned long *cpu_bitmap)
2265 {
2266 int i, err = -1;
2267 struct cpu_map *map;
2268
2269 for (i = 0; i < PERF_TYPE_MAX; ++i) {
2270 struct perf_evsel *evsel;
2271
2272 evsel = perf_session__find_first_evtype(session, i);
2273 if (!evsel)
2274 continue;
2275
2276 if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
2277 pr_err("File does not contain CPU events. "
2278 "Remove -C option to proceed.\n");
2279 return -1;
2280 }
2281 }
2282
2283 map = cpu_map__new(cpu_list);
2284 if (map == NULL) {
2285 pr_err("Invalid cpu_list\n");
2286 return -1;
2287 }
2288
2289 for (i = 0; i < map->nr; i++) {
2290 int cpu = map->map[i];
2291
2292 if (cpu >= MAX_NR_CPUS) {
2293 pr_err("Requested CPU %d too large. "
2294 "Consider raising MAX_NR_CPUS\n", cpu);
2295 goto out_delete_map;
2296 }
2297
2298 set_bit(cpu, cpu_bitmap);
2299 }
2300
2301 err = 0;
2302
2303 out_delete_map:
2304 cpu_map__put(map);
2305 return err;
2306 }
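
/*
 * Editor's sketch of -C style filtering built on this helper
 * (hypothetical; 'skip_sample' is a made-up name):
 *
 *	unsigned long cpu_bitmap[BITS_TO_LONGS(MAX_NR_CPUS)] = { 0 };
 *
 *	if (perf_session__cpu_bitmap(session, "0-3,7", cpu_bitmap) < 0)
 *		return -1;
 *	...
 *	if (!test_bit(sample->cpu, cpu_bitmap))
 *		return skip_sample(sample);
 *
 * cpu_map__new() understands list syntax such as "0-3,7"; note the
 * pass above that first verifies every evsel recorded PERF_SAMPLE_CPU.
 */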
2307
2308 void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
2309 bool full)
2310 {
2311 if (session == NULL || fp == NULL)
2312 return;
2313
2314 fprintf(fp, "# ========\n");
2315 perf_header__fprintf_info(session, fp, full);
2316 fprintf(fp, "# ========\n#\n");
2317 }
2318
2319
2320 int __perf_session__set_tracepoints_handlers(struct perf_session *session,
2321 const struct perf_evsel_str_handler *assocs,
2322 size_t nr_assocs)
2323 {
2324 struct perf_evsel *evsel;
2325 size_t i;
2326 int err;
2327
2328 for (i = 0; i < nr_assocs; i++) {
2329 /*
2330 * If the event this handler is for is not present in the
2331 * session, just ignore it.
2332 */
2333 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
2334 if (evsel == NULL)
2335 continue;
2336
2337 err = -EEXIST;
2338 if (evsel->handler != NULL)
2339 goto out;
2340 evsel->handler = assocs[i].handler;
2341 }
2342
2343 err = 0;
2344 out:
2345 return err;
2346 }
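
/*
 * Editor's sketch (hypothetical, following the pattern used by the
 * perf builtins; the process_sched_* names are made up): associate
 * callbacks with tracepoints up front, then let event processing
 * dispatch through evsel->handler:
 *
 *	static const struct perf_evsel_str_handler handlers[] = {
 *		{ "sched:sched_switch", process_sched_switch },
 *		{ "sched:sched_wakeup", process_sched_wakeup },
 *	};
 *
 *	if (__perf_session__set_tracepoints_handlers(session, handlers,
 *						     ARRAY_SIZE(handlers)))
 *		return -1;
 *
 * Note the -EEXIST above: wiring a second handler to the same evsel
 * is treated as a programming error rather than silently overwritten.
 */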
2347
2348 int perf_event__process_id_index(struct perf_session *session,
2349 union perf_event *event)
2350 {
2351 struct perf_evlist *evlist = session->evlist;
2352 struct id_index_event *ie = &event->id_index;
2353 size_t i, nr, max_nr;
2354
2355 max_nr = (ie->header.size - sizeof(struct id_index_event)) /
2356 sizeof(struct id_index_entry);
2357 nr = ie->nr;
2358 if (nr > max_nr)
2359 return -EINVAL;
2360
2361 if (dump_trace)
2362 fprintf(stdout, " nr: %zu\n", nr);
2363
2364 for (i = 0; i < nr; i++) {
2365 struct id_index_entry *e = &ie->entries[i];
2366 struct perf_sample_id *sid;
2367
2368 if (dump_trace) {
2369 fprintf(stdout, " ... id: %"PRIu64, e->id);
2370 fprintf(stdout, " idx: %"PRIu64, e->idx);
2371 fprintf(stdout, " cpu: %"PRId64, e->cpu);
2372 fprintf(stdout, " tid: %"PRId64"\n", e->tid);
2373 }
2374
2375 sid = perf_evlist__id2sid(evlist, e->id);
2376 if (!sid)
2377 return -ENOENT;
2378 sid->idx = e->idx;
2379 sid->cpu = e->cpu;
2380 sid->tid = e->tid;
2381 }
2382 return 0;
2383 }
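
/*
 * Editor's note: the id index maps each sample id to its (idx, cpu,
 * tid) triple so that an id can be resolved to a context even when
 * the samples themselves lack that data; AUX area decoding (e.g.
 * Intel PT) is the main consumer. The synthesized records below are
 * chunked: assuming the usual 8-byte fields, a 16-byte
 * id_index_event header and 32-byte entries give
 * max_nr = (65535 - 16) / 32 = 2047 entries per record, and
 * perf_event__synthesize_id_index() flushes an intermediate record
 * each time that many entries are filled.
 */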
2384
2385 int perf_event__synthesize_id_index(struct perf_tool *tool,
2386 perf_event__handler_t process,
2387 struct perf_evlist *evlist,
2388 struct machine *machine)
2389 {
2390 union perf_event *ev;
2391 struct perf_evsel *evsel;
2392 size_t nr = 0, i = 0, sz, max_nr, n;
2393 int err;
2394
2395 pr_debug2("Synthesizing id index\n");
2396
2397 max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
2398 sizeof(struct id_index_entry);
2399
2400 evlist__for_each_entry(evlist, evsel)
2401 nr += evsel->ids;
2402
2403 n = nr > max_nr ? max_nr : nr;
2404 sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
2405 ev = zalloc(sz);
2406 if (!ev)
2407 return -ENOMEM;
2408
2409 ev->id_index.header.type = PERF_RECORD_ID_INDEX;
2410 ev->id_index.header.size = sz;
2411 ev->id_index.nr = n;
2412
2413 evlist__for_each_entry(evlist, evsel) {
2414 u32 j;
2415
2416 for (j = 0; j < evsel->ids; j++) {
2417 struct id_index_entry *e;
2418 struct perf_sample_id *sid;
2419
2420 if (i >= n) {
2421 err = process(tool, ev, NULL, machine);
2422 if (err)
2423 goto out_err;
2424 nr -= n;
2425 i = 0;
2426 }
2427
2428 e = &ev->id_index.entries[i++];
2429
2430 e->id = evsel->id[j];
2431
2432 sid = perf_evlist__id2sid(evlist, e->id);
2433 if (!sid) {
2434 free(ev);
2435 return -ENOENT;
2436 }
2437
2438 e->idx = sid->idx;
2439 e->cpu = sid->cpu;
2440 e->tid = sid->tid;
2441 }
2442 }
2443
2444 sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
2445 ev->id_index.header.size = sz;
2446 ev->id_index.nr = nr;
2447
2448 err = process(tool, ev, NULL, machine);
2449 out_err:
2450 free(ev);
2451
2452 return err;
2453 }