// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <traceevent/event-parse.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "cpumap.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "arch/common.h"

#ifdef HAVE_ZSTD_SUPPORT
static int perf_session__process_compressed_event(struct perf_session *session,
                                                  union perf_event *event, u64 file_offset)
{
    void *src;
    size_t decomp_size, src_size;
    u64 decomp_last_rem = 0;
    size_t decomp_len = session->header.env.comp_mmap_len;
    struct decomp *decomp, *decomp_last = session->decomp_last;

    decomp = mmap(NULL, sizeof(struct decomp) + decomp_len, PROT_READ|PROT_WRITE,
                  MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
    if (decomp == MAP_FAILED) {
        pr_err("Couldn't allocate memory for decompression\n");
        return -1;
    }

    decomp->file_pos = file_offset;
    decomp->head = 0;

    if (decomp_last) {
        decomp_last_rem = decomp_last->size - decomp_last->head;
        memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
        decomp->size = decomp_last_rem;
    }

    src = (void *)event + sizeof(struct compressed_event);
    src_size = event->pack.header.size - sizeof(struct compressed_event);

    decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
                                         &(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
    if (!decomp_size) {
        munmap(decomp, sizeof(struct decomp) + decomp_len);
        pr_err("Couldn't decompress data\n");
        return -1;
    }

    decomp->size += decomp_size;

    if (session->decomp == NULL) {
        session->decomp = decomp;
        session->decomp_last = decomp;
    } else {
        session->decomp_last->next = decomp;
        session->decomp_last = decomp;
    }

    pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);

    return 0;
}
#else /* !HAVE_ZSTD_SUPPORT */
#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
#endif
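
/*
 * Illustrative sketch, not part of this file's logic: once compressed chunks
 * have been inflated into the session->decomp list above, a consumer drains
 * complete events from each buffer between 'head' and 'size'.  The real
 * driver is __perf_session__process_decomp_events(), declared later in this
 * file; the example_fetch_event() helper below is hypothetical.
 */
#if 0
static void example_walk_decomp_chain(struct perf_session *session)
{
    struct decomp *decomp;

    for (decomp = session->decomp; decomp; decomp = decomp->next) {
        while (decomp->head < decomp->size) {
            /* Hypothetical helper mapping the bytes at 'head' to an event. */
            union perf_event *event = example_fetch_event(decomp->data,
                                                          decomp->head,
                                                          decomp->size);
            if (!event)
                break; /* partial event: the remainder is carried over above */
            perf_session__process_event(session, event, decomp->file_pos);
            decomp->head += event->header.size;
        }
    }
}
#endif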

static int perf_session__deliver_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_tool *tool,
                                       u64 file_offset);

static int perf_session__open(struct perf_session *session)
{
    struct perf_data *data = session->data;

    if (perf_session__read_header(session) < 0) {
        pr_err("incompatible file format (rerun with -v to learn more)\n");
        return -1;
    }

    if (perf_data__is_pipe(data))
        return 0;

    if (perf_header__has_feat(&session->header, HEADER_STAT))
        return 0;

    if (!perf_evlist__valid_sample_type(session->evlist)) {
        pr_err("non matching sample_type\n");
        return -1;
    }

    if (!perf_evlist__valid_sample_id_all(session->evlist)) {
        pr_err("non matching sample_id_all\n");
        return -1;
    }

    if (!perf_evlist__valid_read_format(session->evlist)) {
        pr_err("non matching read_format\n");
        return -1;
    }

    return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
    u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

    machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
    int ret = machine__create_kernel_maps(&session->machines.host);

    if (ret >= 0)
        ret = machines__create_guest_kernel_maps(&session->machines);
    return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
    machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
    struct perf_evsel *evsel;

    evlist__for_each_entry(session->evlist, evsel) {
        if (evsel->attr.comm_exec)
            return true;
    }

    return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
    bool comm_exec = perf_session__has_comm_exec(session);

    machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
                                         struct ordered_event *event)
{
    struct perf_session *session = container_of(oe, struct perf_session,
                                                ordered_events);

    return perf_session__deliver_event(session, event->event,
                                       session->tool, event->file_offset);
}

struct perf_session *perf_session__new(struct perf_data *data,
                                       bool repipe, struct perf_tool *tool)
{
    struct perf_session *session = zalloc(sizeof(*session));

    if (!session)
        goto out;

    session->repipe = repipe;
    session->tool = tool;
    INIT_LIST_HEAD(&session->auxtrace_index);
    machines__init(&session->machines);
    ordered_events__init(&session->ordered_events,
                         ordered_events__deliver_event, NULL);

    perf_env__init(&session->header.env);
    if (data) {
        if (perf_data__open(data))
            goto out_delete;

        session->data = data;

        if (perf_data__is_read(data)) {
            if (perf_session__open(session) < 0)
                goto out_delete;

            /*
             * set session attributes that are present in perf.data
             * but not in pipe-mode.
             */
            if (!data->is_pipe) {
                perf_session__set_id_hdr_size(session);
                perf_session__set_comm_exec(session);
            }

            perf_evlist__init_trace_event_sample_raw(session->evlist);

            /* Open the directory data. */
            if (data->is_dir && perf_data__open_dir(data))
                goto out_delete;
        }
    } else {
        session->machines.host.env = &perf_env;
    }

    session->machines.host.single_address_space =
        perf_env__single_address_space(session->machines.host.env);

    if (!data || perf_data__is_write(data)) {
        /*
         * In O_RDONLY mode this will be performed when reading the
         * kernel MMAP event, in perf_event__process_mmap().
         */
        if (perf_session__create_kernel_maps(session) < 0)
            pr_warning("Cannot read kernel map\n");
    }

    /*
     * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
     * processed, so perf_evlist__sample_id_all is not meaningful here.
     */
    if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
        tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
        dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
        tool->ordered_events = false;
    }

    return session;

out_delete:
    perf_session__delete(session);
out:
    return NULL;
}
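
/*
 * Typical lifecycle for the constructor above, roughly as the perf builtins
 * use it (a hedged sketch: error handling is trimmed and the .path value is
 * only an example):
 *
 *    struct perf_data data = {
 *        .path = "perf.data",
 *        .mode = PERF_DATA_MODE_READ,
 *    };
 *    struct perf_tool tool = { .ordered_events = true };
 *    struct perf_session *session = perf_session__new(&data, false, &tool);
 *
 *    if (session != NULL) {
 *        perf_session__process_events(session);
 *        perf_session__delete(session);
 *    }
 */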

static void perf_session__delete_threads(struct perf_session *session)
{
    machine__delete_threads(&session->machines.host);
}

static void perf_session__release_decomp_events(struct perf_session *session)
{
    struct decomp *next, *decomp;
    size_t decomp_len;
    next = session->decomp;
    decomp_len = session->header.env.comp_mmap_len;
    do {
        decomp = next;
        if (decomp == NULL)
            break;
        next = decomp->next;
        munmap(decomp, decomp_len + sizeof(struct decomp));
    } while (1);
}

void perf_session__delete(struct perf_session *session)
{
    if (session == NULL)
        return;
    auxtrace__free(session);
    auxtrace_index__free(&session->auxtrace_index);
    perf_session__destroy_kernel_maps(session);
    perf_session__delete_threads(session);
    perf_session__release_decomp_events(session);
    perf_env__exit(&session->header.env);
    machines__exit(&session->machines);
    if (session->data)
        perf_data__close(session->data);
    free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_session *session
                                                 __maybe_unused,
                                                 union perf_event *event
                                                 __maybe_unused)
{
    dump_printf(": unhandled!\n");
    return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
                                         union perf_event *event __maybe_unused,
                                         struct perf_evlist **pevlist
                                         __maybe_unused)
{
    dump_printf(": unhandled!\n");
    return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
                                                 union perf_event *event __maybe_unused,
                                                 struct perf_evlist **pevlist
                                                 __maybe_unused)
{
    if (dump_trace)
        perf_event__fprintf_event_update(event, stdout);

    dump_printf(": unhandled!\n");
    return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event __maybe_unused,
                                     struct perf_sample *sample __maybe_unused,
                                     struct perf_evsel *evsel __maybe_unused,
                                     struct machine *machine __maybe_unused)
{
    dump_printf(": unhandled!\n");
    return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
                              union perf_event *event __maybe_unused,
                              struct perf_sample *sample __maybe_unused,
                              struct machine *machine __maybe_unused)
{
    dump_printf(": unhandled!\n");
    return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
                                       union perf_event *event __maybe_unused,
                                       struct ordered_events *oe __maybe_unused)
{
    dump_printf(": unhandled!\n");
    return 0;
}

static int process_finished_round(struct perf_tool *tool,
                                  union perf_event *event,
                                  struct ordered_events *oe);

static int skipn(int fd, off_t n)
{
    char buf[4096];
    ssize_t ret;

    while (n > 0) {
        ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
        if (ret <= 0)
            return ret;
        n -= ret;
    }

    return 0;
}

static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
                                       union perf_event *event)
{
    dump_printf(": unhandled!\n");
    if (perf_data__is_pipe(session->data))
        skipn(perf_data__fd(session->data), event->auxtrace.size);
    return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_session *session __maybe_unused,
                                  union perf_event *event __maybe_unused)
{
    dump_printf(": unhandled!\n");
    return 0;
}

static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
                                  union perf_event *event __maybe_unused)
{
    if (dump_trace)
        perf_event__fprintf_thread_map(event, stdout);

    dump_printf(": unhandled!\n");
    return 0;
}

static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
                               union perf_event *event __maybe_unused)
{
    if (dump_trace)
        perf_event__fprintf_cpu_map(event, stdout);

    dump_printf(": unhandled!\n");
    return 0;
}

static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
                                   union perf_event *event __maybe_unused)
{
    if (dump_trace)
        perf_event__fprintf_stat_config(event, stdout);

    dump_printf(": unhandled!\n");
    return 0;
}

static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
                             union perf_event *event)
{
    if (dump_trace)
        perf_event__fprintf_stat(event, stdout);

    dump_printf(": unhandled!\n");
    return 0;
}

static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
                                   union perf_event *event)
{
    if (dump_trace)
        perf_event__fprintf_stat_round(event, stdout);

    dump_printf(": unhandled!\n");
    return 0;
}

static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
                                                       union perf_event *event __maybe_unused,
                                                       u64 file_offset __maybe_unused)
{
    dump_printf(": unhandled!\n");
    return 0;
}

void perf_tool__fill_defaults(struct perf_tool *tool)
{
    if (tool->sample == NULL)
        tool->sample = process_event_sample_stub;
    if (tool->mmap == NULL)
        tool->mmap = process_event_stub;
    if (tool->mmap2 == NULL)
        tool->mmap2 = process_event_stub;
    if (tool->comm == NULL)
        tool->comm = process_event_stub;
    if (tool->namespaces == NULL)
        tool->namespaces = process_event_stub;
    if (tool->fork == NULL)
        tool->fork = process_event_stub;
    if (tool->exit == NULL)
        tool->exit = process_event_stub;
    if (tool->lost == NULL)
        tool->lost = perf_event__process_lost;
    if (tool->lost_samples == NULL)
        tool->lost_samples = perf_event__process_lost_samples;
    if (tool->aux == NULL)
        tool->aux = perf_event__process_aux;
    if (tool->itrace_start == NULL)
        tool->itrace_start = perf_event__process_itrace_start;
    if (tool->context_switch == NULL)
        tool->context_switch = perf_event__process_switch;
    if (tool->ksymbol == NULL)
        tool->ksymbol = perf_event__process_ksymbol;
    if (tool->bpf_event == NULL)
        tool->bpf_event = perf_event__process_bpf_event;
    if (tool->read == NULL)
        tool->read = process_event_sample_stub;
    if (tool->throttle == NULL)
        tool->throttle = process_event_stub;
    if (tool->unthrottle == NULL)
        tool->unthrottle = process_event_stub;
    if (tool->attr == NULL)
        tool->attr = process_event_synth_attr_stub;
    if (tool->event_update == NULL)
        tool->event_update = process_event_synth_event_update_stub;
    if (tool->tracing_data == NULL)
        tool->tracing_data = process_event_synth_tracing_data_stub;
    if (tool->build_id == NULL)
        tool->build_id = process_event_op2_stub;
    if (tool->finished_round == NULL) {
        if (tool->ordered_events)
            tool->finished_round = process_finished_round;
        else
            tool->finished_round = process_finished_round_stub;
    }
    if (tool->id_index == NULL)
        tool->id_index = process_event_op2_stub;
    if (tool->auxtrace_info == NULL)
        tool->auxtrace_info = process_event_op2_stub;
    if (tool->auxtrace == NULL)
        tool->auxtrace = process_event_auxtrace_stub;
    if (tool->auxtrace_error == NULL)
        tool->auxtrace_error = process_event_op2_stub;
    if (tool->thread_map == NULL)
        tool->thread_map = process_event_thread_map_stub;
    if (tool->cpu_map == NULL)
        tool->cpu_map = process_event_cpu_map_stub;
    if (tool->stat_config == NULL)
        tool->stat_config = process_event_stat_config_stub;
    if (tool->stat == NULL)
        tool->stat = process_stat_stub;
    if (tool->stat_round == NULL)
        tool->stat_round = process_stat_round_stub;
    if (tool->time_conv == NULL)
        tool->time_conv = process_event_op2_stub;
    if (tool->feature == NULL)
        tool->feature = process_event_op2_stub;
    if (tool->compressed == NULL)
        tool->compressed = perf_session__process_compressed_event;
}
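
/*
 * The defaults above mean a consumer only fills in the callbacks it cares
 * about.  A minimal tool could look like this disabled sketch (the callback
 * body and names are illustrative only):
 */
#if 0
static int example_sample(struct perf_tool *tool __maybe_unused,
                          union perf_event *event __maybe_unused,
                          struct perf_sample *sample,
                          struct perf_evsel *evsel __maybe_unused,
                          struct machine *machine __maybe_unused)
{
    printf("ip %#" PRIx64 " pid %d\n", sample->ip, sample->pid);
    return 0;
}

/* In the consumer's setup code: */
struct perf_tool example_tool = {
    .sample         = example_sample,
    .ordered_events = true,
};

/* Every handler left NULL is wired to a stub or default handler: */
perf_tool__fill_defaults(&example_tool);
#endif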

static void swap_sample_id_all(union perf_event *event, void *data)
{
    void *end = (void *) event + event->header.size;
    int size = end - data;

    BUG_ON(size % sizeof(u64));
    mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
                                   bool sample_id_all __maybe_unused)
{
    struct perf_event_header *hdr = &event->header;
    mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
    event->comm.pid = bswap_32(event->comm.pid);
    event->comm.tid = bswap_32(event->comm.tid);

    if (sample_id_all) {
        void *data = &event->comm.comm;

        data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
        swap_sample_id_all(event, data);
    }
}

static void perf_event__mmap_swap(union perf_event *event,
                                  bool sample_id_all)
{
    event->mmap.pid = bswap_32(event->mmap.pid);
    event->mmap.tid = bswap_32(event->mmap.tid);
    event->mmap.start = bswap_64(event->mmap.start);
    event->mmap.len = bswap_64(event->mmap.len);
    event->mmap.pgoff = bswap_64(event->mmap.pgoff);

    if (sample_id_all) {
        void *data = &event->mmap.filename;

        data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
        swap_sample_id_all(event, data);
    }
}

static void perf_event__mmap2_swap(union perf_event *event,
                                   bool sample_id_all)
{
    event->mmap2.pid = bswap_32(event->mmap2.pid);
    event->mmap2.tid = bswap_32(event->mmap2.tid);
    event->mmap2.start = bswap_64(event->mmap2.start);
    event->mmap2.len = bswap_64(event->mmap2.len);
    event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
    event->mmap2.maj = bswap_32(event->mmap2.maj);
    event->mmap2.min = bswap_32(event->mmap2.min);
    event->mmap2.ino = bswap_64(event->mmap2.ino);

    if (sample_id_all) {
        void *data = &event->mmap2.filename;

        data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
        swap_sample_id_all(event, data);
    }
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
    event->fork.pid = bswap_32(event->fork.pid);
    event->fork.tid = bswap_32(event->fork.tid);
    event->fork.ppid = bswap_32(event->fork.ppid);
    event->fork.ptid = bswap_32(event->fork.ptid);
    event->fork.time = bswap_64(event->fork.time);

    if (sample_id_all)
        swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
    event->read.pid = bswap_32(event->read.pid);
    event->read.tid = bswap_32(event->read.tid);
    event->read.value = bswap_64(event->read.value);
    event->read.time_enabled = bswap_64(event->read.time_enabled);
    event->read.time_running = bswap_64(event->read.time_running);
    event->read.id = bswap_64(event->read.id);

    if (sample_id_all)
        swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
    event->aux.aux_offset = bswap_64(event->aux.aux_offset);
    event->aux.aux_size = bswap_64(event->aux.aux_size);
    event->aux.flags = bswap_64(event->aux.flags);

    if (sample_id_all)
        swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
                                          bool sample_id_all)
{
    event->itrace_start.pid = bswap_32(event->itrace_start.pid);
    event->itrace_start.tid = bswap_32(event->itrace_start.tid);

    if (sample_id_all)
        swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
    if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
        event->context_switch.next_prev_pid =
                bswap_32(event->context_switch.next_prev_pid);
        event->context_switch.next_prev_tid =
                bswap_32(event->context_switch.next_prev_tid);
    }

    if (sample_id_all)
        swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
                                      bool sample_id_all)
{
    event->throttle.time = bswap_64(event->throttle.time);
    event->throttle.id = bswap_64(event->throttle.id);
    event->throttle.stream_id = bswap_64(event->throttle.stream_id);

    if (sample_id_all)
        swap_sample_id_all(event, &event->throttle + 1);
}

static void perf_event__namespaces_swap(union perf_event *event,
                                        bool sample_id_all)
{
    u64 i;

    event->namespaces.pid = bswap_32(event->namespaces.pid);
    event->namespaces.tid = bswap_32(event->namespaces.tid);
    event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);

    for (i = 0; i < event->namespaces.nr_namespaces; i++) {
        struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

        ns->dev = bswap_64(ns->dev);
        ns->ino = bswap_64(ns->ino);
    }

    if (sample_id_all)
        swap_sample_id_all(event, &event->namespaces.link_info[i]);
}
static u8 revbyte(u8 b)
{
    int rev = (b >> 4) | ((b & 0xf) << 4);
    rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
    rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
    return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above is byte specific, so we need to reverse each byte of the
 * bitfield. The 'Internet' also says this might be implementation
 * specific; the proper fix would be to carry the perf_event_attr bitfield
 * flags in a separate data file FEAT_ section. Though this seems to work
 * for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
    unsigned i;

    for (i = 0; i < len; i++) {
        *p = revbyte(*p);
        p++;
    }
}
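
/*
 * Worked example of the byte-wise reversal above: a little-endian writer
 * that set the first three attr flag bits stores the byte 0b00000111 (0x07);
 * reversing the bits of that byte yields 0b11100000 (0xe0), which is what a
 * big-endian reader of the same bitfield layout expects:
 *
 *    revbyte(0x07) == 0xe0
 *    revbyte(0xe0) == 0x07    (the transform is its own inverse)
 *
 * swap_bitfield() simply applies this to each byte, so the u64 worth of
 * flag bits after read_format is fixed up with swap_bitfield(p, sizeof(u64)).
 */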

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
    attr->type = bswap_32(attr->type);
    attr->size = bswap_32(attr->size);

#define bswap_safe(f, n)                                            \
    (attr->size > (offsetof(struct perf_event_attr, f) +           \
                   sizeof(attr->f) * (n)))
#define bswap_field(f, sz)                          \
do {                                                \
    if (bswap_safe(f, 0))                           \
        attr->f = bswap_##sz(attr->f);              \
} while(0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

    bswap_field_64(config);
    bswap_field_64(sample_period);
    bswap_field_64(sample_type);
    bswap_field_64(read_format);
    bswap_field_32(wakeup_events);
    bswap_field_32(bp_type);
    bswap_field_64(bp_addr);
    bswap_field_64(bp_len);
    bswap_field_64(branch_sample_type);
    bswap_field_64(sample_regs_user);
    bswap_field_32(sample_stack_user);
    bswap_field_32(aux_watermark);
    bswap_field_16(sample_max_stack);

    /*
     * After read_format are bitfields. Check read_format because
     * we are unable to use offsetof on a bitfield.
     */
    if (bswap_safe(read_format, 1))
        swap_bitfield((u8 *) (&attr->read_format + 1),
                      sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
    size_t size;

    perf_event__attr_swap(&event->attr.attr);

    size = event->header.size;
    size -= (void *)&event->attr.id - (void *)event;
    mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
    event->event_update.type = bswap_64(event->event_update.type);
    event->event_update.id = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
    event->event_type.event_type.event_id =
        bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
    event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
                                           bool sample_id_all __maybe_unused)
{
    size_t size;

    event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

    size = event->header.size;
    size -= (void *)&event->auxtrace_info.priv - (void *)event;
    mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
    event->auxtrace.size = bswap_64(event->auxtrace.size);
    event->auxtrace.offset = bswap_64(event->auxtrace.offset);
    event->auxtrace.reference = bswap_64(event->auxtrace.reference);
    event->auxtrace.idx = bswap_32(event->auxtrace.idx);
    event->auxtrace.tid = bswap_32(event->auxtrace.tid);
    event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
                                            bool sample_id_all __maybe_unused)
{
    event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
    event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
    event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
    event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
    event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
    event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt);
    event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
    if (event->auxtrace_error.fmt)
        event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
}

static void perf_event__thread_map_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
    unsigned i;

    event->thread_map.nr = bswap_64(event->thread_map.nr);

    for (i = 0; i < event->thread_map.nr; i++)
        event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
                                     bool sample_id_all __maybe_unused)
{
    struct cpu_map_data *data = &event->cpu_map.data;
    struct cpu_map_entries *cpus;
    struct cpu_map_mask *mask;
    unsigned i;

    data->type = bswap_64(data->type);

    switch (data->type) {
    case PERF_CPU_MAP__CPUS:
        cpus = (struct cpu_map_entries *)data->data;

        cpus->nr = bswap_16(cpus->nr);

        for (i = 0; i < cpus->nr; i++)
            cpus->cpu[i] = bswap_16(cpus->cpu[i]);
        break;
    case PERF_CPU_MAP__MASK:
        mask = (struct cpu_map_mask *) data->data;

        mask->nr = bswap_16(mask->nr);
        mask->long_size = bswap_16(mask->long_size);

        switch (mask->long_size) {
        case 4: mem_bswap_32(&mask->mask, mask->nr); break;
        case 8: mem_bswap_64(&mask->mask, mask->nr); break;
        default:
            pr_err("cpu_map swap: unsupported long size\n");
        }
    default:
        break;
    }
}

static void perf_event__stat_config_swap(union perf_event *event,
                                         bool sample_id_all __maybe_unused)
{
    u64 size;

    size  = event->stat_config.nr * sizeof(event->stat_config.data[0]);
    size += 1; /* nr item itself */
    mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
                                  bool sample_id_all __maybe_unused)
{
    event->stat.id = bswap_64(event->stat.id);
    event->stat.thread = bswap_32(event->stat.thread);
    event->stat.cpu = bswap_32(event->stat.cpu);
    event->stat.val = bswap_64(event->stat.val);
    event->stat.ena = bswap_64(event->stat.ena);
    event->stat.run = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
    event->stat_round.type = bswap_64(event->stat_round.type);
    event->stat_round.time = bswap_64(event->stat_round.time);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
                                    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
    [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
    [PERF_RECORD_MMAP2]               = perf_event__mmap2_swap,
    [PERF_RECORD_COMM]                = perf_event__comm_swap,
    [PERF_RECORD_FORK]                = perf_event__task_swap,
    [PERF_RECORD_EXIT]                = perf_event__task_swap,
    [PERF_RECORD_LOST]                = perf_event__all64_swap,
    [PERF_RECORD_READ]                = perf_event__read_swap,
    [PERF_RECORD_THROTTLE]            = perf_event__throttle_swap,
    [PERF_RECORD_UNTHROTTLE]          = perf_event__throttle_swap,
    [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
    [PERF_RECORD_AUX]                 = perf_event__aux_swap,
    [PERF_RECORD_ITRACE_START]        = perf_event__itrace_start_swap,
    [PERF_RECORD_LOST_SAMPLES]        = perf_event__all64_swap,
    [PERF_RECORD_SWITCH]              = perf_event__switch_swap,
    [PERF_RECORD_SWITCH_CPU_WIDE]     = perf_event__switch_swap,
    [PERF_RECORD_NAMESPACES]          = perf_event__namespaces_swap,
    [PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
    [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
    [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
    [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
    [PERF_RECORD_ID_INDEX]            = perf_event__all64_swap,
    [PERF_RECORD_AUXTRACE_INFO]       = perf_event__auxtrace_info_swap,
    [PERF_RECORD_AUXTRACE]            = perf_event__auxtrace_swap,
    [PERF_RECORD_AUXTRACE_ERROR]      = perf_event__auxtrace_error_swap,
    [PERF_RECORD_THREAD_MAP]          = perf_event__thread_map_swap,
    [PERF_RECORD_CPU_MAP]             = perf_event__cpu_map_swap,
    [PERF_RECORD_STAT_CONFIG]         = perf_event__stat_config_swap,
    [PERF_RECORD_STAT]                = perf_event__stat_swap,
    [PERF_RECORD_STAT_ROUND]          = perf_event__stat_round_swap,
    [PERF_RECORD_EVENT_UPDATE]        = perf_event__event_update_swap,
    [PERF_RECORD_TIME_CONV]           = perf_event__all64_swap,
    [PERF_RECORD_HEADER_MAX]          = NULL,
};

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 * ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *   cnt1 timestamps   |   cnt2 timestamps
 *         1           |         2
 *         2           |         3
 *         -           |         4  <--- max recorded
 *
 * ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *   cnt1 timestamps   |   cnt2 timestamps
 *         3           |         5
 *         4           |         6
 *         5           |         7 <---- max recorded
 *
 *   Flush all events below timestamp 4
 *
 * ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *   cnt1 timestamps   |   cnt2 timestamps
 *         6           |         8
 *         7           |         9
 *         -           |         10
 *
 *   Flush all events below timestamp 7
 *   etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
                                  union perf_event *event __maybe_unused,
                                  struct ordered_events *oe)
{
    if (dump_trace)
        fprintf(stdout, "\n");
    return ordered_events__flush(oe, OE_FLUSH__ROUND);
}
int perf_session__queue_event(struct perf_session *s, union perf_event *event,
                              u64 timestamp, u64 file_offset)
{
    return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
    struct ip_callchain *callchain = sample->callchain;
    struct branch_stack *lbr_stack = sample->branch_stack;
    u64 kernel_callchain_nr = callchain->nr;
    unsigned int i;

    for (i = 0; i < kernel_callchain_nr; i++) {
        if (callchain->ips[i] == PERF_CONTEXT_USER)
            break;
    }

    if ((i != kernel_callchain_nr) && lbr_stack->nr) {
        u64 total_nr;
        /*
         * The LBR callstack can only capture the user call chain:
         * i is the kernel call chain length, and the extra 1 is for
         * the PERF_CONTEXT_USER marker.
         *
         * The user call chain is stored in LBR registers.
         * LBRs are register pairs: the caller is stored in the
         * "from" register, while the callee is stored in the "to"
         * register.
         * For example, if there is a call stack
         * "A"->"B"->"C"->"D",
         * the LBR registers will be recorded as
         * "C"->"D", "B"->"C", "A"->"B".
         * So only the first "to" register and all "from"
         * registers are needed to construct the whole stack.
         */
        total_nr = i + 1 + lbr_stack->nr + 1;
        kernel_callchain_nr = i + 1;

        printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

        for (i = 0; i < kernel_callchain_nr; i++)
            printf("..... %2d: %016" PRIx64 "\n",
                   i, callchain->ips[i]);

        printf("..... %2d: %016" PRIx64 "\n",
               (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
        for (i = 0; i < lbr_stack->nr; i++)
            printf("..... %2d: %016" PRIx64 "\n",
                   (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
    }
}
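
/*
 * Worked example for the reconstruction above, using the call stack
 * "A"->"B"->"C"->"D" from the comment: the LBR records the pairs
 *
 *    entries[0]: from C, to D    (most recent branch first)
 *    entries[1]: from B, to C
 *    entries[2]: from A, to B
 *
 * so entries[0].to gives the innermost user frame (D), and the .from fields
 * then give C, B, A in caller order, which is exactly the order the loop
 * above prints them in after the kernel part of the chain.
 */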

static void callchain__printf(struct perf_evsel *evsel,
                              struct perf_sample *sample)
{
    unsigned int i;
    struct ip_callchain *callchain = sample->callchain;

    if (perf_evsel__has_branch_callstack(evsel))
        callchain__lbr_callstack_printf(sample);

    printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

    for (i = 0; i < callchain->nr; i++)
        printf("..... %2d: %016" PRIx64 "\n",
               i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
    uint64_t i;

    printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

    for (i = 0; i < sample->branch_stack->nr; i++) {
        struct branch_entry *e = &sample->branch_stack->entries[i];

        printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
               i, e->from, e->to,
               (unsigned short)e->flags.cycles,
               e->flags.mispred ? "M" : " ",
               e->flags.predicted ? "P" : " ",
               e->flags.abort ? "A" : " ",
               e->flags.in_tx ? "T" : " ",
               (unsigned)e->flags.reserved);
    }
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
    unsigned rid, i = 0;

    for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
        u64 val = regs[i++];

        printf(".... %-5s 0x%" PRIx64 "\n",
               perf_reg_name(rid), val);
    }
}

static const char *regs_abi[] = {
    [PERF_SAMPLE_REGS_ABI_NONE] = "none",
    [PERF_SAMPLE_REGS_ABI_32] = "32-bit",
    [PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
    if (d->abi > PERF_SAMPLE_REGS_ABI_64)
        return "unknown";

    return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
    u64 mask = regs->mask;

    printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
           type,
           mask,
           regs_dump_abi(regs));

    regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
    struct regs_dump *user_regs = &sample->user_regs;

    if (user_regs->regs)
        regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
    struct regs_dump *intr_regs = &sample->intr_regs;

    if (intr_regs->regs)
        regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
    printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
           dump->size, dump->offset);
}

static void perf_evlist__print_tstamp(struct perf_evlist *evlist,
                                      union perf_event *event,
                                      struct perf_sample *sample)
{
    u64 sample_type = __perf_evlist__combined_sample_type(evlist);

    if (event->header.type != PERF_RECORD_SAMPLE &&
        !perf_evlist__sample_id_all(evlist)) {
        fputs("-1 -1 ", stdout);
        return;
    }

    if ((sample_type & PERF_SAMPLE_CPU))
        printf("%u ", sample->cpu);

    if (sample_type & PERF_SAMPLE_TIME)
        printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
    printf("... sample_read:\n");

    if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
        printf("...... time enabled %016" PRIx64 "\n",
               sample->read.time_enabled);

    if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
        printf("...... time running %016" PRIx64 "\n",
               sample->read.time_running);

    if (read_format & PERF_FORMAT_GROUP) {
        u64 i;

        printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

        for (i = 0; i < sample->read.group.nr; i++) {
            struct sample_read_value *value;

            value = &sample->read.group.values[i];
            printf("..... id %016" PRIx64
                   ", value %016" PRIx64 "\n",
                   value->id, value->value);
        }
    } else
        printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
               sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct perf_evlist *evlist, union perf_event *event,
                       u64 file_offset, struct perf_sample *sample)
{
    if (!dump_trace)
        return;

    printf("\n%#" PRIx64 " [%#x]: event: %d\n",
           file_offset, event->header.size, event->header.type);

    trace_event(event);
    if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
        evlist->trace_event_sample_raw(evlist, event, sample);

    if (sample)
        perf_evlist__print_tstamp(evlist, event, sample);

    printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
           event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
                        struct perf_sample *sample)
{
    u64 sample_type;

    if (!dump_trace)
        return;

    printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
           event->header.misc, sample->pid, sample->tid, sample->ip,
           sample->period, sample->addr);

    sample_type = evsel->attr.sample_type;

    if (evsel__has_callchain(evsel))
        callchain__printf(evsel, sample);

    if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel))
        branch_stack__printf(sample);

    if (sample_type & PERF_SAMPLE_REGS_USER)
        regs_user__printf(sample);

    if (sample_type & PERF_SAMPLE_REGS_INTR)
        regs_intr__printf(sample);

    if (sample_type & PERF_SAMPLE_STACK_USER)
        stack_user__printf(&sample->user_stack);

    if (sample_type & PERF_SAMPLE_WEIGHT)
        printf("... weight: %" PRIu64 "\n", sample->weight);

    if (sample_type & PERF_SAMPLE_DATA_SRC)
        printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

    if (sample_type & PERF_SAMPLE_PHYS_ADDR)
        printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

    if (sample_type & PERF_SAMPLE_TRANSACTION)
        printf("... transaction: %" PRIx64 "\n", sample->transaction);

    if (sample_type & PERF_SAMPLE_READ)
        sample_read__printf(sample, evsel->attr.read_format);
}

static void dump_read(struct perf_evsel *evsel, union perf_event *event)
{
    struct read_event *read_event = &event->read;
    u64 read_format;

    if (!dump_trace)
        return;

    printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid,
           perf_evsel__name(evsel),
           event->read.value);

    if (!evsel)
        return;

    read_format = evsel->attr.read_format;

    if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
        printf("... time enabled : %" PRIu64 "\n", read_event->time_enabled);

    if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
        printf("... time running : %" PRIu64 "\n", read_event->time_running);

    if (read_format & PERF_FORMAT_ID)
        printf("... id : %" PRIu64 "\n", read_event->id);
}

static struct machine *machines__find_for_cpumode(struct machines *machines,
                                                  union perf_event *event,
                                                  struct perf_sample *sample)
{
    struct machine *machine;

    if (perf_guest &&
        ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
         (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
        u32 pid;

        if (event->header.type == PERF_RECORD_MMAP
            || event->header.type == PERF_RECORD_MMAP2)
            pid = event->mmap.pid;
        else
            pid = sample->pid;

        machine = machines__find(machines, pid);
        if (!machine)
            machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
        return machine;
    }

    return &machines->host;
}

static int deliver_sample_value(struct perf_evlist *evlist,
                                struct perf_tool *tool,
                                union perf_event *event,
                                struct perf_sample *sample,
                                struct sample_read_value *v,
                                struct machine *machine)
{
    struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);

    if (sid) {
        sample->id = v->id;
        sample->period = v->value - sid->period;
        sid->period = v->value;
    }

    if (!sid || sid->evsel == NULL) {
        ++evlist->stats.nr_unknown_id;
        return 0;
    }

    /*
     * There's no reason to deliver sample
     * for zero period, bail out.
     */
    if (!sample->period)
        return 0;

    return tool->sample(tool, event, sample, sid->evsel, machine);
}

static int deliver_sample_group(struct perf_evlist *evlist,
                                struct perf_tool *tool,
                                union perf_event *event,
                                struct perf_sample *sample,
                                struct machine *machine)
{
    int ret = -EINVAL;
    u64 i;

    for (i = 0; i < sample->read.group.nr; i++) {
        ret = deliver_sample_value(evlist, tool, event, sample,
                                   &sample->read.group.values[i],
                                   machine);
        if (ret)
            break;
    }

    return ret;
}

static int
perf_evlist__deliver_sample(struct perf_evlist *evlist,
                            struct perf_tool *tool,
                            union perf_event *event,
                            struct perf_sample *sample,
                            struct perf_evsel *evsel,
                            struct machine *machine)
{
    /* We know evsel != NULL. */
    u64 sample_type = evsel->attr.sample_type;
    u64 read_format = evsel->attr.read_format;

    /* Standard sample delivery. */
    if (!(sample_type & PERF_SAMPLE_READ))
        return tool->sample(tool, event, sample, evsel, machine);

    /* For PERF_SAMPLE_READ we have either single or group mode. */
    if (read_format & PERF_FORMAT_GROUP)
        return deliver_sample_group(evlist, tool, event, sample,
                                    machine);
    else
        return deliver_sample_value(evlist, tool, event, sample,
                                    &sample->read.one, machine);
}

static int machines__deliver_event(struct machines *machines,
                                   struct perf_evlist *evlist,
                                   union perf_event *event,
                                   struct perf_sample *sample,
                                   struct perf_tool *tool, u64 file_offset)
{
    struct perf_evsel *evsel;
    struct machine *machine;

    dump_event(evlist, event, file_offset, sample);

    evsel = perf_evlist__id2evsel(evlist, sample->id);

    machine = machines__find_for_cpumode(machines, event, sample);

    switch (event->header.type) {
    case PERF_RECORD_SAMPLE:
        if (evsel == NULL) {
            ++evlist->stats.nr_unknown_id;
            return 0;
        }
        dump_sample(evsel, event, sample);
        if (machine == NULL) {
            ++evlist->stats.nr_unprocessable_samples;
            return 0;
        }
        return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
    case PERF_RECORD_MMAP:
        return tool->mmap(tool, event, sample, machine);
    case PERF_RECORD_MMAP2:
        if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
            ++evlist->stats.nr_proc_map_timeout;
        return tool->mmap2(tool, event, sample, machine);
    case PERF_RECORD_COMM:
        return tool->comm(tool, event, sample, machine);
    case PERF_RECORD_NAMESPACES:
        return tool->namespaces(tool, event, sample, machine);
    case PERF_RECORD_FORK:
        return tool->fork(tool, event, sample, machine);
    case PERF_RECORD_EXIT:
        return tool->exit(tool, event, sample, machine);
    case PERF_RECORD_LOST:
        if (tool->lost == perf_event__process_lost)
            evlist->stats.total_lost += event->lost.lost;
        return tool->lost(tool, event, sample, machine);
    case PERF_RECORD_LOST_SAMPLES:
        if (tool->lost_samples == perf_event__process_lost_samples)
            evlist->stats.total_lost_samples += event->lost_samples.lost;
        return tool->lost_samples(tool, event, sample, machine);
    case PERF_RECORD_READ:
        dump_read(evsel, event);
        return tool->read(tool, event, sample, evsel, machine);
    case PERF_RECORD_THROTTLE:
        return tool->throttle(tool, event, sample, machine);
    case PERF_RECORD_UNTHROTTLE:
        return tool->unthrottle(tool, event, sample, machine);
    case PERF_RECORD_AUX:
        if (tool->aux == perf_event__process_aux) {
            if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
                evlist->stats.total_aux_lost += 1;
            if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
                evlist->stats.total_aux_partial += 1;
        }
        return tool->aux(tool, event, sample, machine);
    case PERF_RECORD_ITRACE_START:
        return tool->itrace_start(tool, event, sample, machine);
    case PERF_RECORD_SWITCH:
    case PERF_RECORD_SWITCH_CPU_WIDE:
        return tool->context_switch(tool, event, sample, machine);
    case PERF_RECORD_KSYMBOL:
        return tool->ksymbol(tool, event, sample, machine);
    case PERF_RECORD_BPF_EVENT:
        return tool->bpf_event(tool, event, sample, machine);
    default:
        ++evlist->stats.nr_unknown_events;
        return -1;
    }
}

static int perf_session__deliver_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_tool *tool,
                                       u64 file_offset)
{
    struct perf_sample sample;
    int ret;

    ret = perf_evlist__parse_sample(session->evlist, event, &sample);
    if (ret) {
        pr_err("Can't parse sample, err = %d\n", ret);
        return ret;
    }

    ret = auxtrace__process_event(session, event, &sample, tool);
    if (ret < 0)
        return ret;
    if (ret > 0)
        return 0;

    return machines__deliver_event(&session->machines, session->evlist,
                                   event, &sample, tool, file_offset);
}

static s64 perf_session__process_user_event(struct perf_session *session,
                                            union perf_event *event,
                                            u64 file_offset)
{
    struct ordered_events *oe = &session->ordered_events;
    struct perf_tool *tool = session->tool;
    struct perf_sample sample = { .time = 0, };
    int fd = perf_data__fd(session->data);
    int err;

    if (event->header.type != PERF_RECORD_COMPRESSED ||
        tool->compressed == perf_session__process_compressed_event_stub)
        dump_event(session->evlist, event, file_offset, &sample);

    /* These events are processed right away */
    switch (event->header.type) {
    case PERF_RECORD_HEADER_ATTR:
        err = tool->attr(tool, event, &session->evlist);
        if (err == 0) {
            perf_session__set_id_hdr_size(session);
            perf_session__set_comm_exec(session);
        }
        return err;
    case PERF_RECORD_EVENT_UPDATE:
        return tool->event_update(tool, event, &session->evlist);
    case PERF_RECORD_HEADER_EVENT_TYPE:
        /*
         * Deprecated, but we need to handle it for the sake
         * of old data files created in pipe mode.
         */
        return 0;
    case PERF_RECORD_HEADER_TRACING_DATA:
        /* setup for reading amidst mmap */
        lseek(fd, file_offset, SEEK_SET);
        return tool->tracing_data(session, event);
    case PERF_RECORD_HEADER_BUILD_ID:
        return tool->build_id(session, event);
    case PERF_RECORD_FINISHED_ROUND:
        return tool->finished_round(tool, event, oe);
    case PERF_RECORD_ID_INDEX:
        return tool->id_index(session, event);
    case PERF_RECORD_AUXTRACE_INFO:
        return tool->auxtrace_info(session, event);
    case PERF_RECORD_AUXTRACE:
        /* setup for reading amidst mmap */
        lseek(fd, file_offset + event->header.size, SEEK_SET);
        return tool->auxtrace(session, event);
    case PERF_RECORD_AUXTRACE_ERROR:
        perf_session__auxtrace_error_inc(session, event);
        return tool->auxtrace_error(session, event);
    case PERF_RECORD_THREAD_MAP:
        return tool->thread_map(session, event);
    case PERF_RECORD_CPU_MAP:
        return tool->cpu_map(session, event);
    case PERF_RECORD_STAT_CONFIG:
        return tool->stat_config(session, event);
    case PERF_RECORD_STAT:
        return tool->stat(session, event);
    case PERF_RECORD_STAT_ROUND:
        return tool->stat_round(session, event);
    case PERF_RECORD_TIME_CONV:
        session->time_conv = event->time_conv;
        return tool->time_conv(session, event);
    case PERF_RECORD_HEADER_FEATURE:
        return tool->feature(session, event);
    case PERF_RECORD_COMPRESSED:
        err = tool->compressed(session, event, file_offset);
        if (err)
            dump_event(session->evlist, event, file_offset, &sample);
        return err;
    default:
        return -EINVAL;
    }
}

int perf_session__deliver_synth_event(struct perf_session *session,
                                      union perf_event *event,
                                      struct perf_sample *sample)
{
    struct perf_evlist *evlist = session->evlist;
    struct perf_tool *tool = session->tool;

    events_stats__inc(&evlist->stats, event->header.type);

    if (event->header.type >= PERF_RECORD_USER_TYPE_START)
        return perf_session__process_user_event(session, event, 0);

    return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
    perf_event__swap_op swap;

    swap = perf_event__swap_ops[event->header.type];
    if (swap)
        swap(event, sample_id_all);
}

int perf_session__peek_event(struct perf_session *session, off_t file_offset,
                             void *buf, size_t buf_sz,
                             union perf_event **event_ptr,
                             struct perf_sample *sample)
{
    union perf_event *event;
    size_t hdr_sz, rest;
    int fd;

    if (session->one_mmap && !session->header.needs_swap) {
        event = file_offset - session->one_mmap_offset +
                session->one_mmap_addr;
        goto out_parse_sample;
    }

    if (perf_data__is_pipe(session->data))
        return -1;

    fd = perf_data__fd(session->data);
    hdr_sz = sizeof(struct perf_event_header);

    if (buf_sz < hdr_sz)
        return -1;

    if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
        readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
        return -1;

    event = (union perf_event *)buf;

    if (session->header.needs_swap)
        perf_event_header__bswap(&event->header);

    if (event->header.size < hdr_sz || event->header.size > buf_sz)
        return -1;

    buf += hdr_sz;
    rest = event->header.size - hdr_sz;

    if (readn(fd, buf, rest) != (ssize_t)rest)
        return -1;

    if (session->header.needs_swap)
        event_swap(event, perf_evlist__sample_id_all(session->evlist));

out_parse_sample:

    if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
        perf_evlist__parse_sample(session->evlist, event, sample))
        return -1;

    *event_ptr = event;

    return 0;
}
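
/*
 * Example use of perf_session__peek_event() (a hedged sketch: real callers,
 * e.g. the auxtrace decoders, size the buffer to the largest event they
 * expect, and the file_offset value here is illustrative):
 *
 *    char buf[PERF_SAMPLE_MAX_SIZE];
 *    union perf_event *event;
 *    struct perf_sample sample;
 *
 *    if (perf_session__peek_event(session, file_offset, buf, sizeof(buf),
 *                                 &event, &sample) == 0) {
 *        // 'event' points into 'buf' (or into the single mmap region)
 *        pr_debug("peeked event type %u\n", event->header.type);
 *    }
 */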

static s64 perf_session__process_event(struct perf_session *session,
                                       union perf_event *event, u64 file_offset)
{
    struct perf_evlist *evlist = session->evlist;
    struct perf_tool *tool = session->tool;
    int ret;

    if (session->header.needs_swap)
        event_swap(event, perf_evlist__sample_id_all(evlist));

    if (event->header.type >= PERF_RECORD_HEADER_MAX)
        return -EINVAL;

    events_stats__inc(&evlist->stats, event->header.type);

    if (event->header.type >= PERF_RECORD_USER_TYPE_START)
        return perf_session__process_user_event(session, event, file_offset);

    if (tool->ordered_events) {
        u64 timestamp = -1ULL;

        ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
        if (ret && ret != -1)
            return ret;

        ret = perf_session__queue_event(session, event, timestamp, file_offset);
        if (ret != -ETIME)
            return ret;
    }

    return perf_session__deliver_event(session, event, tool, file_offset);
}

void perf_event_header__bswap(struct perf_event_header *hdr)
{
    hdr->type = bswap_32(hdr->type);
    hdr->misc = bswap_16(hdr->misc);
    hdr->size = bswap_16(hdr->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
    return machine__findnew_thread(&session->machines.host, -1, pid);
}

/*
 * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
 * So here a single thread is created for that, but actually there is a separate
 * idle task per cpu, so there should be one 'struct thread' per cpu, but there
 * is only 1. That causes problems for some tools, requiring workarounds. For
 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
 */
int perf_session__register_idle_thread(struct perf_session *session)
{
    struct thread *thread;
    int err = 0;

    thread = machine__findnew_thread(&session->machines.host, 0, 0);
    if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
        pr_err("problem inserting idle task.\n");
        err = -1;
    }

    if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
        pr_err("problem inserting idle task.\n");
        err = -1;
    }

    /* machine__findnew_thread() got the thread, so put it */
    thread__put(thread);
    return err;
}

static void
perf_session__warn_order(const struct perf_session *session)
{
    const struct ordered_events *oe = &session->ordered_events;
    struct perf_evsel *evsel;
    bool should_warn = true;

    evlist__for_each_entry(session->evlist, evsel) {
        if (evsel->attr.write_backward)
            should_warn = false;
    }

    if (!should_warn)
        return;
    if (oe->nr_unordered_events != 0)
        ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
}

static void perf_session__warn_about_errors(const struct perf_session *session)
{
    const struct events_stats *stats = &session->evlist->stats;

    if (session->tool->lost == perf_event__process_lost &&
        stats->nr_events[PERF_RECORD_LOST] != 0) {
        ui__warning("Processed %d events and lost %d chunks!\n\n"
                    "Check IO/CPU overload!\n\n",
                    stats->nr_events[0],
                    stats->nr_events[PERF_RECORD_LOST]);
    }

    if (session->tool->lost_samples == perf_event__process_lost_samples) {
        double drop_rate;

        drop_rate = (double)stats->total_lost_samples /
                    (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
        if (drop_rate > 0.05) {
            ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
                        stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
                        drop_rate * 100.0);
        }
    }

    if (session->tool->aux == perf_event__process_aux &&
        stats->total_aux_lost != 0) {
        ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
                    stats->total_aux_lost,
                    stats->nr_events[PERF_RECORD_AUX]);
    }

    if (session->tool->aux == perf_event__process_aux &&
        stats->total_aux_partial != 0) {
        bool vmm_exclusive = false;

        (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
                               &vmm_exclusive);

        ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
                    "Are you running a KVM guest in the background?%s\n\n",
                    stats->total_aux_partial,
                    stats->nr_events[PERF_RECORD_AUX],
                    vmm_exclusive ?
                    "\nReloading kvm_intel module with vmm_exclusive=0\n"
                    "will reduce the gaps to only guest's timeslices." :
                    "");
    }

    if (stats->nr_unknown_events != 0) {
        ui__warning("Found %u unknown events!\n\n"
                    "Is this an older tool processing a perf.data "
                    "file generated by a more recent tool?\n\n"
                    "If that is not the case, consider "
                    "reporting to linux-kernel@vger.kernel.org.\n\n",
                    stats->nr_unknown_events);
    }

    if (stats->nr_unknown_id != 0) {
        ui__warning("%u samples with id not present in the header\n",
                    stats->nr_unknown_id);
    }

    if (stats->nr_invalid_chains != 0) {
        ui__warning("Found invalid callchains!\n\n"
                    "%u out of %u events were discarded for this reason.\n\n"
                    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
                    stats->nr_invalid_chains,
                    stats->nr_events[PERF_RECORD_SAMPLE]);
    }

    if (stats->nr_unprocessable_samples != 0) {
        ui__warning("%u unprocessable samples recorded.\n"
                    "Do you have a KVM guest running and not using 'perf kvm'?\n",
                    stats->nr_unprocessable_samples);
    }

    perf_session__warn_order(session);

    events_stats__auxtrace_error_warn(stats);

1796 if (stats->nr_proc_map_timeout != 0) {
1797 ui__warning("%d map information files for pre-existing threads were\n"
1798 "not processed, if there are samples for addresses they\n"
1799 "will not be resolved, you may find out which are these\n"
1800 "threads by running with -v and redirecting the output\n"
1801 "to a file.\n"
1802 "The time limit to process proc map is too short?\n"
1803 "Increase it by --proc-map-timeout\n",
1804 stats->nr_proc_map_timeout);
1805 }
1806 }
1807
1808 static int perf_session__flush_thread_stack(struct thread *thread,
1809 void *p __maybe_unused)
1810 {
1811 return thread_stack__flush(thread);
1812 }
1813
1814 static int perf_session__flush_thread_stacks(struct perf_session *session)
1815 {
1816 return machines__for_each_thread(&session->machines,
1817 perf_session__flush_thread_stack,
1818 NULL);
1819 }
1820
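/*
 * Set asynchronously, e.g. from the SIGINT handlers in the perf
 * builtins, to make the processing loops below stop at the next
 * event boundary; readers test it through the session_done()
 * helper from session.h.
 */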
1821 volatile int session_done;
1822
1823 static int __perf_session__process_decomp_events(struct perf_session *session);
1824
1825 static int __perf_session__process_pipe_events(struct perf_session *session)
1826 {
1827 struct ordered_events *oe = &session->ordered_events;
1828 struct perf_tool *tool = session->tool;
1829 int fd = perf_data__fd(session->data);
1830 union perf_event *event;
1831 uint32_t size, cur_size = 0;
1832 void *buf = NULL;
1833 s64 skip = 0;
1834 u64 head;
1835 ssize_t err;
1836 void *p;
1837
1838 perf_tool__fill_defaults(tool);
1839
1840 head = 0;
1841 cur_size = sizeof(union perf_event);
1842
1843 buf = malloc(cur_size);
1844 if (!buf)
1845 return -errno;
1846 ordered_events__set_copy_on_queue(oe, true);
1847 more:
1848 event = buf;
1849 err = readn(fd, event, sizeof(struct perf_event_header));
1850 if (err <= 0) {
1851 if (err == 0)
1852 goto done;
1853
1854 pr_err("failed to read event header\n");
1855 goto out_err;
1856 }
1857
1858 if (session->header.needs_swap)
1859 perf_event_header__bswap(&event->header);
1860
1861 size = event->header.size;
1862 if (size < sizeof(struct perf_event_header)) {
1863 pr_err("bad event header size\n");
1864 goto out_err;
1865 }
1866
1867 if (size > cur_size) {
1868 void *new = realloc(buf, size);
1869 if (!new) {
1870 pr_err("failed to allocate memory to read event\n");
1871 goto out_err;
1872 }
1873 buf = new;
1874 cur_size = size;
1875 event = buf;
1876 }
1877 p = event;
1878 p += sizeof(struct perf_event_header);
1879
1880 if (size - sizeof(struct perf_event_header)) {
1881 err = readn(fd, p, size - sizeof(struct perf_event_header));
1882 if (err <= 0) {
1883 if (err == 0) {
1884 pr_err("unexpected end of event stream\n");
1885 goto done;
1886 }
1887
1888 pr_err("failed to read event data\n");
1889 goto out_err;
1890 }
1891 }
1892
1893 if ((skip = perf_session__process_event(session, event, head)) < 0) {
1894 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1895 head, event->header.size, event->header.type);
1896 err = -EINVAL;
1897 goto out_err;
1898 }
1899
1900 head += size;
1901
1902 if (skip > 0)
1903 head += skip;
1904
1905 err = __perf_session__process_decomp_events(session);
1906 if (err)
1907 goto out_err;
1908
1909 if (!session_done())
1910 goto more;
1911 done:
1912 /* do the final flush for ordered samples */
1913 err = ordered_events__flush(oe, OE_FLUSH__FINAL);
1914 if (err)
1915 goto out_err;
1916 err = auxtrace__flush_events(session, tool);
1917 if (err)
1918 goto out_err;
1919 err = perf_session__flush_thread_stacks(session);
1920 out_err:
1921 free(buf);
1922 if (!tool->no_warn)
1923 perf_session__warn_about_errors(session);
1924 ordered_events__free(&session->ordered_events);
1925 auxtrace__free_events(session);
1926 return err;
1927 }
1928
1929 static union perf_event *
1930 fetch_mmaped_event(struct perf_session *session,
1931 u64 head, size_t mmap_size, char *buf)
1932 {
1933 union perf_event *event;
1934
1935 /*
1936 * Ensure we have enough space remaining to read
1937 * the event size from the event header.
1938 */
1939 if (head + sizeof(event->header) > mmap_size)
1940 return NULL;
1941
1942 event = (union perf_event *)(buf + head);
1943
1944 if (session->header.needs_swap)
1945 perf_event_header__bswap(&event->header);
1946
1947 if (head + event->header.size > mmap_size) {
1948 /* We're not fetching the event so swap back again */
1949 if (session->header.needs_swap)
1950 perf_event_header__bswap(&event->header);
1951 return NULL;
1952 }
1953
1954 return event;
1955 }
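/*
 * A minimal sketch of the caller pattern (see reader__process_events()
 * below): walk the mapped buffer until the next event would cross the
 * end of the mapping, then remap further into the file and retry:
 *
 *	while ((event = fetch_mmaped_event(session, head, mmap_size, buf)))
 *		head += event->header.size;
 *	// NULL: remap at the page containing 'head' and try again
 */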
1956
1957 static int __perf_session__process_decomp_events(struct perf_session *session)
1958 {
1959 s64 skip;
1960 u64 size, file_pos = 0;
1961 struct decomp *decomp = session->decomp_last;
1962
1963 if (!decomp)
1964 return 0;
1965
1966 while (decomp->head < decomp->size && !session_done()) {
1967 union perf_event *event = fetch_mmaped_event(session, decomp->head, decomp->size, decomp->data);
1968
1969 if (!event)
1970 break;
1971
1972 size = event->header.size;
1973
1974 if (size < sizeof(struct perf_event_header) ||
1975 (skip = perf_session__process_event(session, event, file_pos)) < 0) {
1976 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1977 decomp->file_pos + decomp->head, event->header.size, event->header.type);
1978 return -EINVAL;
1979 }
1980
1981 if (skip)
1982 size += skip;
1983
1984 decomp->head += size;
1985 }
1986
1987 return 0;
1988 }
1989
1990 /*
1991 * On 64-bit we can mmap the data file in one go. No need for tiny mmap
1992 * slices. On 32-bit we use 32MB slices.
1993 */
1994 #if BITS_PER_LONG == 64
1995 #define MMAP_SIZE ULLONG_MAX
1996 #define NUM_MMAPS 1
1997 #else
1998 #define MMAP_SIZE (32 * 1024 * 1024ULL)
1999 #define NUM_MMAPS 128
2000 #endif
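/*
 * Note: NUM_MMAPS must be a power of two. reader__process_events()
 * below advances its slot with
 *
 *	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
 *
 * which only cycles through 0..NUM_MMAPS-1 when the array size is 2^n.
 */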
2001
2002 struct reader;
2003
2004 typedef s64 (*reader_cb_t)(struct perf_session *session,
2005 union perf_event *event,
2006 u64 file_offset);
2007
2008 struct reader {
2009 int fd;
2010 u64 data_size;
2011 u64 data_offset;
2012 reader_cb_t process;
2013 };
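/*
 * A reader describes one contiguous region of the data file to walk.
 * __perf_session__process_events() below builds one spanning the whole
 * data section, roughly:
 *
 *	struct reader rd = {
 *		.fd          = perf_data__fd(session->data),
 *		.data_size   = session->header.data_size,
 *		.data_offset = session->header.data_offset,
 *		.process     = process_simple,
 *	};
 */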
2014
2015 static int
2016 reader__process_events(struct reader *rd, struct perf_session *session,
2017 struct ui_progress *prog)
2018 {
2019 u64 data_size = rd->data_size;
2020 u64 head, page_offset, file_offset, file_pos, size;
2021 int err = 0, mmap_prot, mmap_flags, map_idx = 0;
2022 size_t mmap_size;
2023 char *buf, *mmaps[NUM_MMAPS];
2024 union perf_event *event;
2025 s64 skip;
2026
2027 page_offset = page_size * (rd->data_offset / page_size);
2028 file_offset = page_offset;
2029 head = rd->data_offset - page_offset;
2030
2031 ui_progress__init_size(prog, data_size, "Processing events...");
2032
2033 data_size += rd->data_offset;
2034
2035 mmap_size = MMAP_SIZE;
2036 if (mmap_size > data_size) {
2037 mmap_size = data_size;
2038 session->one_mmap = true;
2039 }
2040
2041 memset(mmaps, 0, sizeof(mmaps));
2042
2043 mmap_prot = PROT_READ;
2044 mmap_flags = MAP_SHARED;
2045
2046 if (session->header.needs_swap) {
2047 mmap_prot |= PROT_WRITE;
2048 mmap_flags = MAP_PRIVATE;
2049 }
2050 remap:
2051 buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
2052 file_offset);
2053 if (buf == MAP_FAILED) {
2054 pr_err("failed to mmap file\n");
2055 err = -errno;
2056 goto out;
2057 }
2058 mmaps[map_idx] = buf;
2059 map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
2060 file_pos = file_offset + head;
2061 if (session->one_mmap) {
2062 session->one_mmap_addr = buf;
2063 session->one_mmap_offset = file_offset;
2064 }
2065
2066 more:
2067 event = fetch_mmaped_event(session, head, mmap_size, buf);
2068 if (!event) {
2069 if (mmaps[map_idx]) {
2070 munmap(mmaps[map_idx], mmap_size);
2071 mmaps[map_idx] = NULL;
2072 }
2073
2074 page_offset = page_size * (head / page_size);
2075 file_offset += page_offset;
2076 head -= page_offset;
2077 goto remap;
2078 }
2079
2080 size = event->header.size;
2081
2082 skip = -EINVAL;
2083
2084 if (size < sizeof(struct perf_event_header) ||
2085 (skip = rd->process(session, event, file_pos)) < 0) {
2086 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
2087 file_offset + head, event->header.size,
2088 event->header.type, strerror(-skip));
2089 err = skip;
2090 goto out;
2091 }
2092
2093 if (skip)
2094 size += skip;
2095
2096 head += size;
2097 file_pos += size;
2098
2099 err = __perf_session__process_decomp_events(session);
2100 if (err)
2101 goto out;
2102
2103 ui_progress__update(prog, size);
2104
2105 if (session_done())
2106 goto out;
2107
2108 if (file_pos < data_size)
2109 goto more;
2110
2111 out:
2112 return err;
2113 }
2114
2115 static s64 process_simple(struct perf_session *session,
2116 union perf_event *event,
2117 u64 file_offset)
2118 {
2119 return perf_session__process_event(session, event, file_offset);
2120 }
2121
2122 static int __perf_session__process_events(struct perf_session *session)
2123 {
2124 struct reader rd = {
2125 .fd = perf_data__fd(session->data),
2126 .data_size = session->header.data_size,
2127 .data_offset = session->header.data_offset,
2128 .process = process_simple,
2129 };
2130 struct ordered_events *oe = &session->ordered_events;
2131 struct perf_tool *tool = session->tool;
2132 struct ui_progress prog;
2133 int err;
2134
2135 perf_tool__fill_defaults(tool);
2136
2137 if (rd.data_size == 0)
2138 return -1;
2139
2140 ui_progress__init_size(&prog, rd.data_size, "Processing events...");
2141
2142 err = reader__process_events(&rd, session, &prog);
2143 if (err)
2144 goto out_err;
2145 /* do the final flush for ordered samples */
2146 err = ordered_events__flush(oe, OE_FLUSH__FINAL);
2147 if (err)
2148 goto out_err;
2149 err = auxtrace__flush_events(session, tool);
2150 if (err)
2151 goto out_err;
2152 err = perf_session__flush_thread_stacks(session);
2153 out_err:
2154 ui_progress__finish();
2155 if (!tool->no_warn)
2156 perf_session__warn_about_errors(session);
2157 /*
2158 * We may be switching to a new perf.data output, so make
2159 * ordered_events reusable.
2160 */
2161 ordered_events__reinit(&session->ordered_events);
2162 auxtrace__free_events(session);
2163 session->one_mmap = false;
2164 return err;
2165 }
2166
2167 int perf_session__process_events(struct perf_session *session)
2168 {
2169 if (perf_session__register_idle_thread(session) < 0)
2170 return -ENOMEM;
2171
2172 if (perf_data__is_pipe(session->data))
2173 return __perf_session__process_pipe_events(session);
2174
2175 return __perf_session__process_events(session);
2176 }
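/*
 * A minimal sketch of a typical caller (hypothetical locals, error
 * handling elided; see e.g. builtin-report.c for a real one), assuming
 * the perf_session__new()/perf_session__delete() API in this tree:
 *
 *	struct perf_session *session = perf_session__new(&data, false, &tool);
 *
 *	if (session != NULL) {
 *		err = perf_session__process_events(session);
 *		perf_session__delete(session);
 *	}
 */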
2177
2178 bool perf_session__has_traces(struct perf_session *session, const char *msg)
2179 {
2180 struct perf_evsel *evsel;
2181
2182 evlist__for_each_entry(session->evlist, evsel) {
2183 if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
2184 return true;
2185 }
2186
2187 pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
2188 return false;
2189 }
2190
2191 int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
2192 {
2193 char *bracket;
2194 struct ref_reloc_sym *ref;
2195 struct kmap *kmap;
2196
2197 ref = zalloc(sizeof(struct ref_reloc_sym));
2198 if (ref == NULL)
2199 return -ENOMEM;
2200
2201 ref->name = strdup(symbol_name);
2202 if (ref->name == NULL) {
2203 free(ref);
2204 return -ENOMEM;
2205 }
2206
2207 bracket = strchr(ref->name, ']');
2208 if (bracket)
2209 *bracket = '\0';
2210
2211 ref->addr = addr;
2212
2213 kmap = map__kmap(map);
2214 if (kmap)
2215 kmap->ref_reloc_sym = ref;
2216
2217 return 0;
2218 }
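/*
 * E.g. to anchor kernel relocation on the usual kallsyms reference
 * symbol (hypothetical address):
 *
 *	map__set_kallsyms_ref_reloc_sym(map, "_text", 0xffffffff81000000ULL);
 *
 * A name carrying a ']' is truncated at the first bracket before being
 * stored in the kernel map's kmap->ref_reloc_sym.
 */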
2219
2220 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
2221 {
2222 return machines__fprintf_dsos(&session->machines, fp);
2223 }
2224
2225 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
2226 bool (skip)(struct dso *dso, int parm), int parm)
2227 {
2228 return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
2229 }
2230
2231 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
2232 {
2233 size_t ret;
2234 const char *msg = "";
2235
2236 if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
2237 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
2238
2239 ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
2240
2241 ret += events_stats__fprintf(&session->evlist->stats, fp);
2242 return ret;
2243 }
2244
2245 size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
2246 {
2247 /*
2248 * FIXME: Here we have to actually print all the machines in this
2249 * session, not just the host...
2250 */
2251 return machine__fprintf(&session->machines.host, fp);
2252 }
2253
2254 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
2255 unsigned int type)
2256 {
2257 struct perf_evsel *pos;
2258
2259 evlist__for_each_entry(session->evlist, pos) {
2260 if (pos->attr.type == type)
2261 return pos;
2262 }
2263 return NULL;
2264 }
2265
2266 int perf_session__cpu_bitmap(struct perf_session *session,
2267 const char *cpu_list, unsigned long *cpu_bitmap)
2268 {
2269 int i, err = -1;
2270 struct cpu_map *map;
2271
2272 for (i = 0; i < PERF_TYPE_MAX; ++i) {
2273 struct perf_evsel *evsel;
2274
2275 evsel = perf_session__find_first_evtype(session, i);
2276 if (!evsel)
2277 continue;
2278
2279 if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
2280 pr_err("File does not contain CPU events. "
2281 "Remove -C option to proceed.\n");
2282 return -1;
2283 }
2284 }
2285
2286 map = cpu_map__new(cpu_list);
2287 if (map == NULL) {
2288 pr_err("Invalid cpu_list\n");
2289 return -1;
2290 }
2291
2292 for (i = 0; i < map->nr; i++) {
2293 int cpu = map->map[i];
2294
2295 if (cpu >= MAX_NR_CPUS) {
2296 pr_err("Requested CPU %d too large. "
2297 "Consider raising MAX_NR_CPUS\n", cpu);
2298 goto out_delete_map;
2299 }
2300
2301 set_bit(cpu, cpu_bitmap);
2302 }
2303
2304 err = 0;
2305
2306 out_delete_map:
2307 cpu_map__put(map);
2308 return err;
2309 }
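/*
 * A minimal usage sketch (hypothetical bitmap sizing), e.g. to restrict
 * processing to CPUs 0-3:
 *
 *	unsigned long cpu_bitmap[BITS_TO_LONGS(MAX_NR_CPUS)] = { 0 };
 *
 *	if (perf_session__cpu_bitmap(session, "0-3", cpu_bitmap) < 0)
 *		return -1;
 */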
2310
2311 void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
2312 bool full)
2313 {
2314 if (session == NULL || fp == NULL)
2315 return;
2316
2317 fprintf(fp, "# ========\n");
2318 perf_header__fprintf_info(session, fp, full);
2319 fprintf(fp, "# ========\n#\n");
2320 }
2321
2322
2323 int __perf_session__set_tracepoints_handlers(struct perf_session *session,
2324 const struct perf_evsel_str_handler *assocs,
2325 size_t nr_assocs)
2326 {
2327 struct perf_evsel *evsel;
2328 size_t i;
2329 int err;
2330
2331 for (i = 0; i < nr_assocs; i++) {
2332 /*
2333 * If we are adding a handler for an event that is not
2334 * in the session, just ignore it.
2335 */
2336 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
2337 if (evsel == NULL)
2338 continue;
2339
2340 err = -EEXIST;
2341 if (evsel->handler != NULL)
2342 goto out;
2343 evsel->handler = assocs[i].handler;
2344 }
2345
2346 err = 0;
2347 out:
2348 return err;
2349 }
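/*
 * Typical usage, with hypothetical handler names (cf. the
 * perf_session__set_tracepoints_handlers() wrapper in session.h):
 *
 *	static const struct perf_evsel_str_handler handlers[] = {
 *		{ "sched:sched_switch", process_sched_switch },
 *		{ "sched:sched_wakeup", process_sched_wakeup },
 *	};
 *
 *	__perf_session__set_tracepoints_handlers(session, handlers,
 *						 ARRAY_SIZE(handlers));
 */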
2350
2351 int perf_event__process_id_index(struct perf_session *session,
2352 union perf_event *event)
2353 {
2354 struct perf_evlist *evlist = session->evlist;
2355 struct id_index_event *ie = &event->id_index;
2356 size_t i, nr, max_nr;
2357
2358 max_nr = (ie->header.size - sizeof(struct id_index_event)) /
2359 sizeof(struct id_index_entry);
2360 nr = ie->nr;
2361 if (nr > max_nr)
2362 return -EINVAL;
2363
2364 if (dump_trace)
2365 fprintf(stdout, " nr: %zu\n", nr);
2366
2367 for (i = 0; i < nr; i++) {
2368 struct id_index_entry *e = &ie->entries[i];
2369 struct perf_sample_id *sid;
2370
2371 if (dump_trace) {
2372 fprintf(stdout, " ... id: %"PRIu64, e->id);
2373 fprintf(stdout, " idx: %"PRIu64, e->idx);
2374 fprintf(stdout, " cpu: %"PRId64, e->cpu);
2375 fprintf(stdout, " tid: %"PRId64"\n", e->tid);
2376 }
2377
2378 sid = perf_evlist__id2sid(evlist, e->id);
2379 if (!sid)
2380 return -ENOENT;
2381 sid->idx = e->idx;
2382 sid->cpu = e->cpu;
2383 sid->tid = e->tid;
2384 }
2385 return 0;
2386 }
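/*
 * Layout of the PERF_RECORD_ID_INDEX record consumed above and produced
 * by perf_event__synthesize_id_index() below (see struct id_index_event
 * in event.h): a perf_event_header, a u64 entry count, then 'nr'
 * id_index_entry structs of { id, idx, cpu, tid }. Since header.size is
 * a u16, one record holds at most
 * (UINT16_MAX - sizeof(struct id_index_event)) / sizeof(struct id_index_entry)
 * entries; bigger id sets are split across several records, which is
 * what the chunking below does.
 */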
2387
2388 int perf_event__synthesize_id_index(struct perf_tool *tool,
2389 perf_event__handler_t process,
2390 struct perf_evlist *evlist,
2391 struct machine *machine)
2392 {
2393 union perf_event *ev;
2394 struct perf_evsel *evsel;
2395 size_t nr = 0, i = 0, sz, max_nr, n;
2396 int err;
2397
2398 pr_debug2("Synthesizing id index\n");
2399
2400 max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
2401 sizeof(struct id_index_entry);
2402
2403 evlist__for_each_entry(evlist, evsel)
2404 nr += evsel->ids;
2405
2406 n = nr > max_nr ? max_nr : nr;
2407 sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
2408 ev = zalloc(sz);
2409 if (!ev)
2410 return -ENOMEM;
2411
2412 ev->id_index.header.type = PERF_RECORD_ID_INDEX;
2413 ev->id_index.header.size = sz;
2414 ev->id_index.nr = n;
2415
2416 evlist__for_each_entry(evlist, evsel) {
2417 u32 j;
2418
2419 for (j = 0; j < evsel->ids; j++) {
2420 struct id_index_entry *e;
2421 struct perf_sample_id *sid;
2422
2423 if (i >= n) {
2424 err = process(tool, ev, NULL, machine);
2425 if (err)
2426 goto out_err;
2427 nr -= n;
2428 i = 0;
2429 }
2430
2431 e = &ev->id_index.entries[i++];
2432
2433 e->id = evsel->id[j];
2434
2435 sid = perf_evlist__id2sid(evlist, e->id);
2436 if (!sid) {
2437 free(ev);
2438 return -ENOENT;
2439 }
2440
2441 e->idx = sid->idx;
2442 e->cpu = sid->cpu;
2443 e->tid = sid->tid;
2444 }
2445 }
2446
2447 sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
2448 ev->id_index.header.size = sz;
2449 ev->id_index.nr = nr;
2450
2451 err = process(tool, ev, NULL, machine);
2452 out_err:
2453 free(ev);
2454
2455 return err;
2456 }