#include <linux/kernel.h>
#include <traceevent/event-parse.h>

#include "perf_regs.h"
#include "thread-stack.h"
static int perf_session__deliver_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_sample *sample,
                                       struct perf_tool *tool,
                                       u64 file_offset);
static int perf_session__open(struct perf_session *session)
{
        struct perf_data_file *file = session->file;

        if (perf_session__read_header(session) < 0) {
                pr_err("incompatible file format (rerun with -v to learn more)\n");
                return -1;
        }

        if (perf_data_file__is_pipe(file))
                return 0;

        if (perf_header__has_feat(&session->header, HEADER_STAT))
                return 0;

        if (!perf_evlist__valid_sample_type(session->evlist)) {
                pr_err("non matching sample_type\n");
                return -1;
        }

        if (!perf_evlist__valid_sample_id_all(session->evlist)) {
                pr_err("non matching sample_id_all\n");
                return -1;
        }

        if (!perf_evlist__valid_read_format(session->evlist)) {
                pr_err("non matching read_format\n");
                return -1;
        }

        return 0;
}
void perf_session__set_id_hdr_size(struct perf_session *session)
{
        u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

        machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
        int ret = machine__create_kernel_maps(&session->machines.host);

        if (ret >= 0)
                ret = machines__create_guest_kernel_maps(&session->machines);
        return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
        machines__destroy_kernel_maps(&session->machines);
}
static bool perf_session__has_comm_exec(struct perf_session *session)
{
        struct perf_evsel *evsel;

        evlist__for_each(session->evlist, evsel) {
                if (evsel->attr.comm_exec)
                        return true;
        }

        return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
        bool comm_exec = perf_session__has_comm_exec(session);

        machines__set_comm_exec(&session->machines, comm_exec);
}
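
/*
 * Callback used by the ordered_events queue: once events have been sorted by
 * timestamp, each one is parsed into a perf_sample and handed to
 * perf_session__deliver_event() with the tool attached to this session.
 */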
static int ordered_events__deliver_event(struct ordered_events *oe,
                                         struct ordered_event *event)
{
        struct perf_sample sample;
        struct perf_session *session = container_of(oe, struct perf_session,
                                                     ordered_events);
        int ret = perf_evlist__parse_sample(session->evlist, event->event, &sample);

        if (ret) {
                pr_err("Can't parse sample, err = %d\n", ret);
                return ret;
        }

        return perf_session__deliver_event(session, event->event, &sample,
                                           session->tool, event->file_offset);
}
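
/*
 * Allocate and set up a session: initialize the machines and ordered_events
 * state, open and validate the data file when one is passed for reading, and
 * fall back to unordered processing if the evlist cannot provide
 * sample_id_all timestamps.
 */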
struct perf_session *perf_session__new(struct perf_data_file *file,
                                       bool repipe, struct perf_tool *tool)
{
        struct perf_session *session = zalloc(sizeof(*session));

        if (!session)
                goto out;

        session->repipe = repipe;
        session->tool   = tool;
        INIT_LIST_HEAD(&session->auxtrace_index);
        machines__init(&session->machines);
        ordered_events__init(&session->ordered_events, ordered_events__deliver_event);

        if (file) {
                if (perf_data_file__open(file))
                        goto out_delete;

                session->file = file;

                if (perf_data_file__is_read(file)) {
                        if (perf_session__open(session) < 0)
                                goto out_close;

                        perf_session__set_id_hdr_size(session);
                        perf_session__set_comm_exec(session);
                }
        } else {
                session->machines.host.env = &perf_env;
        }

        if (!file || perf_data_file__is_write(file)) {
                /*
                 * In O_RDONLY mode this will be performed when reading the
                 * kernel MMAP event, in perf_event__process_mmap().
                 */
                if (perf_session__create_kernel_maps(session) < 0)
                        pr_warning("Cannot read kernel map\n");
        }

        if (tool && tool->ordering_requires_timestamps &&
            tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
                dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
                tool->ordered_events = false;
        }

        return session;

 out_close:
        perf_data_file__close(file);
 out_delete:
        perf_session__delete(session);
 out:
        return NULL;
}
static void perf_session__delete_threads(struct perf_session *session)
{
        machine__delete_threads(&session->machines.host);
}

void perf_session__delete(struct perf_session *session)
{
        auxtrace__free(session);
        auxtrace_index__free(&session->auxtrace_index);
        perf_session__destroy_kernel_maps(session);
        perf_session__delete_threads(session);
        perf_env__exit(&session->header.env);
        machines__exit(&session->machines);
        if (session->file)
                perf_data_file__close(session->file);
        free(session);
}
static int process_event_synth_tracing_data_stub(struct perf_tool *tool
                                                 __maybe_unused,
                                                 union perf_event *event
                                                 __maybe_unused,
                                                 struct perf_session *session
                                                 __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
                                         union perf_event *event __maybe_unused,
                                         struct perf_evlist **pevlist
                                         __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
                                                 union perf_event *event __maybe_unused,
                                                 struct perf_evlist **pevlist
                                                 __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_event_update(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event __maybe_unused,
                                     struct perf_sample *sample __maybe_unused,
                                     struct perf_evsel *evsel __maybe_unused,
                                     struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
                              union perf_event *event __maybe_unused,
                              struct perf_sample *sample __maybe_unused,
                              struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
                                       union perf_event *event __maybe_unused,
                                       struct ordered_events *oe __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round(struct perf_tool *tool,
                                  union perf_event *event,
                                  struct ordered_events *oe);

static int skipn(int fd, off_t n)
{
        char buf[4096];
        ssize_t ret;

        while (n > 0) {
                ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
                if (ret <= 0)
                        return ret;
                n -= ret;
        }

        return 0;
}

static s64 process_event_auxtrace_stub(struct perf_tool *tool __maybe_unused,
                                       union perf_event *event,
                                       struct perf_session *session
                                       __maybe_unused)
{
        dump_printf(": unhandled!\n");
        if (perf_data_file__is_pipe(session->file))
                skipn(perf_data_file__fd(session->file), event->auxtrace.size);
        return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_tool *tool __maybe_unused,
                                  union perf_event *event __maybe_unused,
                                  struct perf_session *session __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_thread_map_stub(struct perf_tool *tool __maybe_unused,
                                  union perf_event *event __maybe_unused,
                                  struct perf_session *session __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_thread_map(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_cpu_map_stub(struct perf_tool *tool __maybe_unused,
                               union perf_event *event __maybe_unused,
                               struct perf_session *session __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_cpu_map(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_stat_config_stub(struct perf_tool *tool __maybe_unused,
                                   union perf_event *event __maybe_unused,
                                   struct perf_session *session __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_stat_config(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_stat_stub(struct perf_tool *tool __maybe_unused,
                             union perf_event *event __maybe_unused,
                             struct perf_session *perf_session
                             __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_stat(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_stat_round_stub(struct perf_tool *tool __maybe_unused,
                                   union perf_event *event __maybe_unused,
                                   struct perf_session *perf_session
                                   __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_stat_round(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}
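
/*
 * Point every callback a tool did not set at one of the stubs above, so the
 * dispatch code can invoke tool->xxx() unconditionally. Record types with no
 * real handler are then simply reported as unhandled when dumping.
 */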
void perf_tool__fill_defaults(struct perf_tool *tool)
{
        if (tool->sample == NULL)
                tool->sample = process_event_sample_stub;
        if (tool->mmap == NULL)
                tool->mmap = process_event_stub;
        if (tool->mmap2 == NULL)
                tool->mmap2 = process_event_stub;
        if (tool->comm == NULL)
                tool->comm = process_event_stub;
        if (tool->fork == NULL)
                tool->fork = process_event_stub;
        if (tool->exit == NULL)
                tool->exit = process_event_stub;
        if (tool->lost == NULL)
                tool->lost = perf_event__process_lost;
        if (tool->lost_samples == NULL)
                tool->lost_samples = perf_event__process_lost_samples;
        if (tool->aux == NULL)
                tool->aux = perf_event__process_aux;
        if (tool->itrace_start == NULL)
                tool->itrace_start = perf_event__process_itrace_start;
        if (tool->context_switch == NULL)
                tool->context_switch = perf_event__process_switch;
        if (tool->read == NULL)
                tool->read = process_event_sample_stub;
        if (tool->throttle == NULL)
                tool->throttle = process_event_stub;
        if (tool->unthrottle == NULL)
                tool->unthrottle = process_event_stub;
        if (tool->attr == NULL)
                tool->attr = process_event_synth_attr_stub;
        if (tool->event_update == NULL)
                tool->event_update = process_event_synth_event_update_stub;
        if (tool->tracing_data == NULL)
                tool->tracing_data = process_event_synth_tracing_data_stub;
        if (tool->build_id == NULL)
                tool->build_id = process_event_op2_stub;
        if (tool->finished_round == NULL) {
                if (tool->ordered_events)
                        tool->finished_round = process_finished_round;
                else
                        tool->finished_round = process_finished_round_stub;
        }
        if (tool->id_index == NULL)
                tool->id_index = process_event_op2_stub;
        if (tool->auxtrace_info == NULL)
                tool->auxtrace_info = process_event_op2_stub;
        if (tool->auxtrace == NULL)
                tool->auxtrace = process_event_auxtrace_stub;
        if (tool->auxtrace_error == NULL)
                tool->auxtrace_error = process_event_op2_stub;
        if (tool->thread_map == NULL)
                tool->thread_map = process_event_thread_map_stub;
        if (tool->cpu_map == NULL)
                tool->cpu_map = process_event_cpu_map_stub;
        if (tool->stat_config == NULL)
                tool->stat_config = process_event_stat_config_stub;
        if (tool->stat == NULL)
                tool->stat = process_stat_stub;
        if (tool->stat_round == NULL)
                tool->stat_round = process_stat_round_stub;
        if (tool->time_conv == NULL)
                tool->time_conv = process_event_op2_stub;
}
static void swap_sample_id_all(union perf_event *event, void *data)
{
        void *end = (void *) event + event->header.size;
        int size = end - data;

        BUG_ON(size % sizeof(u64));
        mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
                                   bool sample_id_all __maybe_unused)
{
        struct perf_event_header *hdr = &event->header;
        mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
        event->comm.pid = bswap_32(event->comm.pid);
        event->comm.tid = bswap_32(event->comm.tid);

        if (sample_id_all) {
                void *data = &event->comm.comm;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap_swap(union perf_event *event,
                                  bool sample_id_all)
{
        event->mmap.pid   = bswap_32(event->mmap.pid);
        event->mmap.tid   = bswap_32(event->mmap.tid);
        event->mmap.start = bswap_64(event->mmap.start);
        event->mmap.len   = bswap_64(event->mmap.len);
        event->mmap.pgoff = bswap_64(event->mmap.pgoff);

        if (sample_id_all) {
                void *data = &event->mmap.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}
static void perf_event__mmap2_swap(union perf_event *event,
                                   bool sample_id_all)
{
        event->mmap2.pid   = bswap_32(event->mmap2.pid);
        event->mmap2.tid   = bswap_32(event->mmap2.tid);
        event->mmap2.start = bswap_64(event->mmap2.start);
        event->mmap2.len   = bswap_64(event->mmap2.len);
        event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
        event->mmap2.maj   = bswap_32(event->mmap2.maj);
        event->mmap2.min   = bswap_32(event->mmap2.min);
        event->mmap2.ino   = bswap_64(event->mmap2.ino);

        if (sample_id_all) {
                void *data = &event->mmap2.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
        event->fork.pid  = bswap_32(event->fork.pid);
        event->fork.tid  = bswap_32(event->fork.tid);
        event->fork.ppid = bswap_32(event->fork.ppid);
        event->fork.ptid = bswap_32(event->fork.ptid);
        event->fork.time = bswap_64(event->fork.time);

        if (sample_id_all)
                swap_sample_id_all(event, &event->fork + 1);
}
static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
        event->read.pid          = bswap_32(event->read.pid);
        event->read.tid          = bswap_32(event->read.tid);
        event->read.value        = bswap_64(event->read.value);
        event->read.time_enabled = bswap_64(event->read.time_enabled);
        event->read.time_running = bswap_64(event->read.time_running);
        event->read.id           = bswap_64(event->read.id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
        event->aux.aux_offset = bswap_64(event->aux.aux_offset);
        event->aux.aux_size   = bswap_64(event->aux.aux_size);
        event->aux.flags      = bswap_64(event->aux.flags);

        if (sample_id_all)
                swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
                                          bool sample_id_all)
{
        event->itrace_start.pid = bswap_32(event->itrace_start.pid);
        event->itrace_start.tid = bswap_32(event->itrace_start.tid);

        if (sample_id_all)
                swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
        if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
                event->context_switch.next_prev_pid =
                                bswap_32(event->context_switch.next_prev_pid);
                event->context_switch.next_prev_tid =
                                bswap_32(event->context_switch.next_prev_tid);
        }

        if (sample_id_all)
                swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
                                      bool sample_id_all)
{
        event->throttle.time      = bswap_64(event->throttle.time);
        event->throttle.id        = bswap_64(event->throttle.id);
        event->throttle.stream_id = bswap_64(event->throttle.stream_id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->throttle + 1);
}
static u8 revbyte(u8 b)
{
        int rev = (b >> 4) | ((b & 0xf) << 4);
        rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
        rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
        return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix and carry the perf_event_attr
 * bitfield flags in a separate data file FEAT_ section. Though this seems
 * to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
        unsigned i;

        for (i = 0; i < len; i++) {
                *p = revbyte(*p);
                p++;
        }
}
/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
        attr->type = bswap_32(attr->type);
        attr->size = bswap_32(attr->size);

#define bswap_safe(f, n)                                        \
        (attr->size > (offsetof(struct perf_event_attr, f) +   \
                       sizeof(attr->f) * (n)))
#define bswap_field(f, sz)                      \
do {                                            \
        if (bswap_safe(f, 0))                   \
                attr->f = bswap_##sz(attr->f);  \
} while(0)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

        bswap_field_64(config);
        bswap_field_64(sample_period);
        bswap_field_64(sample_type);
        bswap_field_64(read_format);
        bswap_field_32(wakeup_events);
        bswap_field_32(bp_type);
        bswap_field_64(bp_addr);
        bswap_field_64(bp_len);
        bswap_field_64(branch_sample_type);
        bswap_field_64(sample_regs_user);
        bswap_field_32(sample_stack_user);
        bswap_field_32(aux_watermark);

        /*
         * After read_format are bitfields. Check read_format because
         * we are unable to use offsetof on bitfield.
         */
        if (bswap_safe(read_format, 1))
                swap_bitfield((u8 *) (&attr->read_format + 1),
                              sizeof(u64));

#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}
static void perf_event__hdr_attr_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        size_t size;

        perf_event__attr_swap(&event->attr.attr);

        size = event->header.size;
        size -= (void *)&event->attr.id - (void *)event;
        mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->event_update.type = bswap_64(event->event_update.type);
        event->event_update.id   = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->event_type.event_type.event_id =
                bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->tracing_data.size = bswap_32(event->tracing_data.size);
}
static void perf_event__auxtrace_info_swap(union perf_event *event,
                                           bool sample_id_all __maybe_unused)
{
        size_t size;

        event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

        size = event->header.size;
        size -= (void *)&event->auxtrace_info.priv - (void *)event;
        mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        event->auxtrace.size      = bswap_64(event->auxtrace.size);
        event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
        event->auxtrace.reference = bswap_64(event->auxtrace.reference);
        event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
        event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
        event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
                                            bool sample_id_all __maybe_unused)
{
        event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
        event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
        event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
        event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
        event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
        event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
}
static void perf_event__thread_map_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        unsigned i;

        event->thread_map.nr = bswap_64(event->thread_map.nr);

        for (i = 0; i < event->thread_map.nr; i++)
                event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
                                     bool sample_id_all __maybe_unused)
{
        struct cpu_map_data *data = &event->cpu_map.data;
        struct cpu_map_entries *cpus;
        struct cpu_map_mask *mask;
        unsigned i;

        data->type = bswap_64(data->type);

        switch (data->type) {
        case PERF_CPU_MAP__CPUS:
                cpus = (struct cpu_map_entries *)data->data;

                cpus->nr = bswap_16(cpus->nr);

                for (i = 0; i < cpus->nr; i++)
                        cpus->cpu[i] = bswap_16(cpus->cpu[i]);
                break;
        case PERF_CPU_MAP__MASK:
                mask = (struct cpu_map_mask *) data->data;

                mask->nr = bswap_16(mask->nr);
                mask->long_size = bswap_16(mask->long_size);

                switch (mask->long_size) {
                case 4: mem_bswap_32(&mask->mask, mask->nr); break;
                case 8: mem_bswap_64(&mask->mask, mask->nr); break;
                default:
                        pr_err("cpu_map swap: unsupported long size\n");
                }
        default:
                break;
        }
}
static void perf_event__stat_config_swap(union perf_event *event,
                                         bool sample_id_all __maybe_unused)
{
        u64 size;

        size  = event->stat_config.nr * sizeof(event->stat_config.data[0]);
        size += 1; /* nr item itself */
        mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
                                  bool sample_id_all __maybe_unused)
{
        event->stat.id     = bswap_64(event->stat.id);
        event->stat.thread = bswap_32(event->stat.thread);
        event->stat.cpu    = bswap_32(event->stat.cpu);
        event->stat.val    = bswap_64(event->stat.val);
        event->stat.ena    = bswap_64(event->stat.ena);
        event->stat.run    = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->stat_round.type = bswap_64(event->stat_round.type);
        event->stat_round.time = bswap_64(event->stat_round.time);
}
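
/*
 * Per-record-type byte-swap handlers, indexed by PERF_RECORD_* type. When a
 * perf.data file was written on a machine with the opposite endianness,
 * event_swap() looks the record type up here and converts the event (and,
 * where requested, its trailing sample_id_all block) in place.
 */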
typedef void (*perf_event__swap_op)(union perf_event *event,
                                    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
        [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
        [PERF_RECORD_MMAP2]               = perf_event__mmap2_swap,
        [PERF_RECORD_COMM]                = perf_event__comm_swap,
        [PERF_RECORD_FORK]                = perf_event__task_swap,
        [PERF_RECORD_EXIT]                = perf_event__task_swap,
        [PERF_RECORD_LOST]                = perf_event__all64_swap,
        [PERF_RECORD_READ]                = perf_event__read_swap,
        [PERF_RECORD_THROTTLE]            = perf_event__throttle_swap,
        [PERF_RECORD_UNTHROTTLE]          = perf_event__throttle_swap,
        [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
        [PERF_RECORD_AUX]                 = perf_event__aux_swap,
        [PERF_RECORD_ITRACE_START]        = perf_event__itrace_start_swap,
        [PERF_RECORD_LOST_SAMPLES]        = perf_event__all64_swap,
        [PERF_RECORD_SWITCH]              = perf_event__switch_swap,
        [PERF_RECORD_SWITCH_CPU_WIDE]     = perf_event__switch_swap,
        [PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
        [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
        [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
        [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
        [PERF_RECORD_ID_INDEX]            = perf_event__all64_swap,
        [PERF_RECORD_AUXTRACE_INFO]       = perf_event__auxtrace_info_swap,
        [PERF_RECORD_AUXTRACE]            = perf_event__auxtrace_swap,
        [PERF_RECORD_AUXTRACE_ERROR]      = perf_event__auxtrace_error_swap,
        [PERF_RECORD_THREAD_MAP]          = perf_event__thread_map_swap,
        [PERF_RECORD_CPU_MAP]             = perf_event__cpu_map_swap,
        [PERF_RECORD_STAT_CONFIG]         = perf_event__stat_config_swap,
        [PERF_RECORD_STAT]                = perf_event__stat_swap,
        [PERF_RECORD_STAT_ROUND]          = perf_event__stat_round_swap,
        [PERF_RECORD_EVENT_UPDATE]        = perf_event__event_update_swap,
        [PERF_RECORD_TIME_CONV]           = perf_event__all64_swap,
        [PERF_RECORD_HEADER_MAX]          = NULL,
};
/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *    cnt1 timestamps  |  cnt2 timestamps
 *          -          |        4   <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *    cnt1 timestamps  |  cnt2 timestamps
 *          5          |        7   <--- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *    cnt1 timestamps  |  cnt2 timestamps
 *
 *      Flush every event below timestamp 7
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
                                  union perf_event *event __maybe_unused,
                                  struct ordered_events *oe)
{
        if (dump_trace)
                fprintf(stdout, "\n");
        return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
                              struct perf_sample *sample, u64 file_offset)
{
        return ordered_events__queue(&s->ordered_events, event, sample, file_offset);
}
static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
        struct ip_callchain *callchain = sample->callchain;
        struct branch_stack *lbr_stack = sample->branch_stack;
        u64 kernel_callchain_nr = callchain->nr;
        unsigned int i;

        for (i = 0; i < kernel_callchain_nr; i++) {
                if (callchain->ips[i] == PERF_CONTEXT_USER)
                        break;
        }

        if ((i != kernel_callchain_nr) && lbr_stack->nr) {
                u64 total_nr;
                /*
                 * LBR callstack can only get user call chain,
                 * i is kernel call chain number,
                 * 1 is PERF_CONTEXT_USER.
                 *
                 * The user call chain is stored in LBR registers.
                 * LBR are pair registers. The caller is stored
                 * in "from" register, while the callee is stored
                 * in "to" register.
                 * For example, there is a call stack
                 * "A"->"B"->"C"->"D".
                 * The LBR registers will be recorded like
                 * "C"->"D", "B"->"C", "A"->"B".
                 * So only the first "to" register and all "from"
                 * registers are needed to construct the whole stack.
                 */
                total_nr = i + 1 + lbr_stack->nr + 1;
                kernel_callchain_nr = i + 1;

                printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

                for (i = 0; i < kernel_callchain_nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               i, callchain->ips[i]);

                printf("..... %2d: %016" PRIx64 "\n",
                       (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
                for (i = 0; i < lbr_stack->nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
        }
}
static void callchain__printf(struct perf_evsel *evsel,
                              struct perf_sample *sample)
{
        unsigned int i;
        struct ip_callchain *callchain = sample->callchain;

        if (perf_evsel__has_branch_callstack(evsel))
                callchain__lbr_callstack_printf(sample);

        printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

        for (i = 0; i < callchain->nr; i++)
                printf("..... %2d: %016" PRIx64 "\n",
                       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
        uint64_t i;

        printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

        for (i = 0; i < sample->branch_stack->nr; i++) {
                struct branch_entry *e = &sample->branch_stack->entries[i];

                printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
                        i, e->from, e->to,
                        e->flags.cycles,
                        e->flags.mispred ? "M" : " ",
                        e->flags.predicted ? "P" : " ",
                        e->flags.abort ? "A" : " ",
                        e->flags.in_tx ? "T" : " ",
                        (unsigned)e->flags.reserved);
        }
}
static void regs_dump__printf(u64 mask, u64 *regs)
{
        unsigned rid, i = 0;

        for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
                u64 val = regs[i++];

                printf(".... %-5s 0x%" PRIx64 "\n",
                       perf_reg_name(rid), val);
        }
}

static const char *regs_abi[] = {
        [PERF_SAMPLE_REGS_ABI_NONE] = "none",
        [PERF_SAMPLE_REGS_ABI_32] = "32-bit",
        [PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
        if (d->abi > PERF_SAMPLE_REGS_ABI_64)
                return "unknown";

        return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
        u64 mask = regs->mask;

        printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
               type,
               mask,
               regs_dump_abi(regs));

        regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
        struct regs_dump *user_regs = &sample->user_regs;

        if (user_regs->regs)
                regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
        struct regs_dump *intr_regs = &sample->intr_regs;

        if (intr_regs->regs)
                regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
        printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
               dump->size, dump->offset);
}
static void perf_evlist__print_tstamp(struct perf_evlist *evlist,
                                      union perf_event *event,
                                      struct perf_sample *sample)
{
        u64 sample_type = __perf_evlist__combined_sample_type(evlist);

        if (event->header.type != PERF_RECORD_SAMPLE &&
            !perf_evlist__sample_id_all(evlist)) {
                fputs("-1 -1 ", stdout);
                return;
        }

        if ((sample_type & PERF_SAMPLE_CPU))
                printf("%u ", sample->cpu);

        if (sample_type & PERF_SAMPLE_TIME)
                printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
        printf("... sample_read:\n");

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                printf("...... time enabled %016" PRIx64 "\n",
                       sample->read.time_enabled);

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                printf("...... time running %016" PRIx64 "\n",
                       sample->read.time_running);

        if (read_format & PERF_FORMAT_GROUP) {
                u64 i;

                printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

                for (i = 0; i < sample->read.group.nr; i++) {
                        struct sample_read_value *value;

                        value = &sample->read.group.values[i];
                        printf("..... id %016" PRIx64
                               ", value %016" PRIx64 "\n",
                               value->id, value->value);
                }
        } else
                printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
                       sample->read.one.id, sample->read.one.value);
}
static void dump_event(struct perf_evlist *evlist, union perf_event *event,
                       u64 file_offset, struct perf_sample *sample)
{
        if (!dump_trace)
                return;

        printf("\n%#" PRIx64 " [%#x]: event: %d\n",
               file_offset, event->header.size, event->header.type);

        trace_event(event);

        if (sample)
                perf_evlist__print_tstamp(evlist, event, sample);

        printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
               event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
                        struct perf_sample *sample)
{
        u64 sample_type;

        if (!dump_trace)
                return;

        printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
               event->header.misc, sample->pid, sample->tid, sample->ip,
               sample->period, sample->addr);

        sample_type = evsel->attr.sample_type;

        if (sample_type & PERF_SAMPLE_CALLCHAIN)
                callchain__printf(evsel, sample);

        if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel))
                branch_stack__printf(sample);

        if (sample_type & PERF_SAMPLE_REGS_USER)
                regs_user__printf(sample);

        if (sample_type & PERF_SAMPLE_REGS_INTR)
                regs_intr__printf(sample);

        if (sample_type & PERF_SAMPLE_STACK_USER)
                stack_user__printf(&sample->user_stack);

        if (sample_type & PERF_SAMPLE_WEIGHT)
                printf("... weight: %" PRIu64 "\n", sample->weight);

        if (sample_type & PERF_SAMPLE_DATA_SRC)
                printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

        if (sample_type & PERF_SAMPLE_TRANSACTION)
                printf("... transaction: %" PRIx64 "\n", sample->transaction);

        if (sample_type & PERF_SAMPLE_READ)
                sample_read__printf(sample, evsel->attr.read_format);
}
static struct machine *machines__find_for_cpumode(struct machines *machines,
                                                  union perf_event *event,
                                                  struct perf_sample *sample)
{
        struct machine *machine;

        if (perf_guest &&
            ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
             (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
                u32 pid;

                if (event->header.type == PERF_RECORD_MMAP
                    || event->header.type == PERF_RECORD_MMAP2)
                        pid = event->mmap.pid;
                else
                        pid = sample->pid;

                machine = machines__find(machines, pid);
                if (!machine)
                        machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
                return machine;
        }

        return &machines->host;
}
static int deliver_sample_value(struct perf_evlist *evlist,
                                struct perf_tool *tool,
                                union perf_event *event,
                                struct perf_sample *sample,
                                struct sample_read_value *v,
                                struct machine *machine)
{
        struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);

        if (sid) {
                sample->id     = v->id;
                sample->period = v->value - sid->period;
                sid->period    = v->value;
        }

        if (!sid || sid->evsel == NULL) {
                ++evlist->stats.nr_unknown_id;
                return 0;
        }

        return tool->sample(tool, event, sample, sid->evsel, machine);
}

static int deliver_sample_group(struct perf_evlist *evlist,
                                struct perf_tool *tool,
                                union perf_event *event,
                                struct perf_sample *sample,
                                struct machine *machine)
{
        int ret = -EINVAL;
        u64 i;

        for (i = 0; i < sample->read.group.nr; i++) {
                ret = deliver_sample_value(evlist, tool, event, sample,
                                           &sample->read.group.values[i],
                                           machine);
                if (ret)
                        break;
        }

        return ret;
}
static int
perf_evlist__deliver_sample(struct perf_evlist *evlist,
                            struct perf_tool *tool,
                            union perf_event *event,
                            struct perf_sample *sample,
                            struct perf_evsel *evsel,
                            struct machine *machine)
{
        /* We know evsel != NULL. */
        u64 sample_type = evsel->attr.sample_type;
        u64 read_format = evsel->attr.read_format;

        /* Standard sample delivery. */
        if (!(sample_type & PERF_SAMPLE_READ))
                return tool->sample(tool, event, sample, evsel, machine);

        /* For PERF_SAMPLE_READ we have either single or group mode. */
        if (read_format & PERF_FORMAT_GROUP)
                return deliver_sample_group(evlist, tool, event, sample,
                                            machine);
        else
                return deliver_sample_value(evlist, tool, event, sample,
                                            &sample->read.one, machine);
}
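
/*
 * Dispatch one kernel-generated event to the tool callback that matches its
 * record type, updating the evlist statistics (lost events/samples, truncated
 * AUX data, unknown ids) along the way.
 */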
static int machines__deliver_event(struct machines *machines,
                                   struct perf_evlist *evlist,
                                   union perf_event *event,
                                   struct perf_sample *sample,
                                   struct perf_tool *tool, u64 file_offset)
{
        struct perf_evsel *evsel;
        struct machine *machine;

        dump_event(evlist, event, file_offset, sample);

        evsel = perf_evlist__id2evsel(evlist, sample->id);

        machine = machines__find_for_cpumode(machines, event, sample);

        switch (event->header.type) {
        case PERF_RECORD_SAMPLE:
                if (evsel == NULL) {
                        ++evlist->stats.nr_unknown_id;
                        return 0;
                }
                dump_sample(evsel, event, sample);
                if (machine == NULL) {
                        ++evlist->stats.nr_unprocessable_samples;
                        return 0;
                }
                return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
        case PERF_RECORD_MMAP:
                return tool->mmap(tool, event, sample, machine);
        case PERF_RECORD_MMAP2:
                if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
                        ++evlist->stats.nr_proc_map_timeout;
                return tool->mmap2(tool, event, sample, machine);
        case PERF_RECORD_COMM:
                return tool->comm(tool, event, sample, machine);
        case PERF_RECORD_FORK:
                return tool->fork(tool, event, sample, machine);
        case PERF_RECORD_EXIT:
                return tool->exit(tool, event, sample, machine);
        case PERF_RECORD_LOST:
                if (tool->lost == perf_event__process_lost)
                        evlist->stats.total_lost += event->lost.lost;
                return tool->lost(tool, event, sample, machine);
        case PERF_RECORD_LOST_SAMPLES:
                if (tool->lost_samples == perf_event__process_lost_samples)
                        evlist->stats.total_lost_samples += event->lost_samples.lost;
                return tool->lost_samples(tool, event, sample, machine);
        case PERF_RECORD_READ:
                return tool->read(tool, event, sample, evsel, machine);
        case PERF_RECORD_THROTTLE:
                return tool->throttle(tool, event, sample, machine);
        case PERF_RECORD_UNTHROTTLE:
                return tool->unthrottle(tool, event, sample, machine);
        case PERF_RECORD_AUX:
                if (tool->aux == perf_event__process_aux &&
                    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED))
                        evlist->stats.total_aux_lost += 1;
                return tool->aux(tool, event, sample, machine);
        case PERF_RECORD_ITRACE_START:
                return tool->itrace_start(tool, event, sample, machine);
        case PERF_RECORD_SWITCH:
        case PERF_RECORD_SWITCH_CPU_WIDE:
                return tool->context_switch(tool, event, sample, machine);
        default:
                ++evlist->stats.nr_unknown_events;
                return -1;
        }
}
static int perf_session__deliver_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_sample *sample,
                                       struct perf_tool *tool,
                                       u64 file_offset)
{
        int ret;

        ret = auxtrace__process_event(session, event, sample, tool);
        if (ret < 0)
                return ret;
        if (ret)
                return 0;

        return machines__deliver_event(&session->machines, session->evlist,
                                       event, sample, tool, file_offset);
}
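
/*
 * Synthetic/user events (attr, tracing data, build ids, auxtrace, stat, ...)
 * carry no sample data and are never queued for timestamp ordering; they are
 * handled right away, repositioning the file descriptor for handlers that
 * read their payload directly from the file.
 */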
static s64 perf_session__process_user_event(struct perf_session *session,
                                            union perf_event *event,
                                            u64 file_offset)
{
        struct ordered_events *oe = &session->ordered_events;
        struct perf_tool *tool = session->tool;
        int fd = perf_data_file__fd(session->file);
        int err;

        dump_event(session->evlist, event, file_offset, NULL);

        /* These events are processed right away */
        switch (event->header.type) {
        case PERF_RECORD_HEADER_ATTR:
                err = tool->attr(tool, event, &session->evlist);
                if (err == 0) {
                        perf_session__set_id_hdr_size(session);
                        perf_session__set_comm_exec(session);
                }
                return err;
        case PERF_RECORD_EVENT_UPDATE:
                return tool->event_update(tool, event, &session->evlist);
        case PERF_RECORD_HEADER_EVENT_TYPE:
                /*
                 * Deprecated, but we need to handle it for the sake
                 * of old data files created in pipe mode.
                 */
                return 0;
        case PERF_RECORD_HEADER_TRACING_DATA:
                /* setup for reading amidst mmap */
                lseek(fd, file_offset, SEEK_SET);
                return tool->tracing_data(tool, event, session);
        case PERF_RECORD_HEADER_BUILD_ID:
                return tool->build_id(tool, event, session);
        case PERF_RECORD_FINISHED_ROUND:
                return tool->finished_round(tool, event, oe);
        case PERF_RECORD_ID_INDEX:
                return tool->id_index(tool, event, session);
        case PERF_RECORD_AUXTRACE_INFO:
                return tool->auxtrace_info(tool, event, session);
        case PERF_RECORD_AUXTRACE:
                /* setup for reading amidst mmap */
                lseek(fd, file_offset + event->header.size, SEEK_SET);
                return tool->auxtrace(tool, event, session);
        case PERF_RECORD_AUXTRACE_ERROR:
                perf_session__auxtrace_error_inc(session, event);
                return tool->auxtrace_error(tool, event, session);
        case PERF_RECORD_THREAD_MAP:
                return tool->thread_map(tool, event, session);
        case PERF_RECORD_CPU_MAP:
                return tool->cpu_map(tool, event, session);
        case PERF_RECORD_STAT_CONFIG:
                return tool->stat_config(tool, event, session);
        case PERF_RECORD_STAT:
                return tool->stat(tool, event, session);
        case PERF_RECORD_STAT_ROUND:
                return tool->stat_round(tool, event, session);
        case PERF_RECORD_TIME_CONV:
                session->time_conv = event->time_conv;
                return tool->time_conv(tool, event, session);
        default:
                return -EINVAL;
        }
}
int perf_session__deliver_synth_event(struct perf_session *session,
                                      union perf_event *event,
                                      struct perf_sample *sample)
{
        struct perf_evlist *evlist = session->evlist;
        struct perf_tool *tool = session->tool;

        events_stats__inc(&evlist->stats, event->header.type);

        if (event->header.type >= PERF_RECORD_USER_TYPE_START)
                return perf_session__process_user_event(session, event, 0);

        return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
        perf_event__swap_op swap;

        swap = perf_event__swap_ops[event->header.type];
        if (swap)
                swap(event, sample_id_all);
}
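
/*
 * Read a single event at an arbitrary file offset without disturbing normal
 * processing: use the single mmap of the file when available, otherwise
 * lseek()+readn() the header and payload into the caller-supplied buffer,
 * byte-swapping and parsing the sample when the file requires it.
 */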
int perf_session__peek_event(struct perf_session *session, off_t file_offset,
                             void *buf, size_t buf_sz,
                             union perf_event **event_ptr,
                             struct perf_sample *sample)
{
        union perf_event *event;
        size_t hdr_sz, rest;
        int fd;

        if (session->one_mmap && !session->header.needs_swap) {
                event = file_offset - session->one_mmap_offset +
                        session->one_mmap_addr;
                goto out_parse_sample;
        }

        if (perf_data_file__is_pipe(session->file))
                return -1;

        fd = perf_data_file__fd(session->file);
        hdr_sz = sizeof(struct perf_event_header);

        if (buf_sz < hdr_sz)
                return -1;

        if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
            readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
                return -1;

        event = (union perf_event *)buf;

        if (session->header.needs_swap)
                perf_event_header__bswap(&event->header);

        if (event->header.size < hdr_sz || event->header.size > buf_sz)
                return -1;

        rest = event->header.size - hdr_sz;

        if (readn(fd, buf, rest) != (ssize_t)rest)
                return -1;

        if (session->header.needs_swap)
                event_swap(event, perf_evlist__sample_id_all(session->evlist));

out_parse_sample:

        if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
            perf_evlist__parse_sample(session->evlist, event, sample))
                return -1;

        *event_ptr = event;

        return 0;
}
static s64 perf_session__process_event(struct perf_session *session,
                                       union perf_event *event, u64 file_offset)
{
        struct perf_evlist *evlist = session->evlist;
        struct perf_tool *tool = session->tool;
        struct perf_sample sample;
        int ret;

        if (session->header.needs_swap)
                event_swap(event, perf_evlist__sample_id_all(evlist));

        if (event->header.type >= PERF_RECORD_HEADER_MAX)
                return -EINVAL;

        events_stats__inc(&evlist->stats, event->header.type);

        if (event->header.type >= PERF_RECORD_USER_TYPE_START)
                return perf_session__process_user_event(session, event, file_offset);

        /*
         * For all kernel events we get the sample data
         */
        ret = perf_evlist__parse_sample(evlist, event, &sample);
        if (ret)
                return ret;

        if (tool->ordered_events) {
                ret = perf_session__queue_event(session, event, &sample, file_offset);
                if (ret != -ETIME)
                        return ret;
        }

        return perf_session__deliver_event(session, event, &sample, tool,
                                           file_offset);
}
void perf_event_header__bswap(struct perf_event_header *hdr)
{
        hdr->type = bswap_32(hdr->type);
        hdr->misc = bswap_16(hdr->misc);
        hdr->size = bswap_16(hdr->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
        return machine__findnew_thread(&session->machines.host, -1, pid);
}
int perf_session__register_idle_thread(struct perf_session *session)
{
        struct thread *thread;
        int err = 0;

        thread = machine__findnew_thread(&session->machines.host, 0, 0);
        if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
                pr_err("problem inserting idle task.\n");
                err = -1;
        }

        /* machine__findnew_thread() got the thread, so put it */
        thread__put(thread);

        return err;
}
static void perf_session__warn_about_errors(const struct perf_session *session)
{
        const struct events_stats *stats = &session->evlist->stats;
        const struct ordered_events *oe = &session->ordered_events;

        if (session->tool->lost == perf_event__process_lost &&
            stats->nr_events[PERF_RECORD_LOST] != 0) {
                ui__warning("Processed %d events and lost %d chunks!\n\n"
                            "Check IO/CPU overload!\n\n",
                            stats->nr_events[0],
                            stats->nr_events[PERF_RECORD_LOST]);
        }

        if (session->tool->lost_samples == perf_event__process_lost_samples) {
                double drop_rate;

                drop_rate = (double)stats->total_lost_samples /
                            (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
                if (drop_rate > 0.05) {
                        ui__warning("Processed %" PRIu64 " samples and lost %3.2f%% samples!\n\n",
                                    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
                                    drop_rate * 100.0);
                }
        }

        if (session->tool->aux == perf_event__process_aux &&
            stats->total_aux_lost != 0) {
                ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
                            stats->total_aux_lost,
                            stats->nr_events[PERF_RECORD_AUX]);
        }

        if (stats->nr_unknown_events != 0) {
                ui__warning("Found %u unknown events!\n\n"
                            "Is this an older tool processing a perf.data "
                            "file generated by a more recent tool?\n\n"
                            "If that is not the case, consider "
                            "reporting to linux-kernel@vger.kernel.org.\n\n",
                            stats->nr_unknown_events);
        }

        if (stats->nr_unknown_id != 0) {
                ui__warning("%u samples with id not present in the header\n",
                            stats->nr_unknown_id);
        }

        if (stats->nr_invalid_chains != 0) {
                ui__warning("Found invalid callchains!\n\n"
                            "%u out of %u events were discarded for this reason.\n\n"
                            "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
                            stats->nr_invalid_chains,
                            stats->nr_events[PERF_RECORD_SAMPLE]);
        }

        if (stats->nr_unprocessable_samples != 0) {
                ui__warning("%u unprocessable samples recorded.\n"
                            "Do you have a KVM guest running and not using 'perf kvm'?\n",
                            stats->nr_unprocessable_samples);
        }

        if (oe->nr_unordered_events != 0)
                ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);

        events_stats__auxtrace_error_warn(stats);

        if (stats->nr_proc_map_timeout != 0) {
                ui__warning("%d map information files for pre-existing threads were\n"
                            "not processed, if there are samples for addresses they\n"
                            "will not be resolved, you may find out which are these\n"
                            "threads by running with -v and redirecting the output\n"
                            "to a file.\n"
                            "The time limit to process proc map is too short?\n"
                            "Increase it by --proc-map-timeout\n",
                            stats->nr_proc_map_timeout);
        }
}
static int perf_session__flush_thread_stack(struct thread *thread,
                                            void *p __maybe_unused)
{
        return thread_stack__flush(thread);
}

static int perf_session__flush_thread_stacks(struct perf_session *session)
{
        return machines__for_each_thread(&session->machines,
                                         perf_session__flush_thread_stack,
                                         NULL);
}

volatile int session_done;
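
/*
 * Pipe mode: events arrive on a non-seekable fd, so read one header at a
 * time, grow the staging buffer as needed, then hand each event to
 * perf_session__process_event() until the stream ends or session_done()
 * reports that processing was interrupted.
 */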
static int __perf_session__process_pipe_events(struct perf_session *session)
{
        struct ordered_events *oe = &session->ordered_events;
        struct perf_tool *tool = session->tool;
        int fd = perf_data_file__fd(session->file);
        union perf_event *event;
        uint32_t size, cur_size = 0;
        void *buf = NULL;
        s64 skip = 0;
        u64 head;
        ssize_t err;
        void *p;

        perf_tool__fill_defaults(tool);

        head = 0;
        cur_size = sizeof(union perf_event);

        buf = malloc(cur_size);
        if (!buf)
                return -errno;
more:
        event = buf;
        err = readn(fd, event, sizeof(struct perf_event_header));
        if (err <= 0) {
                if (err == 0)
                        goto done;

                pr_err("failed to read event header\n");
                goto out_err;
        }

        if (session->header.needs_swap)
                perf_event_header__bswap(&event->header);

        size = event->header.size;
        if (size < sizeof(struct perf_event_header)) {
                pr_err("bad event header size\n");
                goto out_err;
        }

        if (size > cur_size) {
                void *new = realloc(buf, size);
                if (!new) {
                        pr_err("failed to allocate memory to read event\n");
                        goto out_err;
                }
                buf = new;
                cur_size = size;
                event = buf;
        }
        p = event;
        p += sizeof(struct perf_event_header);

        if (size - sizeof(struct perf_event_header)) {
                err = readn(fd, p, size - sizeof(struct perf_event_header));
                if (err <= 0) {
                        if (err == 0) {
                                pr_err("unexpected end of event stream\n");
                                goto done;
                        }

                        pr_err("failed to read event data\n");
                        goto out_err;
                }
        }

        if ((skip = perf_session__process_event(session, event, head)) < 0) {
                pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
                       head, event->header.size, event->header.type);
                err = -EINVAL;
                goto out_err;
        }

        head += size;

        if (skip > 0)
                head += skip;

        if (!session_done())
                goto more;
done:
        /* do the final flush for ordered samples */
        err = ordered_events__flush(oe, OE_FLUSH__FINAL);
        if (err)
                goto out_err;
        err = auxtrace__flush_events(session, tool);
        if (err)
                goto out_err;
        err = perf_session__flush_thread_stacks(session);
out_err:
        free(buf);
        perf_session__warn_about_errors(session);
        ordered_events__free(&session->ordered_events);
        auxtrace__free_events(session);
        return err;
}
static union perf_event *
fetch_mmaped_event(struct perf_session *session,
                   u64 head, size_t mmap_size, char *buf)
{
        union perf_event *event;

        /*
         * Ensure we have enough space remaining to read
         * the size of the event in the headers.
         */
        if (head + sizeof(event->header) > mmap_size)
                return NULL;

        event = (union perf_event *)(buf + head);

        if (session->header.needs_swap)
                perf_event_header__bswap(&event->header);

        if (head + event->header.size > mmap_size) {
                /* We're not fetching the event so swap back again */
                if (session->header.needs_swap)
                        perf_event_header__bswap(&event->header);
                return NULL;
        }

        return event;
}
/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif
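
/*
 * File mode: map the data section in MMAP_SIZE windows and walk the events in
 * place. When an event straddles the end of the current window, the window is
 * remapped further into the file and the walk continues; progress is reported
 * through ui_progress.
 */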
static int __perf_session__process_events(struct perf_session *session,
                                           u64 data_offset, u64 data_size,
                                           u64 file_size)
{
        struct ordered_events *oe = &session->ordered_events;
        struct perf_tool *tool = session->tool;
        int fd = perf_data_file__fd(session->file);
        u64 head, page_offset, file_offset, file_pos, size;
        int err, mmap_prot, mmap_flags, map_idx = 0;
        size_t mmap_size;
        char *buf, *mmaps[NUM_MMAPS];
        union perf_event *event;
        struct ui_progress prog;
        s64 skip;

        perf_tool__fill_defaults(tool);

        page_offset = page_size * (data_offset / page_size);
        file_offset = page_offset;
        head = data_offset - page_offset;

        if (data_size == 0)
                goto out;

        if (data_offset + data_size < file_size)
                file_size = data_offset + data_size;

        ui_progress__init(&prog, file_size, "Processing events...");

        mmap_size = MMAP_SIZE;
        if (mmap_size > file_size) {
                mmap_size = file_size;
                session->one_mmap = true;
        }

        memset(mmaps, 0, sizeof(mmaps));

        mmap_prot  = PROT_READ;
        mmap_flags = MAP_SHARED;

        if (session->header.needs_swap) {
                mmap_prot  |= PROT_WRITE;
                mmap_flags = MAP_PRIVATE;
        }
remap:
        buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
                   file_offset);
        if (buf == MAP_FAILED) {
                pr_err("failed to mmap file\n");
                err = -errno;
                goto out_err;
        }
        mmaps[map_idx] = buf;
        map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
        file_pos = file_offset + head;
        if (session->one_mmap) {
                session->one_mmap_addr = buf;
                session->one_mmap_offset = file_offset;
        }

more:
        event = fetch_mmaped_event(session, head, mmap_size, buf);
        if (!event) {
                if (mmaps[map_idx]) {
                        munmap(mmaps[map_idx], mmap_size);
                        mmaps[map_idx] = NULL;
                }

                page_offset = page_size * (head / page_size);
                file_offset += page_offset;
                head -= page_offset;
                goto remap;
        }

        size = event->header.size;

        if (size < sizeof(struct perf_event_header) ||
            (skip = perf_session__process_event(session, event, file_pos)) < 0) {
                pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
                       file_offset + head, event->header.size,
                       event->header.type);
                err = -EINVAL;
                goto out_err;
        }

        if (skip)
                size += skip;

        head += size;
        file_pos += size;

        ui_progress__update(&prog, size);

        if (session_done())
                goto out;

        if (file_pos < file_size)
                goto more;

out:
        /* do the final flush for ordered samples */
        err = ordered_events__flush(oe, OE_FLUSH__FINAL);
        if (err)
                goto out_err;
        err = auxtrace__flush_events(session, tool);
        if (err)
                goto out_err;
        err = perf_session__flush_thread_stacks(session);
out_err:
        ui_progress__finish();
        perf_session__warn_about_errors(session);
        /*
         * We may be switching perf.data output, make ordered_events
         * reusable.
         */
        ordered_events__reinit(&session->ordered_events);
        auxtrace__free_events(session);
        session->one_mmap = false;
        return err;
}
int perf_session__process_events(struct perf_session *session)
{
        u64 size = perf_data_file__size(session->file);
        int err;

        if (perf_session__register_idle_thread(session) < 0)
                return -ENOMEM;

        if (!perf_data_file__is_pipe(session->file))
                err = __perf_session__process_events(session,
                                                     session->header.data_offset,
                                                     session->header.data_size, size);
        else
                err = __perf_session__process_pipe_events(session);

        return err;
}
bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
        struct perf_evsel *evsel;

        evlist__for_each(session->evlist, evsel) {
                if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
                        return true;
        }

        pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
        return false;
}
int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
                                     const char *symbol_name, u64 addr)
{
        char *bracket;
        int i;
        struct ref_reloc_sym *ref;

        ref = zalloc(sizeof(struct ref_reloc_sym));
        if (ref == NULL)
                return -ENOMEM;

        ref->name = strdup(symbol_name);
        if (ref->name == NULL) {
                free(ref);
                return -ENOMEM;
        }

        bracket = strchr(ref->name, ']');
        if (bracket)
                *bracket = '\0';

        ref->addr = addr;

        for (i = 0; i < MAP__NR_TYPES; ++i) {
                struct kmap *kmap = map__kmap(maps[i]);

                if (!kmap)
                        continue;
                kmap->ref_reloc_sym = ref;
        }

        return 0;
}
size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
        return machines__fprintf_dsos(&session->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
                                          bool (skip)(struct dso *dso, int parm), int parm)
{
        return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
        size_t ret;
        const char *msg = "";

        if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
                msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";

        ret = fprintf(fp, "\nAggregated stats:%s\n", msg);

        ret += events_stats__fprintf(&session->evlist->stats, fp);
        return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
        /*
         * FIXME: Here we have to actually print all the machines in this
         * session, not just the host...
         */
        return machine__fprintf(&session->machines.host, fp);
}
struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
                                                   unsigned int type)
{
        struct perf_evsel *pos;

        evlist__for_each(session->evlist, pos) {
                if (pos->attr.type == type)
                        return pos;
        }
        return NULL;
}
int perf_session__cpu_bitmap(struct perf_session *session,
                             const char *cpu_list, unsigned long *cpu_bitmap)
{
        int i, err = -1;
        struct cpu_map *map;

        for (i = 0; i < PERF_TYPE_MAX; ++i) {
                struct perf_evsel *evsel;

                evsel = perf_session__find_first_evtype(session, i);
                if (!evsel)
                        continue;

                if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
                        pr_err("File does not contain CPU events. "
                               "Remove -c option to proceed.\n");
                        return -1;
                }
        }

        map = cpu_map__new(cpu_list);
        if (map == NULL) {
                pr_err("Invalid cpu_list\n");
                return -1;
        }

        for (i = 0; i < map->nr; i++) {
                int cpu = map->map[i];

                if (cpu >= MAX_NR_CPUS) {
                        pr_err("Requested CPU %d too large. "
                               "Consider raising MAX_NR_CPUS\n", cpu);
                        goto out_delete_map;
                }

                set_bit(cpu, cpu_bitmap);
        }

        err = 0;

out_delete_map:
        cpu_map__put(map);
        return err;
}
void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
                                bool full)
{
        struct stat st;
        int fd, ret;

        if (session == NULL || fp == NULL)
                return;

        fd = perf_data_file__fd(session->file);

        ret = fstat(fd, &st);
        if (ret == -1)
                return;

        fprintf(fp, "# ========\n");
        fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
        perf_header__fprintf_info(session, fp, full);
        fprintf(fp, "# ========\n#\n");
}
int __perf_session__set_tracepoints_handlers(struct perf_session *session,
                                             const struct perf_evsel_str_handler *assocs,
                                             size_t nr_assocs)
{
        struct perf_evsel *evsel;
        size_t i;
        int err;

        for (i = 0; i < nr_assocs; i++) {
                /*
                 * Adding a handler for an event not in the session,
                 * just ignore it.
                 */
                evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
                if (evsel == NULL)
                        continue;

                err = -EEXIST;
                if (evsel->handler != NULL)
                        goto out;
                evsel->handler = assocs[i].handler;
        }

        err = 0;
out:
        return err;
}
int perf_event__process_id_index(struct perf_tool *tool __maybe_unused,
                                 union perf_event *event,
                                 struct perf_session *session)
{
        struct perf_evlist *evlist = session->evlist;
        struct id_index_event *ie = &event->id_index;
        size_t i, nr, max_nr;

        max_nr = (ie->header.size - sizeof(struct id_index_event)) /
                 sizeof(struct id_index_entry);
        nr = ie->nr;
        if (nr > max_nr)
                return -EINVAL;

        if (dump_trace)
                fprintf(stdout, " nr: %zu\n", nr);

        for (i = 0; i < nr; i++) {
                struct id_index_entry *e = &ie->entries[i];
                struct perf_sample_id *sid;

                if (dump_trace) {
                        fprintf(stdout, " ... id: %"PRIu64, e->id);
                        fprintf(stdout, "  idx: %"PRIu64, e->idx);
                        fprintf(stdout, "  cpu: %"PRId64, e->cpu);
                        fprintf(stdout, "  tid: %"PRId64 "\n", e->tid);
                }

                sid = perf_evlist__id2sid(evlist, e->id);
                if (!sid)
                        return -ENOENT;
                sid->idx = e->idx;
                sid->cpu = e->cpu;
                sid->tid = e->tid;
        }
        return 0;
}
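
/*
 * Build PERF_RECORD_ID_INDEX events describing every sample id known to the
 * evlist (idx/cpu/tid per id) and feed them to 'process', splitting the table
 * into multiple events when it would not fit in a single 64k record.
 */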
int perf_event__synthesize_id_index(struct perf_tool *tool,
                                    perf_event__handler_t process,
                                    struct perf_evlist *evlist,
                                    struct machine *machine)
{
        union perf_event *ev;
        struct perf_evsel *evsel;
        size_t nr = 0, i = 0, sz, max_nr, n;
        int err;

        pr_debug2("Synthesizing id index\n");

        max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
                 sizeof(struct id_index_entry);

        evlist__for_each(evlist, evsel)
                nr += evsel->ids;

        n = nr > max_nr ? max_nr : nr;
        sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
        ev = zalloc(sz);
        if (!ev)
                return -ENOMEM;

        ev->id_index.header.type = PERF_RECORD_ID_INDEX;
        ev->id_index.header.size = sz;
        ev->id_index.nr = n;

        evlist__for_each(evlist, evsel) {
                u32 j;

                for (j = 0; j < evsel->ids; j++) {
                        struct id_index_entry *e;
                        struct perf_sample_id *sid;

                        if (i >= n) {
                                err = process(tool, ev, NULL, machine);
                                if (err)
                                        goto out_err;
                                nr -= n;
                                i = 0;
                        }

                        e = &ev->id_index.entries[i++];

                        e->id = evsel->id[j];

                        sid = perf_evlist__id2sid(evlist, e->id);
                        if (!sid) {
                                free(ev);
                                return -ENOENT;
                        }

                        e->idx = sid->idx;
                        e->cpu = sid->cpu;
                        e->tid = sid->tid;
                }
        }

        sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
        ev->id_index.header.size = sz;
        ev->id_index.nr = nr;

        err = process(tool, ev, NULL, machine);
out_err:
        free(ev);

        return err;
}