#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "session.h"
#include "sort.h"
#include "util.h"

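/*
 * Open the file backing this session. A filename of "-" means a pipe on
 * stdin, in which case only the header can be read up front; for regular
 * files we also check ownership (unless 'force' is given) and refuse
 * zero-sized files before reading the header.
 */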
static int perf_session__open(struct perf_session *self, bool force)
{
        struct stat input_stat;

        if (!strcmp(self->filename, "-")) {
                self->fd_pipe = true;
                self->fd = STDIN_FILENO;

                if (perf_header__read(self, self->fd) < 0)
                        pr_err("incompatible file format");

                return 0;
        }

        self->fd = open(self->filename, O_RDONLY);
        if (self->fd < 0) {
                int err = errno;

                pr_err("failed to open %s: %s", self->filename, strerror(err));
                if (err == ENOENT && !strcmp(self->filename, "perf.data"))
                        pr_err(" (try 'perf record' first)");
                pr_err("\n");
                return -errno;
        }

        if (fstat(self->fd, &input_stat) < 0)
                goto out_close;

        if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
                pr_err("file %s not owned by current user or root\n",
                       self->filename);
                goto out_close;
        }

        if (!input_stat.st_size) {
                pr_info("zero-sized file (%s), nothing to do!\n",
                        self->filename);
                goto out_close;
        }

        if (perf_header__read(self, self->fd) < 0) {
                pr_err("incompatible file format");
                goto out_close;
        }

        self->size = input_stat.st_size;
        return 0;

out_close:
        close(self->fd);
        self->fd = -1;
        return -1;
}

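/*
 * With sample_id_all, the kernel appends the selected sample fields
 * (tid/pid, time, id, stream_id, cpu) to every non-sample event.
 * Precompute the size of that trailer so event parsing can skip it.
 */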
static void perf_session__id_header_size(struct perf_session *session)
{
        struct perf_sample *data;
        u64 sample_type = session->sample_type;
        u16 size = 0;

        if (!session->sample_id_all)
                goto out;

        if (sample_type & PERF_SAMPLE_TID)
                size += sizeof(data->tid) * 2;

        if (sample_type & PERF_SAMPLE_TIME)
                size += sizeof(data->time);

        if (sample_type & PERF_SAMPLE_ID)
                size += sizeof(data->id);

        if (sample_type & PERF_SAMPLE_STREAM_ID)
                size += sizeof(data->stream_id);

        if (sample_type & PERF_SAMPLE_CPU)
                size += sizeof(data->cpu) * 2;
out:
        session->id_hdr_size = size;
}

void perf_session__set_sample_id_all(struct perf_session *session, bool value)
{
        session->sample_id_all = value;
        perf_session__id_header_size(session);
}

void perf_session__set_sample_type(struct perf_session *session, u64 type)
{
        session->sample_type = type;
}

void perf_session__update_sample_type(struct perf_session *self)
{
        self->sample_type = perf_header__sample_type(&self->header);
        self->sample_id_all = perf_header__sample_id_all(&self->header);
        perf_session__id_header_size(self);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
        int ret = machine__create_kernel_maps(&self->host_machine);

        if (ret >= 0)
                ret = machines__create_guest_kernel_maps(&self->machines);
        return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
        machine__destroy_kernel_maps(&self->host_machine);
        machines__destroy_guest_kernel_maps(&self->machines);
}

struct perf_session *perf_session__new(const char *filename, int mode,
                                       bool force, bool repipe,
                                       struct perf_event_ops *ops)
{
        size_t len = filename ? strlen(filename) + 1 : 0;
        struct perf_session *self = zalloc(sizeof(*self) + len);

        if (self == NULL)
                goto out;

        if (perf_header__init(&self->header) < 0)
                goto out_free;

        memcpy(self->filename, filename, len);
        self->threads = RB_ROOT;
        INIT_LIST_HEAD(&self->dead_threads);
        self->hists_tree = RB_ROOT;
        self->last_match = NULL;
        /*
         * On 64bit we can mmap the data file in one go. No need for tiny mmap
         * slices. On 32bit we use 32MB.
         */
#if BITS_PER_LONG == 64
        self->mmap_window = ULLONG_MAX;
#else
        self->mmap_window = 32 * 1024 * 1024ULL;
#endif
        self->machines = RB_ROOT;
        self->repipe = repipe;
        INIT_LIST_HEAD(&self->ordered_samples.samples);
        INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
        INIT_LIST_HEAD(&self->ordered_samples.to_free);
        machine__init(&self->host_machine, "", HOST_KERNEL_ID);

        if (mode == O_RDONLY) {
                if (perf_session__open(self, force) < 0)
                        goto out_delete;
        } else if (mode == O_WRONLY) {
                /*
                 * In O_RDONLY mode this will be performed when reading the
                 * kernel MMAP event, in perf_event__process_mmap().
                 */
                if (perf_session__create_kernel_maps(self) < 0)
                        goto out_delete;
        }

        perf_session__update_sample_type(self);

        if (ops && ops->ordering_requires_timestamps &&
            ops->ordered_samples && !self->sample_id_all) {
                dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
                ops->ordered_samples = false;
        }

out:
        return self;
out_free:
        free(self);
        return NULL;
out_delete:
        perf_session__delete(self);
        return NULL;
}

static void perf_session__delete_dead_threads(struct perf_session *self)
{
        struct thread *n, *t;

        list_for_each_entry_safe(t, n, &self->dead_threads, node) {
                list_del(&t->node);
                thread__delete(t);
        }
}

static void perf_session__delete_threads(struct perf_session *self)
{
        struct rb_node *nd = rb_first(&self->threads);

        while (nd) {
                struct thread *t = rb_entry(nd, struct thread, rb_node);

                rb_erase(&t->rb_node, &self->threads);
                nd = rb_next(nd);
                thread__delete(t);
        }
}

void perf_session__delete(struct perf_session *self)
{
        perf_header__exit(&self->header);
        perf_session__destroy_kernel_maps(self);
        perf_session__delete_dead_threads(self);
        perf_session__delete_threads(self);
        machine__exit(&self->host_machine);
        close(self->fd);
        free(self);
}

void perf_session__remove_thread(struct perf_session *self, struct thread *th)
{
        self->last_match = NULL;
        rb_erase(&th->rb_node, &self->threads);
        /*
         * We may have references to this thread, for instance in some
         * hist_entry instances, so just move them to a separate list.
         */
        list_add_tail(&th->node, &self->dead_threads);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
        if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
                return true;

        return false;
}

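/*
 * Walk a raw callchain: PERF_CONTEXT_* markers embedded in the ip
 * stream switch the cpumode used to resolve the following ips. Each
 * resolved ip is appended to the session's callchain cursor, and the
 * first symbol matching parent_regex is reported through *parent.
 */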
int perf_session__resolve_callchain(struct perf_session *self,
                                    struct thread *thread,
                                    struct ip_callchain *chain,
                                    struct symbol **parent)
{
        u8 cpumode = PERF_RECORD_MISC_USER;
        unsigned int i;
        int err;

        callchain_cursor_reset(&self->callchain_cursor);

        for (i = 0; i < chain->nr; i++) {
                u64 ip = chain->ips[i];
                struct addr_location al;

                if (ip >= PERF_CONTEXT_MAX) {
                        switch (ip) {
                        case PERF_CONTEXT_HV:
                                cpumode = PERF_RECORD_MISC_HYPERVISOR;
                                break;
                        case PERF_CONTEXT_KERNEL:
                                cpumode = PERF_RECORD_MISC_KERNEL;
                                break;
                        case PERF_CONTEXT_USER:
                                cpumode = PERF_RECORD_MISC_USER;
                                break;
                        default:
                                break;
                        }
                        continue;
                }

                al.filtered = false;
                thread__find_addr_location(thread, self, cpumode,
                                           MAP__FUNCTION, thread->pid, ip, &al, NULL);
                if (al.sym != NULL) {
                        if (sort__has_parent && !*parent &&
                            symbol__match_parent_regex(al.sym))
                                *parent = al.sym;
                        if (!symbol_conf.use_callchain)
                                break;
                }

                err = callchain_cursor_append(&self->callchain_cursor,
                                              ip, al.map, al.sym);
                if (err)
                        return err;
        }

        return 0;
}

static int process_event_synth_stub(union perf_event *event __used,
                                    struct perf_session *session __used)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_stub(union perf_event *event __used,
                              struct perf_sample *sample __used,
                              struct perf_session *session __used)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round_stub(union perf_event *event __used,
                                       struct perf_session *session __used,
                                       struct perf_event_ops *ops __used)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round(union perf_event *event,
                                  struct perf_session *session,
                                  struct perf_event_ops *ops);

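/*
 * Point every handler the tool did not fill in at a stub, so the
 * dispatch code never has to test for NULL. Lost events default to a
 * real handler so dropped-event statistics are still collected.
 */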
static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
        if (handler->sample == NULL)
                handler->sample = process_event_stub;
        if (handler->mmap == NULL)
                handler->mmap = process_event_stub;
        if (handler->comm == NULL)
                handler->comm = process_event_stub;
        if (handler->fork == NULL)
                handler->fork = process_event_stub;
        if (handler->exit == NULL)
                handler->exit = process_event_stub;
        if (handler->lost == NULL)
                handler->lost = perf_event__process_lost;
        if (handler->read == NULL)
                handler->read = process_event_stub;
        if (handler->throttle == NULL)
                handler->throttle = process_event_stub;
        if (handler->unthrottle == NULL)
                handler->unthrottle = process_event_stub;
        if (handler->attr == NULL)
                handler->attr = process_event_synth_stub;
        if (handler->event_type == NULL)
                handler->event_type = process_event_synth_stub;
        if (handler->tracing_data == NULL)
                handler->tracing_data = process_event_synth_stub;
        if (handler->build_id == NULL)
                handler->build_id = process_event_synth_stub;
        if (handler->finished_round == NULL) {
                if (handler->ordered_samples)
                        handler->finished_round = process_finished_round;
                else
                        handler->finished_round = process_finished_round_stub;
        }
}

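/*
 * Byte-swapping for perf.data files recorded on a host of the opposite
 * endianness. mem_bswap_64() swaps a region of u64s in place and
 * expects byte_size to be a multiple of sizeof(u64).
 */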
void mem_bswap_64(void *src, int byte_size)
{
        u64 *m = src;

        while (byte_size > 0) {
                *m = bswap_64(*m);
                byte_size -= sizeof(u64);
                ++m;
        }
}

static void perf_event__all64_swap(union perf_event *event)
{
        struct perf_event_header *hdr = &event->header;
        mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event)
{
        event->comm.pid = bswap_32(event->comm.pid);
        event->comm.tid = bswap_32(event->comm.tid);
}

static void perf_event__mmap_swap(union perf_event *event)
{
        event->mmap.pid   = bswap_32(event->mmap.pid);
        event->mmap.tid   = bswap_32(event->mmap.tid);
        event->mmap.start = bswap_64(event->mmap.start);
        event->mmap.len   = bswap_64(event->mmap.len);
        event->mmap.pgoff = bswap_64(event->mmap.pgoff);
}

static void perf_event__task_swap(union perf_event *event)
{
        event->fork.pid  = bswap_32(event->fork.pid);
        event->fork.tid  = bswap_32(event->fork.tid);
        event->fork.ppid = bswap_32(event->fork.ppid);
        event->fork.ptid = bswap_32(event->fork.ptid);
        event->fork.time = bswap_64(event->fork.time);
}

static void perf_event__read_swap(union perf_event *event)
{
        event->read.pid          = bswap_32(event->read.pid);
        event->read.tid          = bswap_32(event->read.tid);
        event->read.value        = bswap_64(event->read.value);
        event->read.time_enabled = bswap_64(event->read.time_enabled);
        event->read.time_running = bswap_64(event->read.time_running);
        event->read.id           = bswap_64(event->read.id);
}

static void perf_event__attr_swap(union perf_event *event)
{
        size_t size;

        event->attr.attr.type          = bswap_32(event->attr.attr.type);
        event->attr.attr.size          = bswap_32(event->attr.attr.size);
        event->attr.attr.config        = bswap_64(event->attr.attr.config);
        event->attr.attr.sample_period = bswap_64(event->attr.attr.sample_period);
        event->attr.attr.sample_type   = bswap_64(event->attr.attr.sample_type);
        event->attr.attr.read_format   = bswap_64(event->attr.attr.read_format);
        event->attr.attr.wakeup_events = bswap_32(event->attr.attr.wakeup_events);
        event->attr.attr.bp_type       = bswap_32(event->attr.attr.bp_type);
        event->attr.attr.bp_addr       = bswap_64(event->attr.attr.bp_addr);
        event->attr.attr.bp_len        = bswap_64(event->attr.attr.bp_len);

        size = event->header.size;
        size -= (void *)&event->attr.id - (void *)event;
        mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event)
{
        event->event_type.event_type.event_id =
                bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event)
{
        event->tracing_data.size = bswap_32(event->tracing_data.size);
}

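/*
 * Per-record-type swap handlers, indexed by PERF_RECORD_* type. A NULL
 * entry means the record body is not swapped here.
 */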
typedef void (*perf_event__swap_op)(union perf_event *event);

static perf_event__swap_op perf_event__swap_ops[] = {
        [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
        [PERF_RECORD_COMM]                = perf_event__comm_swap,
        [PERF_RECORD_FORK]                = perf_event__task_swap,
        [PERF_RECORD_EXIT]                = perf_event__task_swap,
        [PERF_RECORD_LOST]                = perf_event__all64_swap,
        [PERF_RECORD_READ]                = perf_event__read_swap,
        [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
        [PERF_RECORD_HEADER_ATTR]         = perf_event__attr_swap,
        [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
        [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
        [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
        [PERF_RECORD_HEADER_MAX]          = NULL,
};

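/*
 * A queued event awaiting ordered delivery. Nodes sit on one of the
 * ordered_samples lists: 'samples' (pending, sorted by timestamp),
 * 'sample_cache' (recyclable nodes) or 'to_free' (slab allocations).
 */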
struct sample_queue {
        u64                     timestamp;
        u64                     file_offset;
        union perf_event        *event;
        struct list_head        list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
        struct ordered_samples *os = &session->ordered_samples;

        while (!list_empty(&os->to_free)) {
                struct sample_queue *sq;

                sq = list_entry(os->to_free.next, struct sample_queue, list);
                list_del(&sq->list);
                free(sq);
        }
}

static int perf_session_deliver_event(struct perf_session *session,
                                      union perf_event *event,
                                      struct perf_sample *sample,
                                      struct perf_event_ops *ops,
                                      u64 file_offset);

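/*
 * Deliver every queued event with a timestamp at or below
 * os->next_flush, re-parsing each sample from its raw event, then
 * recycle the queue nodes onto the sample cache.
 */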
static void flush_sample_queue(struct perf_session *s,
                               struct perf_event_ops *ops)
{
        struct ordered_samples *os = &s->ordered_samples;
        struct list_head *head = &os->samples;
        struct sample_queue *tmp, *iter;
        struct perf_sample sample;
        u64 limit = os->next_flush;
        u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;

        if (!ops->ordered_samples || !limit)
                return;

        list_for_each_entry_safe(iter, tmp, head, list) {
                if (iter->timestamp > limit)
                        break;

                perf_session__parse_sample(s, iter->event, &sample);
                perf_session_deliver_event(s, iter->event, &sample, ops,
                                           iter->file_offset);

                os->last_flush = iter->timestamp;
                list_del(&iter->list);
                list_add(&iter->list, &os->sample_cache);
        }

        if (list_empty(head)) {
                os->last_sample = NULL;
        } else if (last_ts <= limit) {
                os->last_sample =
                        list_entry(head->prev, struct sample_queue, list);
        }
}

/*
 * When perf record finishes a pass over all buffers, it records this
 * pseudo event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(union perf_event *event __used,
                                  struct perf_session *session,
                                  struct perf_event_ops *ops)
{
        flush_sample_queue(session, ops);
        session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

        return 0;
}

/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
        struct ordered_samples *os = &s->ordered_samples;
        struct sample_queue *sample = os->last_sample;
        u64 timestamp = new->timestamp;
        struct list_head *p;

        os->last_sample = new;

        if (!sample) {
                list_add(&new->list, &os->samples);
                os->max_timestamp = timestamp;
                return;
        }

        /*
         * last_sample might point to some random place in the list as it's
         * the last queued event. We expect that the new event is close to
         * it.
         */
        if (sample->timestamp <= timestamp) {
                while (sample->timestamp <= timestamp) {
                        p = sample->list.next;
                        if (p == &os->samples) {
                                list_add_tail(&new->list, &os->samples);
                                os->max_timestamp = timestamp;
                                return;
                        }
                        sample = list_entry(p, struct sample_queue, list);
                }
                list_add_tail(&new->list, &sample->list);
        } else {
                while (sample->timestamp > timestamp) {
                        p = sample->list.prev;
                        if (p == &os->samples) {
                                list_add(&new->list, &os->samples);
                                return;
                        }
                        sample = list_entry(p, struct sample_queue, list);
                }
                list_add(&new->list, &sample->list);
        }
}

#define MAX_SAMPLE_BUFFER       (64 * 1024 / sizeof(struct sample_queue))

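/*
 * Queue one event for ordered delivery. Nodes are recycled from the
 * sample cache when possible; otherwise they come from slabs of
 * MAX_SAMPLE_BUFFER entries. Slot 0 of each slab links the slab onto
 * the to_free list, which is why allocation starts at slot 1 and
 * sample_buffer_idx at 2.
 */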
static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
                                    struct perf_sample *sample, u64 file_offset)
{
        struct ordered_samples *os = &s->ordered_samples;
        struct list_head *sc = &os->sample_cache;
        u64 timestamp = sample->time;
        struct sample_queue *new;

        if (!timestamp || timestamp == ~0ULL)
                return -ETIME;

        if (timestamp < s->ordered_samples.last_flush) {
                printf("Warning: Timestamp below last timeslice flush\n");
                return -EINVAL;
        }

        if (!list_empty(sc)) {
                new = list_entry(sc->next, struct sample_queue, list);
                list_del(&new->list);
        } else if (os->sample_buffer) {
                new = os->sample_buffer + os->sample_buffer_idx;
                if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
                        os->sample_buffer = NULL;
        } else {
                os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
                if (!os->sample_buffer)
                        return -ENOMEM;
                list_add(&os->sample_buffer->list, &os->to_free);
                os->sample_buffer_idx = 2;
                new = os->sample_buffer + 1;
        }

        new->timestamp = timestamp;
        new->file_offset = file_offset;
        new->event = event;

        __queue_event(new, s);

        return 0;
}

static void callchain__printf(struct perf_sample *sample)
{
        unsigned int i;

        printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

        for (i = 0; i < sample->callchain->nr; i++)
                printf("..... %2d: %016" PRIx64 "\n",
                       i, sample->callchain->ips[i]);
}

static void perf_session__print_tstamp(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_sample *sample)
{
        if (event->header.type != PERF_RECORD_SAMPLE &&
            !session->sample_id_all) {
                fputs("-1 -1 ", stdout);
                return;
        }

        if ((session->sample_type & PERF_SAMPLE_CPU))
                printf("%u ", sample->cpu);

        if (session->sample_type & PERF_SAMPLE_TIME)
                printf("%" PRIu64 " ", sample->time);
}

static void dump_event(struct perf_session *session, union perf_event *event,
                       u64 file_offset, struct perf_sample *sample)
{
        if (!dump_trace)
                return;

        printf("\n%#" PRIx64 " [%#x]: event: %d\n",
               file_offset, event->header.size, event->header.type);

        trace_event(event);

        if (sample)
                perf_session__print_tstamp(session, event, sample);

        printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
               event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_session *session, union perf_event *event,
                        struct perf_sample *sample)
{
        if (!dump_trace)
                return;

        printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 "\n",
               event->header.misc, sample->pid, sample->tid, sample->ip,
               sample->period);

        if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
                callchain__printf(sample);
}

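/*
 * Final dispatch: route one parsed kernel event to the tool's handler
 * for its record type. Unknown types are counted but not fatal.
 */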
static int perf_session_deliver_event(struct perf_session *session,
                                      union perf_event *event,
                                      struct perf_sample *sample,
                                      struct perf_event_ops *ops,
                                      u64 file_offset)
{
        dump_event(session, event, file_offset, sample);

        switch (event->header.type) {
        case PERF_RECORD_SAMPLE:
                dump_sample(session, event, sample);
                return ops->sample(event, sample, session);
        case PERF_RECORD_MMAP:
                return ops->mmap(event, sample, session);
        case PERF_RECORD_COMM:
                return ops->comm(event, sample, session);
        case PERF_RECORD_FORK:
                return ops->fork(event, sample, session);
        case PERF_RECORD_EXIT:
                return ops->exit(event, sample, session);
        case PERF_RECORD_LOST:
                return ops->lost(event, sample, session);
        case PERF_RECORD_READ:
                return ops->read(event, sample, session);
        case PERF_RECORD_THROTTLE:
                return ops->throttle(event, sample, session);
        case PERF_RECORD_UNTHROTTLE:
                return ops->unthrottle(event, sample, session);
        default:
                ++session->hists.stats.nr_unknown_events;
                return -1;
        }
}

static int perf_session__preprocess_sample(struct perf_session *session,
                                           union perf_event *event, struct perf_sample *sample)
{
        if (event->header.type != PERF_RECORD_SAMPLE ||
            !(session->sample_type & PERF_SAMPLE_CALLCHAIN))
                return 0;

        if (!ip_callchain__valid(sample->callchain, event)) {
                pr_debug("call-chain problem with event, skipping it.\n");
                ++session->hists.stats.nr_invalid_chains;
                session->hists.stats.total_invalid_chains += sample->period;
                return -EINVAL;
        }
        return 0;
}

static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
                                            struct perf_event_ops *ops, u64 file_offset)
{
        dump_event(session, event, file_offset, NULL);

        /* These events are processed right away */
        switch (event->header.type) {
        case PERF_RECORD_HEADER_ATTR:
                return ops->attr(event, session);
        case PERF_RECORD_HEADER_EVENT_TYPE:
                return ops->event_type(event, session);
        case PERF_RECORD_HEADER_TRACING_DATA:
                /* setup for reading amidst mmap */
                lseek(session->fd, file_offset, SEEK_SET);
                return ops->tracing_data(event, session);
        case PERF_RECORD_HEADER_BUILD_ID:
                return ops->build_id(event, session);
        case PERF_RECORD_FINISHED_ROUND:
                return ops->finished_round(event, session, ops);
        default:
                return -EINVAL;
        }
}

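/*
 * Central per-event pipeline: byte-swap if needed, reject out-of-range
 * types, account the event, handle synthetic user-space records right
 * away, then parse the sample and either queue it for ordered delivery
 * or deliver it immediately.
 */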
static int perf_session__process_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_event_ops *ops,
                                       u64 file_offset)
{
        struct perf_sample sample;
        int ret;

        if (session->header.needs_swap &&
            perf_event__swap_ops[event->header.type])
                perf_event__swap_ops[event->header.type](event);

        if (event->header.type >= PERF_RECORD_HEADER_MAX)
                return -EINVAL;

        hists__inc_nr_events(&session->hists, event->header.type);

        if (event->header.type >= PERF_RECORD_USER_TYPE_START)
                return perf_session__process_user_event(session, event, ops, file_offset);

        /*
         * For all kernel events we get the sample data
         */
        perf_session__parse_sample(session, event, &sample);

        /* Preprocess sample records - precheck callchains */
        if (perf_session__preprocess_sample(session, event, &sample))
                return 0;

        if (ops->ordered_samples) {
                ret = perf_session_queue_event(session, event, &sample,
                                               file_offset);
                if (ret != -ETIME)
                        return ret;
        }

        return perf_session_deliver_event(session, event, &sample, ops,
                                          file_offset);
}

void perf_event_header__bswap(struct perf_event_header *self)
{
        self->type = bswap_32(self->type);
        self->misc = bswap_16(self->misc);
        self->size = bswap_16(self->size);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
        struct thread *thread = perf_session__findnew(self, 0);

        if (thread == NULL || thread__set_comm(thread, "swapper")) {
                pr_err("problem inserting idle task.\n");
                thread = NULL;
        }

        return thread;
}

static void perf_session__warn_about_errors(const struct perf_session *session,
                                            const struct perf_event_ops *ops)
{
        if (ops->lost == perf_event__process_lost &&
            session->hists.stats.total_lost != 0) {
                ui__warning("Processed %" PRIu64 " events and LOST %" PRIu64
                            "!\n\nCheck IO/CPU overload!\n\n",
                            session->hists.stats.total_period,
                            session->hists.stats.total_lost);
        }

        if (session->hists.stats.nr_unknown_events != 0) {
                ui__warning("Found %u unknown events!\n\n"
                            "Is this an older tool processing a perf.data "
                            "file generated by a more recent tool?\n\n"
                            "If that is not the case, consider "
                            "reporting to linux-kernel@vger.kernel.org.\n\n",
                            session->hists.stats.nr_unknown_events);
        }

        if (session->hists.stats.nr_invalid_chains != 0) {
                ui__warning("Found invalid callchains!\n\n"
                            "%u out of %u events were discarded for this reason.\n\n"
                            "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
                            session->hists.stats.nr_invalid_chains,
                            session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
        }
}

#define session_done()  (*(volatile int *)(&session_done))
volatile int session_done;

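/*
 * Pipe mode: a pipe cannot be mmap'ed or seeked, so read one event at
 * a time into a stack buffer and push it through the same per-event
 * pipeline as the mmap path.
 */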
static int __perf_session__process_pipe_events(struct perf_session *self,
                                               struct perf_event_ops *ops)
{
        union perf_event event;
        uint32_t size;
        int skip = 0;
        u64 head;
        int err;
        void *p;

        perf_event_ops__fill_defaults(ops);

        head = 0;
more:
        err = readn(self->fd, &event, sizeof(struct perf_event_header));
        if (err <= 0) {
                if (err == 0)
                        goto done;

                pr_err("failed to read event header\n");
                goto out_err;
        }

        if (self->header.needs_swap)
                perf_event_header__bswap(&event.header);

        size = event.header.size;
        if (size == 0)
                size = 8;

        p = &event;
        p += sizeof(struct perf_event_header);

        if (size - sizeof(struct perf_event_header)) {
                err = readn(self->fd, p, size - sizeof(struct perf_event_header));
                if (err <= 0) {
                        if (err == 0) {
                                pr_err("unexpected end of event stream\n");
                                goto done;
                        }

                        pr_err("failed to read event data\n");
                        goto out_err;
                }
        }

        if (size == 0 ||
            (skip = perf_session__process_event(self, &event, ops, head)) < 0) {
                dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
                            head, event.header.size, event.header.type);
                /*
                 * assume we lost track of the stream, check alignment, and
                 * increment a single u64 in the hope of catching on again
                 * 'soon'.
                 */
                if (unlikely(head & 7))
                        head &= ~7ULL;

                size = 8;
        }

        head += size;

        if (skip > 0)
                head += skip;

        if (!session_done())
                goto more;
done:
        err = 0;
out_err:
        perf_session__warn_about_errors(self, ops);
        perf_session_free_sample_buffers(self);
        return err;
}

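/*
 * File mode: mmap the data area in mmap_window-sized slices (the whole
 * file on 64-bit) and walk the event headers in place. If the file
 * needs byte-swapping, the mapping must be private and writable since
 * events are swapped inside the mapped buffer.
 */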
int __perf_session__process_events(struct perf_session *session,
                                   u64 data_offset, u64 data_size,
                                   u64 file_size, struct perf_event_ops *ops)
{
        u64 head, page_offset, file_offset, file_pos, progress_next;
        int err, mmap_prot, mmap_flags, map_idx = 0;
        struct ui_progress *progress;
        size_t page_size, mmap_size;
        char *buf, *mmaps[8];
        union perf_event *event;
        uint32_t size;

        perf_event_ops__fill_defaults(ops);

        page_size = sysconf(_SC_PAGESIZE);

        page_offset = page_size * (data_offset / page_size);
        file_offset = page_offset;
        head = data_offset - page_offset;

        if (data_offset + data_size < file_size)
                file_size = data_offset + data_size;

        progress_next = file_size / 16;
        progress = ui_progress__new("Processing events...", file_size);
        if (progress == NULL)
                return -1;

        mmap_size = session->mmap_window;
        if (mmap_size > file_size)
                mmap_size = file_size;

        memset(mmaps, 0, sizeof(mmaps));

        mmap_prot  = PROT_READ;
        mmap_flags = MAP_SHARED;

        if (session->header.needs_swap) {
                mmap_prot  |= PROT_WRITE;
                mmap_flags = MAP_PRIVATE;
        }
remap:
        buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
                   file_offset);
        if (buf == MAP_FAILED) {
                pr_err("failed to mmap file\n");
                err = -errno;
                goto out_err;
        }
        mmaps[map_idx] = buf;
        map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
        file_pos = file_offset + head;

more:
        event = (union perf_event *)(buf + head);

        if (session->header.needs_swap)
                perf_event_header__bswap(&event->header);
        size = event->header.size;
        if (size == 0)
                size = 8;

        if (head + event->header.size > mmap_size) {
                if (mmaps[map_idx]) {
                        munmap(mmaps[map_idx], mmap_size);
                        mmaps[map_idx] = NULL;
                }

                page_offset = page_size * (head / page_size);
                file_offset += page_offset;
                head -= page_offset;
                goto remap;
        }

        size = event->header.size;

        if (size == 0 ||
            perf_session__process_event(session, event, ops, file_pos) < 0) {
                dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
                            file_offset + head, event->header.size,
                            event->header.type);
                /*
                 * assume we lost track of the stream, check alignment, and
                 * increment a single u64 in the hope of catching on again
                 * 'soon'.
                 */
                if (unlikely(head & 7))
                        head &= ~7ULL;

                size = 8;
        }

        head += size;
        file_pos += size;

        if (file_pos >= progress_next) {
                progress_next += file_size / 16;
                ui_progress__update(progress, file_pos);
        }

        if (file_pos < file_size)
                goto more;

        err = 0;
        /* do the final flush for ordered samples */
        session->ordered_samples.next_flush = ULLONG_MAX;
        flush_sample_queue(session, ops);
out_err:
        ui_progress__delete(progress);
        perf_session__warn_about_errors(session, ops);
        perf_session_free_sample_buffers(session);
        return err;
}

int perf_session__process_events(struct perf_session *self,
                                 struct perf_event_ops *ops)
{
        int err;

        if (perf_session__register_idle_thread(self) == NULL)
                return -ENOMEM;

        if (!self->fd_pipe)
                err = __perf_session__process_events(self,
                                                     self->header.data_offset,
                                                     self->header.data_size,
                                                     self->size, ops);
        else
                err = __perf_session__process_pipe_events(self, ops);

        return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
        if (!(self->sample_type & PERF_SAMPLE_RAW)) {
                pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
                return false;
        }

        return true;
}

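/*
 * Record a reference symbol (e.g. _text) and its address in each map
 * type's kmap, so kernel maps can later be relocated if the running
 * kernel's address for the symbol differs from the recorded one.
 */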
int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
                                             const char *symbol_name,
                                             u64 addr)
{
        char *bracket;
        enum map_type i;
        struct ref_reloc_sym *ref;

        ref = zalloc(sizeof(struct ref_reloc_sym));
        if (ref == NULL)
                return -ENOMEM;

        ref->name = strdup(symbol_name);
        if (ref->name == NULL) {
                free(ref);
                return -ENOMEM;
        }

        bracket = strchr(ref->name, ']');
        if (bracket)
                *bracket = '\0';

        ref->addr = addr;

        for (i = 0; i < MAP__NR_TYPES; ++i) {
                struct kmap *kmap = map__kmap(maps[i]);
                kmap->ref_reloc_sym = ref;
        }

        return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
        return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
               __dsos__fprintf(&self->host_machine.user_dsos, fp) +
               machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
                                          bool with_hits)
{
        size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
        return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}