1 #include <linux/types.h>
/*
 * Synthesize a PERF_RECORD_COMM event for @pid by parsing
 * /proc/<pid>/status ("Name:" gives the comm, "Tgid:" the thread group id),
 * deliver it through @process, then walk /proc/<pid>/task to cover the
 * task's threads.  Returns the tgid read from /proc (error paths and several
 * statements are missing from this fragmentary source).
 * NOTE(review): source is incomplete — many original lines are absent;
 * comments below describe only what is visible.
 */
10 static pid_t
event__synthesize_comm(pid_t pid
, int full
,
11 event__handler_t process
,
12 struct perf_session
*session
)
15 char filename
[PATH_MAX
];
20 struct dirent dirent
, *next
;
23 snprintf(filename
, sizeof(filename
), "/proc/%d/status", pid
);
25 fp
= fopen(filename
, "r");
/* fopen failure is treated as a benign race with task exit, not an error */
29 * We raced with a task exiting - just return:
31 pr_debug("couldn't open %s\n", filename
);
/* Loop until both the comm string and the pid field have been filled in. */
35 memset(&ev
.comm
, 0, sizeof(ev
.comm
));
36 while (!ev
.comm
.comm
[0] || !ev
.comm
.pid
) {
37 if (fgets(bf
, sizeof(bf
), fp
) == NULL
)
40 if (memcmp(bf
, "Name:", 5) == 0) {
42 while (*name
&& isspace(*name
))
/* strlen(name) - 1 drops the trailing '\n' left by fgets */
44 size
= strlen(name
) - 1;
/* size++ copies 'size' bytes, then accounts for the NUL terminator —
 * presumably used by the header-size computation below; confirm */
45 memcpy(ev
.comm
.comm
, name
, size
++);
46 } else if (memcmp(bf
, "Tgid:", 5) == 0) {
48 while (*tgids
&& isspace(*tgids
))
/* NOTE(review): atoi() reports no errors; strtol() with endptr/errno
 * checking would be safer — confirm against callers' expectations */
50 tgid
= ev
.comm
.pid
= atoi(tgids
);
54 ev
.comm
.header
.type
= PERF_RECORD_COMM
;
/* Shrink the event to the u64-aligned comm length actually used */
55 size
= ALIGN(size
, sizeof(u64
));
56 ev
.comm
.header
.size
= sizeof(ev
.comm
) - (sizeof(ev
.comm
.comm
) - size
);
61 process(&ev
, session
);
/* Emit one COMM event per thread found under /proc/<pid>/task */
65 snprintf(filename
, sizeof(filename
), "/proc/%d/task", pid
);
67 tasks
= opendir(filename
);
/* NOTE(review): readdir_r() is deprecated and a plain 'struct dirent'
 * may be too small for d_name on some systems — verify */
71 while (!readdir_r(tasks
, &dirent
, &next
) && next
) {
73 pid
= strtol(dirent
.d_name
, &end
, 10);
79 process(&ev
, session
);
88 pr_warning("couldn't get COMM and pgid, malformed %s\n", filename
);
/*
 * Synthesize PERF_RECORD_MMAP events for every executable mapping of @pid
 * by parsing /proc/<pid>/maps, delivering each via @process.
 * NOTE(review): source is fragmentary — several original lines are missing.
 */
92 static int event__synthesize_mmap_events(pid_t pid
, pid_t tgid
,
93 event__handler_t process
,
94 struct perf_session
*session
)
96 char filename
[PATH_MAX
];
99 snprintf(filename
, sizeof(filename
), "/proc/%d/maps", pid
);
101 fp
= fopen(filename
, "r");
/* As with status parsing, an unopenable maps file means the task exited */
104 * We raced with a task exiting - just return:
106 pr_debug("couldn't open %s\n", filename
);
111 char bf
[BUFSIZ
], *pbf
= bf
;
114 .type
= PERF_RECORD_MMAP
,
116 * Just like the kernel, see __perf_event_mmap
117 * in kernel/perf_event.c
119 .misc
= PERF_RECORD_MISC_USER
,
124 if (fgets(bf
, sizeof(bf
), fp
) == NULL
)
/* Parse one maps line: start-end perms offset dev inode path */
127 /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
128 n
= hex2u64(pbf
, &ev
.mmap
.start
);
132 n
= hex2u64(pbf
, &ev
.mmap
.len
);
/* Only executable mappings ('x' in the perms field) are of interest */
136 if (*pbf
== 'x') { /* vm_exec */
138 char *execname
= strchr(bf
, '/');
/* Anonymous executable mappings: accept "[vdso]" as a pseudo-path */
141 if (execname
== NULL
)
142 execname
= strstr(bf
, "[vdso]");
144 if (execname
== NULL
)
148 n
= hex2u64(pbf
, &vm_pgoff
);
149 /* pgoff is in bytes, not pages */
/* NOTE(review): BUG — this shifts by getpagesize() (e.g. 4096), not by
 * the page *shift*; a shift count >= the type width is undefined
 * behavior.  The intended conversion is vm_pgoff * page_size (or
 * vm_pgoff << page_shift).  Confirm against the upstream fix. */
151 ev
.mmap
.pgoff
= vm_pgoff
<< getpagesize();
155 size
= strlen(execname
);
156 execname
[size
- 1] = '\0'; /* Remove \n */
157 memcpy(ev
.mmap
.filename
, execname
, size
);
/* Trim the event to the u64-aligned filename length actually used */
158 size
= ALIGN(size
, sizeof(u64
));
159 ev
.mmap
.len
-= ev
.mmap
.start
;
160 ev
.mmap
.header
.size
= (sizeof(ev
.mmap
) -
161 (sizeof(ev
.mmap
.filename
) - size
));
165 process(&ev
, session
);
/*
 * Synthesize one PERF_RECORD_MMAP event per non-kernel module map found in
 * @kerninfo's MAP__FUNCTION rbtree, delivering each through @process.
 * The misc field distinguishes host kernel from guest kernel maps.
 * NOTE(review): fragmentary source — some original lines are missing.
 */
173 int event__synthesize_modules(event__handler_t process
,
174 struct perf_session
*session
,
175 struct kernel_info
*kerninfo
)
178 struct map_groups
*kmaps
= &kerninfo
->kmaps
;
182 * kernel uses 0 for user space maps, see kernel/perf_event.c
185 if (is_host_kernel(kerninfo
))
186 misc
= PERF_RECORD_MISC_KERNEL
;
188 misc
= PERF_RECORD_MISC_GUEST_KERNEL
;
/* Walk every function map in rbtree order */
190 for (nd
= rb_first(&kmaps
->maps
[MAP__FUNCTION
]);
191 nd
; nd
= rb_next(nd
)) {
194 struct map
*pos
= rb_entry(nd
, struct map
, rb_node
);
/* Skip the kernel image itself; only modules are synthesized here */
196 if (pos
->dso
->kernel
)
/* Event size is trimmed to the u64-aligned long name (incl. NUL) */
199 size
= ALIGN(pos
->dso
->long_name_len
+ 1, sizeof(u64
));
200 memset(&ev
, 0, sizeof(ev
));
201 ev
.mmap
.header
.misc
= misc
;
202 ev
.mmap
.header
.type
= PERF_RECORD_MMAP
;
203 ev
.mmap
.header
.size
= (sizeof(ev
.mmap
) -
204 (sizeof(ev
.mmap
.filename
) - size
));
205 ev
.mmap
.start
= pos
->start
;
206 ev
.mmap
.len
= pos
->end
- pos
->start
;
207 ev
.mmap
.pid
= kerninfo
->pid
;
209 memcpy(ev
.mmap
.filename
, pos
->dso
->long_name
,
210 pos
->dso
->long_name_len
+ 1);
211 process(&ev
, session
);
/*
 * Synthesize both COMM and MMAP events for a single thread: first the comm
 * (full=1, so its thread group is walked too), then the mmap events using
 * the tgid that the comm synthesis returned.
 */
217 int event__synthesize_thread(pid_t pid
, event__handler_t process
,
218 struct perf_session
*session
)
220 pid_t tgid
= event__synthesize_comm(pid
, 1, process
, session
);
223 return event__synthesize_mmap_events(pid
, tgid
, process
, session
);
/*
 * Synthesize COMM + MMAP events for every live task on the system by
 * scanning /proc for purely-numeric directory entries.
 * NOTE(review): readdir_r() is deprecated, and a plain 'struct dirent'
 * may be undersized for d_name on some platforms — verify.
 */
226 void event__synthesize_threads(event__handler_t process
,
227 struct perf_session
*session
)
230 struct dirent dirent
, *next
;
232 proc
= opendir("/proc");
234 while (!readdir_r(proc
, &dirent
, &next
) && next
) {
236 pid_t pid
= strtol(dirent
.d_name
, &end
, 10);
/* *end != '\0' means the entry name was not a pure number (not a pid) */
238 if (*end
) /* only interested in proper numerical dirents */
241 event__synthesize_thread(pid
, process
, session
);
/*
 * Argument bundle for find_symbol_cb(): carries the symbol name to look
 * for; the visible usage below also reads/writes a 'start' member.
 * (Member declarations are missing from this fragmentary source.)
 */
247 struct process_symbol_args
{
/*
 * kallsyms__parse() callback: match the symbol named in
 * (struct process_symbol_args *)arg.  Accepts function-type symbols and
 * 'A' (absolute) aliases; rejects anything whose name differs.
 */
252 static int find_symbol_cb(void *arg
, const char *name
, char type
, u64 start
)
254 struct process_symbol_args
*args
= arg
;
257 * Must be a function or at least an alias, as in PARISC64, where "_text" is
258 * an 'A' to the same address as "_stext".
260 if (!(symbol_type__is_a(type
, MAP__FUNCTION
) ||
261 type
== 'A') || strcmp(name
, args
->name
))
/*
 * Synthesize the PERF_RECORD_MMAP event describing the kernel text mapping
 * itself.  Resolves @symbol_name (e.g. "_text") via kallsyms to obtain the
 * reference address, chooses host vs guest kallsyms as the source, and
 * delivers the event through @process.
 * NOTE(review): fragmentary source — several original lines are missing.
 */
268 int event__synthesize_kernel_mmap(event__handler_t process
,
269 struct perf_session
*session
,
270 struct kernel_info
*kerninfo
,
271 const char *symbol_name
)
274 const char *filename
, *mmap_name
;
276 char name_buff
[PATH_MAX
];
281 .type
= PERF_RECORD_MMAP
,
285 * We should get this from /sys/kernel/sections/.text, but till that is
286 * available use this, and after it is use this as a fallback for older
289 struct process_symbol_args args
= { .name
= symbol_name
, };
291 mmap_name
= kern_mmap_name(kerninfo
, name_buff
);
292 if (is_host_kernel(kerninfo
)) {
294 * kernel uses PERF_RECORD_MISC_USER for user space maps,
295 * see kernel/perf_event.c __perf_event_mmap
297 ev
.header
.misc
= PERF_RECORD_MISC_KERNEL
;
298 filename
= "/proc/kallsyms";
/* Guest kernel: kallsyms comes from the configured guest path */
300 ev
.header
.misc
= PERF_RECORD_MISC_GUEST_KERNEL
;
301 if (is_default_guest(kerninfo
))
302 filename
= (char *) symbol_conf
.default_guest_kallsyms
;
/* NOTE(review): unbounded sprintf into 'path'; snprintf with the buffer
 * size would be safer — confirm 'path' capacity */
304 sprintf(path
, "%s/proc/kallsyms", kerninfo
->root_dir
);
/* find_symbol_cb fills args.start with the resolved symbol address */
309 if (kallsyms__parse(filename
, &args
, find_symbol_cb
) <= 0)
312 map
= kerninfo
->vmlinux_maps
[MAP__FUNCTION
];
/* Filename is "<mmap_name><symbol_name>"; +1 keeps the NUL terminator */
313 size
= snprintf(ev
.mmap
.filename
, sizeof(ev
.mmap
.filename
),
314 "%s%s", mmap_name
, symbol_name
) + 1;
315 size
= ALIGN(size
, sizeof(u64
));
316 ev
.mmap
.header
.size
= (sizeof(ev
.mmap
) -
317 (sizeof(ev
.mmap
.filename
) - size
));
/* pgoff carries the resolved reference symbol address */
318 ev
.mmap
.pgoff
= args
.start
;
319 ev
.mmap
.start
= map
->start
;
320 ev
.mmap
.len
= map
->end
- ev
.mmap
.start
;
321 ev
.mmap
.pid
= kerninfo
->pid
;
323 return process(&ev
, session
);
/*
 * Widen the global comm/thread display columns to fit this thread's comm,
 * unless explicit column widths or a field separator are configured, and
 * only when the comm passes the comm_list filter (if one is set).
 */
326 static void thread__comm_adjust(struct thread
*self
)
328 char *comm
= self
->comm
;
330 if (!symbol_conf
.col_width_list_str
&& !symbol_conf
.field_sep
&&
331 (!symbol_conf
.comm_list
||
332 strlist__has_entry(symbol_conf
.comm_list
, comm
))) {
333 unsigned int slen
= strlen(comm
);
335 if (slen
> comms__col_width
) {
336 comms__col_width
= slen
;
/* threads column also shows the pid, hence the extra 6 chars */
337 threads__col_width
= slen
+ 6;
/*
 * Set the thread's comm and, on success, update the display column widths.
 * Returns thread__set_comm()'s result.
 */
342 static int thread__set_comm_adjust(struct thread
*self
, const char *comm
)
344 int ret
= thread__set_comm(self
, comm
);
349 thread__comm_adjust(self
);
/*
 * Handle an incoming PERF_RECORD_COMM event: look up (or create) the thread
 * for the event's pid and record its comm; logs and skips on failure.
 */
354 int event__process_comm(event_t
*self
, struct perf_session
*session
)
356 struct thread
*thread
= perf_session__findnew(session
, self
->comm
.pid
);
358 dump_printf(": %s:%d\n", self
->comm
.comm
, self
->comm
.pid
);
360 if (thread
== NULL
|| thread__set_comm_adjust(thread
, self
->comm
.comm
)) {
361 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
/*
 * Handle a PERF_RECORD_LOST event: log it and accumulate the lost-sample
 * count into the session statistics.
 */
368 int event__process_lost(event_t
*self
, struct perf_session
*session
)
370 dump_printf(": id:%Ld: lost:%Ld\n", self
->lost
.id
, self
->lost
.lost
);
371 session
->events_stats
.lost
+= self
->lost
.lost
;
/*
 * Set the kernel function map's [start, end) range from a kernel MMAP
 * event, guarding against zero-length events from old perf.data files by
 * extending the end to the top of the address space.
 */
375 static void event_set_kernel_mmap_len(struct map
**maps
, event_t
*self
)
377 maps
[MAP__FUNCTION
]->start
= self
->mmap
.start
;
378 maps
[MAP__FUNCTION
]->end
= self
->mmap
.start
+ self
->mmap
.len
;
380 * Be a bit paranoid here, some perf.data file came with
381 * a zero sized synthesized MMAP event for the kernel.
383 if (maps
[MAP__FUNCTION
]->end
== 0)
384 maps
[MAP__FUNCTION
]->end
= ~0UL;
/*
 * Handle a kernel-space PERF_RECORD_MMAP event: classify it as either a
 * module mapping (path or "[module]" style filename) or the kernel image
 * itself (filename prefixed by the kernel mmap name), and update the
 * per-kernel map structures accordingly.
 * NOTE(review): fragmentary source — several original lines are missing.
 */
387 static int event__process_kernel_mmap(event_t
*self
,
388 struct perf_session
*session
)
391 char kmmap_prefix
[PATH_MAX
];
392 struct kernel_info
*kerninfo
;
393 enum dso_kernel_type kernel_type
;
396 kerninfo
= kerninfo__findnew(&session
->kerninfo_root
, self
->mmap
.pid
);
398 pr_err("Can't find id %d's kerninfo\n", self
->mmap
.pid
);
402 kern_mmap_name(kerninfo
, kmmap_prefix
);
403 if (is_host_kernel(kerninfo
))
404 kernel_type
= DSO_TYPE_KERNEL
;
406 kernel_type
= DSO_TYPE_GUEST_KERNEL
;
/* Kernel mappings carry the kernel mmap name as a filename prefix */
408 is_kernel_mmap
= memcmp(self
->mmap
.filename
,
410 strlen(kmmap_prefix
)) == 0;
411 if (self
->mmap
.filename
[0] == '/' ||
412 (!is_kernel_mmap
&& self
->mmap
.filename
[0] == '[')) {
/* Module mapping: derive a short "[name]" form from the file path */
414 char short_module_name
[1024];
417 if (self
->mmap
.filename
[0] == '/') {
418 name
= strrchr(self
->mmap
.filename
, '/');
423 dot
= strrchr(name
, '.');
426 snprintf(short_module_name
, sizeof(short_module_name
),
427 "[%.*s]", (int)(dot
- name
), name
);
/* Module names use '_' where filenames use '-' */
428 strxfrchar(short_module_name
, '-', '_');
/* NOTE(review): unbounded strcpy into a 1024-byte buffer; filename
 * length should be checked or a bounded copy used — verify */
430 strcpy(short_module_name
, self
->mmap
.filename
);
432 map
= map_groups__new_module(&kerninfo
->kmaps
,
439 name
= strdup(short_module_name
);
443 map
->dso
->short_name
= name
;
444 map
->end
= map
->start
+ self
->mmap
.len
;
445 } else if (is_kernel_mmap
) {
/* Kernel image mapping: the ref-reloc symbol name follows the prefix */
446 const char *symbol_name
= (self
->mmap
.filename
+
447 strlen(kmmap_prefix
));
449 * Should be there already, from the build-id table in
452 struct dso
*kernel
= __dsos__findnew(&kerninfo
->dsos__kernel
,
457 kernel
->kernel
= kernel_type
;
458 if (__map_groups__create_kernel_maps(&kerninfo
->kmaps
,
459 kerninfo
->vmlinux_maps
, kernel
) < 0)
462 event_set_kernel_mmap_len(kerninfo
->vmlinux_maps
, self
);
463 perf_session__set_kallsyms_ref_reloc_sym(kerninfo
->vmlinux_maps
,
466 if (is_default_guest(kerninfo
)) {
468 * preload dso of guest kernel and modules
471 kerninfo
->vmlinux_maps
[MAP__FUNCTION
],
/*
 * Handle a PERF_RECORD_MMAP event.  Kernel-space events (host or guest)
 * are delegated to event__process_kernel_mmap(); user-space events create
 * a new map in the owning thread's map group.  Logs and skips on failure.
 * NOTE(review): fragmentary source — several original lines are missing.
 */
480 int event__process_mmap(event_t
*self
, struct perf_session
*session
)
482 struct kernel_info
*kerninfo
;
483 struct thread
*thread
;
485 u8 cpumode
= self
->header
.misc
& PERF_RECORD_MISC_CPUMODE_MASK
;
488 dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n",
489 self
->mmap
.pid
, self
->mmap
.tid
, self
->mmap
.start
,
490 self
->mmap
.len
, self
->mmap
.pgoff
, self
->mmap
.filename
);
492 if (cpumode
== PERF_RECORD_MISC_GUEST_KERNEL
||
493 cpumode
== PERF_RECORD_MISC_KERNEL
) {
494 ret
= event__process_kernel_mmap(self
, session
);
/* User-space mapping: attach a new map to the event's thread */
500 thread
= perf_session__findnew(session
, self
->mmap
.pid
);
501 kerninfo
= kerninfo__findhost(&session
->kerninfo_root
);
502 map
= map__new(&kerninfo
->dsos__user
, self
->mmap
.start
,
503 self
->mmap
.len
, self
->mmap
.pgoff
,
504 self
->mmap
.pid
, self
->mmap
.filename
,
505 MAP__FUNCTION
, session
->cwd
, session
->cwdlen
);
507 if (thread
== NULL
|| map
== NULL
)
510 thread__insert_map(thread
, map
);
514 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
/*
 * Handle PERF_RECORD_FORK / PERF_RECORD_EXIT events: look up both the
 * child and parent threads and link them via thread__fork().  A clone
 * (same thread for parent and child) and EXIT events need no linking.
 */
518 int event__process_task(event_t
*self
, struct perf_session
*session
)
520 struct thread
*thread
= perf_session__findnew(session
, self
->fork
.pid
);
521 struct thread
*parent
= perf_session__findnew(session
, self
->fork
.ppid
);
523 dump_printf("(%d:%d):(%d:%d)\n", self
->fork
.pid
, self
->fork
.tid
,
524 self
->fork
.ppid
, self
->fork
.ptid
);
526 * A thread clone will have the same PID for both parent and child.
528 if (thread
== parent
)
531 if (self
->header
.type
== PERF_RECORD_EXIT
)
534 if (thread
== NULL
|| parent
== NULL
||
535 thread__fork(thread
, parent
) < 0) {
536 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
/*
 * Resolve @addr to a map in @al: pick the map group (thread's own maps, or
 * host/guest kernel maps) based on @cpumode, then search it.  If no map is
 * found for a kernel-mode negative address, retry in the kernel maps to
 * catch vsyscall/vdso addresses.  Fills al->map and rewrites al->addr to
 * the map-relative address.
 * NOTE(review): fragmentary source — several original lines are missing.
 */
543 void thread__find_addr_map(struct thread
*self
,
544 struct perf_session
*session
, u8 cpumode
,
545 enum map_type type
, pid_t pid
, u64 addr
,
546 struct addr_location
*al
)
548 struct map_groups
*mg
= &self
->mg
;
549 struct kernel_info
*kerninfo
= NULL
;
553 al
->cpumode
= cpumode
;
554 al
->filtered
= false;
/* Select which map group to search based on cpumode and host/guest mode */
556 if (cpumode
== PERF_RECORD_MISC_KERNEL
&& perf_host
) {
558 kerninfo
= kerninfo__findhost(&session
->kerninfo_root
);
559 mg
= &kerninfo
->kmaps
;
560 } else if (cpumode
== PERF_RECORD_MISC_USER
&& perf_host
) {
562 kerninfo
= kerninfo__findhost(&session
->kerninfo_root
);
563 } else if (cpumode
== PERF_RECORD_MISC_GUEST_KERNEL
&& perf_guest
) {
565 kerninfo
= kerninfo__find(&session
->kerninfo_root
, pid
);
570 mg
= &kerninfo
->kmaps
;
573 * 'u' means guest os user space.
574 * TODO: We don't support guest user space. Might support late.
576 if (cpumode
== PERF_RECORD_MISC_GUEST_USER
&& perf_guest
)
582 if ((cpumode
== PERF_RECORD_MISC_GUEST_USER
||
583 cpumode
== PERF_RECORD_MISC_GUEST_KERNEL
) &&
586 if ((cpumode
== PERF_RECORD_MISC_USER
||
587 cpumode
== PERF_RECORD_MISC_KERNEL
) &&
594 al
->map
= map_groups__find(mg
, type
, al
->addr
);
595 if (al
->map
== NULL
) {
597 * If this is outside of all known maps, and is a negative
598 * address, try to look it up in the kernel dso, as it might be
599 * a vsyscall or vdso (which executes in user-mode).
601 * XXX This is nasty, we should have a symbol list in the
602 * "[vdso]" dso, but for now lets use the old trick of looking
603 * in the whole kernel symbol list.
605 if ((long long)al
->addr
< 0 &&
606 cpumode
== PERF_RECORD_MISC_KERNEL
&&
608 mg
!= &kerninfo
->kmaps
) {
609 mg
= &kerninfo
->kmaps
;
/* Translate to the map-relative address for symbol lookup */
613 al
->addr
= al
->map
->map_ip(al
->map
, al
->addr
);
/*
 * Resolve @addr to both a map and a symbol: first find the map via
 * thread__find_addr_map(), then (when a map was found) look up the symbol
 * at the map-relative address, applying @filter.
 */
616 void thread__find_addr_location(struct thread
*self
,
617 struct perf_session
*session
, u8 cpumode
,
618 enum map_type type
, pid_t pid
, u64 addr
,
619 struct addr_location
*al
,
620 symbol_filter_t filter
)
622 thread__find_addr_map(self
, session
, cpumode
, type
, pid
, addr
, al
);
624 al
->sym
= map__find_symbol(al
->map
, al
->addr
, filter
);
/*
 * Widen the global dso display column to fit this dso's name, subject to
 * the same configuration guards as thread__comm_adjust(), and mark the
 * dso so the computation is not repeated.
 */
629 static void dso__calc_col_width(struct dso
*self
)
631 if (!symbol_conf
.col_width_list_str
&& !symbol_conf
.field_sep
&&
632 (!symbol_conf
.dso_list
||
633 strlist__has_entry(symbol_conf
.dso_list
, self
->name
))) {
634 unsigned int slen
= strlen(self
->name
);
635 if (slen
> dsos__col_width
)
636 dsos__col_width
= slen
;
/* Remember we've sized this dso so callers can skip next time */
639 self
->slen_calculated
= 1;
/*
 * Resolve a sample event to an addr_location: find the thread, apply the
 * comm/dso/symbol list filters from symbol_conf, locate the map for the
 * sample IP, update dso column width bookkeeping, and look up the symbol.
 * NOTE(review): fragmentary source — several original lines are missing.
 */
642 int event__preprocess_sample(const event_t
*self
, struct perf_session
*session
,
643 struct addr_location
*al
, symbol_filter_t filter
)
645 u8 cpumode
= self
->header
.misc
& PERF_RECORD_MISC_CPUMODE_MASK
;
646 struct thread
*thread
= perf_session__findnew(session
, self
->ip
.pid
);
/* Filter out threads whose comm is not in the configured comm list */
651 if (symbol_conf
.comm_list
&&
652 !strlist__has_entry(symbol_conf
.comm_list
, thread
->comm
))
655 dump_printf(" ... thread: %s:%d\n", thread
->comm
, thread
->pid
);
657 thread__find_addr_map(thread
, session
, cpumode
, MAP__FUNCTION
,
658 self
->ip
.pid
, self
->ip
.ip
, al
);
659 dump_printf(" ...... dso: %s\n",
660 al
->map
? al
->map
->dso
->long_name
:
661 al
->level
== 'H' ? "[hypervisor]" : "<not found>");
/* dso_list filter: match either the short or the long dso name */
665 if (symbol_conf
.dso_list
&&
666 (!al
->map
|| !al
->map
->dso
||
667 !(strlist__has_entry(symbol_conf
.dso_list
,
668 al
->map
->dso
->short_name
) ||
669 (al
->map
->dso
->short_name
!= al
->map
->dso
->long_name
&&
670 strlist__has_entry(symbol_conf
.dso_list
,
671 al
->map
->dso
->long_name
)))))
674 * We have to do this here as we may have a dso with no symbol
675 * hit that has a name longer than the ones with symbols
678 if (!sort_dso
.elide
&& !al
->map
->dso
->slen_calculated
)
679 dso__calc_col_width(al
->map
->dso
);
681 al
->sym
= map__find_symbol(al
->map
, al
->addr
, filter
);
/* sym_list filter: drop samples whose symbol is not in the list */
684 if (symbol_conf
.sym_list
&& al
->sym
&&
685 !strlist__has_entry(symbol_conf
.sym_list
, al
->sym
->name
))
695 int event__parse_sample(event_t
*event
, u64 type
, struct sample_data
*data
)
697 u64
*array
= event
->sample
.array
;
699 if (type
& PERF_SAMPLE_IP
) {
700 data
->ip
= event
->ip
.ip
;
704 if (type
& PERF_SAMPLE_TID
) {
705 u32
*p
= (u32
*)array
;
711 if (type
& PERF_SAMPLE_TIME
) {
716 if (type
& PERF_SAMPLE_ADDR
) {
721 if (type
& PERF_SAMPLE_ID
) {
726 if (type
& PERF_SAMPLE_STREAM_ID
) {
727 data
->stream_id
= *array
;
731 if (type
& PERF_SAMPLE_CPU
) {
732 u32
*p
= (u32
*)array
;
737 if (type
& PERF_SAMPLE_PERIOD
) {
738 data
->period
= *array
;
742 if (type
& PERF_SAMPLE_READ
) {
743 pr_debug("PERF_SAMPLE_READ is unsuported for now\n");
747 if (type
& PERF_SAMPLE_CALLCHAIN
) {
748 data
->callchain
= (struct ip_callchain
*)array
;
749 array
+= 1 + data
->callchain
->nr
;
752 if (type
& PERF_SAMPLE_RAW
) {
753 u32
*p
= (u32
*)array
;