/*
 * builtin-trace.c
 *
 * Builtin 'trace' command:
 *
 * Display a continuously updated trace of any workload, CPU, specific PID,
 * system wide, etc.  Default format is loosely strace like, but any other
 * event may be specified using --event.
 *
 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Initially based on the 'trace' prototype by Thomas Gleixner:
 *
 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
 */
#include "util/record.h"
#include <traceevent/event-parse.h>
#include <api/fs/tracing_path.h>
#include "util/bpf_map.h"
#include "util/rlimit.h"
#include "util/cgroup.h"
#include "util/color.h"
#include "util/config.h"
#include "util/debug.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evswitch.h"
#include <subcmd/pager.h>
#include <subcmd/exec-cmd.h>
#include "util/machine.h"
#include "util/symbol.h"
#include "util/path.h"
#include "util/session.h"
#include "util/thread.h"
#include <subcmd/parse-options.h>
#include "util/strlist.h"
#include "util/intlist.h"
#include "util/thread_map.h"
#include "util/stat.h"
#include "util/tool.h"
#include "util/util.h"
#include "trace/beauty/beauty.h"
#include "trace-event.h"
#include "util/parse-events.h"
#include "util/bpf-loader.h"
#include "callchain.h"
#include "print_binary.h"
#include "syscalltbl.h"
#include "rb_resort.h"

#include <errno.h>
#include <inttypes.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <linux/err.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <fcntl.h>
#include <sys/sysmacros.h>

#include <linux/ctype.h>
#ifndef O_CLOEXEC
# define O_CLOEXEC		02000000
#endif

#ifndef F_LINUX_SPECIFIC_BASE
# define F_LINUX_SPECIFIC_BASE	1024
#endif
struct trace {
	struct perf_tool	tool;
	struct syscalltbl	*sctbl;
	struct {
		struct syscall  *table;
		struct { // per syscall BPF_MAP_TYPE_PROG_ARRAY
			struct bpf_map  *sys_enter,
					*sys_exit;
		}		prog_array;
		struct {
			struct evsel *sys_enter,
				     *sys_exit,
				     *augmented;
		}		events;
		struct bpf_program *unaugmented_prog;
	} syscalls;
	struct record_opts	opts;
	struct evlist		*evlist;
	struct machine		*host;
	struct thread		*current;
	struct bpf_object	*bpf_obj;
	struct cgroup		*cgroup;
	u64			base_time;
	FILE			*output;
	unsigned long		nr_events;
	unsigned long		nr_events_printed;
	unsigned long		max_events;
	struct evswitch		evswitch;
	struct strlist		*ev_qualifier;
	struct {
		size_t		nr;
		int		*entries;
	}			ev_qualifier_ids;
	double			duration_filter;
	struct {
		u64		vfs_getname,
				proc_getname;
	} stats;
	unsigned int		max_stack;
	unsigned int		min_stack;
	int			raw_augmented_syscalls_args_size;
	bool			raw_augmented_syscalls;
	bool			fd_path_disabled;
	bool			not_ev_qualifier;
	bool			live;
	bool			multiple_threads;
	bool			summary;
	bool			summary_only;
	bool			failure_only;
	bool			show_comm;
	bool			print_sample;
	bool			show_tool_stats;
	bool			kernel_syscallchains;
	s16			args_alignment;
	bool			show_tstamp;
	bool			show_duration;
	bool			show_zeros;
	bool			show_arg_names;
	bool			show_string_prefix;
	bool			vfs_getname;
	struct {
		struct ordered_events	data;
		u64			last;
	} oe;
};
struct tp_field {
	int offset;
	union {
		u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
		void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
	};
};

#define TP_UINT_FIELD(bits) \
static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return value;  \
}

TP_UINT_FIELD(8);
TP_UINT_FIELD(16);
TP_UINT_FIELD(32);
TP_UINT_FIELD(64);

#define TP_UINT_FIELD__SWAPPED(bits) \
static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return bswap_##bits(value);\
}

TP_UINT_FIELD__SWAPPED(16);
TP_UINT_FIELD__SWAPPED(32);
TP_UINT_FIELD__SWAPPED(64);
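/*
 * For illustration, TP_UINT_FIELD(16) above expands to roughly:
 *
 *	static u64 tp_field__u16(struct tp_field *field, struct perf_sample *sample)
 *	{
 *		u16 value;
 *		memcpy(&value, sample->raw_data + field->offset, sizeof(value));
 *		return value;
 *	}
 *
 * i.e. one fixed-width reader per integer size; the __SWAPPED variants
 * additionally byte-swap the value, for perf.data files recorded on an
 * opposite-endian machine.
 */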
static int __tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap)
{
	field->offset = offset;

	switch (size) {
	case 1:
		field->integer = tp_field__u8;
		break;
	case 2:
		field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
		break;
	case 4:
		field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
		break;
	case 8:
		field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
		break;
	default:
		return -1;
	}

	return 0;
}

static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap)
{
	return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap);
}

static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
{
	return sample->raw_data + field->offset;
}

static int __tp_field__init_ptr(struct tp_field *field, int offset)
{
	field->offset = offset;
	field->pointer = tp_field__ptr;
	return 0;
}

static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field)
{
	return __tp_field__init_ptr(field, format_field->offset);
}
struct syscall_tp {
	struct tp_field id;
	struct tp_field args, ret;
};

static int perf_evsel__init_tp_uint_field(struct evsel *evsel,
					  struct tp_field *field,
					  const char *name)
{
	struct tep_format_field *format_field = perf_evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_uint(field, format_field, evsel->needs_swap);
}

#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
	({ struct syscall_tp *sc = evsel->priv;\
	   perf_evsel__init_tp_uint_field(evsel, &sc->name, #name); })

static int perf_evsel__init_tp_ptr_field(struct evsel *evsel,
					 struct tp_field *field,
					 const char *name)
{
	struct tep_format_field *format_field = perf_evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_ptr(field, format_field);
}

#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
	({ struct syscall_tp *sc = evsel->priv;\
	   perf_evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
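/*
 * Usage sketch: perf_evsel__init_sc_tp_uint_field(evsel, id) expands to
 * perf_evsel__init_tp_uint_field(evsel, &sc->id, "id"), i.e. the macro
 * stringizes the struct syscall_tp member name, looks up the tracepoint
 * format field of the same name and wires up the matching reader for it.
 */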
static void evsel__delete_priv(struct evsel *evsel)
{
	zfree(&evsel->priv);
	evsel__delete(evsel);
}

static int perf_evsel__init_syscall_tp(struct evsel *evsel)
{
	struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp));

	if (evsel->priv != NULL) {
		if (perf_evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
		    perf_evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
			return -ENOENT;

		return 0;
	}

	return -ENOMEM;
}

static int perf_evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
{
	struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp));

	if (evsel->priv != NULL) {
		struct tep_format_field *syscall_id = perf_evsel__field(tp, "id");
		if (syscall_id == NULL)
			syscall_id = perf_evsel__field(tp, "__syscall_nr");
		if (syscall_id == NULL)
			return -EINVAL;
		if (__tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
			return -EINVAL;

		return 0;
	}

	return -ENOMEM;
}

static int perf_evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
{
	struct syscall_tp *sc = evsel->priv;

	return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
}

static int perf_evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
{
	struct syscall_tp *sc = evsel->priv;

	return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
}

static int perf_evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
{
	evsel->priv = malloc(sizeof(struct syscall_tp));
	if (evsel->priv != NULL) {
		if (perf_evsel__init_sc_tp_uint_field(evsel, id))
			return -ENOENT;

		evsel->handler = handler;
		return 0;
	}

	return -ENOMEM;
}

static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
{
	struct evsel *evsel = perf_evsel__newtp("raw_syscalls", direction);

	/* older kernel (e.g., RHEL6) use syscalls:{enter,exit} */
	if (IS_ERR(evsel))
		evsel = perf_evsel__newtp("syscalls", direction);

	if (IS_ERR(evsel))
		return NULL;

	if (perf_evsel__init_raw_syscall_tp(evsel, handler))
		goto out_delete;

	return evsel;

out_delete:
	evsel__delete_priv(evsel);
	return NULL;
}
#define perf_evsel__sc_tp_uint(evsel, name, sample) \
	({ struct syscall_tp *fields = evsel->priv; \
	   fields->name.integer(&fields->name, sample); })

#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
	({ struct syscall_tp *fields = evsel->priv; \
	   fields->name.pointer(&fields->name, sample); })
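/*
 * Usage sketch (see trace__sys_enter()/trace__sys_exit() below):
 *
 *	int id	   = perf_evsel__sc_tp_uint(evsel, id, sample);
 *	void *args = perf_evsel__sc_tp_ptr(evsel, args, sample);
 *
 * i.e. 'name' picks one of the tp_fields set up at evsel init time and
 * applies its reader to the current sample's raw data.
 */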
size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
	int idx = val - sa->offset;

	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
		size_t printed = scnprintf(bf, size, intfmt, val);
		if (show_prefix)
			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
		return printed;
	}

	return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
}
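/*
 * Example: for a strarray defined below as
 *
 *	static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
 *	static DEFINE_STRARRAY(itimers, "ITIMER_");
 *
 * val == 1 prints "VIRTUAL" (or "ITIMER_VIRTUAL" with show_prefix), while an
 * out-of-range val falls back to printing the raw number via intfmt, followed
 * by an "ITIMER_???" marker when show_prefix is set.
 */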
static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
						const char *intfmt,
						struct syscall_arg *arg)
{
	return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val);
}

static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
}

#define SCA_STRARRAY syscall_arg__scnprintf_strarray

size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg)
{
	return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val);
}
size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
	size_t printed;
	int i;

	for (i = 0; i < sas->nr_entries; ++i) {
		struct strarray *sa = sas->entries[i];
		int idx = val - sa->offset;

		if (idx >= 0 && idx < sa->nr_entries) {
			if (sa->entries[idx] == NULL)
				break;
			return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
		}
	}

	printed = scnprintf(bf, size, intfmt, val);
	if (show_prefix)
		printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix);
	return printed;
}

size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size,
					struct syscall_arg *arg)
{
	return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val);
}
#ifndef AT_FDCWD
#define AT_FDCWD	-100
#endif

static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
					   struct syscall_arg *arg)
{
	int fd = arg->val;
	const char *prefix = "AT_FD";

	if (fd == AT_FDCWD)
		return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD");

	return syscall_arg__scnprintf_fd(bf, size, arg);
}

#define SCA_FDAT syscall_arg__scnprintf_fd_at

static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg);

#define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd
size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%#lx", arg->val);
}

size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg)
{
	if (arg->val == 0)
		return scnprintf(bf, size, "NULL");
	return syscall_arg__scnprintf_hex(bf, size, arg);
}

size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%d", arg->val);
}

size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%ld", arg->val);
}
static const char *bpf_cmd[] = {
	"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
	"MAP_GET_NEXT_KEY", "PROG_LOAD",
};
static DEFINE_STRARRAY(bpf_cmd, "BPF_");

static const char *fsmount_flags[] = {
	[1] = "CLOEXEC",
};
static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_");

#include "trace/beauty/generated/fsconfig_arrays.c"

static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_");

static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);

static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
static DEFINE_STRARRAY(itimers, "ITIMER_");

static const char *keyctl_options[] = {
	"GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
	"SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
	"INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
	"ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
	"INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
};
static DEFINE_STRARRAY(keyctl_options, "KEYCTL_");

static const char *whences[] = { "SET", "CUR", "END",
#ifdef SEEK_DATA
"DATA",
#endif
#ifdef SEEK_HOLE
"HOLE",
#endif
};
static DEFINE_STRARRAY(whences, "SEEK_");

static const char *fcntl_cmds[] = {
	"DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
	"SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64",
	"SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX",
	"OFD_GETLK", "OFD_SETLK", "OFD_SETLKW",
};
static DEFINE_STRARRAY(fcntl_cmds, "F_");

static const char *fcntl_linux_specific_cmds[] = {
	"SETLEASE", "GETLEASE", "NOTIFY", [5] = "CANCELLK", "DUPFD_CLOEXEC",
	"SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
	"GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
};

static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE);

static struct strarray *fcntl_cmds_arrays[] = {
	&strarray__fcntl_cmds,
	&strarray__fcntl_linux_specific_cmds,
};

static DEFINE_STRARRAYS(fcntl_cmds_arrays);

static const char *rlimit_resources[] = {
	"CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
	"MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
	"RTTIME",
};
static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_");

static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
static DEFINE_STRARRAY(sighow, "SIG_");

static const char *clockid[] = {
	"REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
	"MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
	"REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
};
static DEFINE_STRARRAY(clockid, "CLOCK_");
static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
						 struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *suffix = "_OK";
	size_t printed = 0;
	int mode = arg->val;

	if (mode == F_OK) /* 0 */
		return scnprintf(bf, size, "F%s", show_prefix ? suffix : "");
#define	P_MODE(n) \
	if (mode & n##_OK) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \
		mode &= ~n##_OK; \
	}

	P_MODE(R);
	P_MODE(W);
	P_MODE(X);
#undef P_MODE

	if (mode)
		printed += scnprintf(bf + printed, size - printed, "|%#x", mode);

	return printed;
}

#define SCA_ACCMODE syscall_arg__scnprintf_access_mode
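/*
 * Example outputs from the access mode beautifier above: mode == R_OK|W_OK
 * prints "R|W" (or "R_OK|W_OK" when string prefixes/suffixes are shown),
 * mode == 0 prints "F" / "F_OK", and any leftover bits not covered by
 * P_MODE() are appended in hex, e.g. "R|0x80".
 */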
static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
					      struct syscall_arg *arg);

#define SCA_FILENAME syscall_arg__scnprintf_filename

static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
						struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *prefix = "O_";
	int printed = 0, flags = arg->val;

#define	P_FLAG(n) \
	if (flags & O_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
		flags &= ~O_##n; \
	}

	P_FLAG(CLOEXEC);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
#ifndef GRND_NONBLOCK
#define GRND_NONBLOCK	0x0001
#endif
#ifndef GRND_RANDOM
#define GRND_RANDOM	0x0002
#endif

static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
						     struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *prefix = "GRND_";
	int printed = 0, flags = arg->val;

#define	P_FLAG(n) \
	if (flags & GRND_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
		flags &= ~GRND_##n; \
	}

	P_FLAG(RANDOM);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags
#define STRARRAY(name, array) \
	  { .scnprintf	= SCA_STRARRAY, \
	    .parm	= &strarray__##array, }

#define STRARRAY_FLAGS(name, array) \
	  { .scnprintf	= SCA_STRARRAY_FLAGS, \
	    .parm	= &strarray__##array, }
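/*
 * For illustration, a syscall_fmts[] entry below such as
 *
 *	{ .name = "getitimer",
 *	  .arg = { [0] = STRARRAY(which, itimers), }, },
 *
 * wires argument 0 of getitimer to the SCA_STRARRAY beautifier with
 * &strarray__itimers as its .parm, so 'which' prints as REAL/VIRTUAL/PROF.
 */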
#include "trace/beauty/arch_errno_names.c"
#include "trace/beauty/eventfd.c"
#include "trace/beauty/futex_op.c"
#include "trace/beauty/futex_val3.c"
#include "trace/beauty/mmap.c"
#include "trace/beauty/mode_t.c"
#include "trace/beauty/msg_flags.c"
#include "trace/beauty/open_flags.c"
#include "trace/beauty/perf_event_open.c"
#include "trace/beauty/pid.c"
#include "trace/beauty/sched_policy.c"
#include "trace/beauty/seccomp.c"
#include "trace/beauty/signum.c"
#include "trace/beauty/socket_type.c"
#include "trace/beauty/waitid_options.c"
struct syscall_arg_fmt {
	size_t	   (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
	void	   *parm;
	const char *name;
	bool	   show_zero;
};

static struct syscall_fmt {
	const char *name;
	const char *alias;
	struct {
		const char *sys_enter,
			   *sys_exit;
	}	   bpf_prog_name;
	struct syscall_arg_fmt arg[6];
	u8	   nr_args;
	bool	   errpid;
	bool	   timeout;
	bool	   hexret;
} syscall_fmts[] = {
	{ .name	    = "access",
	  .arg = { [1] = { .scnprintf = SCA_ACCMODE,	/* mode */ }, }, },
	{ .name	    = "arch_prctl",
	  .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ },
		   [1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, },
	{ .name	    = "bind",
	  .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
		   [1] = { .scnprintf = SCA_SOCKADDR, /* umyaddr */ },
		   [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
	{ .name	    = "bpf",
	  .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, },
	{ .name	    = "brk",	    .hexret = true,
	  .arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, },
	{ .name     = "clock_gettime",
	  .arg = { [0] = STRARRAY(clk_id, clockid), }, },
	{ .name	    = "clone",	    .errpid = true, .nr_args = 5,
	  .arg = { [0] = { .name = "flags",	    .scnprintf = SCA_CLONE_FLAGS, },
		   [1] = { .name = "child_stack",   .scnprintf = SCA_HEX, },
		   [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, },
		   [3] = { .name = "child_tidptr",  .scnprintf = SCA_HEX, },
		   [4] = { .name = "tls",	    .scnprintf = SCA_HEX, }, }, },
	{ .name	    = "close",
	  .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, },
	{ .name	    = "connect",
	  .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
		   [1] = { .scnprintf = SCA_SOCKADDR, /* servaddr */ },
		   [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
	{ .name	    = "epoll_ctl",
	  .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, },
	{ .name	    = "eventfd2",
	  .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, },
	{ .name	    = "fchmodat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "fchownat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "fcntl",
	  .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD,	/* cmd */
			   .parm      = &strarrays__fcntl_cmds_arrays,
			   .show_zero = true, },
		   [2] = { .scnprintf =  SCA_FCNTL_ARG, /* arg */ }, }, },
	{ .name	    = "flock",
	  .arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, },
	{ .name     = "fsconfig",
	  .arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, },
	{ .name     = "fsmount",
	  .arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags),
		   [2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, },
	{ .name     = "fspick",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	  /* dfd */ },
		   [1] = { .scnprintf = SCA_FILENAME,	  /* path */ },
		   [2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, },
	{ .name	    = "fstat", .alias = "newfstat", },
	{ .name	    = "fstatat", .alias = "newfstatat", },
	{ .name	    = "futex",
	  .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ },
		   [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, },
	{ .name	    = "futimesat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "getitimer",
	  .arg = { [0] = STRARRAY(which, itimers), }, },
	{ .name	    = "getpid",	    .errpid = true, },
	{ .name	    = "getpgid",    .errpid = true, },
	{ .name	    = "getppid",    .errpid = true, },
	{ .name	    = "getrandom",
	  .arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
	{ .name	    = "getrlimit",
	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
	{ .name	    = "gettid",	    .errpid = true, },
	{ .name	    = "ioctl",
	  .arg = {
#if defined(__i386__) || defined(__x86_64__)
/*
 * FIXME: Make this available to all arches.
 */
		   [1] = { .scnprintf = SCA_IOCTL_CMD, /* cmd */ },
		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
#else
		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
#endif
	{ .name	    = "kcmp",	    .nr_args = 5,
	  .arg = { [0] = { .name = "pid1",	.scnprintf = SCA_PID, },
		   [1] = { .name = "pid2",	.scnprintf = SCA_PID, },
		   [2] = { .name = "type",	.scnprintf = SCA_KCMP_TYPE, },
		   [3] = { .name = "idx1",	.scnprintf = SCA_KCMP_IDX, },
		   [4] = { .name = "idx2",	.scnprintf = SCA_KCMP_IDX, }, }, },
	{ .name	    = "keyctl",
	  .arg = { [0] = STRARRAY(option, keyctl_options), }, },
	{ .name	    = "kill",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "linkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "lseek",
	  .arg = { [2] = STRARRAY(whence, whences), }, },
	{ .name	    = "lstat", .alias = "newlstat", },
	{ .name     = "madvise",
	  .arg = { [0] = { .scnprintf = SCA_HEX,      /* start */ },
		   [2] = { .scnprintf = SCA_MADV_BHV, /* behavior */ }, }, },
	{ .name	    = "mkdirat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "mknodat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "mmap",	    .hexret = true,
/* The standard mmap maps to old_mmap on s390x */
#if defined(__s390x__)
	.alias = "old_mmap",
#endif
	  .arg = { [2] = { .scnprintf = SCA_MMAP_PROT,	/* prot */ },
		   [3] = { .scnprintf = SCA_MMAP_FLAGS,	/* flags */ },
		   [5] = { .scnprintf = SCA_HEX,	/* offset */ }, }, },
	{ .name	    = "mount",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ },
		   [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
			   .mask_val  = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
	{ .name	    = "move_mount",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* from_dfd */ },
		   [1] = { .scnprintf = SCA_FILENAME, /* from_pathname */ },
		   [2] = { .scnprintf = SCA_FDAT,	/* to_dfd */ },
		   [3] = { .scnprintf = SCA_FILENAME, /* to_pathname */ },
		   [4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, },
	{ .name	    = "mprotect",
	  .arg = { [0] = { .scnprintf = SCA_HEX,	/* start */ },
		   [2] = { .scnprintf = SCA_MMAP_PROT,	/* prot */ }, }, },
	{ .name	    = "mq_unlink",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* u_name */ }, }, },
	{ .name	    = "mremap",	    .hexret = true,
	  .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, },
	{ .name	    = "name_to_handle_at",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "newfstatat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "open",
	  .arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name	    = "open_by_handle_at",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name	    = "openat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name	    = "perf_event_open",
	  .arg = { [2] = { .scnprintf = SCA_INT,	/* cpu */ },
		   [3] = { .scnprintf = SCA_FD,		/* group_fd */ },
		   [4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
	{ .name	    = "pipe2",
	  .arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, },
	{ .name	    = "pkey_alloc",
	  .arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS,	/* access_rights */ }, }, },
	{ .name	    = "pkey_free",
	  .arg = { [0] = { .scnprintf = SCA_INT,	/* key */ }, }, },
	{ .name	    = "pkey_mprotect",
	  .arg = { [0] = { .scnprintf = SCA_HEX,	/* start */ },
		   [2] = { .scnprintf = SCA_MMAP_PROT,	/* prot */ },
		   [3] = { .scnprintf = SCA_INT,	/* pkey */ }, }, },
	{ .name	    = "poll", .timeout = true, },
	{ .name	    = "ppoll", .timeout = true, },
	{ .name	    = "prctl",
	  .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */ },
		   [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
		   [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
	{ .name	    = "pread", .alias = "pread64", },
	{ .name	    = "preadv", .alias = "pread", },
	{ .name	    = "prlimit64",
	  .arg = { [1] = STRARRAY(resource, rlimit_resources), }, },
	{ .name	    = "pwrite", .alias = "pwrite64", },
	{ .name	    = "readlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "recvfrom",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "recvmmsg",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "recvmsg",
	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "renameat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, }, },
	{ .name	    = "renameat2",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
		   [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
	{ .name	    = "rt_sigaction",
	  .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "rt_sigprocmask",
	  .arg = { [0] = STRARRAY(how, sighow), }, },
	{ .name	    = "rt_sigqueueinfo",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "rt_tgsigqueueinfo",
	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "sched_setscheduler",
	  .arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, },
	{ .name	    = "seccomp",
	  .arg = { [0] = { .scnprintf = SCA_SECCOMP_OP,	   /* op */ },
		   [1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, },
	{ .name	    = "select", .timeout = true, },
	{ .name	    = "sendfile", .alias = "sendfile64", },
	{ .name	    = "sendmmsg",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "sendmsg",
	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "sendto",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
		   [4] = { .scnprintf = SCA_SOCKADDR, /* addr */ }, }, },
	{ .name	    = "set_tid_address", .errpid = true, },
	{ .name	    = "setitimer",
	  .arg = { [0] = STRARRAY(which, itimers), }, },
	{ .name	    = "setrlimit",
	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
	{ .name	    = "socket",
	  .arg = { [0] = STRARRAY(family, socket_families),
		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
	{ .name	    = "socketpair",
	  .arg = { [0] = STRARRAY(family, socket_families),
		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
	{ .name	    = "stat", .alias = "newstat", },
	{ .name	    = "statx",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	 /* fdat */ },
		   [2] = { .scnprintf = SCA_STATX_FLAGS, /* flags */ } ,
		   [3] = { .scnprintf = SCA_STATX_MASK,	 /* mask */ }, }, },
	{ .name	    = "swapoff",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
	{ .name	    = "swapon",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
	{ .name	    = "symlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "sync_file_range",
	  .arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, },
	{ .name	    = "tgkill",
	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "tkill",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name     = "umount2", .alias = "umount",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* name */ }, }, },
	{ .name	    = "uname", .alias = "newuname", },
	{ .name	    = "unlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "utimensat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, },
	{ .name	    = "wait4",	    .errpid = true,
	  .arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
	{ .name	    = "waitid",	    .errpid = true,
	  .arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
};
static int syscall_fmt__cmp(const void *name, const void *fmtp)
{
	const struct syscall_fmt *fmt = fmtp;
	return strcmp(name, fmt->name);
}

static struct syscall_fmt *syscall_fmt__find(const char *name)
{
	const int nmemb = ARRAY_SIZE(syscall_fmts);
	return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
}

static struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
{
	int i, nmemb = ARRAY_SIZE(syscall_fmts);

	for (i = 0; i < nmemb; ++i) {
		if (syscall_fmts[i].alias && strcmp(syscall_fmts[i].alias, alias) == 0)
			return &syscall_fmts[i];
	}

	return NULL;
}
/*
 * is_exit: is this "exit" or "exit_group"?
 * is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter.
 * args_size: sum of the sizes of the syscall arguments, anything after that is augmented stuff: pathname for openat, etc.
 * nonexistent: Just a hole in the syscall table, syscall id not allocated
 */
struct syscall {
	struct tep_event    *tp_format;
	int		    nr_args;
	int		    args_size;
	struct {
		struct bpf_program *sys_enter,
				   *sys_exit;
	}		    bpf_prog;
	bool		    is_exit;
	bool		    is_open;
	bool		    nonexistent;
	struct tep_format_field *args;
	const char	    *name;
	struct syscall_fmt  *fmt;
	struct syscall_arg_fmt *arg_fmt;
};

/*
 * Must match what is in the BPF program:
 *
 * tools/perf/examples/bpf/augmented_raw_syscalls.c
 */
struct bpf_map_syscall_entry {
	bool	enabled;
	u16	string_args_len[6];
};
/*
 * We need to have this 'calculated' boolean because in some cases we really
 * don't know what is the duration of a syscall, for instance, when we start
 * a session and some threads are waiting for a syscall to finish, say 'poll',
 * in which case all we can do is to print "( ? )" for the duration and for
 * the timestamp.
 */
static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
{
	double duration = (double)t / NSEC_PER_MSEC;
	size_t printed = fprintf(fp, "(");

	if (!calculated)
		printed += fprintf(fp, "         ");
	else if (duration >= 1.0)
		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
	else if (duration >= 0.01)
		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
	else
		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
	return printed + fprintf(fp, "): ");
}
struct file {
	char *pathname;
	int  dev_maj;
};

/**
 * filename.ptr: The filename char pointer that will be vfs_getname'd
 * filename.entry_str_pos: Where to insert the string translated from
 *                         filename.ptr by the vfs_getname tracepoint/kprobe.
 * ret_scnprintf: syscall args may set this to a different syscall return
 *                formatter, for instance, fcntl may return fds, file flags, etc.
 */
struct thread_trace {
	u64		  entry_time;
	bool		  entry_pending;
	unsigned long	  nr_events;
	unsigned long	  pfmaj, pfmin;
	char		  *entry_str;
	double		  runtime_ms;
	size_t		  (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	struct {
		unsigned long ptr;
		short int     entry_str_pos;
		bool	      pending_open;
		unsigned int  namelen;
		char	      *name;
	} filename;
	struct {
		int	      max;
		struct file   *table;
	} files;

	struct intlist *syscall_stats;
};
static struct thread_trace *thread_trace__new(void)
{
	struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));

	if (ttrace) {
		ttrace->files.max = -1;
		ttrace->syscall_stats = intlist__new(NULL);
	}

	return ttrace;
}

static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
{
	struct thread_trace *ttrace;

	if (thread == NULL)
		goto fail;

	if (thread__priv(thread) == NULL)
		thread__set_priv(thread, thread_trace__new());

	if (thread__priv(thread) == NULL)
		goto fail;

	ttrace = thread__priv(thread);
	++ttrace->nr_events;

	return ttrace;
fail:
	color_fprintf(fp, PERF_COLOR_RED,
		      "WARNING: not enough memory, dropping samples!\n");
	return NULL;
}

void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
				    size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg))
{
	struct thread_trace *ttrace = thread__priv(arg->thread);

	ttrace->ret_scnprintf = ret_scnprintf;
}

#define TRACE_PFMAJ		(1 << 0)
#define TRACE_PFMIN		(1 << 1)

static const size_t trace__entry_str_size = 2048;
static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
{
	if (fd < 0)
		return NULL;

	if (fd > ttrace->files.max) {
		struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));

		if (nfiles == NULL)
			return NULL;

		if (ttrace->files.max != -1) {
			memset(nfiles + ttrace->files.max + 1, 0,
			       (fd - ttrace->files.max) * sizeof(struct file));
		} else {
			memset(nfiles, 0, (fd + 1) * sizeof(struct file));
		}

		ttrace->files.table = nfiles;
		ttrace->files.max   = fd;
	}

	return ttrace->files.table + fd;
}
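/*
 * E.g. if the table currently covers fds 0..3 (files.max == 3) and fd 7 is
 * requested, it is grown to 8 entries, entries 4..7 are zeroed and
 * &ttrace->files.table[7] is returned.
 */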
struct file *thread__files_entry(struct thread *thread, int fd)
{
	return thread_trace__files_entry(thread__priv(thread), fd);
}

static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
{
	struct thread_trace *ttrace = thread__priv(thread);
	struct file *file = thread_trace__files_entry(ttrace, fd);

	if (file != NULL) {
		struct stat st;
		if (stat(pathname, &st) == 0)
			file->dev_maj = major(st.st_rdev);
		file->pathname = strdup(pathname);
		if (file->pathname)
			return 0;
	}

	return -1;
}

static int thread__read_fd_path(struct thread *thread, int fd)
{
	char linkname[PATH_MAX], pathname[PATH_MAX];
	struct stat st;
	int ret;

	if (thread->pid_ == thread->tid) {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/fd/%d", thread->pid_, fd);
	} else {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
	}

	if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
		return -1;

	ret = readlink(linkname, pathname, sizeof(pathname));

	if (ret < 0 || ret > st.st_size)
		return -1;

	pathname[ret] = '\0';
	return trace__set_fd_pathname(thread, fd, pathname);
}
static const char *thread__fd_path(struct thread *thread, int fd,
				   struct trace *trace)
{
	struct thread_trace *ttrace = thread__priv(thread);

	if (ttrace == NULL || trace->fd_path_disabled)
		return NULL;

	if (fd < 0)
		return NULL;

	if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) {
		if (!trace->live)
			return NULL;
		++trace->stats.proc_getname;
		if (thread__read_fd_path(thread, fd))
			return NULL;
	}

	return ttrace->files.table[fd].pathname;
}
size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = scnprintf(bf, size, "%d", fd);
	const char *path = thread__fd_path(arg->thread, fd, arg->trace);

	if (path)
		printed += scnprintf(bf + printed, size - printed, "<%s>", path);

	return printed;
}

size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
{
	size_t printed = scnprintf(bf, size, "%d", fd);
	struct thread *thread = machine__find_thread(trace->host, pid, pid);

	if (thread) {
		const char *path = thread__fd_path(thread, fd, trace);

		if (path)
			printed += scnprintf(bf + printed, size - printed, "<%s>", path);

		thread__put(thread);
	}

	return printed;
}

static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
	struct thread_trace *ttrace = thread__priv(arg->thread);

	if (ttrace && fd >= 0 && fd <= ttrace->files.max)
		zfree(&ttrace->files.table[fd].pathname);

	return printed;
}
static void thread__set_filename_pos(struct thread *thread, const char *bf,
				     unsigned long ptr)
{
	struct thread_trace *ttrace = thread__priv(thread);

	ttrace->filename.ptr = ptr;
	ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
}

static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
{
	struct augmented_arg *augmented_arg = arg->augmented.args;
	size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
	/*
	 * So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls
	 * we would have two strings, each prefixed by its size.
	 */
	int consumed = sizeof(*augmented_arg) + augmented_arg->size;

	arg->augmented.args = ((void *)arg->augmented.args) + consumed;
	arg->augmented.size -= consumed;

	return printed;
}
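/*
 * Layout sketch of the augmented payload consumed above, e.g. for the rename*
 * syscalls, which carry two strings, each behind a struct augmented_arg
 * header holding its size:
 *
 *	[ hdr | oldpath bytes ... ][ hdr | newpath bytes ... ]
 *
 * advancing args/size by 'consumed' leaves the second string in place for the
 * next SCA_FILENAME argument.
 */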
static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	unsigned long ptr = arg->val;

	if (arg->augmented.args)
		return syscall_arg__scnprintf_augmented_string(arg, bf, size);

	if (!arg->trace->vfs_getname)
		return scnprintf(bf, size, "%#x", ptr);

	thread__set_filename_pos(arg->thread, bf, ptr);
	return 0;
}

static bool trace__filter_duration(struct trace *trace, double t)
{
	return t < (trace->duration_filter * NSEC_PER_MSEC);
}
static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;

	return fprintf(fp, "%10.3f ", ts);
}

/*
 * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are
 * using ttrace->entry_time for a thread that receives a sys_exit without
 * first having received a sys_enter ("poll" issued before tracing session
 * starts, lost sys_enter exit due to ring buffer overflow).
 */
static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	if (tstamp > 0)
		return __trace__fprintf_tstamp(trace, tstamp, fp);

	return fprintf(fp, "         ? ");
}

static bool done	= false;
static bool interrupted = false;

static void sig_handler(int sig)
{
	done = true;
	interrupted = sig == SIGINT;
}
static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
{
	size_t printed = 0;

	if (trace->multiple_threads) {
		if (trace->show_comm)
			printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
		printed += fprintf(fp, "%d ", thread->tid);
	}

	return printed;
}

static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
					u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
{
	size_t printed = 0;

	if (trace->show_tstamp)
		printed = trace__fprintf_tstamp(trace, tstamp, fp);
	if (trace->show_duration)
		printed += fprintf_duration(duration, duration_calculated, fp);
	return printed + trace__fprintf_comm_tid(trace, thread, fp);
}
static int trace__process_event(struct trace *trace, struct machine *machine,
				union perf_event *event, struct perf_sample *sample)
{
	int ret = 0;

	switch (event->header.type) {
	case PERF_RECORD_LOST:
		color_fprintf(trace->output, PERF_COLOR_RED,
			      "LOST %" PRIu64 " events!\n", event->lost.lost);
		ret = machine__process_lost_event(machine, event, sample);
		break;
	default:
		ret = machine__process_event(machine, event, sample);
		break;
	}

	return ret;
}

static int trace__tool_process(struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample,
			       struct machine *machine)
{
	struct trace *trace = container_of(tool, struct trace, tool);
	return trace__process_event(trace, machine, event, sample);
}

static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;

	if (machine->kptr_restrict_warned)
		return NULL;

	if (symbol_conf.kptr_restrict) {
		pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
			   "Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
			   "Kernel samples will not be resolved.\n");
		machine->kptr_restrict_warned = true;
		return NULL;
	}

	return machine__resolve_kernel_addr(vmachine, addrp, modp);
}
static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
{
	int err = symbol__init(NULL);

	if (err)
		return err;

	trace->host = machine__new_host();
	if (trace->host == NULL)
		return -ENOMEM;

	err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
	if (err < 0)
		goto out;

	err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
					    evlist->core.threads, trace__tool_process, false,
					    1);
out:
	if (err)
		symbol__exit();

	return err;
}

static void trace__symbols__exit(struct trace *trace)
{
	machine__exit(trace->host);
	trace->host = NULL;

	symbol__exit();
}
*sc
, int nr_args
)
1436 if (nr_args
== 6 && sc
->fmt
&& sc
->fmt
->nr_args
!= 0)
1437 nr_args
= sc
->fmt
->nr_args
;
1439 sc
->arg_fmt
= calloc(nr_args
, sizeof(*sc
->arg_fmt
));
1440 if (sc
->arg_fmt
== NULL
)
1443 for (idx
= 0; idx
< nr_args
; ++idx
) {
1445 sc
->arg_fmt
[idx
] = sc
->fmt
->arg
[idx
];
1448 sc
->nr_args
= nr_args
;
1452 static int syscall__set_arg_fmts(struct syscall
*sc
)
1454 struct tep_format_field
*field
, *last_field
= NULL
;
1457 for (field
= sc
->args
; field
; field
= field
->next
, ++idx
) {
1460 if (sc
->fmt
&& sc
->fmt
->arg
[idx
].scnprintf
)
1463 len
= strlen(field
->name
);
1465 if (strcmp(field
->type
, "const char *") == 0 &&
1466 ((len
>= 4 && strcmp(field
->name
+ len
- 4, "name") == 0) ||
1467 strstr(field
->name
, "path") != NULL
))
1468 sc
->arg_fmt
[idx
].scnprintf
= SCA_FILENAME
;
1469 else if ((field
->flags
& TEP_FIELD_IS_POINTER
) || strstr(field
->name
, "addr"))
1470 sc
->arg_fmt
[idx
].scnprintf
= SCA_PTR
;
1471 else if (strcmp(field
->type
, "pid_t") == 0)
1472 sc
->arg_fmt
[idx
].scnprintf
= SCA_PID
;
1473 else if (strcmp(field
->type
, "umode_t") == 0)
1474 sc
->arg_fmt
[idx
].scnprintf
= SCA_MODE_T
;
1475 else if ((strcmp(field
->type
, "int") == 0 ||
1476 strcmp(field
->type
, "unsigned int") == 0 ||
1477 strcmp(field
->type
, "long") == 0) &&
1478 len
>= 2 && strcmp(field
->name
+ len
- 2, "fd") == 0) {
1480 * /sys/kernel/tracing/events/syscalls/sys_enter*
1481 * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
1486 sc
->arg_fmt
[idx
].scnprintf
= SCA_FD
;
1491 sc
->args_size
= last_field
->offset
+ last_field
->size
;
static int trace__read_syscall_info(struct trace *trace, int id)
{
	char tp_name[128];
	struct syscall *sc;
	const char *name = syscalltbl__name(trace->sctbl, id);

	if (trace->syscalls.table == NULL) {
		trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
		if (trace->syscalls.table == NULL)
			return -ENOMEM;
	}

	sc = trace->syscalls.table + id;
	if (sc->nonexistent)
		return 0;

	if (name == NULL) {
		sc->nonexistent = true;
		return 0;
	}

	sc->name = name;
	sc->fmt  = syscall_fmt__find(sc->name);

	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
	sc->tp_format = trace_event__tp_format("syscalls", tp_name);

	if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
		sc->tp_format = trace_event__tp_format("syscalls", tp_name);
	}

	if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? 6 : sc->tp_format->format.nr_fields))
		return -ENOMEM;

	if (IS_ERR(sc->tp_format))
		return PTR_ERR(sc->tp_format);

	sc->args = sc->tp_format->format.fields;
	/*
	 * We need to check for and discard the first field, '__syscall_nr' or
	 * 'nr', which holds the syscall number and is redundant here. Older
	 * kernels name it 'nr' instead of '__syscall_nr', so check for both.
	 */
	if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
		sc->args = sc->args->next;
		--sc->nr_args;
	}

	sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
	sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat");

	return syscall__set_arg_fmts(sc);
}
static int intcmp(const void *a, const void *b)
{
	const int *one = a, *another = b;

	return *one - *another;
}
static int trace__validate_ev_qualifier(struct trace *trace)
{
	int err = 0;
	bool printed_invalid_prefix = false;
	struct str_node *pos;
	size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);

	trace->ev_qualifier_ids.entries = malloc(nr_allocated *
						 sizeof(trace->ev_qualifier_ids.entries[0]));

	if (trace->ev_qualifier_ids.entries == NULL) {
		fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
		      trace->output);
		err = -EINVAL;
		goto out;
	}

	strlist__for_each_entry(pos, trace->ev_qualifier) {
		const char *sc = pos->s;
		int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;

		if (id < 0) {
			id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next);
			if (id >= 0)
				goto matches;

			if (!printed_invalid_prefix) {
				pr_debug("Skipping unknown syscalls: ");
				printed_invalid_prefix = true;
			} else {
				pr_debug(", ");
			}

			pr_debug("%s", sc);
			continue;
		}
matches:
		trace->ev_qualifier_ids.entries[nr_used++] = id;
		if (match_next == -1)
			continue;

		while (1) {
			id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
			if (id < 0)
				break;
			if (nr_allocated == nr_used) {
				void *entries;

				nr_allocated += 8;
				entries = realloc(trace->ev_qualifier_ids.entries,
						  nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
				if (entries == NULL) {
					err = -ENOMEM;
					fputs("\nError:\t Not enough memory for parsing\n", trace->output);
					goto out_free;
				}
				trace->ev_qualifier_ids.entries = entries;
			}
			trace->ev_qualifier_ids.entries[nr_used++] = id;
		}
	}

	trace->ev_qualifier_ids.nr = nr_used;
	qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp);
out:
	if (printed_invalid_prefix)
		pr_debug("\n");
	return err;
out_free:
	zfree(&trace->ev_qualifier_ids.entries);
	trace->ev_qualifier_ids.nr = 0;
	goto out;
}
static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id)
{
	bool in_ev_qualifier;

	if (trace->ev_qualifier_ids.nr == 0)
		return true;

	in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries,
				  trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL;

	if (in_ev_qualifier)
		return !trace->not_ev_qualifier;

	return trace->not_ev_qualifier;
}

/*
 * args is to be interpreted as a series of longs but we need to handle
 * 8-byte unaligned accesses. args points to raw_data within the event
 * and raw_data is guaranteed to be 8-byte unaligned because it is
 * preceded by raw_size which is a u32. So we need to copy args to a temp
 * variable to read it. Most notably this avoids extended load instructions
 * on unaligned addresses.
 */
unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx)
{
	unsigned long val;
	unsigned char *p = arg->args + sizeof(unsigned long) * idx;

	memcpy(&val, p, sizeof(val));
	return val;
}
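/*
 * A minimal sketch of the alternative the memcpy() above avoids:
 *
 *	return *(unsigned long *)p;	// p may not be 8-byte aligned
 *
 * dereferencing p directly would rely on unaligned loads (a fault or a
 * performance hit on some architectures), while memcpy() lets the compiler
 * pick a safe load sequence for the target.
 */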
static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size,
				      struct syscall_arg *arg)
{
	if (sc->arg_fmt && sc->arg_fmt[arg->idx].name)
		return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name);

	return scnprintf(bf, size, "arg%d: ", arg->idx);
}

/*
 * Check if the value is in fact zero, i.e. mask whatever needs masking, such
 * as mount 'flags' argument that needs ignoring some magic flag, see comment
 * in tools/perf/trace/beauty/mount_flags.c
 */
static unsigned long syscall__mask_val(struct syscall *sc, struct syscall_arg *arg, unsigned long val)
{
	if (sc->arg_fmt && sc->arg_fmt[arg->idx].mask_val)
		return sc->arg_fmt[arg->idx].mask_val(arg, val);

	return val;
}

static size_t syscall__scnprintf_val(struct syscall *sc, char *bf, size_t size,
				     struct syscall_arg *arg, unsigned long val)
{
	if (sc->arg_fmt && sc->arg_fmt[arg->idx].scnprintf) {
		arg->val = val;
		if (sc->arg_fmt[arg->idx].parm)
			arg->parm = sc->arg_fmt[arg->idx].parm;
		return sc->arg_fmt[arg->idx].scnprintf(bf, size, arg);
	}
	return scnprintf(bf, size, "%ld", val);
}
static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
				      unsigned char *args, void *augmented_args, int augmented_args_size,
				      struct trace *trace, struct thread *thread)
{
	size_t printed = 0;
	unsigned long val;
	u8 bit = 1;
	struct syscall_arg arg = {
		.args	= args,
		.augmented = {
			.size = augmented_args_size,
			.args = augmented_args,
		},
		.idx	= 0,
		.mask	= 0,
		.trace  = trace,
		.thread = thread,
		.show_string_prefix = trace->show_string_prefix,
	};
	struct thread_trace *ttrace = thread__priv(thread);

	/*
	 * Things like fcntl will set this in its 'cmd' formatter to pick the
	 * right formatter for the return value (an fd? file flags?), which is
	 * not needed for syscalls that always return a given type, say an fd.
	 */
	ttrace->ret_scnprintf = NULL;

	if (sc->args != NULL) {
		struct tep_format_field *field;

		for (field = sc->args; field;
		     field = field->next, ++arg.idx, bit <<= 1) {
			if (arg.mask & bit)
				continue;

			val = syscall_arg__val(&arg, arg.idx);
			/*
			 * Some syscall args need some mask, most don't and
			 * return val untouched.
			 */
			val = syscall__mask_val(sc, &arg, val);

			/*
			 * Suppress this argument if its value is zero and we
			 * don't have a string associated in an strarray for it.
			 */
			if (val == 0 &&
			    !trace->show_zeros &&
			    !(sc->arg_fmt &&
			      (sc->arg_fmt[arg.idx].show_zero ||
			       sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAY ||
			       sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAYS) &&
			      sc->arg_fmt[arg.idx].parm))
				continue;

			printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");

			if (trace->show_arg_names)
				printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);

			printed += syscall__scnprintf_val(sc, bf + printed, size - printed, &arg, val);
		}
	} else if (IS_ERR(sc->tp_format)) {
		/*
		 * If we managed to read the tracepoint /format file, then we
		 * may end up not having any args, like with gettid(), so only
		 * print the raw args when we didn't manage to read it.
		 */
		while (arg.idx < sc->nr_args) {
			if (arg.mask & bit)
				goto next_arg;
			val = syscall_arg__val(&arg, arg.idx);
			if (printed)
				printed += scnprintf(bf + printed, size - printed, ", ");
			printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg);
			printed += syscall__scnprintf_val(sc, bf + printed, size - printed, &arg, val);
next_arg:
			++arg.idx;
			bit <<= 1;
		}
	}

	return printed;
}
)(struct trace
*trace
, struct evsel
*evsel
,
1787 union perf_event
*event
,
1788 struct perf_sample
*sample
);
1790 static struct syscall
*trace__syscall_info(struct trace
*trace
,
1791 struct evsel
*evsel
, int id
)
1798 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
1799 * before that, leaving at a higher verbosity level till that is
1800 * explained. Reproduced with plain ftrace with:
1802 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
1803 * grep "NR -1 " /t/trace_pipe
1805 * After generating some load on the machine.
1809 fprintf(trace
->output
, "Invalid syscall %d id, skipping (%s, %" PRIu64
") ...\n",
1810 id
, perf_evsel__name(evsel
), ++n
);
1817 if (id
> trace
->sctbl
->syscalls
.max_id
)
1820 if ((trace
->syscalls
.table
== NULL
|| trace
->syscalls
.table
[id
].name
== NULL
) &&
1821 (err
= trace__read_syscall_info(trace
, id
)) != 0)
1824 if (trace
->syscalls
.table
[id
].name
== NULL
) {
1825 if (trace
->syscalls
.table
[id
].nonexistent
)
1830 return &trace
->syscalls
.table
[id
];
1834 char sbuf
[STRERR_BUFSIZE
];
1835 fprintf(trace
->output
, "Problems reading syscall %d: %d (%s)", id
, -err
, str_error_r(-err
, sbuf
, sizeof(sbuf
)));
1836 if (id
<= trace
->sctbl
->syscalls
.max_id
&& trace
->syscalls
.table
[id
].name
!= NULL
)
1837 fprintf(trace
->output
, "(%s)", trace
->syscalls
.table
[id
].name
);
1838 fputs(" information\n", trace
->output
);
static void thread__update_stats(struct thread_trace *ttrace,
				 int id, struct perf_sample *sample)
{
	struct int_node *inode;
	struct stats *stats;
	u64 duration = 0;

	inode = intlist__findnew(ttrace->syscall_stats, id);
	if (inode == NULL)
		return;

	stats = inode->priv;
	if (stats == NULL) {
		stats = malloc(sizeof(struct stats));
		if (stats == NULL)
			return;

		init_stats(stats);
		inode->priv = stats;
	}

	if (ttrace->entry_time && sample->time > ttrace->entry_time)
		duration = sample->time - ttrace->entry_time;

	update_stats(stats, duration);
}

static int trace__printf_interrupted_entry(struct trace *trace)
{
	struct thread_trace *ttrace;
	size_t printed;
	int len;

	if (trace->failure_only || trace->current == NULL)
		return 0;

	ttrace = thread__priv(trace->current);

	if (!ttrace->entry_pending)
		return 0;

	printed  = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
	printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);

	if (len < trace->args_alignment - 4)
		printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");

	printed += fprintf(trace->output, " ...\n");

	ttrace->entry_pending = false;
	++trace->nr_events_printed;

	return printed;
}
static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel,
				 struct perf_sample *sample, struct thread *thread)
{
	int printed = 0;

	if (trace->print_sample) {
		double ts = (double)sample->time / NSEC_PER_MSEC;

		printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
				   perf_evsel__name(evsel), ts,
				   thread__comm_str(thread),
				   sample->pid, sample->tid, sample->cpu);
	}

	return printed;
}

static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size)
{
	void *augmented_args = NULL;
	/*
	 * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter
	 * and there we get all 6 syscall args plus the tracepoint common fields
	 * that gets calculated at the start and the syscall_nr (another long).
	 * So we check if that is the case and if so don't look after the
	 * sc->args_size but always after the full raw_syscalls:sys_enter payload,
	 * which is fixed.
	 *
	 * We'll revisit this later to pass s->args_size to the BPF augmenter
	 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c, so that it
	 * copies only what we need for each syscall, like what happens when we
	 * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace
	 * traffic to just what is needed for each syscall.
	 */
	int args_size = raw_augmented_args_size ?: sc->args_size;

	*augmented_args_size = sample->raw_size - args_size;
	if (*augmented_args_size > 0)
		augmented_args = sample->raw_data + args_size;

	return augmented_args;
}
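/*
 * Payload layout sketch for the raw_augmented case handled above:
 *
 *	sample->raw_data: [ common fields + syscall_nr + 6 args ][ augmented payload ]
 *	                  |<------ raw_augmented_args_size ----->|
 *
 * whereas for syscalls:sys_enter_NAME events only sc->args_size bytes precede
 * the augmentation.
 */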
static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample)
{
	char *msg;
	void *args;
	int printed = 0;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	int augmented_args_size = 0;
	void *augmented_args = NULL;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	trace__fprintf_sample(trace, evsel, sample, thread);

	args = perf_evsel__sc_tp_ptr(evsel, args, sample);

	if (ttrace->entry_str == NULL) {
		ttrace->entry_str = malloc(trace__entry_str_size);
		if (!ttrace->entry_str)
			goto out_put;
	}

	if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
		trace__printf_interrupted_entry(trace);
	/*
	 * If this is raw_syscalls.sys_enter, then it always comes with the 6 possible
	 * arguments, even if the syscall being handled, say "openat", uses only 4 arguments
	 * this breaks syscall__augmented_args() check for augmented args, as we calculate
	 * syscall->args_size using each syscalls:sys_enter_NAME tracefs format file,
	 * so when handling, say the openat syscall, we end up getting 6 args for the
	 * raw_syscalls:sys_enter event, when we expected just 4, we end up mistakenly
	 * thinking that the extra 2 u64 args are the augmented filename, so just check
	 * here and avoid using augmented syscalls when the evsel is the raw_syscalls one.
	 */
	if (evsel != trace->syscalls.events.sys_enter)
		augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
	ttrace->entry_time = sample->time;
	msg = ttrace->entry_str;
	printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);

	printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
					   args, augmented_args, augmented_args_size, trace, thread);

	if (sc->is_exit) {
		if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
			int alignment = 0;

			trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
			printed = fprintf(trace->output, "%s)", ttrace->entry_str);
			if (trace->args_alignment > printed)
				alignment = trace->args_alignment - printed;
			fprintf(trace->output, "%*s= ?\n", alignment, " ");
		}
	} else {
		ttrace->entry_pending = true;
		/* See trace__vfs_getname & trace__sys_exit */
		ttrace->filename.pending_open = false;
	}

	if (trace->current != thread) {
		thread__put(trace->current);
		trace->current = thread__get(thread);
	}
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
2019 static int trace__fprintf_sys_enter(struct trace
*trace
, struct evsel
*evsel
,
2020 struct perf_sample
*sample
)
2022 struct thread_trace
*ttrace
;
2023 struct thread
*thread
;
2024 int id
= perf_evsel__sc_tp_uint(evsel
, id
, sample
), err
= -1;
2025 struct syscall
*sc
= trace__syscall_info(trace
, evsel
, id
);
2027 void *args
, *augmented_args
= NULL
;
2028 int augmented_args_size
;
2033 thread
= machine__findnew_thread(trace
->host
, sample
->pid
, sample
->tid
);
2034 ttrace
= thread__trace(thread
, trace
->output
);
2036 * We need to get ttrace just to make sure it is there when syscall__scnprintf_args()
2037 * and the rest of the beautifiers accessing it via struct syscall_arg touches it.
2042 args
= perf_evsel__sc_tp_ptr(evsel
, args
, sample
);
2043 augmented_args
= syscall__augmented_args(sc
, sample
, &augmented_args_size
, trace
->raw_augmented_syscalls_args_size
);
2044 syscall__scnprintf_args(sc
, msg
, sizeof(msg
), args
, augmented_args
, augmented_args_size
, trace
, thread
);
2045 fprintf(trace
->output
, "%s", msg
);
2048 thread__put(thread
);
static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel,
				    struct perf_sample *sample,
				    struct callchain_cursor *cursor)
{
	struct addr_location al;
	int max_stack = evsel->core.attr.sample_max_stack ?
			evsel->core.attr.sample_max_stack :
			trace->max_stack;
	int err;

	if (machine__resolve(trace->host, &al, sample) < 0)
		return -1;

	err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack);
	addr_location__put(&al);
	return err;
}
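/*
 * Illustrative note: a per-event "max-stack" term ends up in
 * evsel->core.attr.sample_max_stack and, per the ternary above, takes
 * precedence over the global trace->max_stack; see the comment in
 * trace__run() where the global value is copied into evsels that did
 * not set their own.
 */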
static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
{
	/* TODO: user-configurable print_opts */
	const unsigned int print_opts = EVSEL__PRINT_SYM |
					EVSEL__PRINT_DSO |
					EVSEL__PRINT_UNKNOWN_AS_ADDR;

	return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, trace->output);
}
static const char *errno_to_name(struct evsel *evsel, int err)
{
	struct perf_env *env = perf_evsel__env(evsel);
	const char *arch_name = perf_env__arch(env);

	return arch_syscalls__strerrno(arch_name, err);
}
static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
			   union perf_event *event __maybe_unused,
			   struct perf_sample *sample)
{
	long ret;
	u64 duration = 0;
	bool duration_calculated = false;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0;
	int alignment = trace->args_alignment;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	trace__fprintf_sample(trace, evsel, sample, thread);

	if (trace->summary)
		thread__update_stats(ttrace, id, sample);

	ret = perf_evsel__sc_tp_uint(evsel, ret, sample);

	if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
		trace__set_fd_pathname(thread, ret, ttrace->filename.name);
		ttrace->filename.pending_open = false;
		++trace->stats.vfs_getname;
	}

	if (ttrace->entry_time) {
		duration = sample->time - ttrace->entry_time;
		if (trace__filter_duration(trace, duration))
			goto out;
		duration_calculated = true;
	} else if (trace->duration_filter)
		goto out;

	if (sample->callchain) {
		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
		if (callchain_ret == 0) {
			if (callchain_cursor.nr < trace->min_stack)
				goto out;
			callchain_ret = 1;
		}
	}

	if (trace->summary_only || (ret >= 0 && trace->failure_only))
		goto out;

	trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);

	if (ttrace->entry_pending) {
		printed = fprintf(trace->output, "%s", ttrace->entry_str);
	} else {
		printed += fprintf(trace->output, " ... [");
		color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
		printed += 9;
		printed += fprintf(trace->output, "]: %s()", sc->name);
	}

	printed++; /* the closing ')' */

	if (alignment > printed)
		alignment -= printed;
	else
		alignment = 0;

	fprintf(trace->output, ")%*s= ", alignment, " ");

	if (sc->fmt == NULL) {
		if (ret < 0)
			goto errno_print;
signed_print:
		fprintf(trace->output, "%ld", ret);
	} else if (ret < 0) {
errno_print: {
		char bf[STRERR_BUFSIZE];
		const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
			   *e = errno_to_name(evsel, -ret);

		fprintf(trace->output, "-1 %s (%s)", e, emsg);
	}
	} else if (ret == 0 && sc->fmt->timeout)
		fprintf(trace->output, "0 (Timeout)");
	else if (ttrace->ret_scnprintf) {
		char bf[1024];
		struct syscall_arg arg = {
			.val	= ret,
			.thread	= thread,
			.trace	= trace,
		};
		ttrace->ret_scnprintf(bf, sizeof(bf), &arg);
		ttrace->ret_scnprintf = NULL;
		fprintf(trace->output, "%s", bf);
	} else if (sc->fmt->hexret)
		fprintf(trace->output, "%#lx", ret);
	else if (sc->fmt->errpid) {
		struct thread *child = machine__find_thread(trace->host, ret, ret);

		if (child != NULL) {
			fprintf(trace->output, "%ld", ret);
			if (child->comm_set)
				fprintf(trace->output, " (%s)", thread__comm_str(child));
			thread__put(child);
		}
	} else
		goto signed_print;

	fputc('\n', trace->output);

	/*
	 * We only consider an 'event' for the sake of --max-events a non-filtered
	 * sys_enter + sys_exit and other tracepoint events.
	 */
	if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
		interrupted = true;

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
out:
	ttrace->entry_pending = false;
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
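/*
 * Illustrative note: the branches above yield return-value renderings
 * such as (sketches, not verbatim output):
 *
 *   openat(...) = 3                                      // plain signed
 *   openat(...) = -1 ENOENT (No such file or directory)  // ret < 0
 *   poll(...)   = 0 (Timeout)                            // sc->fmt->timeout
 *   mmap(...)   = 0x7f0e4c2e5000                         // sc->fmt->hexret
 *
 * Which syscalls set .timeout/.hexret/.errpid is defined in the
 * syscall_fmt table earlier in this file.
 */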
static int trace__vfs_getname(struct trace *trace, struct evsel *evsel,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	struct thread_trace *ttrace;
	size_t filename_len, entry_str_len, to_move;
	ssize_t remaining_space;
	char *pos;
	const char *filename = perf_evsel__rawptr(evsel, sample, "pathname");

	if (!thread)
		goto out;

	ttrace = thread__priv(thread);
	if (!ttrace)
		goto out_put;

	filename_len = strlen(filename);
	if (filename_len == 0)
		goto out_put;

	if (ttrace->filename.namelen < filename_len) {
		char *f = realloc(ttrace->filename.name, filename_len + 1);

		if (f == NULL)
			goto out_put;

		ttrace->filename.namelen = filename_len;
		ttrace->filename.name = f;
	}

	strcpy(ttrace->filename.name, filename);
	ttrace->filename.pending_open = true;

	if (!ttrace->filename.ptr)
		goto out_put;

	entry_str_len = strlen(ttrace->entry_str);
	remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
	if (remaining_space <= 0)
		goto out_put;

	if (filename_len > (size_t)remaining_space) {
		filename += filename_len - remaining_space;
		filename_len = remaining_space;
	}

	to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
	pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
	memmove(pos + filename_len, pos, to_move);
	memcpy(pos, filename, filename_len);

	ttrace->filename.ptr = 0;
	ttrace->filename.entry_str_pos = 0;
out_put:
	thread__put(thread);
out:
	return 0;
}
static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample)
{
	u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
	double runtime_ms = (double)runtime / NSEC_PER_MSEC;
	struct thread *thread = machine__findnew_thread(trace->host,
							sample->pid,
							sample->tid);
	struct thread_trace *ttrace = thread__trace(thread, trace->output);

	if (ttrace == NULL)
		goto out_dump;

	ttrace->runtime_ms += runtime_ms;
	trace->runtime_ms += runtime_ms;
out_put:
	thread__put(thread);
	return 0;

out_dump:
	fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
	       evsel->name,
	       perf_evsel__strval(evsel, sample, "comm"),
	       (pid_t)perf_evsel__intval(evsel, sample, "pid"),
	       runtime,
	       perf_evsel__intval(evsel, sample, "vruntime"));
	goto out_put;
}
static int bpf_output__printer(enum binary_printer_ops op,
			       unsigned int val, void *extra __maybe_unused, FILE *fp)
{
	unsigned char ch = (unsigned char)val;

	switch (op) {
	case BINARY_PRINT_CHAR_DATA:
		return fprintf(fp, "%c", isprint(ch) ? ch : '.');
	case BINARY_PRINT_DATA_BEGIN:
	case BINARY_PRINT_LINE_BEGIN:
	case BINARY_PRINT_ADDR:
	case BINARY_PRINT_NUM_DATA:
	case BINARY_PRINT_NUM_PAD:
	case BINARY_PRINT_SEP:
	case BINARY_PRINT_CHAR_PAD:
	case BINARY_PRINT_LINE_END:
	case BINARY_PRINT_DATA_END:
	default:
		break;
	}

	return 0;
}

static void bpf_output__fprintf(struct trace *trace,
				struct perf_sample *sample)
{
	binary__fprintf(sample->raw_data, sample->raw_size, 8,
			bpf_output__printer, NULL, trace->output);
	++trace->nr_events_printed;
}
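/*
 * Illustrative note: binary__fprintf() drives the printer callback once
 * per byte plus begin/end/padding ops; since the handler above only acts
 * on CHAR_DATA, the raw BPF output comes out as a plain
 * printable-or-dot dump, e.g. something like "..../etc/passwd.".
 */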
static int trace__event_handler(struct trace *trace, struct evsel *evsel,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample)
{
	struct thread *thread;
	int callchain_ret = 0;
	/*
	 * Check if we called perf_evsel__disable(evsel) due to, for instance,
	 * this event's max_events having been hit and this is an entry coming
	 * from the ring buffer that we should discard, since the max events
	 * have already been considered/printed.
	 */
	if (evsel->disabled)
		return 0;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);

	if (sample->callchain) {
		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
		if (callchain_ret == 0) {
			if (callchain_cursor.nr < trace->min_stack)
				goto out;
			callchain_ret = 1;
		}
	}

	trace__printf_interrupted_entry(trace);
	trace__fprintf_tstamp(trace, sample->time, trace->output);

	if (trace->trace_syscalls && trace->show_duration)
		fprintf(trace->output, "( ): ");

	if (thread)
		trace__fprintf_comm_tid(trace, thread, trace->output);

	if (evsel == trace->syscalls.events.augmented) {
		int id = perf_evsel__sc_tp_uint(evsel, id, sample);
		struct syscall *sc = trace__syscall_info(trace, evsel, id);

		if (sc) {
			fprintf(trace->output, "%s(", sc->name);
			trace__fprintf_sys_enter(trace, evsel, sample);
			fputc(')', trace->output);
			goto newline;
		}

		/*
		 * XXX: Not having the associated syscall info or not finding/adding
		 * the thread should never happen, but if it does...
		 * fall thru and print it as a bpf_output event.
		 */
	}

	fprintf(trace->output, "%s:", evsel->name);

	if (perf_evsel__is_bpf_output(evsel)) {
		bpf_output__fprintf(trace, sample);
	} else if (evsel->tp_format) {
		if (strncmp(evsel->tp_format->name, "sys_enter_", 10) ||
		    trace__fprintf_sys_enter(trace, evsel, sample)) {
			event_format__fprintf(evsel->tp_format, sample->cpu,
					      sample->raw_data, sample->raw_size,
					      trace->output);
			++trace->nr_events_printed;

			if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
				evsel__disable(evsel);
				evsel__close(evsel);
			}
		}
	}

newline:
	fprintf(trace->output, "\n");

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
out:
	thread__put(thread);
	return 0;
}
static void print_location(FILE *f, struct perf_sample *sample,
			   struct addr_location *al,
			   bool print_dso, bool print_sym)
{

	if ((verbose > 0 || print_dso) && al->map)
		fprintf(f, "%s@", al->map->dso->long_name);

	if ((verbose > 0 || print_sym) && al->sym)
		fprintf(f, "%s+0x%" PRIx64, al->sym->name,
			al->addr - al->sym->start);
	else if (al->map)
		fprintf(f, "0x%" PRIx64, al->addr);
	else
		fprintf(f, "0x%" PRIx64, sample->addr);
}
static int trace__pgfault(struct trace *trace,
			  struct evsel *evsel,
			  union perf_event *event __maybe_unused,
			  struct perf_sample *sample)
{
	struct thread *thread;
	struct addr_location al;
	char map_type = 'd';
	struct thread_trace *ttrace;
	int err = -1;
	int callchain_ret = 0;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);

	if (sample->callchain) {
		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
		if (callchain_ret == 0) {
			if (callchain_cursor.nr < trace->min_stack)
				goto out_put;
			callchain_ret = 1;
		}
	}

	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	if (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
		ttrace->pfmaj++;
	else
		ttrace->pfmin++;

	if (trace->summary_only)
		goto out;

	thread__find_symbol(thread, sample->cpumode, sample->ip, &al);

	trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);

	fprintf(trace->output, "%sfault [",
		evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
		"maj" : "min");

	print_location(trace->output, sample, &al, false, true);

	fprintf(trace->output, "] => ");

	thread__find_symbol(thread, sample->cpumode, sample->addr, &al);

	if (!al.map) {
		thread__find_symbol(thread, sample->cpumode, sample->addr, &al);

		if (al.map)
			map_type = 'x';
		else
			map_type = '?';
	}

	print_location(trace->output, sample, &al, true, false);

	fprintf(trace->output, " (%c%c)\n", map_type, al.level);

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));

	++trace->nr_events_printed;
out:
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
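/*
 * Illustrative note: map_type is a one-char classification of the
 * faulting target: 'd' (data, the default set above), 'x' when the
 * address resolves only via the second lookup (executable mapping),
 * '?' when it resolves to no map at all; al.level adds the usual
 * symbol-level character ('k' kernel, '.' user, etc.).
 */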
static void trace__set_base_time(struct trace *trace,
				 struct evsel *evsel,
				 struct perf_sample *sample)
{
	/*
	 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust
	 * and don't use sample->time unconditionally, we may end up having
	 * some other event in the future without PERF_SAMPLE_TIME for good
	 * reason, i.e. we may not be interested in its timestamps, just in
	 * it taking place, picking some piece of information when it
	 * appears in our event stream (vfs_getname comes to mind).
	 */
	if (trace->base_time == 0 && !trace->full_time &&
	    (evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
		trace->base_time = sample->time;
}
static int trace__process_sample(struct perf_tool *tool,
				 union perf_event *event,
				 struct perf_sample *sample,
				 struct evsel *evsel,
				 struct machine *machine __maybe_unused)
{
	struct trace *trace = container_of(tool, struct trace, tool);
	struct thread *thread;
	int err = 0;

	tracepoint_handler handler = evsel->handler;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	if (thread && thread__is_filtered(thread))
		goto out;

	trace__set_base_time(trace, evsel, sample);

	if (handler) {
		++trace->nr_events;
		handler(trace, evsel, event, sample);
	}
out:
	thread__put(thread);
	return err;
}
static int trace__record(struct trace *trace, int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	const char * const record_args[] = {
		"record",
		"-R",
		"-m", "1024",
		"-c", "1",
	};

	const char * const sc_args[] = { "-e", };
	unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
	const char * const majpf_args[] = { "-e", "major-faults" };
	unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
	const char * const minpf_args[] = { "-e", "minor-faults" };
	unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);

	/* +1 is for the event string below */
	rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 1 +
		majpf_args_nr + minpf_args_nr + argc;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	j = 0;
	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[j++] = record_args[i];

	if (trace->trace_syscalls) {
		for (i = 0; i < sc_args_nr; i++)
			rec_argv[j++] = sc_args[i];

		/* event string may be different for older kernels - e.g., RHEL6 */
		if (is_valid_tracepoint("raw_syscalls:sys_enter"))
			rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
		else if (is_valid_tracepoint("syscalls:sys_enter"))
			rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
		else {
			pr_err("Neither raw_syscalls nor syscalls events exist.\n");
			free(rec_argv);
			return -1;
		}
	}

	if (trace->trace_pgfaults & TRACE_PFMAJ)
		for (i = 0; i < majpf_args_nr; i++)
			rec_argv[j++] = majpf_args[i];

	if (trace->trace_pgfaults & TRACE_PFMIN)
		for (i = 0; i < minpf_args_nr; i++)
			rec_argv[j++] = minpf_args[i];

	for (i = 0; i < (unsigned int)argc; i++)
		rec_argv[j++] = argv[i];

	return cmd_record(j, rec_argv);
}
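/*
 * Illustrative note: with syscalls and both pagefault classes enabled,
 * "perf trace record -- sleep 1" composes an argv roughly equivalent to:
 *
 *   perf record -R -m 1024 -c 1 \
 *       -e raw_syscalls:sys_enter,raw_syscalls:sys_exit \
 *       -e major-faults -e minor-faults sleep 1
 *
 * (record_args plus the sc/majpf/minpf argument blocks above).
 */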
static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
static bool evlist__add_vfs_getname(struct evlist *evlist)
{
	bool found = false;
	struct evsel *evsel, *tmp;
	struct parse_events_error err = { .idx = 0, };
	int ret = parse_events(evlist, "probe:vfs_getname*", &err);

	if (ret)
		return false;

	evlist__for_each_entry_safe(evlist, evsel, tmp) {
		if (!strstarts(perf_evsel__name(evsel), "probe:vfs_getname"))
			continue;

		if (perf_evsel__field(evsel, "pathname")) {
			evsel->handler = trace__vfs_getname;
			found = true;
			continue;
		}

		list_del_init(&evsel->core.node);
		evsel->evlist = NULL;
		evsel__delete(evsel);
	}

	return found;
}
static struct evsel *perf_evsel__new_pgfault(u64 config)
{
	struct evsel *evsel;
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.mmap_data = 1,
	};

	attr.config = config;
	attr.sample_period = 1;

	event_attr_init(&attr);

	evsel = evsel__new(&attr);
	if (evsel)
		evsel->handler = trace__pgfault;

	return evsel;
}
static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
{
	const u32 type = event->header.type;
	struct evsel *evsel;

	if (type != PERF_RECORD_SAMPLE) {
		trace__process_event(trace, trace->host, event, sample);
		return;
	}

	evsel = perf_evlist__id2evsel(trace->evlist, sample->id);
	if (evsel == NULL) {
		fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
		return;
	}

	if (evswitch__discard(&trace->evswitch, evsel))
		return;

	trace__set_base_time(trace, evsel, sample);

	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
	    sample->raw_data == NULL) {
		fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
		       perf_evsel__name(evsel), sample->tid,
		       sample->cpu, sample->raw_size);
	} else {
		tracepoint_handler handler = evsel->handler;
		handler(trace, evsel, event, sample);
	}

	if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
		interrupted = true;
}
static int trace__add_syscall_newtp(struct trace *trace)
{
	int ret = -1;
	struct evlist *evlist = trace->evlist;
	struct evsel *sys_enter, *sys_exit;

	sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter);
	if (sys_enter == NULL)
		goto out;

	if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
		goto out_delete_sys_enter;

	sys_exit = perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit);
	if (sys_exit == NULL)
		goto out_delete_sys_enter;

	if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
		goto out_delete_sys_exit;

	perf_evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
	perf_evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);

	evlist__add(evlist, sys_enter);
	evlist__add(evlist, sys_exit);

	if (callchain_param.enabled && !trace->kernel_syscallchains) {
		/*
		 * We're interested only in the user space callchain
		 * leading to the syscall, allow overriding that for
		 * debugging reasons using --kernel_syscall_callchains
		 */
		sys_exit->core.attr.exclude_callchain_kernel = 1;
	}

	trace->syscalls.events.sys_enter = sys_enter;
	trace->syscalls.events.sys_exit = sys_exit;

	ret = 0;
out:
	return ret;

out_delete_sys_exit:
	evsel__delete_priv(sys_exit);
out_delete_sys_enter:
	evsel__delete_priv(sys_enter);
	goto out;
}
static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
{
	int err = -1;
	struct evsel *sys_exit;
	char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
						trace->ev_qualifier_ids.nr,
						trace->ev_qualifier_ids.entries);

	if (filter == NULL)
		goto out_enomem;

	if (!perf_evsel__append_tp_filter(trace->syscalls.events.sys_enter,
					  filter)) {
		sys_exit = trace->syscalls.events.sys_exit;
		err = perf_evsel__append_tp_filter(sys_exit, filter);
	}

	free(filter);
out:
	return err;
out_enomem:
	errno = ENOMEM;
	goto out;
}
#ifdef HAVE_LIBBPF_SUPPORT
static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
{
	if (trace->bpf_obj == NULL)
		return NULL;

	return bpf_object__find_program_by_title(trace->bpf_obj, name);
}

static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
							const char *prog_name, const char *type)
{
	struct bpf_program *prog;

	if (prog_name == NULL) {
		char default_prog_name[256];
		scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->name);
		prog = trace__find_bpf_program_by_title(trace, default_prog_name);
		if (prog != NULL)
			goto out_found;
		if (sc->fmt && sc->fmt->alias) {
			scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->fmt->alias);
			prog = trace__find_bpf_program_by_title(trace, default_prog_name);
			if (prog != NULL)
				goto out_found;
		}
		goto out_unaugmented;
	}

	prog = trace__find_bpf_program_by_title(trace, prog_name);

	if (prog != NULL) {
out_found:
		return prog;
	}

	pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n",
		 prog_name, type, sc->name);
out_unaugmented:
	return trace->syscalls.unaugmented_prog;
}
static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
{
	struct syscall *sc = trace__syscall_info(trace, NULL, id);

	if (sc == NULL)
		return;

	sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter");
	sc->bpf_prog.sys_exit  = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit  : NULL,  "exit");
}

static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id)
{
	struct syscall *sc = trace__syscall_info(trace, NULL, id);
	return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->syscalls.unaugmented_prog);
}

static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
{
	struct syscall *sc = trace__syscall_info(trace, NULL, id);
	return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_prog);
}
static void trace__init_bpf_map_syscall_args(struct trace *trace, int id, struct bpf_map_syscall_entry *entry)
{
	struct syscall *sc = trace__syscall_info(trace, NULL, id);
	int arg = 0;

	if (sc == NULL)
		goto out;

	for (; arg < sc->nr_args; ++arg) {
		entry->string_args_len[arg] = 0;
		if (sc->arg_fmt[arg].scnprintf == SCA_FILENAME) {
			/* Should be set like strace -s strsize */
			entry->string_args_len[arg] = PATH_MAX;
		}
	}
out:
	for (; arg < 6; ++arg)
		entry->string_args_len[arg] = 0;
}
static int trace__set_ev_qualifier_bpf_filter(struct trace *trace)
{
	int fd = bpf_map__fd(trace->syscalls.map);
	struct bpf_map_syscall_entry value = {
		.enabled = !trace->not_ev_qualifier,
	};
	int err = 0;
	size_t i;

	for (i = 0; i < trace->ev_qualifier_ids.nr; ++i) {
		int key = trace->ev_qualifier_ids.entries[i];

		if (value.enabled) {
			trace__init_bpf_map_syscall_args(trace, key, &value);
			trace__init_syscall_bpf_progs(trace, key);
		}

		err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
		if (err)
			break;
	}

	return err;
}

static int __trace__init_syscalls_bpf_map(struct trace *trace, bool enabled)
{
	int fd = bpf_map__fd(trace->syscalls.map);
	struct bpf_map_syscall_entry value = {
		.enabled = enabled,
	};
	int err = 0, key;

	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
		if (enabled)
			trace__init_bpf_map_syscall_args(trace, key, &value);

		err = bpf_map_update_elem(fd, &key, &value, BPF_ANY);
		if (err)
			break;
	}

	return err;
}

static int trace__init_syscalls_bpf_map(struct trace *trace)
{
	bool enabled = true;

	if (trace->ev_qualifier_ids.nr)
		enabled = trace->not_ev_qualifier;

	return __trace__init_syscalls_bpf_map(trace, enabled);
}
static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
{
	struct tep_format_field *field, *candidate_field;
	int id;

	/*
	 * We're only interested in syscalls that have a pointer:
	 */
	for (field = sc->args; field; field = field->next) {
		if (field->flags & TEP_FIELD_IS_POINTER)
			goto try_to_find_pair;
	}

	return NULL;

try_to_find_pair:
	for (id = 0; id < trace->sctbl->syscalls.nr_entries; ++id) {
		struct syscall *pair = trace__syscall_info(trace, NULL, id);
		struct bpf_program *pair_prog;
		bool is_candidate = false;

		if (pair == NULL || pair == sc ||
		    pair->bpf_prog.sys_enter == trace->syscalls.unaugmented_prog)
			continue;

		for (field = sc->args, candidate_field = pair->args;
		     field && candidate_field; field = field->next, candidate_field = candidate_field->next) {
			bool is_pointer = field->flags & TEP_FIELD_IS_POINTER,
			     candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER;

			if (is_pointer) {
				if (!candidate_is_pointer) {
					// The candidate just doesn't copy our pointer arg, it might copy other pointers we want.
					continue;
				}
			} else {
				if (candidate_is_pointer) {
					// The candidate might copy a pointer we don't have, skip it.
					goto next_candidate;
				}
				continue;
			}

			if (strcmp(field->type, candidate_field->type))
				goto next_candidate;

			is_candidate = true;
		}

		if (!is_candidate)
			goto next_candidate;

		/*
		 * Check if the tentative pair syscall augmenter has more pointers, if it has,
		 * then it may be collecting that and we then can't use it, as it would collect
		 * more than what is common to the two syscalls.
		 */
		if (candidate_field) {
			for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->next)
				if (candidate_field->flags & TEP_FIELD_IS_POINTER)
					goto next_candidate;
		}

		pair_prog = pair->bpf_prog.sys_enter;
		/*
		 * If the pair isn't enabled, then its bpf_prog.sys_enter will not
		 * have been searched for, so search it here and if it returns the
		 * unaugmented one, then ignore it, otherwise we'll reuse that BPF
		 * program for a filtered syscall on a non-filtered one.
		 *
		 * For instance, we have "!syscalls:sys_enter_renameat" and that is
		 * useful for "renameat2".
		 */
		if (pair_prog == NULL) {
			pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
			if (pair_prog == trace->syscalls.unaugmented_prog)
				goto next_candidate;
		}

		pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name, sc->name);
		return pair_prog;
next_candidate:
		continue;
	}

	return NULL;
}
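/*
 * Illustrative note, the pairing rule above in short: walk both argument
 * lists in lockstep; every position must agree on "is a pointer" and,
 * for pointers, on the exact C type. The candidate may have extra
 * trailing arguments, but those must be pointer-free, so that the reused
 * augmenter never copies more than what the two syscalls share.
 */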
static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
{
	int map_enter_fd = bpf_map__fd(trace->syscalls.prog_array.sys_enter),
	    map_exit_fd  = bpf_map__fd(trace->syscalls.prog_array.sys_exit);
	int err = 0, key;

	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
		int prog_fd;

		if (!trace__syscall_enabled(trace, key))
			continue;

		trace__init_syscall_bpf_progs(trace, key);

		// It'll get at least the "!raw_syscalls:unaugmented"
		prog_fd = trace__bpf_prog_sys_enter_fd(trace, key);
		err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
		if (err)
			break;
		prog_fd = trace__bpf_prog_sys_exit_fd(trace, key);
		err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY);
		if (err)
			break;
	}

	/*
	 * Now lets do a second pass looking for enabled syscalls without
	 * an augmenter that have a signature that is a superset of another
	 * syscall with an augmenter so that we can auto-reuse it.
	 *
	 * I.e. if we have an augmenter for the "open" syscall that has
	 * this signature:
	 *
	 *   int open(const char *pathname, int flags, mode_t mode);
	 *
	 * I.e. that will collect just the first string argument, then we
	 * can reuse it for the 'creat' syscall, that has this signature:
	 *
	 *   int creat(const char *pathname, mode_t mode);
	 *
	 * and for:
	 *
	 *   int stat(const char *pathname, struct stat *statbuf);
	 *   int lstat(const char *pathname, struct stat *statbuf);
	 *
	 * Because the 'open' augmenter will collect the first arg as a string,
	 * and leave alone all the other args, which already helps with
	 * beautifying 'stat' and 'lstat''s pathname arg.
	 *
	 * Then, in time, when 'stat' gets an augmenter that collects both
	 * first and second arg (this one on the raw_syscalls:sys_exit prog
	 * array tail call, then that one will be used.
	 */
	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
		struct syscall *sc = trace__syscall_info(trace, NULL, key);
		struct bpf_program *pair_prog;
		int prog_fd;

		if (sc == NULL || sc->bpf_prog.sys_enter == NULL)
			continue;

		/*
		 * For now we're just reusing the sys_enter prog, and if it
		 * already has an augmenter, we don't need to find one.
		 */
		if (sc->bpf_prog.sys_enter != trace->syscalls.unaugmented_prog)
			continue;

		/*
		 * Look at all the other syscalls for one that has a signature
		 * that is close enough that we can share:
		 */
		pair_prog = trace__find_usable_bpf_prog_entry(trace, sc);
		if (pair_prog == NULL)
			continue;

		sc->bpf_prog.sys_enter = pair_prog;

		/*
		 * Update the BPF_MAP_TYPE_PROG_SHARED for raw_syscalls:sys_enter
		 * with the fd for the program we're reusing:
		 */
		prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter);
		err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
		if (err)
			break;
	}

	return err;
}
#else // HAVE_LIBBPF_SUPPORT
static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused)
{
	return 0;
}

static int trace__init_syscalls_bpf_map(struct trace *trace __maybe_unused)
{
	return 0;
}

static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace __maybe_unused,
							    const char *name __maybe_unused)
{
	return NULL;
}

static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused)
{
	return 0;
}
#endif // HAVE_LIBBPF_SUPPORT
static int trace__set_ev_qualifier_filter(struct trace *trace)
{
	if (trace->syscalls.map)
		return trace__set_ev_qualifier_bpf_filter(trace);
	if (trace->syscalls.events.sys_enter)
		return trace__set_ev_qualifier_tp_filter(trace);
	return 0;
}
static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused,
				    size_t npids __maybe_unused, pid_t *pids __maybe_unused)
{
	int err = 0;
#ifdef HAVE_LIBBPF_SUPPORT
	bool value = true;
	int map_fd = bpf_map__fd(map);
	size_t i;

	for (i = 0; i < npids; ++i) {
		err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY);
		if (err)
			break;
	}
#endif
	return err;
}
static int trace__set_filter_loop_pids(struct trace *trace)
{
	unsigned int nr = 1, err;
	pid_t pids[32] = {
		getpid(),
	};
	struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);

	while (thread && nr < ARRAY_SIZE(pids)) {
		struct thread *parent = machine__find_thread(trace->host, thread->ppid, thread->ppid);

		if (parent == NULL)
			break;

		if (!strcmp(thread__comm_str(parent), "sshd") ||
		    strstarts(thread__comm_str(parent), "gnome-terminal")) {
			pids[nr++] = parent->tid;
			break;
		}
		thread = parent;
	}

	err = perf_evlist__set_tp_filter_pids(trace->evlist, nr, pids);
	if (!err && trace->filter_pids.map)
		err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);

	return err;
}
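/*
 * Illustrative note: filtering out the tracer's own pid (pids[0] =
 * getpid() above) and its terminal ancestors (sshd, gnome-terminal)
 * breaks the feedback loop where writing trace output to the terminal
 * itself generates syscalls, which generate more trace output, and so on.
 */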
static int trace__set_filter_pids(struct trace *trace)
{
	int err = 0;
	/*
	 * Better not use !target__has_task() here because we need to cover the
	 * case where no threads were specified in the command line, but a
	 * workload was, and in that case we will fill in the thread_map when
	 * we fork the workload in perf_evlist__prepare_workload.
	 */
	if (trace->filter_pids.nr > 0) {
		err = perf_evlist__set_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
						      trace->filter_pids.entries);
		if (!err && trace->filter_pids.map) {
			err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
						       trace->filter_pids.entries);
		}
	} else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
		err = trace__set_filter_loop_pids(trace);
	}

	return err;
}
static int __trace__deliver_event(struct trace *trace, union perf_event *event)
{
	struct evlist *evlist = trace->evlist;
	struct perf_sample sample;
	int err;

	err = perf_evlist__parse_sample(evlist, event, &sample);
	if (err)
		fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
	else
		trace__handle_event(trace, event, &sample);

	return 0;
}

static int __trace__flush_events(struct trace *trace)
{
	u64 first = ordered_events__first_time(&trace->oe.data);
	u64 flush = trace->oe.last - NSEC_PER_SEC;

	/* Is there something to flush.. */
	if (first && first < flush)
		return ordered_events__flush_time(&trace->oe.data, flush);

	return 0;
}

static int trace__flush_events(struct trace *trace)
{
	return !trace->sort_events ? 0 : __trace__flush_events(trace);
}

static int trace__deliver_event(struct trace *trace, union perf_event *event)
{
	int err;

	if (!trace->sort_events)
		return __trace__deliver_event(trace, event);

	err = perf_evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
	if (err && err != -1)
		return err;

	err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0);
	if (err)
		return err;

	return trace__flush_events(trace);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct trace *trace = container_of(oe, struct trace, oe.data);

	return __trace__deliver_event(trace, event->event);
}
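/*
 * Illustrative note: with --sort-events every event is first queued with
 * its parsed timestamp, and only events older than the last timestamp
 * minus the NSEC_PER_SEC slack in __trace__flush_events() are flushed,
 * trading a second of latency for cross-CPU ordering.
 */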
static int trace__run(struct trace *trace, int argc, const char **argv)
{
	struct evlist *evlist = trace->evlist;
	struct evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
	int err = -1, i;
	unsigned long before;
	const bool forks = argc > 0;
	bool draining = false;

	trace->live = true;

	if (!trace->raw_augmented_syscalls) {
		if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
			goto out_error_raw_syscalls;

		if (trace->trace_syscalls)
			trace->vfs_getname = evlist__add_vfs_getname(evlist);
	}

	if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
		pgfault_maj = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
		if (pgfault_maj == NULL)
			goto out_error_mem;
		perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
		evlist__add(evlist, pgfault_maj);
	}

	if ((trace->trace_pgfaults & TRACE_PFMIN)) {
		pgfault_min = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
		if (pgfault_min == NULL)
			goto out_error_mem;
		perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
		evlist__add(evlist, pgfault_min);
	}

	if (trace->sched &&
	    perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
				   trace__sched_stat_runtime))
		goto out_error_sched_stat_runtime;
	/*
	 * If a global cgroup was set, apply it to all the events without an
	 * explicit cgroup. I.e.:
	 *
	 *	trace -G A -e sched:*switch
	 *
	 * Will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc
	 * _and_ sched:sched_switch to the 'A' cgroup, while:
	 *
	 *	trace -e sched:*switch -G A
	 *
	 * will only set the sched:sched_switch event to the 'A' cgroup, all the
	 * other events (raw_syscalls:sys_{enter,exit}, etc are left "without"
	 * a cgroup (on the root cgroup, sys wide, etc).
	 *
	 * Multiple cgroups:
	 *
	 *	trace -G A -e sched:*switch -G B
	 *
	 * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes
	 * to the 'B' cgroup.
	 *
	 * evlist__set_default_cgroup() grabs a reference of the passed cgroup
	 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL.
	 */
	if (trace->cgroup)
		evlist__set_default_cgroup(trace->evlist, trace->cgroup);

	err = perf_evlist__create_maps(evlist, &trace->opts.target);
	if (err < 0) {
		fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
		goto out_delete_evlist;
	}

	err = trace__symbols_init(trace, evlist);
	if (err < 0) {
		fprintf(trace->output, "Problems initializing symbol libraries!\n");
		goto out_delete_evlist;
	}

	perf_evlist__config(evlist, &trace->opts, &callchain_param);

	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);

	if (forks) {
		err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
						    argv, false, NULL);
		if (err < 0) {
			fprintf(trace->output, "Couldn't run the workload!\n");
			goto out_delete_evlist;
		}
	}

	err = evlist__open(evlist);
	if (err < 0)
		goto out_error_open;

	err = bpf__apply_obj_config();
	if (err) {
		char errbuf[BUFSIZ];

		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Apply config to BPF failed: %s\n",
			errbuf);
		goto out_error_open;
	}

	err = trace__set_filter_pids(trace);
	if (err < 0)
		goto out_error_mem;

	if (trace->syscalls.map)
		trace__init_syscalls_bpf_map(trace);

	if (trace->syscalls.prog_array.sys_enter)
		trace__init_syscalls_bpf_prog_array_maps(trace);

	if (trace->ev_qualifier_ids.nr > 0) {
		err = trace__set_ev_qualifier_filter(trace);
		if (err < 0)
			goto out_errno;

		if (trace->syscalls.events.sys_exit) {
			pr_debug("event qualifier tracepoint filter: %s\n",
				 trace->syscalls.events.sys_exit->filter);
		}
	}

	/*
	 * If the "close" syscall is not traced, then we will not have the
	 * opportunity to, in syscall_arg__scnprintf_close_fd() invalidate the
	 * fd->pathname table and were ending up showing the last value set by
	 * syscalls opening a pathname and associating it with a descriptor or
	 * reading it from /proc/pid/fd/ in cases where that doesn't make
	 * sense.
	 *
	 * So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is
	 * not in use.
	 */
	trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));

	err = perf_evlist__apply_filters(evlist, &evsel);
	if (err < 0)
		goto out_error_apply_filters;

	if (trace->dump.map)
		bpf_map__fprintf(trace->dump.map, trace->output);

	err = perf_evlist__mmap(evlist, trace->opts.mmap_pages);
	if (err < 0)
		goto out_error_mmap;

	if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
		evlist__enable(evlist);

	if (forks)
		perf_evlist__start_workload(evlist);

	if (trace->opts.initial_delay) {
		usleep(trace->opts.initial_delay * 1000);
		evlist__enable(evlist);
	}

	trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
				  evlist->core.threads->nr > 1 ||
				  perf_evlist__first(evlist)->core.attr.inherit;

	/*
	 * Now that we already used evsel->core.attr to ask the kernel to setup the
	 * events, lets reuse evsel->core.attr.sample_max_stack as the limit in
	 * trace__resolve_callchain(), allowing per-event max-stack settings
	 * to override an explicitly set --max-stack global setting.
	 */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel__has_callchain(evsel) &&
		    evsel->core.attr.sample_max_stack == 0)
			evsel->core.attr.sample_max_stack = trace->max_stack;
	}
again:
	before = trace->nr_events;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;
		struct perf_mmap *md;

		md = &evlist->mmap[i];
		if (perf_mmap__read_init(md) < 0)
			continue;

		while ((event = perf_mmap__read_event(md)) != NULL) {
			++trace->nr_events;

			err = trace__deliver_event(trace, event);
			if (err)
				goto out_disable;

			perf_mmap__consume(md);

			if (interrupted)
				goto out_disable;

			if (done && !draining) {
				evlist__disable(evlist);
				draining = true;
			}
		}
		perf_mmap__read_done(md);
	}

	if (trace->nr_events == before) {
		int timeout = done ? 100 : -1;

		if (!draining && perf_evlist__poll(evlist, timeout) > 0) {
			if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
				draining = true;

			goto again;
		} else {
			if (trace__flush_events(trace))
				goto out_disable;
		}
	} else {
		goto again;
	}

out_disable:
	thread__zput(trace->current);

	evlist__disable(evlist);

	if (trace->sort_events)
		ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);

	if (!err) {
		if (trace->summary)
			trace__fprintf_thread_summary(trace, trace->output);

		if (trace->show_tool_stats) {
			fprintf(trace->output, "Stats:\n "
					       " vfs_getname : %" PRIu64 "\n"
					       " proc_getname: %" PRIu64 "\n",
				trace->stats.vfs_getname,
				trace->stats.proc_getname);
		}
	}

out_delete_evlist:
	trace__symbols__exit(trace);

	evlist__delete(evlist);
	cgroup__put(trace->cgroup);
	trace->evlist = NULL;
	trace->live = false;
	return err;
{
	char errbuf[BUFSIZ];

out_error_sched_stat_runtime:
	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
	goto out_error;

out_error_raw_syscalls:
	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
	goto out_error;

out_error_mmap:
	perf_evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
	goto out_error;

out_error_open:
	perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));

out_error:
	fprintf(trace->output, "%s\n", errbuf);
	goto out_delete_evlist;

out_error_apply_filters:
	fprintf(trace->output,
		"Failed to set filter \"%s\" on event %s with %d (%s)\n",
		evsel->filter, perf_evsel__name(evsel), errno,
		str_error_r(errno, errbuf, sizeof(errbuf)));
	goto out_delete_evlist;
}
out_error_mem:
	fprintf(trace->output, "Not enough memory to run!\n");
	goto out_delete_evlist;

out_errno:
	fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
	goto out_delete_evlist;
}
static int trace__replay(struct trace *trace)
{
	const struct evsel_str_handler handlers[] = {
		{ "probe:vfs_getname",	     trace__vfs_getname, },
	};
	struct perf_data data = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = trace->force,
	};
	struct perf_session *session;
	struct evsel *evsel;
	int err = -1;

	trace->tool.sample	  = trace__process_sample;
	trace->tool.mmap	  = perf_event__process_mmap;
	trace->tool.mmap2	  = perf_event__process_mmap2;
	trace->tool.comm	  = perf_event__process_comm;
	trace->tool.exit	  = perf_event__process_exit;
	trace->tool.fork	  = perf_event__process_fork;
	trace->tool.attr	  = perf_event__process_attr;
	trace->tool.tracing_data  = perf_event__process_tracing_data;
	trace->tool.build_id	  = perf_event__process_build_id;
	trace->tool.namespaces	  = perf_event__process_namespaces;

	trace->tool.ordered_events = true;
	trace->tool.ordering_requires_timestamps = true;

	/* add tid to output */
	trace->multiple_threads = true;

	session = perf_session__new(&data, false, &trace->tool);
	if (session == NULL)
		return -1;

	if (trace->opts.target.pid)
		symbol_conf.pid_list_str = strdup(trace->opts.target.pid);

	if (trace->opts.target.tid)
		symbol_conf.tid_list_str = strdup(trace->opts.target.tid);

	if (symbol__init(&session->header.env) < 0)
		goto out;

	trace->host = &session->machines.host;

	err = perf_session__set_tracepoints_handlers(session, handlers);
	if (err)
		goto out;

	evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
						     "raw_syscalls:sys_enter");
	/* older kernels have syscalls tp versus raw_syscalls */
	if (evsel == NULL)
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
							     "syscalls:sys_enter");

	if (evsel &&
	    (perf_evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 ||
	    perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
		pr_err("Error during initialize raw_syscalls:sys_enter event\n");
		goto out;
	}

	evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
						     "raw_syscalls:sys_exit");
	if (evsel == NULL)
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
							     "syscalls:sys_exit");
	if (evsel &&
	    (perf_evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 ||
	    perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
		pr_err("Error during initialize raw_syscalls:sys_exit event\n");
		goto out;
	}

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_SOFTWARE &&
		    (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
		     evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
		     evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS))
			evsel->handler = trace__pgfault;
	}

	setup_pager();

	err = perf_session__process_events(session);
	if (err)
		pr_err("Failed to process events, error %d", err);

	else if (trace->summary)
		trace__fprintf_thread_summary(trace, trace->output);

out:
	perf_session__delete(session);

	return err;
}
static size_t trace__fprintf_threads_header(FILE *fp)
{
	size_t printed;

	printed  = fprintf(fp, "\n Summary of events:\n\n");

	return printed;
}

DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
	struct stats	*stats;
	double		msecs;
	int		syscall;
)
{
	struct int_node *source = rb_entry(nd, struct int_node, rb_node);
	struct stats *stats = source->priv;

	entry->syscall = source->i;
	entry->stats   = stats;
	entry->msecs   = stats ? (u64)stats->n * (avg_stats(stats) / NSEC_PER_MSEC) : 0;
}
static size_t thread__dump_stats(struct thread_trace *ttrace,
				 struct trace *trace, FILE *fp)
{
	size_t printed = 0;
	struct syscall *sc;
	struct rb_node *nd;
	DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);

	if (syscall_stats == NULL)
		return 0;

	printed += fprintf(fp, "\n");

	printed += fprintf(fp, "   syscall            calls  total       min       avg       max      stddev\n");
	printed += fprintf(fp, "                             (msec)    (msec)    (msec)    (msec)        (%%)\n");
	printed += fprintf(fp, "   --------------- -------- --------- --------- --------- ---------     ------\n");

	resort_rb__for_each_entry(nd, syscall_stats) {
		struct stats *stats = syscall_stats_entry->stats;
		if (stats) {
			double min = (double)(stats->min) / NSEC_PER_MSEC;
			double max = (double)(stats->max) / NSEC_PER_MSEC;
			double avg = avg_stats(stats);
			double pct;
			u64 n = (u64) stats->n;

			pct = avg ? 100.0 * stddev_stats(stats)/avg : 0.0;
			avg /= NSEC_PER_MSEC;

			sc = &trace->syscalls.table[syscall_stats_entry->syscall];
			printed += fprintf(fp, "   %-15s", sc->name);
			printed += fprintf(fp, " %8" PRIu64 " %9.3f %9.3f %9.3f",
					   n, syscall_stats_entry->msecs, min, avg);
			printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
		}
	}

	resort_rb__delete(syscall_stats);
	printed += fprintf(fp, "\n\n");

	return printed;
}
static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
{
	size_t printed = 0;
	struct thread_trace *ttrace = thread__priv(thread);
	double ratio;

	if (ttrace == NULL)
		return 0;

	ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;

	printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
	printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
	printed += fprintf(fp, "%.1f%%", ratio);
	if (ttrace->pfmaj)
		printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
	if (ttrace->pfmin)
		printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
	if (trace->sched)
		printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
	else if (fputc('\n', fp) != EOF)
		++printed;

	printed += thread__dump_stats(ttrace, trace, fp);

	return printed;
}
static unsigned long thread__nr_events(struct thread_trace *ttrace)
{
	return ttrace ? ttrace->nr_events : 0;
}

DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_events(b->thread->priv)),
	struct thread *thread;
)
{
	entry->thread = rb_entry(nd, struct thread, rb_node);
}

static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
{
	size_t printed = trace__fprintf_threads_header(fp);
	struct rb_node *nd;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);

		if (threads == NULL) {
			fprintf(fp, "%s", "Error sorting output by nr_events!\n");
			return 0;
		}

		resort_rb__for_each_entry(nd, threads)
			printed += trace__fprintf_thread(fp, threads_entry->thread, trace);

		resort_rb__delete(threads);
	}
	return printed;
}
static int trace__set_duration(const struct option *opt, const char *str,
			       int unset __maybe_unused)
{
	struct trace *trace = opt->value;

	trace->duration_filter = atof(str);
	return 0;
}

static int trace__set_filter_pids_from_option(const struct option *opt, const char *str,
					      int unset __maybe_unused)
{
	int ret = -1;
	size_t i;
	struct trace *trace = opt->value;
	/*
	 * FIXME: introduce an intarray class, plain parse csv and create a
	 * { int nr, int entries[] } struct...
	 */
	struct intlist *list = intlist__new(str);

	if (list == NULL)
		return -1;

	i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
	trace->filter_pids.entries = calloc(i, sizeof(pid_t));

	if (trace->filter_pids.entries == NULL)
		goto out;

	trace->filter_pids.entries[0] = getpid();

	for (i = 1; i < trace->filter_pids.nr; ++i)
		trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;

	intlist__delete(list);
	ret = 0;
out:
	return ret;
}
static int trace__open_output(struct trace *trace, const char *filename)
{
	struct stat st;

	if (!stat(filename, &st) && st.st_size) {
		char oldname[PATH_MAX];

		scnprintf(oldname, sizeof(oldname), "%s.old", filename);
		unlink(oldname);
		rename(filename, oldname);
	}

	trace->output = fopen(filename, "w");

	return trace->output == NULL ? -errno : 0;
}
static int parse_pagefaults(const struct option *opt, const char *str,
			    int unset __maybe_unused)
{
	int *trace_pgfaults = opt->value;

	if (strcmp(str, "all") == 0)
		*trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
	else if (strcmp(str, "maj") == 0)
		*trace_pgfaults |= TRACE_PFMAJ;
	else if (strcmp(str, "min") == 0)
		*trace_pgfaults |= TRACE_PFMIN;
	else
		return -1;

	return 0;
}
static void evlist__set_evsel_handler(struct evlist *evlist, void *handler)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel->handler = handler;
}

static int evlist__set_syscall_tp_fields(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->priv || !evsel->tp_format)
			continue;

		if (strcmp(evsel->tp_format->system, "syscalls"))
			continue;

		if (perf_evsel__init_syscall_tp(evsel))
			return -1;

		if (!strncmp(evsel->tp_format->name, "sys_enter_", 10)) {
			struct syscall_tp *sc = evsel->priv;

			if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)))
				return -1;
		} else if (!strncmp(evsel->tp_format->name, "sys_exit_", 9)) {
			struct syscall_tp *sc = evsel->priv;

			if (__tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap))
				return -1;
		}
	}

	return 0;
}
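/*
 * Illustrative note: the sc->id.offset + sizeof(u64) arithmetic assumes
 * the syscalls:sys_{enter,exit}_* raw payload layout, where the syscall
 * id field (4-byte __syscall_nr, padded to 8) is immediately followed by
 * the named args (enter) or the return value (exit).
 */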
/*
 * XXX: Hackish, just splitting the combined -e+--event (syscalls
 * (raw_syscalls:{sys_{enter,exit}} + events (tracepoints, HW, SW, etc) to use
 * existing facilities unchanged (trace->ev_qualifier + parse_options()).
 *
 * It'd be better to introduce a parse_options() variant that would return a
 * list with the terms it didn't match to an event...
 */
static int trace__parse_events_option(const struct option *opt, const char *str,
				      int unset __maybe_unused)
{
	struct trace *trace = (struct trace *)opt->value;
	const char *s = str;
	char *sep = NULL, *lists[2] = { NULL, NULL, };
	int len = strlen(str) + 1, err = -1, list, idx;
	char *strace_groups_dir = system_path(STRACE_GROUPS_DIR);
	char group_name[PATH_MAX];
	struct syscall_fmt *fmt;

	if (strace_groups_dir == NULL)
		return -1;

	if (*s == '!') {
		++s;
		trace->not_ev_qualifier = true;
	}

	while (1) {
		if ((sep = strchr(s, ',')) != NULL)
			*sep = '\0';

		list = 0;
		if (syscalltbl__id(trace->sctbl, s) >= 0 ||
		    syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
			list = 1;
			goto do_concat;
		}

		fmt = syscall_fmt__find_by_alias(s);
		if (fmt != NULL) {
			list = 1;
			s = fmt->name;
		} else {
			path__join(group_name, sizeof(group_name), strace_groups_dir, s);
			if (access(group_name, R_OK) == 0)
				list = 1;
		}
do_concat:
		if (lists[list]) {
			sprintf(lists[list] + strlen(lists[list]), ",%s", s);
		} else {
			lists[list] = malloc(len);
			if (lists[list] == NULL)
				goto out;
			strcpy(lists[list], s);
		}

		if (!sep)
			break;

		*sep = ',';
		s = sep + 1;
	}

	if (lists[1] != NULL) {
		struct strlist_config slist_config = {
			.dirname = strace_groups_dir,
		};

		trace->ev_qualifier = strlist__new(lists[1], &slist_config);
		if (trace->ev_qualifier == NULL) {
			fputs("Not enough memory to parse event qualifier", trace->output);
			goto out;
		}

		if (trace__validate_ev_qualifier(trace))
			goto out;
		trace->trace_syscalls = true;
	}

	err = 0;

	if (lists[0]) {
		struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
					       "event selector. use 'perf list' to list available events",
					       parse_events_option);
		err = parse_events_option(&o, lists[0], 0);
	}
out:
	if (sep)
		*sep = ',';

	return err;
}
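/*
 * Illustrative note: e.g. "perf trace -e open*,sched:sched_switch" lands
 * "open*" in lists[1] (syscall qualifier: it matches the syscall table)
 * and "sched:sched_switch" in lists[0], which is then handed to the
 * stock parse_events_option() above.
 */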
static int trace__parse_cgroups(const struct option *opt, const char *str, int unset)
{
	struct trace *trace = opt->value;

	if (!list_empty(&trace->evlist->core.entries))
		return parse_cgroups(opt, str, unset);

	trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);

	return 0;
}
static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name)
{
	if (trace->bpf_obj == NULL)
		return NULL;

	return bpf_object__find_map_by_name(trace->bpf_obj, name);
}

static void trace__set_bpf_map_filtered_pids(struct trace *trace)
{
	trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered");
}

static void trace__set_bpf_map_syscalls(struct trace *trace)
{
	trace->syscalls.map = trace__find_bpf_map_by_name(trace, "syscalls");
	trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter");
	trace->syscalls.prog_array.sys_exit  = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit");
}
static int trace__config(const char *var, const char *value, void *arg)
{
	struct trace *trace = arg;
	int err = 0;

	if (!strcmp(var, "trace.add_events")) {
		struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
					       "event selector. use 'perf list' to list available events",
					       parse_events_option);
		/*
		 * We can't propagate parse_event_option() return, as it is 1
		 * for failure while perf_config() expects -1.
		 */
		if (parse_events_option(&o, value, 0))
			err = -1;
	} else if (!strcmp(var, "trace.show_timestamp")) {
		trace->show_tstamp = perf_config_bool(var, value);
	} else if (!strcmp(var, "trace.show_duration")) {
		trace->show_duration = perf_config_bool(var, value);
	} else if (!strcmp(var, "trace.show_arg_names")) {
		trace->show_arg_names = perf_config_bool(var, value);
		if (!trace->show_arg_names)
			trace->show_zeros = true;
	} else if (!strcmp(var, "trace.show_zeros")) {
		bool new_show_zeros = perf_config_bool(var, value);
		if (!trace->show_arg_names && !new_show_zeros) {
			pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n");
			goto out;
		}
		trace->show_zeros = new_show_zeros;
	} else if (!strcmp(var, "trace.show_prefix")) {
		trace->show_string_prefix = perf_config_bool(var, value);
	} else if (!strcmp(var, "trace.no_inherit")) {
		trace->opts.no_inherit = perf_config_bool(var, value);
	} else if (!strcmp(var, "trace.args_alignment")) {
		int args_alignment = 0;
		if (perf_config_int(&args_alignment, var, value) == 0)
			trace->args_alignment = args_alignment;
	}
out:
	return err;
}
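/*
 * Illustrative note: these knobs map to a ~/.perfconfig section such as:
 *
 *   [trace]
 *	show_duration = no
 *	args_alignment = 40
 *	add_events = probe:vfs_getname
 *
 * perf_config() hands keys in "section.name" form, hence the "trace."
 * prefix in the strcmp()s above.
 */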
4065 int cmd_trace(int argc
, const char **argv
)
4067 const char *trace_usage
[] = {
4068 "perf trace [<options>] [<command>]",
4069 "perf trace [<options>] -- <command> [<options>]",
4070 "perf trace record [<options>] [<command>]",
4071 "perf trace record [<options>] -- <command> [<options>]",
4074 struct trace trace
= {
4080 .user_freq
= UINT_MAX
,
4081 .user_interval
= ULLONG_MAX
,
4082 .no_buffering
= true,
4083 .mmap_pages
= UINT_MAX
,
4087 .show_tstamp
= true,
4088 .show_duration
= true,
4089 .show_arg_names
= true,
4090 .args_alignment
= 70,
4091 .trace_syscalls
= false,
4092 .kernel_syscallchains
= false,
4093 .max_stack
= UINT_MAX
,
4094 .max_events
= ULONG_MAX
,
4096 const char *map_dump_str
= NULL
;
4097 const char *output_name
= NULL
;
	const struct option trace_options[] = {
	OPT_CALLBACK('e', "event", &trace, "event",
		     "event/syscall selector. use 'perf list' to list available events",
		     trace__parse_events_option),
	OPT_BOOLEAN(0, "comm", &trace.show_comm,
		    "show the thread COMM next to its id"),
	OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
	OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
		     trace__parse_events_option),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
	OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
		   "trace events on existing process id"),
	OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
		   "trace events on existing thread id"),
	OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
		     "pids to filter (by the kernel)", trace__set_filter_pids_from_option),
	OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
		     "number of mmap data pages", perf_evlist__parse_mmap_pages),
	OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
		   "user to profile"),
	OPT_CALLBACK(0, "duration", &trace, "float",
		     "show only events with duration > N.M ms", trace__set_duration),
#ifdef HAVE_LIBBPF_SUPPORT
	OPT_STRING(0, "map-dump", &map_dump_str, "BPF map", "BPF map to periodically dump"),
#endif
	OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
	OPT_INCR('v', "verbose", &verbose, "be more verbose"),
	OPT_BOOLEAN('T', "time", &trace.full_time,
		    "Show full timestamp, not time relative to first start"),
	OPT_BOOLEAN(0, "failure", &trace.failure_only,
		    "Show only syscalls that failed"),
	OPT_BOOLEAN('s', "summary", &trace.summary_only,
		    "Show only syscall summary with statistics"),
	OPT_BOOLEAN('S', "with-summary", &trace.summary,
		    "Show all syscalls and summary with statistics"),
	OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
			     "Trace pagefaults", parse_pagefaults, "maj"),
	OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
	OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
	OPT_CALLBACK(0, "call-graph", &trace.opts,
		     "record_mode[,record_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
		    "Show the kernel callchains on the syscall exit path"),
	OPT_ULONG(0, "max-events", &trace.max_events,
		  "Set the maximum number of events to print, exit after that is reached. "),
	OPT_UINTEGER(0, "min-stack", &trace.min_stack,
		     "Set the minimum stack depth when parsing the callchain, "
		     "anything below the specified depth will be ignored."),
	OPT_UINTEGER(0, "max-stack", &trace.max_stack,
		     "Set the maximum stack depth when parsing the callchain, "
		     "anything beyond the specified depth will be ignored. "
		     "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
	OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
		    "Sort batch of events before processing, use if getting out of order events"),
	OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
		    "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"),
	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
		     "per thread proc mmap processing timeout in ms"),
	OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
		     trace__parse_cgroups),
	OPT_UINTEGER('D', "delay", &trace.opts.initial_delay,
		     "ms to wait before starting measurement after program start"),
	OPTS_EVSWITCH(&trace.evswitch),
	OPT_END()
	};
	bool __maybe_unused max_stack_user_set = true;
	bool mmap_pages_user_set = true;
	struct evsel *evsel;
	const char * const trace_subcommands[] = { "record", NULL };
	int err = -1;
	char bf[BUFSIZ];

	signal(SIGSEGV, sighandler_dump_stack);
	signal(SIGFPE, sighandler_dump_stack);

	trace.evlist = evlist__new();
	trace.sctbl = syscalltbl__new();

	if (trace.evlist == NULL || trace.sctbl == NULL) {
		pr_err("Not enough memory to run!\n");
		err = -ENOMEM;
		goto out;
	}

	/*
	 * Parsing .perfconfig may entail creating a BPF event, that may need
	 * to create BPF maps, so bump RLIM_MEMLOCK as the default 64K setting
	 * is too small. This affects just this process, not touching the
	 * global setting. If it fails we'll get something in 'perf trace -v'
	 * to help diagnose the problem.
	 */
	rlimit__bump_memlock();
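
	/*
	 * A minimal sketch of what such a bump amounts to (an illustration
	 * only: the real helper lives in util/rlimit.c and may use a
	 * different growth policy than the one shown here):
	 *
	 *	struct rlimit rlim;
	 *
	 *	if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
	 *		rlim.rlim_cur *= 4;
	 *		rlim.rlim_max *= 4;
	 *		setrlimit(RLIMIT_MEMLOCK, &rlim);
	 *	}
	 */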

	err = perf_config(trace__config, &trace);
	if (err)
		goto out;

	argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
					trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
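
	/*
	 * Usage note (an illustration, not code from this file):
	 * PARSE_OPT_STOP_AT_NON_OPTION stops option parsing at the first
	 * non-option word, so the traced workload keeps its own options:
	 *
	 *	perf trace -s -- ls -l	# "-l" goes to ls, not to perf trace
	 */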

	if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
		usage_with_options_msg(trace_usage, trace_options,
				       "cgroup monitoring only available in system-wide mode");
	}

	evsel = bpf__setup_output_event(trace.evlist, "__augmented_syscalls__");
	if (IS_ERR(evsel)) {
		bpf__strerror_setup_output_event(trace.evlist, PTR_ERR(evsel), bf, sizeof(bf));
		pr_err("ERROR: Setup trace syscalls enter failed: %s\n", bf);
		goto out;
	}

	if (evsel) {
		trace.syscalls.events.augmented = evsel;

		evsel = perf_evlist__find_tracepoint_by_name(trace.evlist, "raw_syscalls:sys_enter");
		if (evsel == NULL) {
			pr_err("ERROR: raw_syscalls:sys_enter not found in the augmented BPF object\n");
			goto out;
		}

		if (evsel->bpf_obj == NULL) {
			pr_err("ERROR: raw_syscalls:sys_enter not associated to a BPF object\n");
			goto out;
		}

		trace.bpf_obj = evsel->bpf_obj;

		trace__set_bpf_map_filtered_pids(&trace);
		trace__set_bpf_map_syscalls(&trace);
		trace.syscalls.unaugmented_prog = trace__find_bpf_program_by_title(&trace, "!raw_syscalls:unaugmented");
	}

	err = bpf__setup_stdout(trace.evlist);
	if (err) {
		bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
		pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
		goto out;
	}

	err = -1;

	if (map_dump_str) {
		trace.dump.map = trace__find_bpf_map_by_name(&trace, map_dump_str);
		if (trace.dump.map == NULL) {
			pr_err("ERROR: BPF map \"%s\" not found\n", map_dump_str);
			goto out;
		}
	}

	if (trace.trace_pgfaults) {
		trace.opts.sample_address = true;
		trace.opts.sample_time = true;
	}

	if (trace.opts.mmap_pages == UINT_MAX)
		mmap_pages_user_set = false;

	if (trace.max_stack == UINT_MAX) {
		trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
		max_stack_user_set = false;
	}

#ifdef HAVE_DWARF_UNWIND_SUPPORT
	if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
		record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
	}
#endif

	if (callchain_param.enabled) {
		if (!mmap_pages_user_set && geteuid() == 0)
			trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;

		symbol_conf.use_callchain = true;
	}
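
	/*
	 * Sizing note (an assumption spelled out here, not a comment in the
	 * original): callchain samples, DWARF ones in particular, carry a
	 * user stack dump and are much larger than plain samples. So when
	 * the user did not pick an mmap size and we run as root (allowed to
	 * mlock more memory), quadruple the mlock'able default, e.g. with
	 * the common kernel.perf_event_mlock_kb of 516 and 4 KiB pages:
	 *
	 *	perf_event_mlock_kb_in_pages() * 4 == 129 * 4 == 516 pages
	 */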

	if (trace.evlist->core.nr_entries > 0) {
		evlist__set_evsel_handler(trace.evlist, trace__event_handler);
		if (evlist__set_syscall_tp_fields(trace.evlist)) {
			perror("failed to set syscalls:* tracepoint fields");
			goto out;
		}
	}

	if (trace.sort_events) {
		ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
		ordered_events__set_copy_on_queue(&trace.oe.data, true);
	}

	/*
	 * If we are augmenting syscalls, then combine what we put in the
	 * __augmented_syscalls__ BPF map with what is in the
	 * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF,
	 * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit.
	 *
	 * We'll switch to look at two BPF maps, one for sys_enter and the
	 * other for sys_exit when we start augmenting the sys_exit paths with
	 * buffers that are being copied from kernel to userspace, think 'read'
	 * syscall.
	 */
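	/*
	 * For illustration, the non-BPF pairing mentioned above boils down
	 * to (simplified; both handlers are defined earlier in this file):
	 *
	 *	raw_syscalls:sys_enter -> trace__sys_enter(): format the
	 *	entry and stash it in the per-thread ttrace->entry_str;
	 *	raw_syscalls:sys_exit  -> trace__sys_exit(): print the
	 *	stashed entry together with the return value and duration.
	 */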
	if (trace.syscalls.events.augmented) {
		evlist__for_each_entry(trace.evlist, evsel) {
			bool raw_syscalls_sys_exit = strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_exit") == 0;

			if (raw_syscalls_sys_exit) {
				trace.raw_augmented_syscalls = true;
				goto init_augmented_syscall_tp;
			}

			if (trace.syscalls.events.augmented->priv == NULL &&
			    strstr(perf_evsel__name(evsel), "syscalls:sys_enter")) {
				struct evsel *augmented = trace.syscalls.events.augmented;

				if (perf_evsel__init_augmented_syscall_tp(augmented, evsel) ||
				    perf_evsel__init_augmented_syscall_tp_args(augmented))
					goto out;
				/*
				 * Augmented is __augmented_syscalls__ BPF_OUTPUT event
				 * Above we made sure we can get from the payload the tp fields
				 * that we get from syscalls:sys_enter tracefs format file.
				 */
				augmented->handler = trace__sys_enter;
				/*
				 * Now we do the same for the *syscalls:sys_enter event so that
				 * if we handle it directly, i.e. if the BPF prog returns 0 so
				 * as not to filter it, then we'll handle it just like we would
				 * for the BPF_OUTPUT one:
				 */
				if (perf_evsel__init_augmented_syscall_tp(evsel, evsel) ||
				    perf_evsel__init_augmented_syscall_tp_args(evsel))
					goto out;
				evsel->handler = trace__sys_enter;
			}

			if (strstarts(perf_evsel__name(evsel), "syscalls:sys_exit_")) {
				struct syscall_tp *sc;
init_augmented_syscall_tp:
				if (perf_evsel__init_augmented_syscall_tp(evsel, evsel))
					goto out;
				sc = evsel->priv;
				/*
				 * For now with BPF raw_augmented we hook into
				 * raw_syscalls:sys_enter and there we get all
				 * 6 syscall args plus the tracepoint common
				 * fields and the syscall_nr (another long).
				 * So we check if that is the case and if so
				 * don't look after the sc->args_size but
				 * always after the full raw_syscalls:sys_enter
				 * payload, which is fixed.
				 *
				 * We'll revisit this later to pass
				 * sc->args_size to the BPF augmenter (now
				 * tools/perf/examples/bpf/augmented_raw_syscalls.c),
				 * so that it copies only what we need for each
				 * syscall, like what happens when we use
				 * syscalls:sys_enter_NAME, so that we reduce
				 * the kernel/userspace traffic to just what is
				 * needed for each syscall.
				 */
				if (trace.raw_augmented_syscalls)
					trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
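				/*
				 * Illustrative layout (a hypothetical struct
				 * mirroring the raw_syscalls:sys_enter
				 * tracefs format) of the fixed payload that
				 * the size computed above accounts for:
				 *
				 *	struct {
				 *		...common tp fields...  (sc->id.offset bytes)
				 *		long id;		(the "+ 1" long: syscall_nr)
				 *		unsigned long args[6];	(the "6" syscall args)
				 *	};
				 */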
				perf_evsel__init_augmented_syscall_tp_ret(evsel);
				evsel->handler = trace__sys_exit;
			}
		}
	}

	if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
		return trace__record(&trace, argc-1, &argv[1]);

	/* summary_only implies summary option, but don't overwrite summary if set */
	if (trace.summary_only)
		trace.summary = trace.summary_only;

	if (!trace.trace_syscalls && !trace.trace_pgfaults &&
	    trace.evlist->core.nr_entries == 0 /* Was --events used? */) {
		trace.trace_syscalls = true;
	}
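
	/*
	 * Usage note (an illustration, not code from this file): with no -e
	 * events and no -F, syscall tracing is the strace-like default,
	 * while naming an event suppresses it unless --syscalls is given:
	 *
	 *	perf trace ls				# traces syscalls
	 *	perf trace -e sched:sched_switch ls	# traces only that event
	 */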

	if (output_name != NULL) {
		err = trace__open_output(&trace, output_name);
		if (err < 0) {
			perror("failed to create output file");
			goto out;
		}
	}

	err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
	if (err)
		goto out_close;

	err = target__validate(&trace.opts.target);
	if (err) {
		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}

	err = target__parse_uid(&trace.opts.target);
	if (err) {
		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}

	if (!argc && target__none(&trace.opts.target))
		trace.opts.target.system_wide = true;

	if (input_name)
		err = trace__replay(&trace);
	else
		err = trace__run(&trace, argc, argv);

out_close:
	if (output_name != NULL)
		fclose(trace.output);
out:
	return err;
}