/*
 * Builtin 'trace' command:
 *
 * Display a continuously updated trace of any workload, CPU, specific PID,
 * system wide, etc.  Default format is loosely strace-like, but any other
 * event may be specified using --event.
 *
 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Initially based on the 'trace' prototype by Thomas Gleixner:
 *
 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
 */
#include <traceevent/event-parse.h>
#include <api/fs/tracing_path.h>
#include "util/bpf_map.h"
#include "util/rlimit.h"
#include "util/cgroup.h"
#include "util/color.h"
#include "util/config.h"
#include "util/debug.h"
#include "util/event.h"
#include "util/evlist.h"
#include <subcmd/exec-cmd.h>
#include "util/machine.h"
#include "util/symbol.h"
#include "util/path.h"
#include "util/session.h"
#include "util/thread.h"
#include <subcmd/parse-options.h>
#include "util/strlist.h"
#include "util/intlist.h"
#include "util/thread_map.h"
#include "util/stat.h"
#include "trace/beauty/beauty.h"
#include "trace-event.h"
#include "util/parse-events.h"
#include "util/bpf-loader.h"
#include "callchain.h"
#include "print_binary.h"
#include "syscalltbl.h"
#include "rb_resort.h"
#include <linux/err.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <sys/sysmacros.h>
#include <linux/ctype.h>
#ifndef O_CLOEXEC
# define O_CLOEXEC		02000000
#endif

#ifndef F_LINUX_SPECIFIC_BASE
# define F_LINUX_SPECIFIC_BASE	1024
#endif
struct trace {
	struct perf_tool	tool;
	struct syscalltbl	*sctbl;
	struct {
		struct syscall  *table;
		struct { // per syscall BPF_MAP_TYPE_PROG_ARRAY
			struct bpf_map  *sys_enter,
					*sys_exit;
		}		prog_array;
		struct {
			struct evsel *sys_enter,
				     *sys_exit;
		}		events;
		struct bpf_program *unaugmented_prog;
	} syscalls;
	struct record_opts	opts;
	struct evlist		*evlist;
	struct machine		*host;
	struct thread		*current;
	struct bpf_object	*bpf_obj;
	struct cgroup		*cgroup;
	u64			base_time;
	FILE			*output;
	unsigned long		nr_events;
	unsigned long		nr_events_printed;
	unsigned long		max_events;
	struct strlist		*ev_qualifier;
	struct {
		size_t		nr;
		int		*entries;
	}			ev_qualifier_ids;
	double			duration_filter;
	struct {
		u64		vfs_getname,
				proc_getname;
	} stats;
	unsigned int		max_stack;
	unsigned int		min_stack;
	int			raw_augmented_syscalls_args_size;
	bool			raw_augmented_syscalls;
	bool			fd_path_disabled;
	bool			not_ev_qualifier;
	bool			live;
	bool			multiple_threads;
	bool			summary;
	bool			summary_only;
	bool			failure_only;
	bool			show_comm;
	bool			print_sample;
	bool			show_tool_stats;
	bool			kernel_syscallchains;
	s16			args_alignment;
	bool			show_tstamp;
	bool			show_duration;
	bool			show_zeros;
	bool			show_arg_names;
	bool			show_string_prefix;
	bool			vfs_getname;
	struct {
		struct ordered_events	data;
	} oe;
};
struct tp_field {
	int offset;
	union {
		u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
		void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
	};
};
#define TP_UINT_FIELD(bits) \
static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return value;  \
}

TP_UINT_FIELD(8);
TP_UINT_FIELD(16);
TP_UINT_FIELD(32);
TP_UINT_FIELD(64);

#define TP_UINT_FIELD__SWAPPED(bits) \
static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return bswap_##bits(value);\
}

TP_UINT_FIELD__SWAPPED(16);
TP_UINT_FIELD__SWAPPED(32);
TP_UINT_FIELD__SWAPPED(64);
static int __tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap)
{
	field->offset = offset;

	switch (size) {
	case 1:
		field->integer = tp_field__u8;
		break;
	case 2:
		field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
		break;
	case 4:
		field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
		break;
	case 8:
		field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
		break;
	default:
		return -1;
	}

	return 0;
}
static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap)
{
	return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap);
}
static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
{
	return sample->raw_data + field->offset;
}
static int __tp_field__init_ptr(struct tp_field *field, int offset)
{
	field->offset = offset;
	field->pointer = tp_field__ptr;
	return 0;
}
static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field)
{
	return __tp_field__init_ptr(field, format_field->offset);
}
struct syscall_tp {
	struct tp_field id;
	union {
		struct tp_field args, ret;
	};
};
static int perf_evsel__init_tp_uint_field(struct evsel *evsel,
					  struct tp_field *field,
					  const char *name)
{
	struct tep_format_field *format_field = perf_evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_uint(field, format_field, evsel->needs_swap);
}

#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
	({ struct syscall_tp *sc = evsel->priv;\
	   perf_evsel__init_tp_uint_field(evsel, &sc->name, #name); })
static int perf_evsel__init_tp_ptr_field(struct evsel *evsel,
					 struct tp_field *field,
					 const char *name)
{
	struct tep_format_field *format_field = perf_evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_ptr(field, format_field);
}

#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
	({ struct syscall_tp *sc = evsel->priv;\
	   perf_evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
static void perf_evsel__delete_priv(struct evsel *evsel)
{
	zfree(&evsel->priv);
	perf_evsel__delete(evsel);
}
static int perf_evsel__init_syscall_tp(struct evsel *evsel)
{
	struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp));

	if (evsel->priv != NULL) {
		if (perf_evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
		    perf_evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
			return -ENOENT;
		return 0;
	}

	return -ENOMEM;
}
static int perf_evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
{
	struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp));

	if (evsel->priv != NULL) {
		struct tep_format_field *syscall_id = perf_evsel__field(tp, "id");
		if (syscall_id == NULL)
			syscall_id = perf_evsel__field(tp, "__syscall_nr");
		if (syscall_id == NULL)
			goto out_delete;
		if (__tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
			goto out_delete;

		return 0;
	}

	return -ENOMEM;
out_delete:
	zfree(&evsel->priv);
	return -EINVAL;
}
static int perf_evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
{
	struct syscall_tp *sc = evsel->priv;

	return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
}
static int perf_evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
{
	struct syscall_tp *sc = evsel->priv;

	return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
}
static int perf_evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
{
	evsel->priv = malloc(sizeof(struct syscall_tp));
	if (evsel->priv != NULL) {
		if (perf_evsel__init_sc_tp_uint_field(evsel, id))
			goto out_delete;

		evsel->handler = handler;
		return 0;
	}

	return -ENOMEM;

out_delete:
	zfree(&evsel->priv);
	return -ENOENT;
}
static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
{
	struct evsel *evsel = perf_evsel__newtp("raw_syscalls", direction);

	/* older kernel (e.g., RHEL6) use syscalls:{enter,exit} */
	if (IS_ERR(evsel))
		evsel = perf_evsel__newtp("syscalls", direction);

	if (IS_ERR(evsel))
		return NULL;

	if (perf_evsel__init_raw_syscall_tp(evsel, handler))
		goto out_delete;

	return evsel;

out_delete:
	perf_evsel__delete_priv(evsel);
	return NULL;
}
#define perf_evsel__sc_tp_uint(evsel, name, sample) \
	({ struct syscall_tp *fields = evsel->priv; \
	   fields->name.integer(&fields->name, sample); })

#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
	({ struct syscall_tp *fields = evsel->priv; \
	   fields->name.pointer(&fields->name, sample); })
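/*
 * Editor's note: a minimal usage sketch (not in the original source). With an
 * evsel whose ->priv was set up by perf_evsel__init_raw_syscall_tp(), the
 * statement-expression macros above read tracepoint fields straight out of a
 * sample:
 *
 *	int id	   = perf_evsel__sc_tp_uint(evsel, id, sample);
 *	void *args = perf_evsel__sc_tp_ptr(evsel, args, sample);
 *
 * exactly as done in trace__sys_enter() further down in this file.
 */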
size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
	int idx = val - sa->offset;

	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
		size_t printed = scnprintf(bf, size, intfmt, val);
		if (show_prefix)
			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
		return printed;
	}

	return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
}
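/*
 * Editor's note: an illustrative use (not in the original source) of the
 * offset handling above. With the table defined later in this file:
 *
 *	static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
 *	static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);
 *
 * a call like:
 *
 *	strarray__scnprintf(&strarray__epoll_ctl_ops, bf, size, "%d", true, 1);
 *
 * computes idx = 1 - 1 = 0 and prints "EPOLL_CTL_ADD", while an out-of-range
 * value such as 42 falls back to the raw integer followed by an
 * "EPOLL_CTL_???" marker.
 */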
static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
						const char *intfmt,
						struct syscall_arg *arg)
{
	return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val);
}

static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
}

#define SCA_STRARRAY syscall_arg__scnprintf_strarray
size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg)
{
	return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val);
}
size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
	size_t printed;
	int i;

	for (i = 0; i < sas->nr_entries; ++i) {
		struct strarray *sa = sas->entries[i];
		int idx = val - sa->offset;

		if (idx >= 0 && idx < sa->nr_entries) {
			if (sa->entries[idx] == NULL)
				break;
			return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
		}
	}

	printed = scnprintf(bf, size, intfmt, val);
	if (show_prefix)
		printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix);
	return printed;
}
size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size,
					struct syscall_arg *arg)
{
	return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val);
}
#ifndef AT_FDCWD
#define AT_FDCWD	-100
#endif
static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
					   struct syscall_arg *arg)
{
	int fd = arg->val;
	const char *prefix = "AT_FD";

	if (fd == AT_FDCWD)
		return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD");

	return syscall_arg__scnprintf_fd(bf, size, arg);
}

#define SCA_FDAT syscall_arg__scnprintf_fd_at

static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg);

#define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd
size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%#lx", arg->val);
}

size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg)
{
	if (arg->val == 0)
		return scnprintf(bf, size, "NULL");
	return syscall_arg__scnprintf_hex(bf, size, arg);
}

size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%d", arg->val);
}

size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%ld", arg->val);
}
static const char *bpf_cmd[] = {
	"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
	"MAP_GET_NEXT_KEY", "PROG_LOAD",
};
static DEFINE_STRARRAY(bpf_cmd, "BPF_");

static const char *fsmount_flags[] = {
	[1] = "CLOEXEC",
};
static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_");

#include "trace/beauty/generated/fsconfig_arrays.c"

static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_");

static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);

static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
static DEFINE_STRARRAY(itimers, "ITIMER_");

static const char *keyctl_options[] = {
	"GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
	"SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
	"INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
	"ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
	"INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
};
static DEFINE_STRARRAY(keyctl_options, "KEYCTL_");

static const char *whences[] = { "SET", "CUR", "END",
#ifdef SEEK_DATA
"DATA",
#endif
#ifdef SEEK_HOLE
"HOLE",
#endif
};
static DEFINE_STRARRAY(whences, "SEEK_");

static const char *fcntl_cmds[] = {
	"DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
	"SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64",
	"SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX",
};
static DEFINE_STRARRAY(fcntl_cmds, "F_");

static const char *fcntl_linux_specific_cmds[] = {
	"SETLEASE", "GETLEASE", "NOTIFY", [5] =	"CANCELLK", "DUPFD_CLOEXEC",
	"SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
	"GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
};

static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE);

static struct strarray *fcntl_cmds_arrays[] = {
	&strarray__fcntl_cmds,
	&strarray__fcntl_linux_specific_cmds,
};

static DEFINE_STRARRAYS(fcntl_cmds_arrays);

static const char *rlimit_resources[] = {
	"CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
	"MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
	"RTTIME",
};
static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_");

static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
static DEFINE_STRARRAY(sighow, "SIG_");

static const char *clockid[] = {
	"REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
	"MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
	"REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
};
static DEFINE_STRARRAY(clockid, "CLOCK_");
static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
						 struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *suffix = "_OK";
	size_t printed = 0;
	int mode = arg->val;

	if (mode == F_OK) /* 0 */
		return scnprintf(bf, size, "F%s", show_prefix ? suffix : "");
#define	P_MODE(n) \
	if (mode & n##_OK) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \
		mode &= ~n##_OK; \
	}

	P_MODE(R);
	P_MODE(W);
	P_MODE(X);
#undef P_MODE

	if (mode)
		printed += scnprintf(bf + printed, size - printed, "|%#x", mode);

	return printed;
}

#define SCA_ACCMODE syscall_arg__scnprintf_access_mode

static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
					      struct syscall_arg *arg);

#define SCA_FILENAME syscall_arg__scnprintf_filename
static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
						struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *prefix = "O_";
	int printed = 0, flags = arg->val;

#define	P_FLAG(n) \
	if (flags & O_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
		flags &= ~O_##n; \
	}

	P_FLAG(CLOEXEC);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags

#ifndef GRND_NONBLOCK
#define GRND_NONBLOCK	0x0001
#endif
#ifndef GRND_RANDOM
#define GRND_RANDOM	0x0002
#endif
static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
						     struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *prefix = "GRND_";
	int printed = 0, flags = arg->val;

#define	P_FLAG(n) \
	if (flags & GRND_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
		flags &= ~GRND_##n; \
	}

	P_FLAG(RANDOM);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags
#define STRARRAY(name, array) \
	  { .scnprintf	= SCA_STRARRAY, \
	    .parm	= &strarray__##array, }

#define STRARRAY_FLAGS(name, array) \
	  { .scnprintf	= SCA_STRARRAY_FLAGS, \
	    .parm	= &strarray__##array, }
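/*
 * Editor's note: illustrative expansion (not in the original source).
 * STRARRAY(cmd, bpf_cmd) in a syscall_fmt .arg initializer expands to:
 *
 *	{ .scnprintf = SCA_STRARRAY, .parm = &strarray__bpf_cmd, }
 *
 * i.e. the generic strarray beautifier is wired to a specific table via
 * .parm, which syscall__scnprintf_val() copies into arg->parm before
 * invoking it.
 */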
#include "trace/beauty/arch_errno_names.c"
#include "trace/beauty/eventfd.c"
#include "trace/beauty/futex_op.c"
#include "trace/beauty/futex_val3.c"
#include "trace/beauty/mmap.c"
#include "trace/beauty/mode_t.c"
#include "trace/beauty/msg_flags.c"
#include "trace/beauty/open_flags.c"
#include "trace/beauty/perf_event_open.c"
#include "trace/beauty/pid.c"
#include "trace/beauty/sched_policy.c"
#include "trace/beauty/seccomp.c"
#include "trace/beauty/signum.c"
#include "trace/beauty/socket_type.c"
#include "trace/beauty/waitid_options.c"
struct syscall_arg_fmt {
	size_t	   (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
	void	   *parm;
	const char *name;
	bool	   show_zero;
};

static struct syscall_fmt {
	const char *name;
	const char *alias;
	struct {
		const char *sys_enter,
			   *sys_exit;
	}	   bpf_prog_name;
	struct syscall_arg_fmt arg[6];
	u8	   nr_args;
	bool	   errpid;
	bool	   timeout;
	bool	   hexret;
} syscall_fmts[] = {
	{ .name	    = "access",
	  .arg = { [1] = { .scnprintf = SCA_ACCMODE,  /* mode */ }, }, },
	{ .name	    = "arch_prctl",
	  .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ },
		   [1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, },
	{ .name	    = "bind",
	  .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
		   [1] = { .scnprintf = SCA_SOCKADDR, /* umyaddr */ },
		   [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
	{ .name	    = "bpf",
	  .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, },
	{ .name	    = "brk",	    .hexret = true,
	  .arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, },
	{ .name     = "clock_gettime",
	  .arg = { [0] = STRARRAY(clk_id, clockid), }, },
	{ .name	    = "clone",	    .errpid = true, .nr_args = 5,
	  .arg = { [0] = { .name = "flags",	    .scnprintf = SCA_CLONE_FLAGS, },
		   [1] = { .name = "child_stack",   .scnprintf = SCA_HEX, },
		   [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, },
		   [3] = { .name = "child_tidptr",  .scnprintf = SCA_HEX, },
		   [4] = { .name = "tls",	    .scnprintf = SCA_HEX, }, }, },
	{ .name	    = "close",
	  .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, },
	{ .name	    = "connect",
	  .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
		   [1] = { .scnprintf = SCA_SOCKADDR, /* servaddr */ },
		   [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
	{ .name	    = "epoll_ctl",
	  .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, },
	{ .name	    = "eventfd2",
	  .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, },
	{ .name	    = "fchmodat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "fchownat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "fcntl",
	  .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD,  /* cmd */
			   .parm      = &strarrays__fcntl_cmds_arrays,
			   .show_zero = true, },
		   [2] = { .scnprintf =  SCA_FCNTL_ARG, /* arg */ }, }, },
	{ .name	    = "flock",
	  .arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, },
	{ .name	    = "fsconfig",
	  .arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, },
	{ .name	    = "fsmount",
	  .arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags),
		   [2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, },
	{ .name     = "fspick",
	  .arg	    = { [0] = { .scnprintf = SCA_FDAT,		/* dfd */ },
			[1] = { .scnprintf = SCA_FILENAME,	/* path */ },
			[2] = { .scnprintf = SCA_FSPICK_FLAGS,	/* flags */ }, }, },
	{ .name	    = "fstat", .alias = "newfstat", },
	{ .name	    = "fstatat", .alias = "newfstatat", },
	{ .name	    = "futex",
	  .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ },
		   [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, },
	{ .name	    = "futimesat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "getitimer",
	  .arg = { [0] = STRARRAY(which, itimers), }, },
	{ .name	    = "getpid",	    .errpid = true, },
	{ .name	    = "getpgid",    .errpid = true, },
	{ .name	    = "getppid",    .errpid = true, },
	{ .name	    = "getrandom",
	  .arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
	{ .name	    = "getrlimit",
	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
	{ .name	    = "gettid",	    .errpid = true, },
	{ .name	    = "ioctl",
	  .arg = {
#if defined(__i386__) || defined(__x86_64__)
/*
 * FIXME: Make this available to all arches.
 */
		   [1] = { .scnprintf = SCA_IOCTL_CMD, /* cmd */ },
		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
#else
		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
#endif
	{ .name	    = "kcmp",	    .nr_args = 5,
	  .arg = { [0] = { .name = "pid1",	.scnprintf = SCA_PID, },
		   [1] = { .name = "pid2",	.scnprintf = SCA_PID, },
		   [2] = { .name = "type",	.scnprintf = SCA_KCMP_TYPE, },
		   [3] = { .name = "idx1",	.scnprintf = SCA_KCMP_IDX, },
		   [4] = { .name = "idx2",	.scnprintf = SCA_KCMP_IDX, }, }, },
	{ .name	    = "keyctl",
	  .arg = { [0] = STRARRAY(option, keyctl_options), }, },
	{ .name	    = "kill",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "linkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "lseek",
	  .arg = { [2] = STRARRAY(whence, whences), }, },
	{ .name	    = "lstat", .alias = "newlstat", },
	{ .name     = "madvise",
	  .arg = { [0] = { .scnprintf = SCA_HEX,      /* start */ },
		   [2] = { .scnprintf = SCA_MADV_BHV, /* behavior */ }, }, },
	{ .name	    = "mkdirat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "mknodat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "mmap",	    .hexret = true,
/* The standard mmap maps to old_mmap on s390x */
#if defined(__s390x__)
	.alias = "old_mmap",
#endif
	  .arg = { [2] = { .scnprintf = SCA_MMAP_PROT,	/* prot */ },
		   [3] = { .scnprintf = SCA_MMAP_FLAGS,	/* flags */ },
		   [5] = { .scnprintf = SCA_HEX,	/* offset */ }, }, },
	{ .name	    = "mount",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ },
		   [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
			   .mask_val  = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
	{ .name	    = "move_mount",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* from_dfd */ },
		   [1] = { .scnprintf = SCA_FILENAME, /* from_pathname */ },
		   [2] = { .scnprintf = SCA_FDAT,	/* to_dfd */ },
		   [3] = { .scnprintf = SCA_FILENAME, /* to_pathname */ },
		   [4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, },
	{ .name	    = "mprotect",
	  .arg = { [0] = { .scnprintf = SCA_HEX,	/* start */ },
		   [2] = { .scnprintf = SCA_MMAP_PROT,	/* prot */ }, }, },
	{ .name	    = "mq_unlink",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* u_name */ }, }, },
	{ .name	    = "mremap",	    .hexret = true,
	  .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, },
	{ .name	    = "name_to_handle_at",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "newfstatat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "open",
	  .arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name	    = "open_by_handle_at",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name	    = "openat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name	    = "perf_event_open",
	  .arg = { [2] = { .scnprintf = SCA_INT,	/* cpu */ },
		   [3] = { .scnprintf = SCA_FD,		/* group_fd */ },
		   [4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
	{ .name	    = "pipe2",
	  .arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, },
	{ .name	    = "pkey_alloc",
	  .arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS,	/* access_rights */ }, }, },
	{ .name	    = "pkey_free",
	  .arg = { [0] = { .scnprintf = SCA_INT,	/* key */ }, }, },
	{ .name	    = "pkey_mprotect",
	  .arg = { [0] = { .scnprintf = SCA_HEX,	/* start */ },
		   [2] = { .scnprintf = SCA_MMAP_PROT,	/* prot */ },
		   [3] = { .scnprintf = SCA_INT,	/* pkey */ }, }, },
	{ .name	    = "poll", .timeout = true, },
	{ .name	    = "ppoll", .timeout = true, },
	{ .name	    = "prctl",
	  .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */ },
		   [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
		   [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
	{ .name	    = "pread", .alias = "pread64", },
	{ .name	    = "preadv", .alias = "pread", },
	{ .name	    = "prlimit64",
	  .arg = { [1] = STRARRAY(resource, rlimit_resources), }, },
	{ .name	    = "pwrite", .alias = "pwrite64", },
	{ .name	    = "readlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "recvfrom",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "recvmmsg",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "recvmsg",
	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "renameat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, }, },
	{ .name	    = "renameat2",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
		   [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
	{ .name	    = "rt_sigaction",
	  .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "rt_sigprocmask",
	  .arg = { [0] = STRARRAY(how, sighow), }, },
	{ .name	    = "rt_sigqueueinfo",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "rt_tgsigqueueinfo",
	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "sched_setscheduler",
	  .arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, },
	{ .name	    = "seccomp",
	  .arg = { [0] = { .scnprintf = SCA_SECCOMP_OP,	   /* op */ },
		   [1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, },
	{ .name	    = "select", .timeout = true, },
	{ .name	    = "sendfile", .alias = "sendfile64", },
	{ .name	    = "sendmmsg",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "sendmsg",
	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "sendto",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
		   [4] = { .scnprintf = SCA_SOCKADDR, /* addr */ }, }, },
	{ .name	    = "set_tid_address", .errpid = true, },
	{ .name	    = "setitimer",
	  .arg = { [0] = STRARRAY(which, itimers), }, },
	{ .name	    = "setrlimit",
	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
	{ .name	    = "socket",
	  .arg = { [0] = STRARRAY(family, socket_families),
		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
	{ .name	    = "socketpair",
	  .arg = { [0] = STRARRAY(family, socket_families),
		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
	{ .name	    = "stat", .alias = "newstat", },
	{ .name	    = "statx",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	 /* fdat */ },
		   [2] = { .scnprintf = SCA_STATX_FLAGS, /* flags */ } ,
		   [3] = { .scnprintf = SCA_STATX_MASK,	 /* mask */ }, }, },
	{ .name	    = "swapoff",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
	{ .name	    = "swapon",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
	{ .name	    = "symlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "sync_file_range",
	  .arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, },
	{ .name	    = "tgkill",
	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "tkill",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "umount2", .alias = "umount",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* name */ }, }, },
	{ .name	    = "uname", .alias = "newuname", },
	{ .name	    = "unlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "utimensat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, },
	{ .name	    = "wait4",	    .errpid = true,
	  .arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
	{ .name	    = "waitid",	    .errpid = true,
	  .arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
};
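/*
 * Editor's note: a minimal sketch (not in the original source) of how the
 * table above is consumed: entries are looked up by name (see
 * syscall_fmt__find() just below) and their sparse, designated-initializer
 * .arg slots override the per-argument formatters. The helper is
 * hypothetical, for illustration only:
 */
#if 0
static void show_custom_beautifiers(const char *name)
{
	struct syscall_fmt *fmt = syscall_fmt__find(name);
	int i;

	if (fmt == NULL)
		return;

	for (i = 0; i < 6; i++) {
		if (fmt->arg[i].scnprintf)
			printf("%s: arg %d has a custom beautifier\n", name, i);
	}
}
#endif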
static int syscall_fmt__cmp(const void *name, const void *fmtp)
{
	const struct syscall_fmt *fmt = fmtp;
	return strcmp(name, fmt->name);
}
static struct syscall_fmt *syscall_fmt__find(const char *name)
{
	const int nmemb = ARRAY_SIZE(syscall_fmts);
	return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
}
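/*
 * Editor's note (not in the original source): bsearch() here relies on
 * syscall_fmts[] being kept sorted by .name, which is why the entries in the
 * table above appear in alphabetical order. Typical use:
 *
 *	struct syscall_fmt *fmt = syscall_fmt__find("openat");
 *
 * returns the entry wiring SCA_FDAT/SCA_OPEN_FLAGS to openat's arguments.
 */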
static struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
{
	int i, nmemb = ARRAY_SIZE(syscall_fmts);

	for (i = 0; i < nmemb; ++i) {
		if (syscall_fmts[i].alias && strcmp(syscall_fmts[i].alias, alias) == 0)
			return &syscall_fmts[i];
	}

	return NULL;
}
/*
 * is_exit: is this "exit" or "exit_group"?
 * is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter.
 * args_size: sum of the sizes of the syscall arguments, anything after that is augmented stuff: pathname for openat, etc.
 * nonexistent: Just a hole in the syscall table, syscall id not allocated
 */
struct syscall {
	struct tep_event    *tp_format;
	int		    nr_args;
	int		    args_size;
	struct {
		struct bpf_program *sys_enter,
				   *sys_exit;
	}		    bpf_prog;
	bool		    is_exit;
	bool		    is_open;
	bool		    nonexistent;
	struct tep_format_field *args;
	const char	    *name;
	struct syscall_fmt  *fmt;
	struct syscall_arg_fmt *arg_fmt;
};
/*
 * Must match what is in the BPF program:
 *
 * tools/perf/examples/bpf/augmented_raw_syscalls.c
 */
struct bpf_map_syscall_entry {
	bool	enabled;
	u16	string_args_len[6];
};
/*
 * We need to have this 'calculated' boolean because in some cases we really
 * don't know what is the duration of a syscall, for instance, when we start
 * a session and some threads are waiting for a syscall to finish, say 'poll',
 * in which case all we can do is to print "( ? ) for duration and for the
 * timestamp.
 */
static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
{
	double duration = (double)t / NSEC_PER_MSEC;
	size_t printed = fprintf(fp, "(");

	if (!calculated)
		printed += fprintf(fp, "         ");
	else if (duration >= 1.0)
		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
	else if (duration >= 0.01)
		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
	else
		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
	return printed + fprintf(fp, "): ");
}
/**
 * filename.ptr: The filename char pointer that will be vfs_getname'd
 * filename.entry_str_pos: Where to insert the string translated from
 *                         filename.ptr by the vfs_getname tracepoint/kprobe.
 * ret_scnprintf: syscall args may set this to a different syscall return
 *                formatter, for instance, fcntl may return fds, file flags, etc.
 */
struct thread_trace {
	u64		  entry_time;
	bool		  entry_pending;
	unsigned long	  nr_events;
	unsigned long	  pfmaj, pfmin;
	char		  *entry_str;
	double		  runtime_ms;
	size_t		  (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	struct {
		unsigned long ptr;
		short int     entry_str_pos;
		bool	      pending_open;
		unsigned int  namelen;
		char	      *name;
	} filename;
	struct {
		int	      max;
		struct file   *table;
	} files;

	struct intlist *syscall_stats;
};
static struct thread_trace *thread_trace__new(void)
{
	struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));

	if (ttrace) {
		ttrace->files.max = -1;
		ttrace->syscall_stats = intlist__new(NULL);
	}

	return ttrace;
}
static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
{
	struct thread_trace *ttrace;

	if (thread == NULL)
		goto fail;

	if (thread__priv(thread) == NULL)
		thread__set_priv(thread, thread_trace__new());

	if (thread__priv(thread) == NULL)
		goto fail;

	ttrace = thread__priv(thread);
	++ttrace->nr_events;

	return ttrace;
fail:
	color_fprintf(fp, PERF_COLOR_RED,
		      "WARNING: not enough memory, dropping samples!\n");
	return NULL;
}
void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
				    size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg))
{
	struct thread_trace *ttrace = thread__priv(arg->thread);

	ttrace->ret_scnprintf = ret_scnprintf;
}

#define TRACE_PFMAJ		(1 << 0)
#define TRACE_PFMIN		(1 << 1)

static const size_t trace__entry_str_size = 2048;
static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
{
	if (fd < 0)
		return NULL;

	if (fd > ttrace->files.max) {
		struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));

		if (nfiles == NULL)
			return NULL;

		if (ttrace->files.max != -1) {
			memset(nfiles + ttrace->files.max + 1, 0,
			       (fd - ttrace->files.max) * sizeof(struct file));
		} else {
			memset(nfiles, 0, (fd + 1) * sizeof(struct file));
		}

		ttrace->files.table = nfiles;
		ttrace->files.max   = fd;
	}

	return ttrace->files.table + fd;
}
struct file *thread__files_entry(struct thread *thread, int fd)
{
	return thread_trace__files_entry(thread__priv(thread), fd);
}
static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
{
	struct thread_trace *ttrace = thread__priv(thread);
	struct file *file = thread_trace__files_entry(ttrace, fd);

	if (file != NULL) {
		struct stat st;
		if (stat(pathname, &st) == 0)
			file->dev_maj = major(st.st_rdev);
		file->pathname = strdup(pathname);

		if (file->pathname)
			return 0;
	}

	return -1;
}
static int thread__read_fd_path(struct thread *thread, int fd)
{
	char linkname[PATH_MAX], pathname[PATH_MAX];
	struct stat st;
	int ret;

	if (thread->pid_ == thread->tid) {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/fd/%d", thread->pid_, fd);
	} else {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
	}

	if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
		return -1;

	ret = readlink(linkname, pathname, sizeof(pathname));

	if (ret < 0 || ret > st.st_size)
		return -1;

	pathname[ret] = '\0';
	return trace__set_fd_pathname(thread, fd, pathname);
}
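/*
 * Editor's note: a self-contained sketch (not in the original source) of the
 * same /proc readlink idiom for the calling process itself, assuming only
 * POSIX interfaces:
 */
#if 0
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

static int self_fd_path(int fd, char *buf, size_t size)
{
	char link[PATH_MAX];
	ssize_t n;

	snprintf(link, sizeof(link), "/proc/self/fd/%d", fd);
	n = readlink(link, buf, size - 1);	/* readlink() does not NUL terminate */
	if (n < 0)
		return -1;
	buf[n] = '\0';
	return 0;
}
#endif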
static const char *thread__fd_path(struct thread *thread, int fd,
				   struct trace *trace)
{
	struct thread_trace *ttrace = thread__priv(thread);

	if (ttrace == NULL || trace->fd_path_disabled)
		return NULL;

	if (fd < 0)
		return NULL;

	if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) {
		if (!trace->live)
			return NULL;
		++trace->stats.proc_getname;
		if (thread__read_fd_path(thread, fd))
			return NULL;
	}

	return ttrace->files.table[fd].pathname;
}
size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = scnprintf(bf, size, "%d", fd);
	const char *path = thread__fd_path(arg->thread, fd, arg->trace);

	if (path)
		printed += scnprintf(bf + printed, size - printed, "<%s>", path);

	return printed;
}
size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
{
	size_t printed = scnprintf(bf, size, "%d", fd);
	struct thread *thread = machine__find_thread(trace->host, pid, pid);

	if (thread) {
		const char *path = thread__fd_path(thread, fd, trace);

		if (path)
			printed += scnprintf(bf + printed, size - printed, "<%s>", path);

		thread__put(thread);
	}

	return printed;
}
static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
	struct thread_trace *ttrace = thread__priv(arg->thread);

	if (ttrace && fd >= 0 && fd <= ttrace->files.max)
		zfree(&ttrace->files.table[fd].pathname);

	return printed;
}
static void thread__set_filename_pos(struct thread *thread, const char *bf,
				     unsigned long ptr)
{
	struct thread_trace *ttrace = thread__priv(thread);

	ttrace->filename.ptr = ptr;
	ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
}
static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
{
	struct augmented_arg *augmented_arg = arg->augmented.args;
	size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
	/*
	 * So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls
	 * we would have two strings, each prefixed by its size.
	 */
	int consumed = sizeof(*augmented_arg) + augmented_arg->size;

	arg->augmented.args = ((void *)arg->augmented.args) + consumed;
	arg->augmented.size -= consumed;

	return printed;
}
static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	unsigned long ptr = arg->val;

	if (arg->augmented.args)
		return syscall_arg__scnprintf_augmented_string(arg, bf, size);

	if (!arg->trace->vfs_getname)
		return scnprintf(bf, size, "%#x", ptr);

	thread__set_filename_pos(arg->thread, bf, ptr);
	return 0;
}
static bool trace__filter_duration(struct trace *trace, double t)
{
	return t < (trace->duration_filter * NSEC_PER_MSEC);
}

static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;

	return fprintf(fp, "%10.3f ", ts);
}
/*
 * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are
 * using ttrace->entry_time for a thread that receives a sys_exit without
 * first having received a sys_enter ("poll" issued before tracing session
 * starts, lost sys_enter exit due to ring buffer overflow).
 */
static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	if (tstamp > 0)
		return __trace__fprintf_tstamp(trace, tstamp, fp);

	return fprintf(fp, "         ? ");
}
static bool done = false;
static bool interrupted = false;

static void sig_handler(int sig)
{
	done = true;
	interrupted = sig == SIGINT;
}
static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
{
	size_t printed = 0;

	if (trace->multiple_threads) {
		if (trace->show_comm)
			printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
		printed += fprintf(fp, "%d ", thread->tid);
	}

	return printed;
}
static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
					u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
{
	size_t printed = 0;

	if (trace->show_tstamp)
		printed = trace__fprintf_tstamp(trace, tstamp, fp);
	if (trace->show_duration)
		printed += fprintf_duration(duration, duration_calculated, fp);
	return printed + trace__fprintf_comm_tid(trace, thread, fp);
}
static int trace__process_event(struct trace *trace, struct machine *machine,
				union perf_event *event, struct perf_sample *sample)
{
	int ret = 0;

	switch (event->header.type) {
	case PERF_RECORD_LOST:
		color_fprintf(trace->output, PERF_COLOR_RED,
			      "LOST %" PRIu64 " events!\n", event->lost.lost);
		ret = machine__process_lost_event(machine, event, sample);
		break;
	default:
		ret = machine__process_event(machine, event, sample);
		break;
	}

	return ret;
}
static int trace__tool_process(struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample,
			       struct machine *machine)
{
	struct trace *trace = container_of(tool, struct trace, tool);
	return trace__process_event(trace, machine, event, sample);
}
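/*
 * Editor's note (not in the original source): this is the usual perf idiom of
 * embedding the generic struct perf_tool inside a command-specific struct and
 * recovering the outer object in callbacks. A minimal standalone sketch of
 * the container_of() pattern:
 */
#if 0
struct outer {
	int		 priv;
	struct perf_tool tool;	/* handed out to generic code */
};

static int callback(struct perf_tool *tool)
{
	struct outer *o = container_of(tool, struct outer, tool);

	return o->priv;	/* outer state recovered from the embedded member */
}
#endif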
static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;

	if (machine->kptr_restrict_warned)
		return NULL;

	if (symbol_conf.kptr_restrict) {
		pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
			   "Check /proc/sys/kernel/kptr_restrict.\n\n"
			   "Kernel samples will not be resolved.\n");
		machine->kptr_restrict_warned = true;
		return NULL;
	}

	return machine__resolve_kernel_addr(vmachine, addrp, modp);
}
static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
{
	int err = symbol__init(NULL);

	if (err)
		return err;

	trace->host = machine__new_host();
	if (trace->host == NULL)
		return -ENOMEM;

	err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
	if (err < 0)
		goto out;

	err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
					    evlist->threads, trace__tool_process, false,
					    1);
out:
	if (err)
		symbol__exit();

	return err;
}

static void trace__symbols__exit(struct trace *trace)
{
	machine__exit(trace->host);
	trace->host = NULL;

	symbol__exit();
}
static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
{
	int idx;

	if (nr_args == 6 && sc->fmt && sc->fmt->nr_args != 0)
		nr_args = sc->fmt->nr_args;

	sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
	if (sc->arg_fmt == NULL)
		return -1;

	for (idx = 0; idx < nr_args; ++idx) {
		if (sc->fmt)
			sc->arg_fmt[idx] = sc->fmt->arg[idx];
	}

	sc->nr_args = nr_args;
	return 0;
}
static int syscall__set_arg_fmts(struct syscall *sc)
{
	struct tep_format_field *field, *last_field = NULL;
	int idx = 0, len;

	for (field = sc->args; field; field = field->next, ++idx) {
		last_field = field;

		if (sc->fmt && sc->fmt->arg[idx].scnprintf)
			continue;

		len = strlen(field->name);

		if (strcmp(field->type, "const char *") == 0 &&
		    ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
		     strstr(field->name, "path") != NULL))
			sc->arg_fmt[idx].scnprintf = SCA_FILENAME;
		else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
			sc->arg_fmt[idx].scnprintf = SCA_PTR;
		else if (strcmp(field->type, "pid_t") == 0)
			sc->arg_fmt[idx].scnprintf = SCA_PID;
		else if (strcmp(field->type, "umode_t") == 0)
			sc->arg_fmt[idx].scnprintf = SCA_MODE_T;
		else if ((strcmp(field->type, "int") == 0 ||
			  strcmp(field->type, "unsigned int") == 0 ||
			  strcmp(field->type, "long") == 0) &&
			 len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
			/*
			 * /sys/kernel/tracing/events/syscalls/sys_enter*
			 * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
			 */
			sc->arg_fmt[idx].scnprintf = SCA_FD;
		}
	}

	if (last_field)
		sc->args_size = last_field->offset + last_field->size;

	return 0;
}
static int trace__read_syscall_info(struct trace *trace, int id)
{
	char tp_name[128];
	struct syscall *sc;
	const char *name = syscalltbl__name(trace->sctbl, id);

	if (trace->syscalls.table == NULL) {
		trace->syscalls.table = calloc(trace->sctbl->syscalls.nr_entries, sizeof(*sc));
		if (trace->syscalls.table == NULL)
			return -ENOMEM;
	}

	sc = trace->syscalls.table + id;
	if (sc->nonexistent)
		return 0;

	if (name == NULL) {
		sc->nonexistent = true;
		return 0;
	}

	sc->name = name;
	sc->fmt  = syscall_fmt__find(sc->name);

	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
	sc->tp_format = trace_event__tp_format("syscalls", tp_name);

	if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
		sc->tp_format = trace_event__tp_format("syscalls", tp_name);
	}

	if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? 6 : sc->tp_format->format.nr_fields))
		return -ENOMEM;

	if (IS_ERR(sc->tp_format))
		return PTR_ERR(sc->tp_format);

	sc->args = sc->tp_format->format.fields;
	/*
	 * We need to check and discard the first field, '__syscall_nr' or
	 * 'nr', which holds the syscall number and is not a real argument,
	 * so it is needless here. The 'nr' variant exists only on older
	 * kernels.
	 */
	if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
		sc->args = sc->args->next;
		--sc->nr_args;
	}

	sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
	sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat");

	return syscall__set_arg_fmts(sc);
}
static int intcmp(const void *a, const void *b)
{
	const int *one = a, *another = b;

	return *one - *another;
}
static int trace__validate_ev_qualifier(struct trace *trace)
{
	int err = 0;
	bool printed_invalid_prefix = false;
	struct str_node *pos;
	size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);

	trace->ev_qualifier_ids.entries = malloc(nr_allocated *
						 sizeof(trace->ev_qualifier_ids.entries[0]));

	if (trace->ev_qualifier_ids.entries == NULL) {
		fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
		      trace->output);
		err = -EINVAL;
		goto out;
	}

	strlist__for_each_entry(pos, trace->ev_qualifier) {
		const char *sc = pos->s;
		int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;

		if (id < 0) {
			id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next);
			if (id >= 0)
				goto matches;

			if (!printed_invalid_prefix) {
				pr_debug("Skipping unknown syscalls: ");
				printed_invalid_prefix = true;
			} else {
				pr_debug(", ");
			}

			pr_debug("%s", sc);
			continue;
		}
matches:
		trace->ev_qualifier_ids.entries[nr_used++] = id;
		if (match_next == -1)
			continue;

		while (1) {
			id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
			if (id < 0)
				break;
			if (nr_allocated == nr_used) {
				void *entries;

				nr_allocated += 8;
				entries = realloc(trace->ev_qualifier_ids.entries,
						  nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
				if (entries == NULL) {
					err = -ENOMEM;
					fputs("\nError:\t Not enough memory for parsing\n", trace->output);
					goto out_free;
				}
				trace->ev_qualifier_ids.entries = entries;
			}
			trace->ev_qualifier_ids.entries[nr_used++] = id;
		}
	}

	trace->ev_qualifier_ids.nr = nr_used;
	qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp);
out:
	if (printed_invalid_prefix)
		pr_debug("\n");
	return err;
out_free:
	zfree(&trace->ev_qualifier_ids.entries);
	trace->ev_qualifier_ids.nr = 0;
	goto out;
}
static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id)
{
	bool in_ev_qualifier;

	if (trace->ev_qualifier_ids.nr == 0)
		return true;

	in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries,
				  trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL;

	if (in_ev_qualifier)
		return !trace->not_ev_qualifier;

	return trace->not_ev_qualifier;
}
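/*
 * Editor's note (not in the original source): the lookup above works because
 * trace__validate_ev_qualifier() qsort()s the id array with the same intcmp()
 * comparator, so bsearch() sees a sorted array. A minimal sketch of the
 * sorted-array membership idiom:
 */
#if 0
static bool id_is_present(int *ids, size_t nr, int key)
{
	/* the array must have been sorted with the same comparator */
	qsort(ids, nr, sizeof(int), intcmp);
	return bsearch(&key, ids, nr, sizeof(int), intcmp) != NULL;
}
#endif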
/*
 * args is to be interpreted as a series of longs but we need to handle
 * 8-byte unaligned accesses. args points to raw_data within the event
 * and raw_data is guaranteed to be 8-byte unaligned because it is
 * preceded by raw_size which is a u32. So we need to copy args to a temp
 * variable to read it. Most notably this avoids extended load instructions
 * on unaligned addresses.
 */
unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx)
{
	unsigned long val;
	unsigned char *p = arg->args + sizeof(unsigned long) * idx;

	memcpy(&val, p, sizeof(val));
	return val;
}
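/*
 * Editor's note: a standalone sketch (not in the original source) of the
 * memcpy-based unaligned read used above; dereferencing 'p' through an
 * unsigned long pointer instead could fault or be slow on strict-alignment
 * architectures:
 */
#if 0
static unsigned long read_unaligned_ulong(const unsigned char *p)
{
	unsigned long val;

	memcpy(&val, p, sizeof(val));	/* compiles down to a plain load where that is safe */
	return val;
}
#endif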
static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size,
				      struct syscall_arg *arg)
{
	if (sc->arg_fmt && sc->arg_fmt[arg->idx].name)
		return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name);

	return scnprintf(bf, size, "arg%d: ", arg->idx);
}
/*
 * Check if the value is in fact zero, i.e. mask whatever needs masking, such
 * as mount 'flags' argument that needs ignoring some magic flag, see comment
 * in tools/perf/trace/beauty/mount_flags.c
 */
static unsigned long syscall__mask_val(struct syscall *sc, struct syscall_arg *arg, unsigned long val)
{
	if (sc->arg_fmt && sc->arg_fmt[arg->idx].mask_val)
		return sc->arg_fmt[arg->idx].mask_val(arg, val);

	return val;
}
static size_t syscall__scnprintf_val(struct syscall *sc, char *bf, size_t size,
				     struct syscall_arg *arg, unsigned long val)
{
	if (sc->arg_fmt && sc->arg_fmt[arg->idx].scnprintf) {
		arg->val = val;
		if (sc->arg_fmt[arg->idx].parm)
			arg->parm = sc->arg_fmt[arg->idx].parm;
		return sc->arg_fmt[arg->idx].scnprintf(bf, size, arg);
	}
	return scnprintf(bf, size, "%ld", val);
}
static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
				      unsigned char *args, void *augmented_args, int augmented_args_size,
				      struct trace *trace, struct thread *thread)
{
	size_t printed = 0;
	unsigned long val;
	u8 bit = 1;
	struct syscall_arg arg = {
		.args	= args,
		.augmented = {
			.size = augmented_args_size,
			.args = augmented_args,
		},
		.idx	= 0,
		.mask	= 0,
		.trace  = trace,
		.thread = thread,
		.show_string_prefix = trace->show_string_prefix,
	};
	struct thread_trace *ttrace = thread__priv(thread);

	/*
	 * Things like fcntl will set this in its 'cmd' formatter to pick the
	 * right formatter for the return value (an fd? file flags?), which is
	 * not needed for syscalls that always return a given type, say an fd.
	 */
	ttrace->ret_scnprintf = NULL;

	if (sc->args != NULL) {
		struct tep_format_field *field;

		for (field = sc->args; field;
		     field = field->next, ++arg.idx, bit <<= 1) {
			if (arg.mask & bit)
				continue;

			val = syscall_arg__val(&arg, arg.idx);
			/*
			 * Some syscall args need some mask, most don't and
			 * return val untouched.
			 */
			val = syscall__mask_val(sc, &arg, val);

			/*
			 * Suppress this argument if its value is zero and
			 * we don't have a string associated in an
			 * strarray for it.
			 */
			if (val == 0 &&
			    !trace->show_zeros &&
			    !(sc->arg_fmt &&
			      (sc->arg_fmt[arg.idx].show_zero ||
			       sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAY ||
			       sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAYS) &&
			      sc->arg_fmt[arg.idx].parm))
				continue;

			printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");

			if (trace->show_arg_names)
				printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);

			printed += syscall__scnprintf_val(sc, bf + printed, size - printed, &arg, val);
		}
	} else if (IS_ERR(sc->tp_format)) {
		/*
		 * If we managed to read the tracepoint /format file, then we
		 * may end up not having any args, like with gettid(), so only
		 * print the raw args when we didn't manage to read it.
		 */
		while (arg.idx < sc->nr_args) {
			if (arg.mask & bit)
				goto next_arg;
			val = syscall_arg__val(&arg, arg.idx);
			if (printed)
				printed += scnprintf(bf + printed, size - printed, ", ");
			printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg);
			printed += syscall__scnprintf_val(sc, bf + printed, size - printed, &arg, val);
next_arg:
			++arg.idx;
			bit <<= 1;
		}
	}

	return printed;
}
typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel,
				  union perf_event *event,
				  struct perf_sample *sample);
static struct syscall *trace__syscall_info(struct trace *trace,
					   struct evsel *evsel, int id)
{
	int err = 0;

	if (id < 0) {

		/*
		 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
		 * before that, leaving at a higher verbosity level till that is
		 * explained. Reproduced with plain ftrace with:
		 *
		 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
		 * grep "NR -1 " /t/trace_pipe
		 *
		 * After generating some load on the machine.
		 */
		if (verbose > 1) {
			static u64 n;

			fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
				id, perf_evsel__name(evsel), ++n);
		}
		return NULL;
	}

	err = -EINVAL;

	if (id > trace->sctbl->syscalls.max_id)
		goto out_cant_read;

	if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) &&
	    (err = trace__read_syscall_info(trace, id)) != 0)
		goto out_cant_read;

	if (trace->syscalls.table[id].name == NULL) {
		if (trace->syscalls.table[id].nonexistent)
			return NULL;
		goto out_cant_read;
	}

	return &trace->syscalls.table[id];

out_cant_read:
	if (verbose > 0) {
		char sbuf[STRERR_BUFSIZE];
		fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, sizeof(sbuf)));
		if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL)
			fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
		fputs(" information\n", trace->output);
	}
	return NULL;
}
static void thread__update_stats(struct thread_trace *ttrace,
				 int id, struct perf_sample *sample)
{
	struct int_node *inode;
	struct stats *stats;
	u64 duration = 0;

	inode = intlist__findnew(ttrace->syscall_stats, id);
	if (inode == NULL)
		return;

	stats = inode->priv;
	if (stats == NULL) {
		stats = malloc(sizeof(struct stats));
		if (stats == NULL)
			return;
		init_stats(stats);
		inode->priv = stats;
	}

	if (ttrace->entry_time && sample->time > ttrace->entry_time)
		duration = sample->time - ttrace->entry_time;

	update_stats(stats, duration);
}
static int trace__printf_interrupted_entry(struct trace *trace)
{
	struct thread_trace *ttrace;
	size_t printed;
	int len;

	if (trace->failure_only || trace->current == NULL)
		return 0;

	ttrace = thread__priv(trace->current);

	if (!ttrace->entry_pending)
		return 0;

	printed  = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
	printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);

	if (len < trace->args_alignment - 4)
		printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");

	printed += fprintf(trace->output, " ...\n");

	ttrace->entry_pending = false;
	++trace->nr_events_printed;

	return printed;
}
static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel,
				 struct perf_sample *sample, struct thread *thread)
{
	int printed = 0;

	if (trace->print_sample) {
		double ts = (double)sample->time / NSEC_PER_MSEC;

		printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
				   perf_evsel__name(evsel), ts,
				   thread__comm_str(thread),
				   sample->pid, sample->tid, sample->cpu);
	}

	return printed;
}
static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size)
{
	void *augmented_args = NULL;
	/*
	 * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter
	 * and there we get all 6 syscall args plus the tracepoint common fields
	 * that get calculated at the start and the syscall_nr (another long).
	 * So we check if that is the case and if so don't look after the
	 * sc->args_size but always after the full raw_syscalls:sys_enter payload,
	 * which is fixed.
	 *
	 * We'll revisit this later to pass s->args_size to the BPF augmenter
	 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c), so that it
	 * copies only what we need for each syscall, like what happens when we
	 * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace
	 * traffic to just what is needed for each syscall.
	 */
	int args_size = raw_augmented_args_size ?: sc->args_size;

	*augmented_args_size = sample->raw_size - args_size;
	if (*augmented_args_size > 0)
		augmented_args = sample->raw_data + args_size;

	return augmented_args;
}
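/*
 * Editor's note (not in the original source): the arithmetic above in
 * numbers, for a hypothetical sample where the known syscall arguments end at
 * byte 32 of the payload (args_size = 32) and raw_size is 56:
 *
 *	*augmented_args_size = 56 - 32 = 24
 *	augmented_args       = sample->raw_data + 32
 *
 * i.e. everything past the known arguments is treated as the size-prefixed
 * augmented payload (the pathname, in the openat case).
 */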
static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample)
{
	char *msg;
	void *args;
	int printed = 0;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	int augmented_args_size = 0;
	void *augmented_args = NULL;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	trace__fprintf_sample(trace, evsel, sample, thread);

	args = perf_evsel__sc_tp_ptr(evsel, args, sample);

	if (ttrace->entry_str == NULL) {
		ttrace->entry_str = malloc(trace__entry_str_size);
		if (!ttrace->entry_str)
			goto out_put;
	}

	if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
		trace__printf_interrupted_entry(trace);
	/*
	 * If this is raw_syscalls.sys_enter, then it always comes with the 6 possible
	 * arguments, even if the syscall being handled, say "openat", uses only 4 arguments.
	 * This breaks the syscall__augmented_args() check for augmented args, as we calculate
	 * syscall->args_size using each syscalls:sys_enter_NAME tracefs format file,
	 * so when handling, say, the openat syscall, we end up getting 6 args for the
	 * raw_syscalls:sys_enter event, when we expected just 4, and would mistakenly
	 * think that the extra 2 u64 args are the augmented filename. So just check
	 * here and avoid using augmented syscalls when the evsel is the raw_syscalls one.
	 */
	if (evsel != trace->syscalls.events.sys_enter)
		augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
	ttrace->entry_time = sample->time;
	msg = ttrace->entry_str;
	printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);

	printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
					   args, augmented_args, augmented_args_size, trace, thread);

	if (sc->is_exit) {
		if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
			int alignment = 0;

			trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
			printed = fprintf(trace->output, "%s)", ttrace->entry_str);
			if (trace->args_alignment > printed)
				alignment = trace->args_alignment - printed;
			fprintf(trace->output, "%*s= ?\n", alignment, " ");
		}
	} else {
		ttrace->entry_pending = true;
		/* See trace__vfs_getname & trace__sys_exit */
		ttrace->filename.pending_open = false;
	}

	if (trace->current != thread) {
		thread__put(trace->current);
		trace->current = thread__get(thread);
	}
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
				    struct perf_sample *sample)
{
	struct thread_trace *ttrace;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	char msg[1024];
	void *args, *augmented_args = NULL;
	int augmented_args_size;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	/*
	 * We need to get ttrace just to make sure it is there when syscall__scnprintf_args()
	 * and the rest of the beautifiers accessing it via struct syscall_arg touches it.
	 */
	if (ttrace == NULL)
		goto out_put;

	args = perf_evsel__sc_tp_ptr(evsel, args, sample);
	augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
	syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
	fprintf(trace->output, "%s", msg);
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel,
				    struct perf_sample *sample,
				    struct callchain_cursor *cursor)
{
	struct addr_location al;
	int max_stack = evsel->attr.sample_max_stack ?
			evsel->attr.sample_max_stack :
			trace->max_stack;
	int err;

	if (machine__resolve(trace->host, &al, sample) < 0)
		return -1;

	err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack);
	addr_location__put(&al);
	return err;
}
*trace
, struct perf_sample
*sample
)
2064 /* TODO: user-configurable print_opts */
2065 const unsigned int print_opts
= EVSEL__PRINT_SYM
|
2067 EVSEL__PRINT_UNKNOWN_AS_ADDR
;
2069 return sample__fprintf_callchain(sample
, 38, print_opts
, &callchain_cursor
, trace
->output
);
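
/*
 * Errno numbers aren't the same across architectures, so map the number using
 * the arch recorded in the sample's perf_env to its per-arch name table,
 * e.g. 2 -> "ENOENT".
 */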
static const char *errno_to_name(struct evsel *evsel, int err)
{
	struct perf_env *env = perf_evsel__env(evsel);
	const char *arch_name = perf_env__arch(env);

	return arch_syscalls__strerrno(arch_name, err);
}
static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
			   union perf_event *event __maybe_unused,
			   struct perf_sample *sample)
{
	long ret;
	u64 duration = 0;
	bool duration_calculated = false;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0;
	int alignment = trace->args_alignment;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	trace__fprintf_sample(trace, evsel, sample, thread);

	if (trace->summary)
		thread__update_stats(ttrace, id, sample);

	ret = perf_evsel__sc_tp_uint(evsel, ret, sample);

	if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
		trace__set_fd_pathname(thread, ret, ttrace->filename.name);
		ttrace->filename.pending_open = false;
		++trace->stats.vfs_getname;
	}

	if (ttrace->entry_time) {
		duration = sample->time - ttrace->entry_time;
		if (trace__filter_duration(trace, duration))
			goto out;
		duration_calculated = true;
	} else if (trace->duration_filter)
		goto out;

	if (sample->callchain) {
		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
		if (callchain_ret == 0) {
			if (callchain_cursor.nr < trace->min_stack)
				goto out;
			callchain_ret = 1;
		}
	}

	if (trace->summary_only || (ret >= 0 && trace->failure_only))
		goto out;

	trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);

	if (ttrace->entry_pending) {
		printed = fprintf(trace->output, "%s", ttrace->entry_str);
	} else {
		printed += fprintf(trace->output, " ... [");
		color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
		printed += 9;
		printed += fprintf(trace->output, "]: %s()", sc->name);
	}

	printed++; /* the closing ')' */

	if (alignment > printed)
		alignment -= printed;
	else
		alignment = 0;

	fprintf(trace->output, ")%*s= ", alignment, " ");

	if (sc->fmt == NULL) {
		if (ret < 0)
			goto errno_print;
signed_print:
		fprintf(trace->output, "%ld", ret);
	} else if (ret < 0) {
errno_print: {
		char bf[STRERR_BUFSIZE];
		const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
			   *e = errno_to_name(evsel, -ret);

		fprintf(trace->output, "-1 %s (%s)", e, emsg);
	}
	} else if (ret == 0 && sc->fmt->timeout)
		fprintf(trace->output, "0 (Timeout)");
	else if (ttrace->ret_scnprintf) {
		char bf[1024];
		struct syscall_arg arg = {
			.val	= ret,
			.thread	= thread,
			.trace	= trace,
		};
		ttrace->ret_scnprintf(bf, sizeof(bf), &arg);
		ttrace->ret_scnprintf = NULL;
		fprintf(trace->output, "%s", bf);
	} else if (sc->fmt->hexret)
		fprintf(trace->output, "%#lx", ret);
	else if (sc->fmt->errpid) {
		struct thread *child = machine__find_thread(trace->host, ret, ret);

		if (child != NULL) {
			fprintf(trace->output, "%ld", ret);
			if (child->comm_set)
				fprintf(trace->output, " (%s)", thread__comm_str(child));
			thread__put(child);
		}
	} else
		goto signed_print;

	fputc('\n', trace->output);

	/*
	 * We only consider an 'event' for the sake of --max-events a non-filtered
	 * sys_enter + sys_exit and other tracepoint events.
	 */
	if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
		interrupted = true;

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
out:
	ttrace->entry_pending = false;
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
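
/*
 * Handler for the probe:vfs_getname dynamic probe: saves the pathname being
 * resolved and, when a syscall entry line is still pending, splices it into
 * ttrace->entry_str at the position where the raw pointer was formatted,
 * so the filename shows up in the strace-like output.
 */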
static int trace__vfs_getname(struct trace *trace, struct evsel *evsel,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	struct thread_trace *ttrace;
	size_t filename_len, entry_str_len, to_move;
	ssize_t remaining_space;
	char *pos;
	const char *filename = perf_evsel__rawptr(evsel, sample, "pathname");

	if (!thread)
		goto out;

	ttrace = thread__priv(thread);
	if (!ttrace)
		goto out_put;

	filename_len = strlen(filename);
	if (filename_len == 0)
		goto out_put;

	if (ttrace->filename.namelen < filename_len) {
		char *f = realloc(ttrace->filename.name, filename_len + 1);

		if (f == NULL)
			goto out_put;

		ttrace->filename.namelen = filename_len;
		ttrace->filename.name = f;
	}

	strcpy(ttrace->filename.name, filename);
	ttrace->filename.pending_open = true;

	if (!ttrace->filename.ptr)
		goto out_put;

	entry_str_len = strlen(ttrace->entry_str);
	remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
	if (remaining_space <= 0)
		goto out_put;

	if (filename_len > (size_t)remaining_space) {
		filename += filename_len - remaining_space;
		filename_len = remaining_space;
	}

	to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
	pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
	memmove(pos + filename_len, pos, to_move);
	memcpy(pos, filename, filename_len);

	ttrace->filename.ptr = 0;
	ttrace->filename.entry_str_pos = 0;
out_put:
	thread__put(thread);
out:
	return 0;
}
static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample)
{
	u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
	double runtime_ms = (double)runtime / NSEC_PER_MSEC;
	struct thread *thread = machine__findnew_thread(trace->host,
							sample->pid,
							sample->tid);
	struct thread_trace *ttrace = thread__trace(thread, trace->output);

	if (ttrace == NULL)
		goto out_dump;

	ttrace->runtime_ms += runtime_ms;
	trace->runtime_ms += runtime_ms;
out_put:
	thread__put(thread);
	return 0;

out_dump:
	fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
	       evsel->name,
	       perf_evsel__strval(evsel, sample, "comm"),
	       (pid_t)perf_evsel__intval(evsel, sample, "pid"),
	       runtime,
	       perf_evsel__intval(evsel, sample, "vruntime"));
	goto out_put;
}
static int bpf_output__printer(enum binary_printer_ops op,
			       unsigned int val, void *extra __maybe_unused, FILE *fp)
{
	unsigned char ch = (unsigned char)val;

	switch (op) {
	case BINARY_PRINT_CHAR_DATA:
		return fprintf(fp, "%c", isprint(ch) ? ch : '.');
	case BINARY_PRINT_DATA_BEGIN:
	case BINARY_PRINT_LINE_BEGIN:
	case BINARY_PRINT_ADDR:
	case BINARY_PRINT_NUM_DATA:
	case BINARY_PRINT_NUM_PAD:
	case BINARY_PRINT_SEP:
	case BINARY_PRINT_CHAR_PAD:
	case BINARY_PRINT_LINE_END:
	case BINARY_PRINT_DATA_END:
	default:
		break;
	}

	return 0;
}
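
/*
 * Hex dumps the raw payload of a bpf-output sample, 8 bytes per line, with
 * bpf_output__printer() providing the printable ASCII column.
 */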
static void bpf_output__fprintf(struct trace *trace,
				struct perf_sample *sample)
{
	binary__fprintf(sample->raw_data, sample->raw_size, 8,
			bpf_output__printer, NULL, trace->output);
	++trace->nr_events_printed;
}
static int trace__event_handler(struct trace *trace, struct evsel *evsel,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample)
{
	struct thread *thread;
	int callchain_ret = 0;
	/*
	 * Check if we called perf_evsel__disable(evsel) due to, for instance,
	 * this event's max_events having been hit and this is an entry coming
	 * from the ring buffer that we should discard, since the max events
	 * have already been considered/printed.
	 */
	if (evsel->disabled)
		return 0;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);

	if (sample->callchain) {
		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
		if (callchain_ret == 0) {
			if (callchain_cursor.nr < trace->min_stack)
				goto out;
			callchain_ret = 1;
		}
	}

	trace__printf_interrupted_entry(trace);
	trace__fprintf_tstamp(trace, sample->time, trace->output);

	if (trace->trace_syscalls && trace->show_duration)
		fprintf(trace->output, "(         ): ");

	if (thread)
		trace__fprintf_comm_tid(trace, thread, trace->output);

	if (evsel == trace->syscalls.events.augmented) {
		int id = perf_evsel__sc_tp_uint(evsel, id, sample);
		struct syscall *sc = trace__syscall_info(trace, evsel, id);

		if (sc) {
			fprintf(trace->output, "%s(", sc->name);
			trace__fprintf_sys_enter(trace, evsel, sample);
			fputc(')', trace->output);
			goto newline;
		}

		/*
		 * XXX: Not having the associated syscall info or not finding/adding
		 * 	the thread should never happen, but if it does...
		 * 	fall thru and print it as a bpf_output event.
		 */
	}

	fprintf(trace->output, "%s:", evsel->name);

	if (perf_evsel__is_bpf_output(evsel)) {
		bpf_output__fprintf(trace, sample);
	} else if (evsel->tp_format) {
		if (strncmp(evsel->tp_format->name, "sys_enter_", 10) ||
		    trace__fprintf_sys_enter(trace, evsel, sample)) {
			event_format__fprintf(evsel->tp_format, sample->cpu,
					      sample->raw_data, sample->raw_size,
					      trace->output);
			++trace->nr_events_printed;

			if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
				perf_evsel__disable(evsel);
				perf_evsel__close(evsel);
			}
		}
	}

newline:
	fprintf(trace->output, "\n");

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
out:
	thread__put(thread);
	return 0;
}
static void print_location(FILE *f, struct perf_sample *sample,
			   struct addr_location *al,
			   bool print_dso, bool print_sym)
{

	if ((verbose > 0 || print_dso) && al->map)
		fprintf(f, "%s@", al->map->dso->long_name);

	if ((verbose > 0 || print_sym) && al->sym)
		fprintf(f, "%s+0x%" PRIx64, al->sym->name,
			al->addr - al->sym->start);
	else if (al->map)
		fprintf(f, "0x%" PRIx64, al->addr);
	else
		fprintf(f, "0x%" PRIx64, sample->addr);
}
static int trace__pgfault(struct trace *trace,
			  struct evsel *evsel,
			  union perf_event *event __maybe_unused,
			  struct perf_sample *sample)
{
	struct thread *thread;
	struct addr_location al;
	char map_type = 'd';
	struct thread_trace *ttrace;
	int err = -1;
	int callchain_ret = 0;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);

	if (sample->callchain) {
		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
		if (callchain_ret == 0) {
			if (callchain_cursor.nr < trace->min_stack)
				goto out_put;
			callchain_ret = 1;
		}
	}

	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	if (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
		ttrace->pfmaj++;
	else
		ttrace->pfmin++;

	if (trace->summary_only)
		goto out;

	thread__find_symbol(thread, sample->cpumode, sample->ip, &al);

	trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);

	fprintf(trace->output, "%sfault [",
		evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
		"maj" : "min");

	print_location(trace->output, sample, &al, false, true);

	fprintf(trace->output, "] => ");

	thread__find_symbol(thread, sample->cpumode, sample->addr, &al);

	if (!al.map) {
		thread__find_symbol(thread, sample->cpumode, sample->addr, &al);

		if (al.map)
			map_type = 'x';
		else
			map_type = '?';
	}

	print_location(trace->output, sample, &al, true, false);

	fprintf(trace->output, " (%c%c)\n", map_type, al.level);

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));

	++trace->nr_events_printed;
out:
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
static void trace__set_base_time(struct trace *trace,
				 struct evsel *evsel,
				 struct perf_sample *sample)
{
	/*
	 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust
	 * and don't use sample->time unconditionally, we may end up having
	 * some other event in the future without PERF_SAMPLE_TIME for good
	 * reason, i.e. we may not be interested in its timestamps, just in
	 * it taking place, picking some piece of information when it
	 * appears in our event stream (vfs_getname comes to mind).
	 */
	if (trace->base_time == 0 && !trace->full_time &&
	    (evsel->attr.sample_type & PERF_SAMPLE_TIME))
		trace->base_time = sample->time;
}
static int trace__process_sample(struct perf_tool *tool,
				 union perf_event *event,
				 struct perf_sample *sample,
				 struct evsel *evsel,
				 struct machine *machine __maybe_unused)
{
	struct trace *trace = container_of(tool, struct trace, tool);
	struct thread *thread;
	int err = 0;

	tracepoint_handler handler = evsel->handler;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	if (thread && thread__is_filtered(thread))
		goto out;

	trace__set_base_time(trace, evsel, sample);

	if (handler) {
		++trace->nr_events;
		handler(trace, evsel, event, sample);
	}
out:
	thread__put(thread);
	return err;
}
static int trace__record(struct trace *trace, int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	const char * const record_args[] = {
		"record",
		"-R",
		"-m", "1024",
		"-c", "1",
	};

	const char * const sc_args[] = { "-e", };
	unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
	const char * const majpf_args[] = { "-e", "major-faults" };
	unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
	const char * const minpf_args[] = { "-e", "minor-faults" };
	unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);

	/* +1 is for the event string below */
	rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 1 +
		majpf_args_nr + minpf_args_nr + argc;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	j = 0;
	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[j++] = record_args[i];

	if (trace->trace_syscalls) {
		for (i = 0; i < sc_args_nr; i++)
			rec_argv[j++] = sc_args[i];

		/* event string may be different for older kernels - e.g., RHEL6 */
		if (is_valid_tracepoint("raw_syscalls:sys_enter"))
			rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
		else if (is_valid_tracepoint("syscalls:sys_enter"))
			rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
		else {
			pr_err("Neither raw_syscalls nor syscalls events exist.\n");
			free(rec_argv);
			return -1;
		}
	}

	if (trace->trace_pgfaults & TRACE_PFMAJ)
		for (i = 0; i < majpf_args_nr; i++)
			rec_argv[j++] = majpf_args[i];

	if (trace->trace_pgfaults & TRACE_PFMIN)
		for (i = 0; i < minpf_args_nr; i++)
			rec_argv[j++] = minpf_args[i];

	for (i = 0; i < (unsigned int)argc; i++)
		rec_argv[j++] = argv[i];

	return cmd_record(j, rec_argv);
}
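
/*
 * I.e. 'perf trace record -F all -- sleep 1' ends up building and running
 * something roughly like:
 *
 *    perf record <record_args> -e raw_syscalls:sys_enter,raw_syscalls:sys_exit \
 *		  -e major-faults -e minor-faults -- sleep 1
 */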
static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
static bool perf_evlist__add_vfs_getname(struct evlist *evlist)
{
	bool found = false;
	struct evsel *evsel, *tmp;
	struct parse_events_error err = { .idx = 0, };
	int ret = parse_events(evlist, "probe:vfs_getname*", &err);

	if (ret)
		return false;

	evlist__for_each_entry_safe(evlist, evsel, tmp) {
		if (!strstarts(perf_evsel__name(evsel), "probe:vfs_getname"))
			continue;

		if (perf_evsel__field(evsel, "pathname")) {
			evsel->handler = trace__vfs_getname;
			found = true;
			continue;
		}

		list_del_init(&evsel->node);
		evsel->evlist = NULL;
		perf_evsel__delete(evsel);
	}

	return found;
}
static struct evsel *perf_evsel__new_pgfault(u64 config)
{
	struct evsel *evsel;
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.mmap_data = 1,
	};

	attr.config = config;
	attr.sample_period = 1;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr);
	if (evsel)
		evsel->handler = trace__pgfault;

	return evsel;
}
static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
{
	const u32 type = event->header.type;
	struct evsel *evsel;

	if (type != PERF_RECORD_SAMPLE) {
		trace__process_event(trace, trace->host, event, sample);
		return;
	}

	evsel = perf_evlist__id2evsel(trace->evlist, sample->id);
	if (evsel == NULL) {
		fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
		return;
	}

	trace__set_base_time(trace, evsel, sample);

	if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
	    sample->raw_data == NULL) {
		fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
		       perf_evsel__name(evsel), sample->tid,
		       sample->cpu, sample->raw_size);
	} else {
		tracepoint_handler handler = evsel->handler;
		handler(trace, evsel, event, sample);
	}

	if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
		interrupted = true;
}
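
/*
 * Non-BPF strace-like mode: create evsels for the raw_syscalls:sys_enter and
 * raw_syscalls:sys_exit tracepoints and wire up the per-tracepoint field
 * accessors and the callchain configuration.
 */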
static int trace__add_syscall_newtp(struct trace *trace)
{
	int ret = -1;
	struct evlist *evlist = trace->evlist;
	struct evsel *sys_enter, *sys_exit;

	sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter);
	if (sys_enter == NULL)
		goto out;

	if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
		goto out_delete_sys_enter;

	sys_exit = perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit);
	if (sys_exit == NULL)
		goto out_delete_sys_enter;

	if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
		goto out_delete_sys_exit;

	perf_evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
	perf_evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);

	perf_evlist__add(evlist, sys_enter);
	perf_evlist__add(evlist, sys_exit);

	if (callchain_param.enabled && !trace->kernel_syscallchains) {
		/*
		 * We're interested only in the user space callchain
		 * leading to the syscall, allow overriding that for
		 * debugging reasons using --kernel_syscall_callchains
		 */
		sys_exit->attr.exclude_callchain_kernel = 1;
	}

	trace->syscalls.events.sys_enter = sys_enter;
	trace->syscalls.events.sys_exit  = sys_exit;

	ret = 0;
out:
	return ret;

out_delete_sys_exit:
	perf_evsel__delete_priv(sys_exit);
out_delete_sys_enter:
	perf_evsel__delete_priv(sys_enter);
	goto out;
}
static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
{
	int err = -1;
	struct evsel *sys_exit;
	char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
						trace->ev_qualifier_ids.nr,
						trace->ev_qualifier_ids.entries);

	if (filter == NULL)
		goto out_enomem;

	if (!perf_evsel__append_tp_filter(trace->syscalls.events.sys_enter,
					  filter)) {
		sys_exit = trace->syscalls.events.sys_exit;
		err = perf_evsel__append_tp_filter(sys_exit, filter);
	}

	free(filter);
out:
	return err;
out_enomem:
	errno = ENOMEM;
	goto out;
}
#ifdef HAVE_LIBBPF_SUPPORT
static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
{
	if (trace->bpf_obj == NULL)
		return NULL;

	return bpf_object__find_program_by_title(trace->bpf_obj, name);
}

static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
							const char *prog_name, const char *type)
{
	struct bpf_program *prog;

	if (prog_name == NULL) {
		char default_prog_name[256];
		scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->name);
		prog = trace__find_bpf_program_by_title(trace, default_prog_name);
		if (prog != NULL)
			goto out_found;
		if (sc->fmt && sc->fmt->alias) {
			scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->fmt->alias);
			prog = trace__find_bpf_program_by_title(trace, default_prog_name);
			if (prog != NULL)
				goto out_found;
		}
		goto out_unaugmented;
	}

	prog = trace__find_bpf_program_by_title(trace, prog_name);

	if (prog != NULL) {
out_found:
		return prog;
	}

	pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n",
		 prog_name, type, sc->name);
out_unaugmented:
	return trace->syscalls.unaugmented_prog;
}
static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
{
	struct syscall *sc = trace__syscall_info(trace, NULL, id);

	if (sc == NULL)
		return;

	sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter");
	sc->bpf_prog.sys_exit  = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit  : NULL,  "exit");
}
static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id)
{
	struct syscall *sc = trace__syscall_info(trace, NULL, id);
	return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->syscalls.unaugmented_prog);
}

static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
{
	struct syscall *sc = trace__syscall_info(trace, NULL, id);
	return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_prog);
}
static void trace__init_bpf_map_syscall_args(struct trace *trace, int id, struct bpf_map_syscall_entry *entry)
{
	struct syscall *sc = trace__syscall_info(trace, NULL, id);
	int arg = 0;

	if (sc == NULL)
		goto out;

	for (; arg < sc->nr_args; ++arg) {
		entry->string_args_len[arg] = 0;
		if (sc->arg_fmt[arg].scnprintf == SCA_FILENAME) {
			/* Should be set like strace -s strsize */
			entry->string_args_len[arg] = PATH_MAX;
		}
	}
out:
	for (; arg < 6; ++arg)
		entry->string_args_len[arg] = 0;
}
static int trace__set_ev_qualifier_bpf_filter(struct trace *trace)
{
	int fd = bpf_map__fd(trace->syscalls.map);
	struct bpf_map_syscall_entry value = {
		.enabled = !trace->not_ev_qualifier,
	};
	int err = 0;
	size_t i;

	for (i = 0; i < trace->ev_qualifier_ids.nr; ++i) {
		int key = trace->ev_qualifier_ids.entries[i];

		if (value.enabled) {
			trace__init_bpf_map_syscall_args(trace, key, &value);
			trace__init_syscall_bpf_progs(trace, key);
		}

		err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
		if (err)
			break;
	}

	return err;
}

static int __trace__init_syscalls_bpf_map(struct trace *trace, bool enabled)
{
	int fd = bpf_map__fd(trace->syscalls.map);
	struct bpf_map_syscall_entry value = {
		.enabled = enabled,
	};
	int err = 0, key;

	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
		if (enabled)
			trace__init_bpf_map_syscall_args(trace, key, &value);

		err = bpf_map_update_elem(fd, &key, &value, BPF_ANY);
		if (err)
			break;
	}

	return err;
}

static int trace__init_syscalls_bpf_map(struct trace *trace)
{
	bool enabled = true;

	if (trace->ev_qualifier_ids.nr)
		enabled = trace->not_ev_qualifier;

	return __trace__init_syscalls_bpf_map(trace, enabled);
}
static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
{
	struct tep_format_field *field, *candidate_field;
	int id;

	/*
	 * We're only interested in syscalls that have a pointer:
	 */
	for (field = sc->args; field; field = field->next) {
		if (field->flags & TEP_FIELD_IS_POINTER)
			goto try_to_find_pair;
	}

	return NULL;

try_to_find_pair:
	for (id = 0; id < trace->sctbl->syscalls.nr_entries; ++id) {
		struct syscall *pair = trace__syscall_info(trace, NULL, id);
		struct bpf_program *pair_prog;
		bool is_candidate = false;

		if (pair == NULL || pair == sc ||
		    pair->bpf_prog.sys_enter == trace->syscalls.unaugmented_prog)
			continue;

		for (field = sc->args, candidate_field = pair->args;
		     field && candidate_field; field = field->next, candidate_field = candidate_field->next) {
			bool is_pointer = field->flags & TEP_FIELD_IS_POINTER,
			     candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER;

			if (is_pointer) {
				if (!candidate_is_pointer) {
					// The candidate just doesn't copy our pointer arg, it might copy other pointers we want.
					continue;
				}
			} else {
				if (candidate_is_pointer) {
					// The candidate might copy a pointer we don't have, skip it.
					goto next_candidate;
				}
				continue;
			}

			if (strcmp(field->type, candidate_field->type))
				goto next_candidate;

			is_candidate = true;
		}

		if (!is_candidate)
			goto next_candidate;

		/*
		 * Check if the tentative pair syscall augmenter has more pointers, if it has,
		 * then it may be collecting that and we then can't use it, as it would collect
		 * more than what is common to the two syscalls.
		 */
		if (candidate_field) {
			for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->next)
				if (candidate_field->flags & TEP_FIELD_IS_POINTER)
					goto next_candidate;
		}

		pair_prog = pair->bpf_prog.sys_enter;
		/*
		 * If the pair isn't enabled, then its bpf_prog.sys_enter will not
		 * have been searched for, so search it here and if it returns the
		 * unaugmented one, then ignore it, otherwise we'll reuse that BPF
		 * program for a filtered syscall on a non-filtered one.
		 *
		 * For instance, we have "!syscalls:sys_enter_renameat" and that is
		 * useful for "renameat2".
		 */
		if (pair_prog == NULL) {
			pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
			if (pair_prog == trace->syscalls.unaugmented_prog)
				goto next_candidate;
		}

		pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name, sc->name);
		return pair_prog;
next_candidate:
		continue;
	}

	return NULL;
}
static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
{
	int map_enter_fd = bpf_map__fd(trace->syscalls.prog_array.sys_enter),
	    map_exit_fd  = bpf_map__fd(trace->syscalls.prog_array.sys_exit);
	int err = 0, key;

	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
		int prog_fd;

		if (!trace__syscall_enabled(trace, key))
			continue;

		trace__init_syscall_bpf_progs(trace, key);

		// It'll get at least the "!raw_syscalls:unaugmented"
		prog_fd = trace__bpf_prog_sys_enter_fd(trace, key);
		err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
		if (err)
			break;
		prog_fd = trace__bpf_prog_sys_exit_fd(trace, key);
		err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY);
		if (err)
			break;
	}

	/*
	 * Now lets do a second pass looking for enabled syscalls without
	 * an augmenter that have a signature that is a superset of another
	 * syscall with an augmenter so that we can auto-reuse it.
	 *
	 * I.e. if we have an augmenter for the "open" syscall that has
	 * this signature:
	 *
	 *   int open(const char *pathname, int flags, mode_t mode);
	 *
	 * I.e. that will collect just the first string argument, then we
	 * can reuse it for the 'creat' syscall, that has this signature:
	 *
	 *   int creat(const char *pathname, mode_t mode);
	 *
	 * and for:
	 *
	 *   int stat(const char *pathname, struct stat *statbuf);
	 *   int lstat(const char *pathname, struct stat *statbuf);
	 *
	 * Because the 'open' augmenter will collect the first arg as a string,
	 * and leave alone all the other args, which already helps with
	 * beautifying 'stat' and 'lstat''s pathname arg.
	 *
	 * Then, in time, when 'stat' gets an augmenter that collects both
	 * first and second arg (this one on the raw_syscalls:sys_exit prog
	 * array tail call), then that one will be used.
	 */
	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
		struct syscall *sc = trace__syscall_info(trace, NULL, key);
		struct bpf_program *pair_prog;
		int prog_fd;

		if (sc == NULL || sc->bpf_prog.sys_enter == NULL)
			continue;

		/*
		 * For now we're just reusing the sys_enter prog, and if it
		 * already has an augmenter, we don't need to find one.
		 */
		if (sc->bpf_prog.sys_enter != trace->syscalls.unaugmented_prog)
			continue;

		/*
		 * Look at all the other syscalls for one that has a signature
		 * that is close enough that we can share:
		 */
		pair_prog = trace__find_usable_bpf_prog_entry(trace, sc);
		if (pair_prog == NULL)
			continue;

		sc->bpf_prog.sys_enter = pair_prog;

		/*
		 * Update the BPF_MAP_TYPE_PROG_SHARED for raw_syscalls:sys_enter
		 * with the fd for the program we're reusing:
		 */
		prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter);
		err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
		if (err)
			break;
	}

	return err;
}
#else // HAVE_LIBBPF_SUPPORT
static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused)
{
	return 0;
}

static int trace__init_syscalls_bpf_map(struct trace *trace __maybe_unused)
{
	return 0;
}

static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace __maybe_unused,
							    const char *name __maybe_unused)
{
	return NULL;
}

static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused)
{
	return 0;
}
#endif // HAVE_LIBBPF_SUPPORT
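
/*
 * The -e syscall qualifier list is applied either via the "syscalls" BPF map,
 * when the augmented BPF object is in use, or as a plain tracepoint filter
 * ("id in (...)" / "id not in (...)") on the sys_enter/sys_exit events.
 */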
static int trace__set_ev_qualifier_filter(struct trace *trace)
{
	if (trace->syscalls.map)
		return trace__set_ev_qualifier_bpf_filter(trace);
	if (trace->syscalls.events.sys_enter)
		return trace__set_ev_qualifier_tp_filter(trace);
	return 0;
}
*map __maybe_unused
,
3124 size_t npids __maybe_unused
, pid_t
*pids __maybe_unused
)
3127 #ifdef HAVE_LIBBPF_SUPPORT
3129 int map_fd
= bpf_map__fd(map
);
3132 for (i
= 0; i
< npids
; ++i
) {
3133 err
= bpf_map_update_elem(map_fd
, &pids
[i
], &value
, BPF_ANY
);
3141 static int trace__set_filter_loop_pids(struct trace
*trace
)
3143 unsigned int nr
= 1, err
;
3147 struct thread
*thread
= machine__find_thread(trace
->host
, pids
[0], pids
[0]);
3149 while (thread
&& nr
< ARRAY_SIZE(pids
)) {
3150 struct thread
*parent
= machine__find_thread(trace
->host
, thread
->ppid
, thread
->ppid
);
3155 if (!strcmp(thread__comm_str(parent
), "sshd") ||
3156 strstarts(thread__comm_str(parent
), "gnome-terminal")) {
3157 pids
[nr
++] = parent
->tid
;
3163 err
= perf_evlist__set_tp_filter_pids(trace
->evlist
, nr
, pids
);
3164 if (!err
&& trace
->filter_pids
.map
)
3165 err
= bpf_map__set_filter_pids(trace
->filter_pids
.map
, nr
, pids
);
static int trace__set_filter_pids(struct trace *trace)
{
	int err = 0;
	/*
	 * Better not use !target__has_task() here because we need to cover the
	 * case where no threads were specified in the command line, but a
	 * workload was, and in that case we will fill in the thread_map when
	 * we fork the workload in perf_evlist__prepare_workload.
	 */
	if (trace->filter_pids.nr > 0) {
		err = perf_evlist__set_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
						      trace->filter_pids.entries);
		if (!err && trace->filter_pids.map) {
			err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
						       trace->filter_pids.entries);
		}
	} else if (thread_map__pid(trace->evlist->threads, 0) == -1) {
		err = trace__set_filter_loop_pids(trace);
	}

	return err;
}
static int __trace__deliver_event(struct trace *trace, union perf_event *event)
{
	struct evlist *evlist = trace->evlist;
	struct perf_sample sample;
	int err;

	err = perf_evlist__parse_sample(evlist, event, &sample);
	if (err)
		fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
	else
		trace__handle_event(trace, event, &sample);

	return 0;
}
static int __trace__flush_events(struct trace *trace)
{
	u64 first = ordered_events__first_time(&trace->oe.data);
	u64 flush = trace->oe.last - NSEC_PER_SEC;

	/* Is there something to flush.. */
	if (first && first < flush)
		return ordered_events__flush_time(&trace->oe.data, flush);

	return 0;
}

static int trace__flush_events(struct trace *trace)
{
	return !trace->sort_events ? 0 : __trace__flush_events(trace);
}
static int trace__deliver_event(struct trace *trace, union perf_event *event)
{
	int err;

	if (!trace->sort_events)
		return __trace__deliver_event(trace, event);

	err = perf_evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
	if (err && err != -1)
		return err;

	err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0);
	if (err)
		return err;

	return trace__flush_events(trace);
}
static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct trace *trace = container_of(oe, struct trace, oe.data);

	return __trace__deliver_event(trace, event->event);
}
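
/*
 * The live mode workhorse: sets up the syscall tracepoints and/or BPF
 * augmentation, page fault and sched events, forks the workload if one was
 * given, then mmap-reads and delivers events until done or interrupted.
 */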
static int trace__run(struct trace *trace, int argc, const char **argv)
{
	struct evlist *evlist = trace->evlist;
	struct evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
	int err = -1, i;
	unsigned long before;
	const bool forks = argc > 0;
	bool draining = false;

	trace->live = true;

	if (!trace->raw_augmented_syscalls) {
		if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
			goto out_error_raw_syscalls;

		if (trace->trace_syscalls)
			trace->vfs_getname = perf_evlist__add_vfs_getname(evlist);
	}

	if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
		pgfault_maj = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
		if (pgfault_maj == NULL)
			goto out_error_mem;
		perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
		perf_evlist__add(evlist, pgfault_maj);
	}

	if ((trace->trace_pgfaults & TRACE_PFMIN)) {
		pgfault_min = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
		if (pgfault_min == NULL)
			goto out_error_mem;
		perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
		perf_evlist__add(evlist, pgfault_min);
	}

	if (trace->sched &&
	    perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
				   trace__sched_stat_runtime))
		goto out_error_sched_stat_runtime;

	/*
	 * If a global cgroup was set, apply it to all the events without an
	 * explicit cgroup. I.e.:
	 *
	 * 	trace -G A -e sched:*switch
	 *
	 * Will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc
	 * _and_ sched:sched_switch to the 'A' cgroup, while:
	 *
	 * trace -e sched:*switch -G A
	 *
	 * will only set the sched:sched_switch event to the 'A' cgroup, all the
	 * other events (raw_syscalls:sys_{enter,exit}, etc are left "without"
	 * a cgroup (on the root cgroup, sys wide, etc).
	 *
	 * Multiple cgroups:
	 *
	 * trace -G A -e sched:*switch -G B
	 *
	 * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes
	 * to the 'B' cgroup.
	 *
	 * evlist__set_default_cgroup() grabs a reference of the passed cgroup
	 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL.
	 */
	if (trace->cgroup)
		evlist__set_default_cgroup(trace->evlist, trace->cgroup);

	err = perf_evlist__create_maps(evlist, &trace->opts.target);
	if (err < 0) {
		fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
		goto out_delete_evlist;
	}

	err = trace__symbols_init(trace, evlist);
	if (err < 0) {
		fprintf(trace->output, "Problems initializing symbol libraries!\n");
		goto out_delete_evlist;
	}

	perf_evlist__config(evlist, &trace->opts, &callchain_param);

	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);

	if (forks) {
		err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
						    argv, false, NULL);
		if (err < 0) {
			fprintf(trace->output, "Couldn't run the workload!\n");
			goto out_delete_evlist;
		}
	}

	err = perf_evlist__open(evlist);
	if (err < 0)
		goto out_error_open;

	err = bpf__apply_obj_config();
	if (err) {
		char errbuf[BUFSIZ];

		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Apply config to BPF failed: %s\n",
			 errbuf);
		goto out_error_open;
	}

	err = trace__set_filter_pids(trace);
	if (err < 0)
		goto out_error_mem;

	if (trace->syscalls.map)
		trace__init_syscalls_bpf_map(trace);

	if (trace->syscalls.prog_array.sys_enter)
		trace__init_syscalls_bpf_prog_array_maps(trace);

	if (trace->ev_qualifier_ids.nr > 0) {
		err = trace__set_ev_qualifier_filter(trace);
		if (err < 0)
			goto out_errno;

		if (trace->syscalls.events.sys_exit) {
			pr_debug("event qualifier tracepoint filter: %s\n",
				 trace->syscalls.events.sys_exit->filter);
		}
	}

	/*
	 * If the "close" syscall is not traced, then we will not have the
	 * opportunity to, in syscall_arg__scnprintf_close_fd() invalidate the
	 * fd->pathname table and were ending up showing the last value set by
	 * syscalls opening a pathname and associating it with a descriptor or
	 * reading it from /proc/pid/fd/ in cases where that doesn't make
	 * sense.
	 *
	 *  So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is
	 *  not in use.
	 */
	trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));

	err = perf_evlist__apply_filters(evlist, &evsel);
	if (err < 0)
		goto out_error_apply_filters;

	if (trace->dump.map)
		bpf_map__fprintf(trace->dump.map, trace->output);

	err = perf_evlist__mmap(evlist, trace->opts.mmap_pages);
	if (err < 0)
		goto out_error_mmap;

	if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
		perf_evlist__enable(evlist);

	if (forks)
		perf_evlist__start_workload(evlist);

	if (trace->opts.initial_delay) {
		usleep(trace->opts.initial_delay * 1000);
		perf_evlist__enable(evlist);
	}

	trace->multiple_threads = thread_map__pid(evlist->threads, 0) == -1 ||
				  evlist->threads->nr > 1 ||
				  perf_evlist__first(evlist)->attr.inherit;

	/*
	 * Now that we already used evsel->attr to ask the kernel to setup the
	 * events, lets reuse evsel->attr.sample_max_stack as the limit in
	 * trace__resolve_callchain(), allowing per-event max-stack settings
	 * to override an explicitly set --max-stack global setting.
	 */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel__has_callchain(evsel) &&
		    evsel->attr.sample_max_stack == 0)
			evsel->attr.sample_max_stack = trace->max_stack;
	}
again:
	before = trace->nr_events;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;
		struct perf_mmap *md;

		md = &evlist->mmap[i];
		if (perf_mmap__read_init(md) < 0)
			continue;

		while ((event = perf_mmap__read_event(md)) != NULL) {
			++trace->nr_events;

			err = trace__deliver_event(trace, event);
			if (err)
				goto out_disable;

			perf_mmap__consume(md);

			if (interrupted)
				goto out_disable;

			if (done && !draining) {
				perf_evlist__disable(evlist);
				draining = true;
			}
		}
		perf_mmap__read_done(md);
	}

	if (trace->nr_events == before) {
		int timeout = done ? 100 : -1;

		if (!draining && perf_evlist__poll(evlist, timeout) > 0) {
			if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
				draining = true;

			goto again;
		} else {
			if (trace__flush_events(trace))
				goto out_disable;
		}
	} else {
		goto again;
	}

out_disable:
	thread__zput(trace->current);

	perf_evlist__disable(evlist);

	if (trace->sort_events)
		ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);

	if (!err) {
		if (trace->summary)
			trace__fprintf_thread_summary(trace, trace->output);

		if (trace->show_tool_stats) {
			fprintf(trace->output, "Stats:\n "
					       " vfs_getname : %" PRIu64 "\n"
					       " proc_getname: %" PRIu64 "\n",
				trace->stats.vfs_getname,
				trace->stats.proc_getname);
		}
	}

out_delete_evlist:
	trace__symbols__exit(trace);

	perf_evlist__delete(evlist);
	cgroup__put(trace->cgroup);
	trace->evlist = NULL;
	trace->live = false;
	return err;
{
	char errbuf[BUFSIZ];

out_error_sched_stat_runtime:
	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
	goto out_error;

out_error_raw_syscalls:
	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
	goto out_error;

out_error_mmap:
	perf_evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
	goto out_error;

out_error_open:
	perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));

out_error:
	fprintf(trace->output, "%s\n", errbuf);
	goto out_delete_evlist;

out_error_apply_filters:
	fprintf(trace->output,
		"Failed to set filter \"%s\" on event %s with %d (%s)\n",
		evsel->filter, perf_evsel__name(evsel), errno,
		str_error_r(errno, errbuf, sizeof(errbuf)));
	goto out_delete_evlist;
}
out_error_mem:
	fprintf(trace->output, "Not enough memory to run!\n");
	goto out_delete_evlist;

out_errno:
	fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
	goto out_delete_evlist;
}
static int trace__replay(struct trace *trace)
{
	const struct evsel_str_handler handlers[] = {
		{ "probe:vfs_getname",	     trace__vfs_getname, },
	};
	struct perf_data data = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = trace->force,
	};
	struct perf_session *session;
	struct evsel *evsel;
	int err = -1;

	trace->tool.sample	  = trace__process_sample;
	trace->tool.mmap	  = perf_event__process_mmap;
	trace->tool.mmap2	  = perf_event__process_mmap2;
	trace->tool.comm	  = perf_event__process_comm;
	trace->tool.exit	  = perf_event__process_exit;
	trace->tool.fork	  = perf_event__process_fork;
	trace->tool.attr	  = perf_event__process_attr;
	trace->tool.tracing_data  = perf_event__process_tracing_data;
	trace->tool.build_id	  = perf_event__process_build_id;
	trace->tool.namespaces	  = perf_event__process_namespaces;

	trace->tool.ordered_events = true;
	trace->tool.ordering_requires_timestamps = true;

	/* add tid to output */
	trace->multiple_threads = true;

	session = perf_session__new(&data, false, &trace->tool);
	if (session == NULL)
		return -1;

	if (trace->opts.target.pid)
		symbol_conf.pid_list_str = strdup(trace->opts.target.pid);

	if (trace->opts.target.tid)
		symbol_conf.tid_list_str = strdup(trace->opts.target.tid);

	if (symbol__init(&session->header.env) < 0)
		goto out;

	trace->host = &session->machines.host;

	err = perf_session__set_tracepoints_handlers(session, handlers);
	if (err)
		goto out;

	evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
						     "raw_syscalls:sys_enter");
	/* older kernels have syscalls tp versus raw_syscalls */
	if (evsel == NULL)
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
							     "syscalls:sys_enter");

	if (evsel &&
	    (perf_evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 ||
	    perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
		pr_err("Error during initialize raw_syscalls:sys_enter event\n");
		goto out;
	}

	evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
						     "raw_syscalls:sys_exit");
	if (evsel == NULL)
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
							     "syscalls:sys_exit");
	if (evsel &&
	    (perf_evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 ||
	    perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
		pr_err("Error during initialize raw_syscalls:sys_exit event\n");
		goto out;
	}

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_SOFTWARE &&
		    (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
		     evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
		     evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS))
			evsel->handler = trace__pgfault;
	}

	setup_pager();

	err = perf_session__process_events(session);
	if (err)
		pr_err("Failed to process events, error %d", err);

	else if (trace->summary)
		trace__fprintf_thread_summary(trace, trace->output);

out:
	perf_session__delete(session);

	return err;
}
static size_t trace__fprintf_threads_header(FILE *fp)
{
	size_t printed;

	printed  = fprintf(fp, "\n Summary of events:\n\n");

	return printed;
}
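
/*
 * Re-sorts the per-thread syscall stats intlist by total time spent
 * (stats->n * avg), in descending order, for thread__dump_stats().
 */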
DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
	struct stats 	*stats;
	double		msecs;
	int		syscall;
)
{
	struct int_node *source = rb_entry(nd, struct int_node, rb_node);
	struct stats *stats = source->priv;

	entry->syscall = source->i;
	entry->stats   = stats;
	entry->msecs   = stats ? (u64)stats->n * (avg_stats(stats) / NSEC_PER_MSEC) : 0;
}
static size_t thread__dump_stats(struct thread_trace *ttrace,
				 struct trace *trace, FILE *fp)
{
	size_t printed = 0;
	struct syscall *sc;
	struct rb_node *nd;
	DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);

	if (syscall_stats == NULL)
		return 0;

	printed += fprintf(fp, "\n");

	printed += fprintf(fp, "   syscall            calls    total       min       avg       max      stddev\n");
	printed += fprintf(fp, "                               (msec)    (msec)    (msec)    (msec)        (%%)\n");
	printed += fprintf(fp, "   --------------- -------- --------- --------- --------- ---------     ------\n");

	resort_rb__for_each_entry(nd, syscall_stats) {
		struct stats *stats = syscall_stats_entry->stats;
		if (stats) {
			double min = (double)(stats->min) / NSEC_PER_MSEC;
			double max = (double)(stats->max) / NSEC_PER_MSEC;
			double avg = avg_stats(stats);
			double pct;
			u64 n = (u64) stats->n;

			pct = avg ? 100.0 * stddev_stats(stats)/avg : 0.0;
			avg /= NSEC_PER_MSEC;

			sc = &trace->syscalls.table[syscall_stats_entry->syscall];
			printed += fprintf(fp, "   %-15s", sc->name);
			printed += fprintf(fp, " %8" PRIu64 " %9.3f %9.3f %9.3f",
					   n, syscall_stats_entry->msecs, min, avg);
			printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
		}
	}

	resort_rb__delete(syscall_stats);
	printed += fprintf(fp, "\n\n");

	return printed;
}
static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
{
	size_t printed = 0;
	struct thread_trace *ttrace = thread__priv(thread);
	double ratio;

	if (ttrace == NULL)
		return 0;

	ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;

	printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
	printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
	printed += fprintf(fp, "%.1f%%", ratio);
	if (ttrace->pfmaj)
		printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
	if (ttrace->pfmin)
		printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
	if (trace->sched)
		printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
	else if (fputc('\n', fp) != EOF)
		++printed;

	printed += thread__dump_stats(ttrace, trace, fp);

	return printed;
}
static unsigned long thread__nr_events(struct thread_trace *ttrace)
{
	return ttrace ? ttrace->nr_events : 0;
}

DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_events(b->thread->priv)),
	struct thread *thread;
)
{
	entry->thread = rb_entry(nd, struct thread, rb_node);
}
static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
{
	size_t printed = trace__fprintf_threads_header(fp);
	struct rb_node *nd;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);

		if (threads == NULL) {
			fprintf(fp, "%s", "Error sorting output by nr_events!\n");
			return 0;
		}

		resort_rb__for_each_entry(nd, threads)
			printed += trace__fprintf_thread(fp, threads_entry->thread, trace);

		resort_rb__delete(threads);
	}
	return printed;
}
static int trace__set_duration(const struct option *opt, const char *str,
			       int unset __maybe_unused)
{
	struct trace *trace = opt->value;

	trace->duration_filter = atof(str);
	return 0;
}
static int trace__set_filter_pids_from_option(const struct option *opt, const char *str,
					      int unset __maybe_unused)
{
	int ret = -1;
	size_t i;
	struct trace *trace = opt->value;
	/*
	 * FIXME: introduce a intarray class, plain parse csv and create a
	 * { int nr, int entries[] } struct...
	 */
	struct intlist *list = intlist__new(str);

	if (list == NULL)
		return -1;

	i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
	trace->filter_pids.entries = calloc(i, sizeof(pid_t));

	if (trace->filter_pids.entries == NULL)
		goto out;

	trace->filter_pids.entries[0] = getpid();

	for (i = 1; i < trace->filter_pids.nr; ++i)
		trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;

	intlist__delete(list);
	ret = 0;
out:
	return ret;
}
static int trace__open_output(struct trace *trace, const char *filename)
{
	struct stat st;

	if (!stat(filename, &st) && st.st_size) {
		char oldname[PATH_MAX];

		scnprintf(oldname, sizeof(oldname), "%s.old", filename);
		unlink(oldname);
		rename(filename, oldname);
	}

	trace->output = fopen(filename, "w");

	return trace->output == NULL ? -errno : 0;
}
static int parse_pagefaults(const struct option *opt, const char *str,
			    int unset __maybe_unused)
{
	int *trace_pgfaults = opt->value;

	if (strcmp(str, "all") == 0)
		*trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
	else if (strcmp(str, "maj") == 0)
		*trace_pgfaults |= TRACE_PFMAJ;
	else if (strcmp(str, "min") == 0)
		*trace_pgfaults |= TRACE_PFMIN;
	else
		return -1;

	return 0;
}
static void evlist__set_evsel_handler(struct evlist *evlist, void *handler)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel->handler = handler;
}
static int evlist__set_syscall_tp_fields(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->priv || !evsel->tp_format)
			continue;

		if (strcmp(evsel->tp_format->system, "syscalls"))
			continue;

		if (perf_evsel__init_syscall_tp(evsel))
			return -1;

		if (!strncmp(evsel->tp_format->name, "sys_enter_", 10)) {
			struct syscall_tp *sc = evsel->priv;

			if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)))
				return -1;
		} else if (!strncmp(evsel->tp_format->name, "sys_exit_", 9)) {
			struct syscall_tp *sc = evsel->priv;

			if (__tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap))
				return -1;
		}
	}

	return 0;
}
/*
 * XXX: Hackish, just splitting the combined -e+--event (syscalls
 * (raw_syscalls:{sys_{enter,exit}}) + events (tracepoints, HW, SW, etc) to use
 * existing facilities unchanged (trace->ev_qualifier + parse_options()).
 *
 * It'd be better to introduce a parse_options() variant that would return a
 * list with the terms it didn't match to an event...
 */
static int trace__parse_events_option(const struct option *opt, const char *str,
				      int unset __maybe_unused)
{
	struct trace *trace = (struct trace *)opt->value;
	const char *s = str;
	char *sep = NULL, *lists[2] = { NULL, NULL, };
	int len = strlen(str) + 1, err = -1, list, idx;
	char *strace_groups_dir = system_path(STRACE_GROUPS_DIR);
	char group_name[PATH_MAX];
	struct syscall_fmt *fmt;

	if (strace_groups_dir == NULL)
		return -1;

	if (*s == '!') {
		++s;
		trace->not_ev_qualifier = true;
	}

	while (1) {
		if ((sep = strchr(s, ',')) != NULL)
			*sep = '\0';

		list = 0;
		if (syscalltbl__id(trace->sctbl, s) >= 0 ||
		    syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
			list = 1;
			goto do_concat;
		}

		fmt = syscall_fmt__find_by_alias(s);
		if (fmt != NULL) {
			list = 1;
			s = fmt->name;
		} else {
			path__join(group_name, sizeof(group_name), strace_groups_dir, s);
			if (access(group_name, R_OK) == 0)
				list = 1;
		}
do_concat:
		if (lists[list]) {
			sprintf(lists[list] + strlen(lists[list]), ",%s", s);
		} else {
			lists[list] = malloc(len);
			if (lists[list] == NULL)
				goto out;
			strcpy(lists[list], s);
		}

		if (!sep)
			break;

		*sep = ',';
		s = sep + 1;
	}

	if (lists[1] != NULL) {
		struct strlist_config slist_config = {
			.dirname = strace_groups_dir,
		};

		trace->ev_qualifier = strlist__new(lists[1], &slist_config);
		if (trace->ev_qualifier == NULL) {
			fputs("Not enough memory to parse event qualifier", trace->output);
			goto out;
		}

		if (trace__validate_ev_qualifier(trace))
			goto out;
		trace->trace_syscalls = true;
	}

	err = 0;

	if (lists[0]) {
		struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
					       "event selector. use 'perf list' to list available events",
					       parse_events_option);
		err = parse_events_option(&o, lists[0], 0);
	}
out:
	if (sep)
		*sep = ',';

	return err;
}
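
/*
 * So that, e.g.:
 *
 *    perf trace -e open*,nanosleep,sched:sched_switch
 *
 * ends up with "open*,nanosleep" in trace->ev_qualifier (lists[1]) and
 * "sched:sched_switch" handed over to parse_events_option() (lists[0]).
 */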
static int trace__parse_cgroups(const struct option *opt, const char *str, int unset)
{
	struct trace *trace = opt->value;

	if (!list_empty(&trace->evlist->entries))
		return parse_cgroups(opt, str, unset);

	trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);

	return 0;
}
static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name)
{
	if (trace->bpf_obj == NULL)
		return NULL;

	return bpf_object__find_map_by_name(trace->bpf_obj, name);
}

static void trace__set_bpf_map_filtered_pids(struct trace *trace)
{
	trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered");
}

static void trace__set_bpf_map_syscalls(struct trace *trace)
{
	trace->syscalls.map = trace__find_bpf_map_by_name(trace, "syscalls");
	trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter");
	trace->syscalls.prog_array.sys_exit  = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit");
}
static int trace__config(const char *var, const char *value, void *arg)
{
	struct trace *trace = arg;
	int err = 0;

	if (!strcmp(var, "trace.add_events")) {
		struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
					       "event selector. use 'perf list' to list available events",
					       parse_events_option);
		/*
		 * We can't propagate parse_events_option() return, as it is 1
		 * for failure while perf_config() expects -1.
		 */
		if (parse_events_option(&o, value, 0))
			err = -1;
	} else if (!strcmp(var, "trace.show_timestamp")) {
		trace->show_tstamp = perf_config_bool(var, value);
	} else if (!strcmp(var, "trace.show_duration")) {
		trace->show_duration = perf_config_bool(var, value);
	} else if (!strcmp(var, "trace.show_arg_names")) {
		trace->show_arg_names = perf_config_bool(var, value);
		if (!trace->show_arg_names)
			trace->show_zeros = true;
	} else if (!strcmp(var, "trace.show_zeros")) {
		bool new_show_zeros = perf_config_bool(var, value);
		if (!trace->show_arg_names && !new_show_zeros) {
			pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n");
			goto out;
		}
		trace->show_zeros = new_show_zeros;
	} else if (!strcmp(var, "trace.show_prefix")) {
		trace->show_string_prefix = perf_config_bool(var, value);
	} else if (!strcmp(var, "trace.no_inherit")) {
		trace->opts.no_inherit = perf_config_bool(var, value);
	} else if (!strcmp(var, "trace.args_alignment")) {
		int args_alignment = 0;
		if (perf_config_int(&args_alignment, var, value) == 0)
			trace->args_alignment = args_alignment;
	}
out:
	return err;
}
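
/*
 * E.g., in ~/.perfconfig:
 *
 *	[trace]
 *		show_timestamp = no
 *		args_alignment = 40
 *		add_events = sched:sched_switch
 */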
4054 int cmd_trace(int argc
, const char **argv
)
4056 const char *trace_usage
[] = {
4057 "perf trace [<options>] [<command>]",
4058 "perf trace [<options>] -- <command> [<options>]",
4059 "perf trace record [<options>] [<command>]",
4060 "perf trace record [<options>] -- <command> [<options>]",
4063 struct trace trace
= {
4069 .user_freq
= UINT_MAX
,
4070 .user_interval
= ULLONG_MAX
,
4071 .no_buffering
= true,
4072 .mmap_pages
= UINT_MAX
,
4076 .show_tstamp
= true,
4077 .show_duration
= true,
4078 .show_arg_names
= true,
4079 .args_alignment
= 70,
4080 .trace_syscalls
= false,
4081 .kernel_syscallchains
= false,
4082 .max_stack
= UINT_MAX
,
4083 .max_events
= ULONG_MAX
,
4085 const char *map_dump_str
= NULL
;
4086 const char *output_name
= NULL
;
	const struct option trace_options[] = {
	OPT_CALLBACK('e', "event", &trace, "event",
		     "event/syscall selector. use 'perf list' to list available events",
		     trace__parse_events_option),
	OPT_BOOLEAN(0, "comm", &trace.show_comm,
		    "show the thread COMM next to its id"),
	OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
	OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
		     trace__parse_events_option),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
	OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
		    "trace events on existing process id"),
	OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
		    "trace events on existing thread id"),
	OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
		     "pids to filter (by the kernel)", trace__set_filter_pids_from_option),
	OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
		   "user to profile"),
	OPT_CALLBACK(0, "duration", &trace, "float",
		     "show only events with duration > N.M ms",
		     trace__set_duration),
#ifdef HAVE_LIBBPF_SUPPORT
	OPT_STRING(0, "map-dump", &map_dump_str, "BPF map", "BPF map to periodically dump"),
#endif
	OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
	OPT_INCR('v', "verbose", &verbose, "be more verbose"),
	OPT_BOOLEAN('T', "time", &trace.full_time,
		    "Show full timestamp, not time relative to first start"),
	OPT_BOOLEAN(0, "failure", &trace.failure_only,
		    "Show only syscalls that failed"),
	OPT_BOOLEAN('s', "summary", &trace.summary_only,
		    "Show only syscall summary with statistics"),
	OPT_BOOLEAN('S', "with-summary", &trace.summary,
		    "Show all syscalls and summary with statistics"),
	OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
			     "Trace pagefaults", parse_pagefaults, "maj"),
	OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
	OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
	OPT_CALLBACK(0, "call-graph", &trace.opts,
		     "record_mode[,record_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
		    "Show the kernel callchains on the syscall exit path"),
	OPT_ULONG(0, "max-events", &trace.max_events,
		  "Set the maximum number of events to print, exit after that is reached. "),
	OPT_UINTEGER(0, "min-stack", &trace.min_stack,
		     "Set the minimum stack depth when parsing the callchain, "
		     "anything below the specified depth will be ignored."),
	OPT_UINTEGER(0, "max-stack", &trace.max_stack,
		     "Set the maximum stack depth when parsing the callchain, "
		     "anything beyond the specified depth will be ignored. "
		     "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
	OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
		    "Sort batch of events before processing, use if getting out of order events"),
	OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
		    "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"),
	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
		     "per thread proc mmap processing timeout in ms"),
	OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
		     trace__parse_cgroups),
	OPT_UINTEGER('D', "delay", &trace.opts.initial_delay,
		     "ms to wait before starting measurement after program "
		     "start"),
	OPT_END()
	};
	bool __maybe_unused max_stack_user_set = true;
	bool mmap_pages_user_set = true;
	struct evsel *evsel;
	const char * const trace_subcommands[] = { "record", NULL };
	int err = -1;
	char bf[BUFSIZ];
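	/* Dump a backtrace if the tool itself crashes, to help debug perf. */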
	signal(SIGSEGV, sighandler_dump_stack);
	signal(SIGFPE, sighandler_dump_stack);
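	/* The evlist collects the events to trace; sctbl maps syscall ids to names. */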
	trace.evlist = perf_evlist__new();
	trace.sctbl = syscalltbl__new();

	if (trace.evlist == NULL || trace.sctbl == NULL) {
		pr_err("Not enough memory to run!\n");
		err = -ENOMEM;
		goto out;
	}
	/*
	 * Parsing .perfconfig may entail creating a BPF event, that may need
	 * to create BPF maps, so bump RLIM_MEMLOCK as the default 64K setting
	 * is too small. This affects just this process, not touching the
	 * global setting. If it fails we'll get something in 'perf trace -v'
	 * to help diagnose the problem.
	 */
	rlimit__bump_memlock();
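	/* Apply trace.* settings from the user's .perfconfig before the command line. */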
	err = perf_config(trace__config, &trace);
	if (err)
		goto out;

	argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
					trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
	if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
		usage_with_options_msg(trace_usage, trace_options,
				       "cgroup monitoring only available in system-wide mode");
	}
	evsel = bpf__setup_output_event(trace.evlist, "__augmented_syscalls__");
	if (IS_ERR(evsel)) {
		bpf__strerror_setup_output_event(trace.evlist, PTR_ERR(evsel), bf, sizeof(bf));
		pr_err("ERROR: Setup trace syscalls enter failed: %s\n", bf);
		goto out;
	}

	if (evsel) {
		trace.syscalls.events.augmented = evsel;

		evsel = perf_evlist__find_tracepoint_by_name(trace.evlist, "raw_syscalls:sys_enter");
		if (evsel == NULL) {
			pr_err("ERROR: raw_syscalls:sys_enter not found in the augmented BPF object\n");
			goto out;
		}

		if (evsel->bpf_obj == NULL) {
			pr_err("ERROR: raw_syscalls:sys_enter not associated to a BPF object\n");
			goto out;
		}

		trace.bpf_obj = evsel->bpf_obj;

		trace__set_bpf_map_filtered_pids(&trace);
		trace__set_bpf_map_syscalls(&trace);
		trace.syscalls.unaugmented_prog = trace__find_bpf_program_by_title(&trace, "!raw_syscalls:unaugmented");
	}
	err = bpf__setup_stdout(trace.evlist);
	if (err) {
		bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
		pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
		goto out;
	}

	err = -1;
	if (map_dump_str) {
		trace.dump.map = trace__find_bpf_map_by_name(&trace, map_dump_str);
		if (trace.dump.map == NULL) {
			pr_err("ERROR: BPF map \"%s\" not found\n", map_dump_str);
			goto out;
		}
	}
	if (trace.trace_pgfaults) {
		trace.opts.sample_address = true;
		trace.opts.sample_time = true;
	}
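	/* UINT_MAX is the "unset" sentinel left by the option defaults. */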
	if (trace.opts.mmap_pages == UINT_MAX)
		mmap_pages_user_set = false;
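	/* Same sentinel for --max-stack: pick a default depth when the user didn't. */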
	if (trace.max_stack == UINT_MAX) {
		trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
		max_stack_user_set = false;
	}
#ifdef HAVE_DWARF_UNWIND_SUPPORT
	if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
		record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
	}
#endif
	if (callchain_param.enabled) {
		if (!mmap_pages_user_set && geteuid() == 0)
			trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;

		symbol_conf.use_callchain = true;
	}
	if (trace.evlist->nr_entries > 0) {
		evlist__set_evsel_handler(trace.evlist, trace__event_handler);
		if (evlist__set_syscall_tp_fields(trace.evlist)) {
			perror("failed to set syscalls:* tracepoint fields");
			goto out;
		}
	}
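	/* --sort-events: queue and reorder events by timestamp before delivering them. */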
	if (trace.sort_events) {
		ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
		ordered_events__set_copy_on_queue(&trace.oe.data, true);
	}
	/*
	 * If we are augmenting syscalls, then combine what we put in the
	 * __augmented_syscalls__ BPF map with what is in the
	 * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF,
	 * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit.
	 *
	 * We'll switch to look at two BPF maps, one for sys_enter and the
	 * other for sys_exit when we start augmenting the sys_exit paths with
	 * buffers that are being copied from kernel to userspace, think 'read'
	 * syscall.
	 */
	if (trace.syscalls.events.augmented) {
		evlist__for_each_entry(trace.evlist, evsel) {
			bool raw_syscalls_sys_exit = strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_exit") == 0;

			if (raw_syscalls_sys_exit) {
				trace.raw_augmented_syscalls = true;
				goto init_augmented_syscall_tp;
			}

			if (trace.syscalls.events.augmented->priv == NULL &&
			    strstr(perf_evsel__name(evsel), "syscalls:sys_enter")) {
				struct evsel *augmented = trace.syscalls.events.augmented;
				if (perf_evsel__init_augmented_syscall_tp(augmented, evsel) ||
				    perf_evsel__init_augmented_syscall_tp_args(augmented))
					goto out;
				/*
				 * Augmented is __augmented_syscalls__ BPF_OUTPUT event
				 * Above we made sure we can get from the payload the tp fields
				 * that we get from syscalls:sys_enter tracefs format file.
				 */
				augmented->handler = trace__sys_enter;
				/*
				 * Now we do the same for the *syscalls:sys_enter event so that
				 * if we handle it directly, i.e. if the BPF prog returns 0 so
				 * as not to filter it, then we'll handle it just like we would
				 * for the BPF_OUTPUT one:
				 */
				if (perf_evsel__init_augmented_syscall_tp(evsel, evsel) ||
				    perf_evsel__init_augmented_syscall_tp_args(evsel))
					goto out;
				evsel->handler = trace__sys_enter;
			}

			if (strstarts(perf_evsel__name(evsel), "syscalls:sys_exit_")) {
				struct syscall_tp *sc;
init_augmented_syscall_tp:
				if (perf_evsel__init_augmented_syscall_tp(evsel, evsel))
					goto out;
				sc = evsel->priv;
				/*
				 * For now with BPF raw_augmented we hook into
				 * raw_syscalls:sys_enter and there we get all
				 * 6 syscall args plus the tracepoint common
				 * fields and the syscall_nr (another long).
				 * So we check if that is the case and if so
				 * don't look after the sc->args_size but
				 * always after the full raw_syscalls:sys_enter
				 * payload, which is fixed.
				 *
				 * We'll revisit this later to pass
				 * s->args_size to the BPF augmenter (now
				 * tools/perf/examples/bpf/augmented_raw_syscalls.c,
				 * so that it copies only what we need for each
				 * syscall, like what happens when we use
				 * syscalls:sys_enter_NAME, so that we reduce
				 * the kernel/userspace traffic to just what is
				 * needed for each syscall.
				 */
				if (trace.raw_augmented_syscalls)
					trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
				perf_evsel__init_augmented_syscall_tp_ret(evsel);
				evsel->handler = trace__sys_exit;
			}
		}
	}
	if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
		return trace__record(&trace, argc-1, &argv[1]);
	/* summary_only implies summary option, but don't overwrite summary if set */
	if (trace.summary_only)
		trace.summary = trace.summary_only;
	if (!trace.trace_syscalls && !trace.trace_pgfaults &&
	    trace.evlist->nr_entries == 0 /* Was --events used? */) {
		trace.trace_syscalls = true;
	}
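	/* -o/--output: redirect the tool's output to the named file instead of the terminal. */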
	if (output_name != NULL) {
		err = trace__open_output(&trace, output_name);
		if (err < 0) {
			perror("failed to create output file");
			goto out;
		}
	}
	err = target__validate(&trace.opts.target);
	if (err) {
		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}
	err = target__parse_uid(&trace.opts.target);
	if (err) {
		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}
	if (!argc && target__none(&trace.opts.target))
		trace.opts.target.system_wide = true;
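	/* With -i/--input, replay a recorded perf.data file; otherwise trace live. */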
	if (input_name)
		err = trace__replay(&trace);
	else
		err = trace__run(&trace, argc, argv);
out_close:
	if (output_name != NULL)
		fclose(trace.output);
out:
	return err;
}