/*
 * Builtin 'trace' command:
 *
 * Display a continuously updated trace of any workload, CPU, specific PID,
 * system wide, etc.  Default format is loosely strace like, but any other
 * event may be specified using --event.
 *
 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Initially based on the 'trace' prototype by Thomas Gleixner:
 *
 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
 */
#include "util/record.h"
#include <traceevent/event-parse.h>
#include <api/fs/tracing_path.h>
#include "util/bpf_map.h"
#include "util/rlimit.h"
#include "util/cgroup.h"
#include "util/color.h"
#include "util/config.h"
#include "util/debug.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evswitch.h"
#include <subcmd/exec-cmd.h>
#include "util/machine.h"
#include "util/symbol.h"
#include "util/path.h"
#include "util/session.h"
#include "util/thread.h"
#include <subcmd/parse-options.h>
#include "util/strlist.h"
#include "util/intlist.h"
#include "util/thread_map.h"
#include "util/stat.h"
#include "util/util.h"
#include "trace/beauty/beauty.h"
#include "trace-event.h"
#include "util/parse-events.h"
#include "util/bpf-loader.h"
#include "callchain.h"
#include "print_binary.h"
#include "syscalltbl.h"
#include "rb_resort.h"
#include <linux/err.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <sys/sysmacros.h>
#include <linux/ctype.h>
#ifndef O_CLOEXEC
# define O_CLOEXEC		02000000
#endif

#ifndef F_LINUX_SPECIFIC_BASE
# define F_LINUX_SPECIFIC_BASE	1024
#endif
struct trace {
	struct perf_tool	tool;
	struct syscalltbl	*sctbl;
	struct {
		struct syscall	*table;
		struct { // per syscall BPF_MAP_TYPE_PROG_ARRAY
			struct bpf_map	*sys_enter,
					*sys_exit;
		}		prog_array;
		struct {
			struct evsel	*sys_enter,
					*sys_exit;
		}		events;
		struct bpf_program *unaugmented_prog;
	} syscalls;
	struct record_opts	opts;
	struct evlist		*evlist;
	struct machine		*host;
	struct thread		*current;
	struct bpf_object	*bpf_obj;
	struct cgroup		*cgroup;
	unsigned long		nr_events;
	unsigned long		nr_events_printed;
	unsigned long		max_events;
	struct evswitch		evswitch;
	struct strlist		*ev_qualifier;
	double			duration_filter;
	unsigned int		max_stack;
	unsigned int		min_stack;
	int			raw_augmented_syscalls_args_size;
	bool			raw_augmented_syscalls;
	bool			fd_path_disabled;
	bool			not_ev_qualifier;
	bool			multiple_threads;
	bool			show_tool_stats;
	bool			kernel_syscallchains;
	bool			show_string_prefix;
	struct {
		struct ordered_events	data;
	} oe;
};
struct tp_field {
	int offset;
	int size;
	u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
	void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
};
#define TP_UINT_FIELD(bits) \
static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return value;  \
}

TP_UINT_FIELD(8);
TP_UINT_FIELD(16);
TP_UINT_FIELD(32);
TP_UINT_FIELD(64);

#define TP_UINT_FIELD__SWAPPED(bits) \
static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return bswap_##bits(value);\
}

TP_UINT_FIELD__SWAPPED(16);
TP_UINT_FIELD__SWAPPED(32);
TP_UINT_FIELD__SWAPPED(64);
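/*
 * For illustration, TP_UINT_FIELD(32) expands to:
 *
 *	static u64 tp_field__u32(struct tp_field *field, struct perf_sample *sample)
 *	{
 *		u32 value;
 *		memcpy(&value, sample->raw_data + field->offset, sizeof(value));
 *		return value;
 *	}
 *
 * i.e. a fixed-width tracepoint field is memcpy'd out of the raw sample
 * buffer (which may be unaligned), and the __SWAPPED variants additionally
 * byte-swap the value when the sample was recorded with the opposite
 * endianness.
 */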
static int __tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap)
{
	field->offset = offset;

	switch (size) {
	case 1:
		field->integer = tp_field__u8;
		break;
	case 2:
		field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
		break;
	case 4:
		field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
		break;
	case 8:
		field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
		break;
	default:
		return -1;
	}

	return 0;
}
static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap)
{
	return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap);
}

static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
{
	return sample->raw_data + field->offset;
}
static int __tp_field__init_ptr(struct tp_field *field, int offset)
{
	field->offset = offset;
	field->pointer = tp_field__ptr;
	return 0;
}

static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field)
{
	return __tp_field__init_ptr(field, format_field->offset);
}

struct syscall_tp {
	struct tp_field id;
	struct tp_field args, ret;
};
static int perf_evsel__init_tp_uint_field(struct evsel *evsel,
					  struct tp_field *field,
					  const char *name)
{
	struct tep_format_field *format_field = perf_evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_uint(field, format_field, evsel->needs_swap);
}

#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
	({ struct syscall_tp *sc = evsel->priv;\
	   perf_evsel__init_tp_uint_field(evsel, &sc->name, #name); })
static int perf_evsel__init_tp_ptr_field(struct evsel *evsel,
					 struct tp_field *field,
					 const char *name)
{
	struct tep_format_field *format_field = perf_evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_ptr(field, format_field);
}

#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
	({ struct syscall_tp *sc = evsel->priv;\
	   perf_evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
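/*
 * Usage example: perf_evsel__init_sc_tp_uint_field(evsel, id) resolves the
 * "id" field of this evsel's tracepoint format into sc->id of its private
 * struct syscall_tp, so that later per-sample accesses can be done via
 * perf_evsel__sc_tp_uint(evsel, id, sample) without re-parsing the format.
 */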
static void evsel__delete_priv(struct evsel *evsel)
{
	zfree(&evsel->priv);
	evsel__delete(evsel);
}

static int perf_evsel__init_syscall_tp(struct evsel *evsel)
{
	struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp));

	if (evsel->priv != NULL) {
		if (perf_evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
		    perf_evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
			return -ENOENT;
		return 0;
	}

	return -ENOMEM;
}
static int perf_evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
{
	struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp));

	if (evsel->priv != NULL) {
		struct tep_format_field *syscall_id = perf_evsel__field(tp, "id");
		if (syscall_id == NULL)
			syscall_id = perf_evsel__field(tp, "__syscall_nr");
		if (syscall_id == NULL)
			return -EINVAL;
		if (__tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
			return -EINVAL;

		return 0;
	}

	return -ENOMEM;
}
static int perf_evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
{
	struct syscall_tp *sc = evsel->priv;

	return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
}

static int perf_evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
{
	struct syscall_tp *sc = evsel->priv;

	return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
}
static int perf_evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
{
	evsel->priv = malloc(sizeof(struct syscall_tp));
	if (evsel->priv != NULL) {
		if (perf_evsel__init_sc_tp_uint_field(evsel, id))
			return -ENOENT;

		evsel->handler = handler;
		return 0;
	}

	return -ENOMEM;
}
static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
{
	struct evsel *evsel = perf_evsel__newtp("raw_syscalls", direction);

	/* older kernel (e.g., RHEL6) use syscalls:{enter,exit} */
	if (IS_ERR(evsel))
		evsel = perf_evsel__newtp("syscalls", direction);

	if (IS_ERR(evsel))
		return NULL;

	if (perf_evsel__init_raw_syscall_tp(evsel, handler))
		goto out_delete;

	return evsel;

out_delete:
	evsel__delete_priv(evsel);
	return NULL;
}
#define perf_evsel__sc_tp_uint(evsel, name, sample) \
	({ struct syscall_tp *fields = evsel->priv; \
	   fields->name.integer(&fields->name, sample); })

#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
	({ struct syscall_tp *fields = evsel->priv; \
	   fields->name.pointer(&fields->name, sample); })
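/*
 * Example of how the handlers below use these accessors on each sample:
 *
 *	int id	   = perf_evsel__sc_tp_uint(evsel, id, sample);
 *	void *args = perf_evsel__sc_tp_ptr(evsel, args, sample);
 *
 * The struct syscall_tp hanging off evsel->priv maps the field names to the
 * offsets/accessors that were resolved once at init time.
 */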
size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
	int idx = val - sa->offset;

	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
		size_t printed = scnprintf(bf, size, intfmt, val);
		if (show_prefix)
			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
		return printed;
	}

	return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
}
static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
						const char *intfmt,
						struct syscall_arg *arg)
{
	return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val);
}

static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
}

#define SCA_STRARRAY syscall_arg__scnprintf_strarray
size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg)
{
	return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val);
}
size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
	size_t printed;
	int i;

	for (i = 0; i < sas->nr_entries; ++i) {
		struct strarray *sa = sas->entries[i];
		int idx = val - sa->offset;

		if (idx >= 0 && idx < sa->nr_entries) {
			if (sa->entries[idx] == NULL)
				break;
			return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
		}
	}

	printed = scnprintf(bf, size, intfmt, val);
	if (show_prefix)
		printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix);
	return printed;
}
size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size,
					struct syscall_arg *arg)
{
	return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val);
}
#ifndef AT_FDCWD
#define AT_FDCWD	-100
#endif

static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
					   struct syscall_arg *arg)
{
	int fd = arg->val;
	const char *prefix = "AT_FD";

	if (fd == AT_FDCWD)
		return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD");

	return syscall_arg__scnprintf_fd(bf, size, arg);
}

#define SCA_FDAT syscall_arg__scnprintf_fd_at
static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg);

#define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd
size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%#lx", arg->val);
}

size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg)
{
	if (arg->val == 0)
		return scnprintf(bf, size, "NULL");
	return syscall_arg__scnprintf_hex(bf, size, arg);
}

size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%d", arg->val);
}

size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%ld", arg->val);
}
static const char *bpf_cmd[] = {
	"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
	"MAP_GET_NEXT_KEY", "PROG_LOAD",
};
static DEFINE_STRARRAY(bpf_cmd, "BPF_");

static const char *fsmount_flags[] = {
	[1] = "CLOEXEC",
};
static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_");

#include "trace/beauty/generated/fsconfig_arrays.c"

static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_");

static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);

static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
static DEFINE_STRARRAY(itimers, "ITIMER_");

static const char *keyctl_options[] = {
	"GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
	"SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
	"INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
	"ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
	"INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
};
static DEFINE_STRARRAY(keyctl_options, "KEYCTL_");

static const char *whences[] = { "SET", "CUR", "END",
#ifndef SEEK_DATA
#define SEEK_DATA 3
#endif
"DATA",
#ifndef SEEK_HOLE
#define SEEK_HOLE 4
#endif
"HOLE", };
static DEFINE_STRARRAY(whences, "SEEK_");

static const char *fcntl_cmds[] = {
	"DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
	"SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64",
	"SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX",
	"GETOWNER_UIDS",
};
static DEFINE_STRARRAY(fcntl_cmds, "F_");

static const char *fcntl_linux_specific_cmds[] = {
	"SETLEASE", "GETLEASE", "NOTIFY", [5] =	"CANCELLK", "DUPFD_CLOEXEC",
	"SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
	"GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
};
static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE);

static struct strarray *fcntl_cmds_arrays[] = {
	&strarray__fcntl_cmds,
	&strarray__fcntl_linux_specific_cmds,
};

static DEFINE_STRARRAYS(fcntl_cmds_arrays);

static const char *rlimit_resources[] = {
	"CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
	"MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
	"RTTIME",
};
static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_");

static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
static DEFINE_STRARRAY(sighow, "SIG_");

static const char *clockid[] = {
	"REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
	"MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
	"REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
};
static DEFINE_STRARRAY(clockid, "CLOCK_");
static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
						 struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *suffix = "_OK";
	size_t printed = 0;
	int mode = arg->val;

	if (mode == F_OK) /* 0 */
		return scnprintf(bf, size, "F%s", show_prefix ? suffix : "");
#define	P_MODE(n) \
	if (mode & n##_OK) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \
		mode &= ~n##_OK; \
	}

	P_MODE(R);
	P_MODE(W);
	P_MODE(X);
#undef P_MODE

	if (mode)
		printed += scnprintf(bf + printed, size - printed, "|%#x", mode);

	return printed;
}

#define SCA_ACCMODE syscall_arg__scnprintf_access_mode
static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
					      struct syscall_arg *arg);

#define SCA_FILENAME syscall_arg__scnprintf_filename
static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
						struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *prefix = "O_";
	int printed = 0, flags = arg->val;

#define	P_FLAG(n) \
	if (flags & O_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
		flags &= ~O_##n; \
	}

	P_FLAG(CLOEXEC);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
#ifndef GRND_NONBLOCK
#define GRND_NONBLOCK	0x0001
#endif
#ifndef GRND_RANDOM
#define GRND_RANDOM	0x0002
#endif

static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
						     struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *prefix = "GRND_";
	int printed = 0, flags = arg->val;

#define	P_FLAG(n) \
	if (flags & GRND_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
		flags &= ~GRND_##n; \
	}

	P_FLAG(RANDOM);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags
#define STRARRAY(name, array) \
	  { .scnprintf	= SCA_STRARRAY, \
	    .parm	= &strarray__##array, }

#define STRARRAY_FLAGS(name, array) \
	  { .scnprintf	= SCA_STRARRAY_FLAGS, \
	    .parm	= &strarray__##array, }

#include "trace/beauty/arch_errno_names.c"
#include "trace/beauty/eventfd.c"
#include "trace/beauty/futex_op.c"
#include "trace/beauty/futex_val3.c"
#include "trace/beauty/mmap.c"
#include "trace/beauty/mode_t.c"
#include "trace/beauty/msg_flags.c"
#include "trace/beauty/open_flags.c"
#include "trace/beauty/perf_event_open.c"
#include "trace/beauty/pid.c"
#include "trace/beauty/sched_policy.c"
#include "trace/beauty/seccomp.c"
#include "trace/beauty/signum.c"
#include "trace/beauty/socket_type.c"
#include "trace/beauty/waitid_options.c"
struct syscall_arg_fmt {
	size_t	   (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
	void	   *parm;
	const char *name;
	bool	   show_zero;
};

static struct syscall_fmt {
	const char *name;
	const char *alias;
	struct {
		const char *sys_enter,
			   *sys_exit;
	}	   bpf_prog_name;
	struct syscall_arg_fmt arg[6];
	u8	   nr_args;
	bool	   errpid;
	bool	   timeout;
	bool	   hexret;
} syscall_fmts[] = {
	{ .name	    = "access",
	  .arg = { [1] = { .scnprintf = SCA_ACCMODE,  /* mode */ }, }, },
	{ .name	    = "arch_prctl",
	  .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ },
		   [1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, },
	{ .name	    = "bind",
	  .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
		   [1] = { .scnprintf = SCA_SOCKADDR, /* umyaddr */ },
		   [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
	{ .name	    = "bpf",
	  .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, },
	{ .name	    = "brk",	    .hexret = true,
	  .arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, },
	{ .name     = "clock_gettime",
	  .arg = { [0] = STRARRAY(clk_id, clockid), }, },
	{ .name	    = "clone",	    .errpid = true, .nr_args = 5,
	  .arg = { [0] = { .name = "flags",	    .scnprintf = SCA_CLONE_FLAGS, },
		   [1] = { .name = "child_stack",   .scnprintf = SCA_HEX, },
		   [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, },
		   [3] = { .name = "child_tidptr",  .scnprintf = SCA_HEX, },
		   [4] = { .name = "tls",	    .scnprintf = SCA_HEX, }, }, },
	{ .name	    = "close",
	  .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, },
	{ .name	    = "connect",
	  .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
		   [1] = { .scnprintf = SCA_SOCKADDR, /* servaddr */ },
		   [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
	{ .name	    = "epoll_ctl",
	  .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, },
	{ .name	    = "eventfd2",
	  .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, },
	{ .name	    = "fchmodat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "fchownat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "fcntl",
	  .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD,  /* cmd */
			   .parm      = &strarrays__fcntl_cmds_arrays,
			   .show_zero = true, },
		   [2] = { .scnprintf =  SCA_FCNTL_ARG, /* arg */ }, }, },
	{ .name	    = "flock",
	  .arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, },
	{ .name     = "fsconfig",
	  .arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, },
	{ .name     = "fsmount",
	  .arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags),
		   [2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, },
	{ .name     = "fspick",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	  /* dfd */ },
		   [1] = { .scnprintf = SCA_FILENAME,	  /* path */ },
		   [2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, },
	{ .name	    = "fstat", .alias = "newfstat", },
	{ .name	    = "fstatat", .alias = "newfstatat", },
	{ .name	    = "futex",
	  .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ },
		   [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, },
	{ .name	    = "futimesat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "getitimer",
	  .arg = { [0] = STRARRAY(which, itimers), }, },
	{ .name	    = "getpid",	    .errpid = true, },
	{ .name	    = "getpgid",    .errpid = true, },
	{ .name	    = "getppid",    .errpid = true, },
	{ .name	    = "getrandom",
	  .arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
	{ .name	    = "getrlimit",
	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
	{ .name	    = "gettid",	    .errpid = true, },
	{ .name	    = "ioctl",
	  .arg = {
#if defined(__i386__) || defined(__x86_64__)
/*
 * FIXME: Make this available to all arches.
 */
		   [1] = { .scnprintf = SCA_IOCTL_CMD, /* cmd */ },
		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
#else
		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
#endif
	{ .name	    = "kcmp",	    .nr_args = 5,
	  .arg = { [0] = { .name = "pid1",	.scnprintf = SCA_PID, },
		   [1] = { .name = "pid2",	.scnprintf = SCA_PID, },
		   [2] = { .name = "type",	.scnprintf = SCA_KCMP_TYPE, },
		   [3] = { .name = "idx1",	.scnprintf = SCA_KCMP_IDX, },
		   [4] = { .name = "idx2",	.scnprintf = SCA_KCMP_IDX, }, }, },
	{ .name	    = "keyctl",
	  .arg = { [0] = STRARRAY(option, keyctl_options), }, },
	{ .name	    = "kill",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "linkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "lseek",
	  .arg = { [2] = STRARRAY(whence, whences), }, },
	{ .name	    = "lstat", .alias = "newlstat", },
	{ .name     = "madvise",
	  .arg = { [0] = { .scnprintf = SCA_HEX,      /* start */ },
		   [2] = { .scnprintf = SCA_MADV_BHV, /* behavior */ }, }, },
	{ .name	    = "mkdirat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "mknodat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "mmap",	    .hexret = true,
/* The standard mmap maps to old_mmap on s390x */
#if defined(__s390x__)
	.alias = "old_mmap",
#endif
	  .arg = { [2] = { .scnprintf = SCA_MMAP_PROT,	/* prot */ },
		   [3] = { .scnprintf = SCA_MMAP_FLAGS,	/* flags */ },
		   [5] = { .scnprintf = SCA_HEX,	/* offset */ }, }, },
	{ .name	    = "mount",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ },
		   [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
			   .mask_val  = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
	{ .name	    = "move_mount",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* from_dfd */ },
		   [1] = { .scnprintf = SCA_FILENAME, /* from_pathname */ },
		   [2] = { .scnprintf = SCA_FDAT,	/* to_dfd */ },
		   [3] = { .scnprintf = SCA_FILENAME, /* to_pathname */ },
		   [4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, },
	{ .name	    = "mprotect",
	  .arg = { [0] = { .scnprintf = SCA_HEX,	/* start */ },
		   [2] = { .scnprintf = SCA_MMAP_PROT,	/* prot */ }, }, },
	{ .name	    = "mq_unlink",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* u_name */ }, }, },
	{ .name	    = "mremap",	    .hexret = true,
	  .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, },
	{ .name	    = "name_to_handle_at",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "newfstatat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "open",
	  .arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name	    = "open_by_handle_at",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dfd */ },
		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name	    = "openat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dfd */ },
		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name	    = "perf_event_open",
	  .arg = { [2] = { .scnprintf = SCA_INT,	/* cpu */ },
		   [3] = { .scnprintf = SCA_FD,		/* group_fd */ },
		   [4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
	{ .name	    = "pipe2",
	  .arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, },
	{ .name	    = "pkey_alloc",
	  .arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS,	/* access_rights */ }, }, },
	{ .name	    = "pkey_free",
	  .arg = { [0] = { .scnprintf = SCA_INT,	/* key */ }, }, },
	{ .name	    = "pkey_mprotect",
	  .arg = { [0] = { .scnprintf = SCA_HEX,	/* start */ },
		   [2] = { .scnprintf = SCA_MMAP_PROT,	/* prot */ },
		   [3] = { .scnprintf = SCA_INT,	/* pkey */ }, }, },
	{ .name	    = "poll", .timeout = true, },
	{ .name	    = "ppoll", .timeout = true, },
	{ .name	    = "prctl",
	  .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */ },
		   [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
		   [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
	{ .name	    = "pread", .alias = "pread64", },
	{ .name	    = "preadv", .alias = "pread", },
	{ .name	    = "prlimit64",
	  .arg = { [1] = STRARRAY(resource, rlimit_resources), }, },
	{ .name	    = "pwrite", .alias = "pwrite64", },
	{ .name	    = "readlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "recvfrom",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "recvmmsg",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "recvmsg",
	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "renameat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, }, },
	{ .name	    = "renameat2",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
		   [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
	{ .name	    = "rt_sigaction",
	  .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "rt_sigprocmask",
	  .arg = { [0] = STRARRAY(how, sighow), }, },
	{ .name	    = "rt_sigqueueinfo",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "rt_tgsigqueueinfo",
	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "sched_setscheduler",
	  .arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, },
	{ .name	    = "seccomp",
	  .arg = { [0] = { .scnprintf = SCA_SECCOMP_OP,	   /* op */ },
		   [1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, },
	{ .name	    = "select", .timeout = true, },
	{ .name	    = "sendfile", .alias = "sendfile64", },
	{ .name	    = "sendmmsg",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "sendmsg",
	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "sendto",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
		   [4] = { .scnprintf = SCA_SOCKADDR, /* addr */ }, }, },
	{ .name	    = "set_tid_address", .errpid = true, },
	{ .name	    = "setitimer",
	  .arg = { [0] = STRARRAY(which, itimers), }, },
	{ .name	    = "setrlimit",
	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
	{ .name	    = "socket",
	  .arg = { [0] = STRARRAY(family, socket_families),
		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
	{ .name	    = "socketpair",
	  .arg = { [0] = STRARRAY(family, socket_families),
		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
	{ .name	    = "stat", .alias = "newstat", },
	{ .name	    = "statx",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	 /* fdat */ },
		   [2] = { .scnprintf = SCA_STATX_FLAGS, /* flags */ } ,
		   [3] = { .scnprintf = SCA_STATX_MASK,	 /* mask */ }, }, },
	{ .name	    = "swapoff",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
	{ .name	    = "swapon",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
	{ .name	    = "symlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "sync_file_range",
	  .arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, },
	{ .name	    = "tgkill",
	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "tkill",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name     = "umount2", .alias = "umount",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* name */ }, }, },
	{ .name	    = "uname", .alias = "newuname", },
	{ .name	    = "unlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "utimensat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, },
	{ .name	    = "wait4",	    .errpid = true,
	  .arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
	{ .name	    = "waitid",	    .errpid = true,
	  .arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
};
static int syscall_fmt__cmp(const void *name, const void *fmtp)
{
	const struct syscall_fmt *fmt = fmtp;
	return strcmp(name, fmt->name);
}

static struct syscall_fmt *syscall_fmt__find(const char *name)
{
	const int nmemb = ARRAY_SIZE(syscall_fmts);
	return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
}
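/*
 * Note: the bsearch() above requires syscall_fmts[] to be kept sorted by
 * ->name, which is why the table entries are listed alphabetically.
 */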
static struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
{
	int i, nmemb = ARRAY_SIZE(syscall_fmts);

	for (i = 0; i < nmemb; ++i) {
		if (syscall_fmts[i].alias && strcmp(syscall_fmts[i].alias, alias) == 0)
			return &syscall_fmts[i];
	}

	return NULL;
}
/*
 * is_exit: is this "exit" or "exit_group"?
 * is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter.
 * args_size: sum of the sizes of the syscall arguments, anything after that is augmented stuff: pathname for openat, etc.
 * nonexistent: Just a hole in the syscall table, syscall id not allocated
 */
struct syscall {
	struct tep_event    *tp_format;
	int		    nr_args;
	int		    args_size;
	struct {
		struct bpf_program *sys_enter,
				   *sys_exit;
	}		    bpf_prog;
	bool		    is_exit;
	bool		    is_open;
	bool		    nonexistent;
	struct tep_format_field *args;
	const char	    *name;
	struct syscall_fmt  *fmt;
	struct syscall_arg_fmt *arg_fmt;
};
/*
 * Must match what is in the BPF program:
 *
 * tools/perf/examples/bpf/augmented_raw_syscalls.c
 */
struct bpf_map_syscall_entry {
	bool	enabled;
	u16	string_args_len[6];
};
/*
 * We need to have this 'calculated' boolean because in some cases we really
 * don't know what is the duration of a syscall, for instance, when we start
 * a session and some threads are waiting for a syscall to finish, say 'poll',
 * in which case all we can do is to print "( ? ) for duration and for the
 * timestamp.
 */
static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
{
	double duration = (double)t / NSEC_PER_MSEC;
	size_t printed = fprintf(fp, "(");

	if (!calculated)
		printed += fprintf(fp, "         ");
	else if (duration >= 1.0)
		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
	else if (duration >= 0.01)
		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
	else
		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
	return printed + fprintf(fp, "): ");
}
struct file {
	char *pathname;
	int  dev_maj;
};

/**
 * filename.ptr: The filename char pointer that will be vfs_getname'd
 * filename.entry_str_pos: Where to insert the string translated from
 *                         filename.ptr by the vfs_getname tracepoint/kprobe.
 * ret_scnprintf: syscall args may set this to a different syscall return
 *                formatter, for instance, fcntl may return fds, file flags, etc.
 */
struct thread_trace {
	u64		  entry_time;
	bool		  entry_pending;
	unsigned long	  nr_events;
	unsigned long	  pfmaj, pfmin;
	char		  *entry_str;
	size_t		  (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	struct {
		unsigned long ptr;
		short int     entry_str_pos;
		bool	      pending_open;
		unsigned int  namelen;
		char	      *name;
	} filename;
	struct {
		int	      max;
		struct file   *table;
	} files;

	struct intlist *syscall_stats;
};
static struct thread_trace *thread_trace__new(void)
{
	struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));

	if (ttrace) {
		ttrace->files.max = -1;
		ttrace->syscall_stats = intlist__new(NULL);
	}

	return ttrace;
}

static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
{
	struct thread_trace *ttrace;

	if (thread == NULL)
		goto fail;

	if (thread__priv(thread) == NULL)
		thread__set_priv(thread, thread_trace__new());

	if (thread__priv(thread) == NULL)
		goto fail;

	ttrace = thread__priv(thread);
	++ttrace->nr_events;

	return ttrace;
fail:
	color_fprintf(fp, PERF_COLOR_RED,
		      "WARNING: not enough memory, dropping samples!\n");
	return NULL;
}
void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
				    size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg))
{
	struct thread_trace *ttrace = thread__priv(arg->thread);

	ttrace->ret_scnprintf = ret_scnprintf;
}
#define TRACE_PFMAJ		(1 << 0)
#define TRACE_PFMIN		(1 << 1)

static const size_t trace__entry_str_size = 2048;
static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
{
	if (fd < 0)
		return NULL;

	if (fd > ttrace->files.max) {
		struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));

		if (nfiles == NULL)
			return NULL;

		if (ttrace->files.max != -1) {
			memset(nfiles + ttrace->files.max + 1, 0,
			       (fd - ttrace->files.max) * sizeof(struct file));
		} else {
			memset(nfiles, 0, (fd + 1) * sizeof(struct file));
		}

		ttrace->files.table = nfiles;
		ttrace->files.max   = fd;
	}

	return ttrace->files.table + fd;
}
struct file *thread__files_entry(struct thread *thread, int fd)
{
	return thread_trace__files_entry(thread__priv(thread), fd);
}
static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
{
	struct thread_trace *ttrace = thread__priv(thread);
	struct file *file = thread_trace__files_entry(ttrace, fd);

	if (file != NULL) {
		struct stat st;
		if (stat(pathname, &st) == 0)
			file->dev_maj = major(st.st_rdev);
		file->pathname = strdup(pathname);
		if (file->pathname)
			return 0;
	}

	return -1;
}
static int thread__read_fd_path(struct thread *thread, int fd)
{
	char linkname[PATH_MAX], pathname[PATH_MAX];
	struct stat st;
	int ret;

	if (thread->pid_ == thread->tid) {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/fd/%d", thread->pid_, fd);
	} else {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
	}

	if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
		return -1;

	ret = readlink(linkname, pathname, sizeof(pathname));

	if (ret < 0 || ret > st.st_size)
		return -1;

	pathname[ret] = '\0';
	return trace__set_fd_pathname(thread, fd, pathname);
}
static const char *thread__fd_path(struct thread *thread, int fd,
				   struct trace *trace)
{
	struct thread_trace *ttrace = thread__priv(thread);

	if (ttrace == NULL || trace->fd_path_disabled)
		return NULL;

	if (fd < 0)
		return NULL;

	if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) {
		++trace->stats.proc_getname;
		if (thread__read_fd_path(thread, fd))
			return NULL;
	}

	return ttrace->files.table[fd].pathname;
}
size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = scnprintf(bf, size, "%d", fd);
	const char *path = thread__fd_path(arg->thread, fd, arg->trace);

	if (path)
		printed += scnprintf(bf + printed, size - printed, "<%s>", path);

	return printed;
}
size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
{
	size_t printed = scnprintf(bf, size, "%d", fd);
	struct thread *thread = machine__find_thread(trace->host, pid, pid);

	if (thread) {
		const char *path = thread__fd_path(thread, fd, trace);

		if (path)
			printed += scnprintf(bf + printed, size - printed, "<%s>", path);

		thread__put(thread);
	}

	return printed;
}
static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
	struct thread_trace *ttrace = thread__priv(arg->thread);

	if (ttrace && fd >= 0 && fd <= ttrace->files.max)
		zfree(&ttrace->files.table[fd].pathname);

	return printed;
}
static void thread__set_filename_pos(struct thread *thread, const char *bf,
				     unsigned long ptr)
{
	struct thread_trace *ttrace = thread__priv(thread);

	ttrace->filename.ptr = ptr;
	ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
}
static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
{
	struct augmented_arg *augmented_arg = arg->augmented.args;
	size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
	/*
	 * So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls
	 * we would have two strings, each prefixed by its size.
	 */
	int consumed = sizeof(*augmented_arg) + augmented_arg->size;

	arg->augmented.args = ((void *)arg->augmented.args) + consumed;
	arg->augmented.size -= consumed;

	return printed;
}
static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	unsigned long ptr = arg->val;

	if (arg->augmented.args)
		return syscall_arg__scnprintf_augmented_string(arg, bf, size);

	if (!arg->trace->vfs_getname)
		return scnprintf(bf, size, "%#x", ptr);

	thread__set_filename_pos(arg->thread, bf, ptr);
	return 0;
}
static bool trace__filter_duration(struct trace *trace, double t)
{
	return t < (trace->duration_filter * NSEC_PER_MSEC);
}

static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;

	return fprintf(fp, "%10.3f ", ts);
}
/*
 * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are
 * using ttrace->entry_time for a thread that receives a sys_exit without
 * first having received a sys_enter ("poll" issued before tracing session
 * starts, lost sys_enter exit due to ring buffer overflow).
 */
static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	if (tstamp > 0)
		return __trace__fprintf_tstamp(trace, tstamp, fp);

	return fprintf(fp, "         ? ");
}
static bool done = false;
static bool interrupted = false;

static void sig_handler(int sig)
{
	done = true;
	interrupted = sig == SIGINT;
}
static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
{
	size_t printed = 0;

	if (trace->multiple_threads) {
		if (trace->show_comm)
			printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
		printed += fprintf(fp, "%d ", thread->tid);
	}

	return printed;
}
static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
					u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
{
	size_t printed = 0;

	if (trace->show_tstamp)
		printed = trace__fprintf_tstamp(trace, tstamp, fp);
	if (trace->show_duration)
		printed += fprintf_duration(duration, duration_calculated, fp);
	return printed + trace__fprintf_comm_tid(trace, thread, fp);
}
static int trace__process_event(struct trace *trace, struct machine *machine,
				union perf_event *event, struct perf_sample *sample)
{
	int ret = 0;

	switch (event->header.type) {
	case PERF_RECORD_LOST:
		color_fprintf(trace->output, PERF_COLOR_RED,
			      "LOST %" PRIu64 " events!\n", event->lost.lost);
		ret = machine__process_lost_event(machine, event, sample);
		break;
	default:
		ret = machine__process_event(machine, event, sample);
		break;
	}

	return ret;
}
static int trace__tool_process(struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample,
			       struct machine *machine)
{
	struct trace *trace = container_of(tool, struct trace, tool);
	return trace__process_event(trace, machine, event, sample);
}
static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;

	if (machine->kptr_restrict_warned)
		return NULL;

	if (symbol_conf.kptr_restrict) {
		pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
			   "Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
			   "Kernel samples will not be resolved.\n");
		machine->kptr_restrict_warned = true;
		return NULL;
	}

	return machine__resolve_kernel_addr(vmachine, addrp, modp);
}
static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
{
	int err = symbol__init(NULL);

	if (err)
		return err;

	trace->host = machine__new_host();
	if (trace->host == NULL)
		return -ENOMEM;

	err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
	if (err < 0)
		goto out;

	err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
					    evlist->core.threads, trace__tool_process, false,
					    1);
out:
	if (err)
		symbol__exit();

	return err;
}

static void trace__symbols__exit(struct trace *trace)
{
	machine__exit(trace->host);
	trace->host = NULL;

	symbol__exit();
}
static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
{
	int idx;

	if (nr_args == 6 && sc->fmt && sc->fmt->nr_args != 0)
		nr_args = sc->fmt->nr_args;

	sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
	if (sc->arg_fmt == NULL)
		return -1;

	for (idx = 0; idx < nr_args; ++idx) {
		if (sc->fmt)
			sc->arg_fmt[idx] = sc->fmt->arg[idx];
	}

	sc->nr_args = nr_args;
	return 0;
}
static int syscall__set_arg_fmts(struct syscall *sc)
{
	struct tep_format_field *field, *last_field = NULL;
	int idx = 0, len;

	for (field = sc->args; field; field = field->next, ++idx) {
		last_field = field;

		if (sc->fmt && sc->fmt->arg[idx].scnprintf)
			continue;

		len = strlen(field->name);

		if (strcmp(field->type, "const char *") == 0 &&
		    ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
		     strstr(field->name, "path") != NULL))
			sc->arg_fmt[idx].scnprintf = SCA_FILENAME;
		else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
			sc->arg_fmt[idx].scnprintf = SCA_PTR;
		else if (strcmp(field->type, "pid_t") == 0)
			sc->arg_fmt[idx].scnprintf = SCA_PID;
		else if (strcmp(field->type, "umode_t") == 0)
			sc->arg_fmt[idx].scnprintf = SCA_MODE_T;
		else if ((strcmp(field->type, "int") == 0 ||
			  strcmp(field->type, "unsigned int") == 0 ||
			  strcmp(field->type, "long") == 0) &&
			 len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
			/*
			 * /sys/kernel/tracing/events/syscalls/sys_enter*
			 * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
			 */
			sc->arg_fmt[idx].scnprintf = SCA_FD;
		}
	}

	if (last_field)
		sc->args_size = last_field->offset + last_field->size;

	return 0;
}
static int trace__read_syscall_info(struct trace *trace, int id)
{
	char tp_name[128];
	struct syscall *sc;
	const char *name = syscalltbl__name(trace->sctbl, id);

	if (trace->syscalls.table == NULL) {
		trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
		if (trace->syscalls.table == NULL)
			return -ENOMEM;
	}

	sc = trace->syscalls.table + id;
	if (sc->nonexistent)
		return 0;

	if (name == NULL) {
		sc->nonexistent = true;
		return 0;
	}

	sc->name = name;
	sc->fmt  = syscall_fmt__find(sc->name);

	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
	sc->tp_format = trace_event__tp_format("syscalls", tp_name);

	if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
		sc->tp_format = trace_event__tp_format("syscalls", tp_name);
	}

	if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? 6 : sc->tp_format->format.nr_fields))
		return -ENOMEM;

	if (IS_ERR(sc->tp_format))
		return PTR_ERR(sc->tp_format);

	sc->args = sc->tp_format->format.fields;
	/*
	 * We need to check and discard the first variable '__syscall_nr'
	 * or 'nr' that mean the syscall number. It is needless here.
	 * So drop '__syscall_nr' or 'nr' field but does not exist on older kernels.
	 */
	if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
		sc->args = sc->args->next;
		--sc->nr_args;
	}

	sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
	sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat");

	return syscall__set_arg_fmts(sc);
}
static int intcmp(const void *a, const void *b)
{
	const int *one = a, *another = b;

	return *one - *another;
}
static int trace__validate_ev_qualifier(struct trace *trace)
{
	int err = 0;
	bool printed_invalid_prefix = false;
	struct str_node *pos;
	size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);

	trace->ev_qualifier_ids.entries = malloc(nr_allocated *
						 sizeof(trace->ev_qualifier_ids.entries[0]));

	if (trace->ev_qualifier_ids.entries == NULL) {
		fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
		      trace->output);
		err = -EINVAL;
		goto out;
	}

	strlist__for_each_entry(pos, trace->ev_qualifier) {
		const char *sc = pos->s;
		int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;

		if (id < 0) {
			id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next);
			if (id >= 0)
				goto matches;

			if (!printed_invalid_prefix) {
				pr_debug("Skipping unknown syscalls: ");
				printed_invalid_prefix = true;
			} else {
				pr_debug(", ");
			}

			pr_debug("%s", sc);
			continue;
		}
matches:
		trace->ev_qualifier_ids.entries[nr_used++] = id;
		if (match_next == -1)
			continue;

		while (1) {
			id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
			if (id < 0)
				break;
			if (nr_allocated == nr_used) {
				void *entries;

				nr_allocated += 8;
				entries = realloc(trace->ev_qualifier_ids.entries,
						  nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
				if (entries == NULL) {
					err = -ENOMEM;
					fputs("\nError:\t Not enough memory for parsing\n", trace->output);
					goto out_free;
				}
				trace->ev_qualifier_ids.entries = entries;
			}
			trace->ev_qualifier_ids.entries[nr_used++] = id;
		}
	}

	trace->ev_qualifier_ids.nr = nr_used;
	qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp);
out:
	if (printed_invalid_prefix)
		pr_debug("\n");
	return err;
out_free:
	zfree(&trace->ev_qualifier_ids.entries);
	trace->ev_qualifier_ids.nr = 0;
	goto out;
}
static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id)
{
	bool in_ev_qualifier;

	if (trace->ev_qualifier_ids.nr == 0)
		return true;

	in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries,
				  trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL;

	if (in_ev_qualifier)
		return !trace->not_ev_qualifier;

	return trace->not_ev_qualifier;
}
/*
 * args is to be interpreted as a series of longs but we need to handle
 * 8-byte unaligned accesses. args points to raw_data within the event
 * and raw_data is guaranteed to be 8-byte unaligned because it is
 * preceded by raw_size which is a u32. So we need to copy args to a temp
 * variable to read it. Most notably this avoids extended load instructions
 * on unaligned addresses
 */
unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx)
{
	unsigned long val;
	unsigned char *p = arg->args + sizeof(unsigned long) * idx;

	memcpy(&val, p, sizeof(val));
	return val;
}
static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size,
				      struct syscall_arg *arg)
{
	if (sc->arg_fmt && sc->arg_fmt[arg->idx].name)
		return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name);

	return scnprintf(bf, size, "arg%d: ", arg->idx);
}
/*
 * Check if the value is in fact zero, i.e. mask whatever needs masking, such
 * as mount 'flags' argument that needs ignoring some magic flag, see comment
 * in tools/perf/trace/beauty/mount_flags.c
 */
static unsigned long syscall__mask_val(struct syscall *sc, struct syscall_arg *arg, unsigned long val)
{
	if (sc->arg_fmt && sc->arg_fmt[arg->idx].mask_val)
		return sc->arg_fmt[arg->idx].mask_val(arg, val);

	return val;
}
static size_t syscall__scnprintf_val(struct syscall *sc, char *bf, size_t size,
				     struct syscall_arg *arg, unsigned long val)
{
	if (sc->arg_fmt && sc->arg_fmt[arg->idx].scnprintf) {
		arg->val = val;
		if (sc->arg_fmt[arg->idx].parm)
			arg->parm = sc->arg_fmt[arg->idx].parm;
		return sc->arg_fmt[arg->idx].scnprintf(bf, size, arg);
	}
	return scnprintf(bf, size, "%ld", val);
}
static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
				      unsigned char *args, void *augmented_args, int augmented_args_size,
				      struct trace *trace, struct thread *thread)
{
	size_t printed = 0;
	unsigned long val;
	u8 bit = 1;
	struct syscall_arg arg = {
		.args	= args,
		.augmented = {
			.size = augmented_args_size,
			.args = augmented_args,
		},
		.idx	= 0,
		.mask	= 0,
		.trace  = trace,
		.thread = thread,
		.show_string_prefix = trace->show_string_prefix,
	};
	struct thread_trace *ttrace = thread__priv(thread);

	/*
	 * Things like fcntl will set this in its 'cmd' formatter to pick the
	 * right formatter for the return value (an fd? file flags?), which is
	 * not needed for syscalls that always return a given type, say an fd.
	 */
	ttrace->ret_scnprintf = NULL;

	if (sc->args != NULL) {
		struct tep_format_field *field;

		for (field = sc->args; field;
		     field = field->next, ++arg.idx, bit <<= 1) {
			if (arg.mask & bit)
				continue;

			val = syscall_arg__val(&arg, arg.idx);
			/*
			 * Some syscall args need some mask, most don't and
			 * return val untouched.
			 */
			val = syscall__mask_val(sc, &arg, val);

			/*
			 * Suppress this argument if its value is zero and
			 * and we don't have a string associated in an
			 * strarray for it.
			 */
			if (val == 0 &&
			    !trace->show_zeros &&
			    !(sc->arg_fmt &&
			      (sc->arg_fmt[arg.idx].show_zero ||
			       sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAY ||
			       sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAYS) &&
			      sc->arg_fmt[arg.idx].parm))
				continue;

			printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");

			if (trace->show_arg_names)
				printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);

			printed += syscall__scnprintf_val(sc, bf + printed, size - printed, &arg, val);
		}
	} else if (IS_ERR(sc->tp_format)) {
		/*
		 * If we managed to read the tracepoint /format file, then we
		 * may end up not having any args, like with gettid(), so only
		 * print the raw args when we didn't manage to read it.
		 */
		while (arg.idx < sc->nr_args) {
			val = syscall_arg__val(&arg, arg.idx);
			if (printed)
				printed += scnprintf(bf + printed, size - printed, ", ");
			printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg);
			printed += syscall__scnprintf_val(sc, bf + printed, size - printed, &arg, val);
			++arg.idx;
			bit <<= 1;
		}
	}

	return printed;
}
)(struct trace
*trace
, struct evsel
*evsel
,
1783 union perf_event
*event
,
1784 struct perf_sample
*sample
);
static struct syscall *trace__syscall_info(struct trace *trace,
					   struct evsel *evsel, int id)
{
	int err = 0;

	if (id < 0) {
		/*
		 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
		 * before that, leaving at a higher verbosity level till that is
		 * explained. Reproduced with plain ftrace with:
		 *
		 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
		 * grep "NR -1 " /t/trace_pipe
		 *
		 * After generating some load on the machine.
		 */
		if (verbose > 1) {
			static u64 n;
			fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
				id, perf_evsel__name(evsel), ++n);
		}
		return NULL;
	}

	err = -EINVAL;

	if (id > trace->sctbl->syscalls.max_id)
		goto out_cant_read;

	if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) &&
	    (err = trace__read_syscall_info(trace, id)) != 0)
		goto out_cant_read;

	if (trace->syscalls.table[id].name == NULL) {
		if (trace->syscalls.table[id].nonexistent)
			return NULL;
		goto out_cant_read;
	}

	return &trace->syscalls.table[id];

out_cant_read:
	if (verbose > 0) {
		char sbuf[STRERR_BUFSIZE];
		fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, sizeof(sbuf)));
		if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL)
			fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
		fputs(" information\n", trace->output);
	}
	return NULL;
}
static void thread__update_stats(struct thread_trace *ttrace,
				 int id, struct perf_sample *sample)
{
	struct int_node *inode;
	struct stats *stats;
	u64 duration = 0;

	inode = intlist__findnew(ttrace->syscall_stats, id);
	if (inode == NULL)
		return;

	stats = inode->priv;
	if (stats == NULL) {
		stats = malloc(sizeof(struct stats));
		if (stats == NULL)
			return;
		init_stats(stats);
		inode->priv = stats;
	}

	if (ttrace->entry_time && sample->time > ttrace->entry_time)
		duration = sample->time - ttrace->entry_time;

	update_stats(stats, duration);
}
static int trace__printf_interrupted_entry(struct trace *trace)
{
	struct thread_trace *ttrace;
	size_t printed;
	int len;

	if (trace->failure_only || trace->current == NULL)
		return 0;

	ttrace = thread__priv(trace->current);

	if (!ttrace->entry_pending)
		return 0;

	printed  = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
	printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);

	if (len < trace->args_alignment - 4)
		printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");

	printed += fprintf(trace->output, " ...\n");

	ttrace->entry_pending = false;
	++trace->nr_events_printed;

	return printed;
}
static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel,
				 struct perf_sample *sample, struct thread *thread)
{
	int printed = 0;

	if (trace->print_sample) {
		double ts = (double)sample->time / NSEC_PER_MSEC;

		printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
				   perf_evsel__name(evsel), ts,
				   thread__comm_str(thread),
				   sample->pid, sample->tid, sample->cpu);
	}

	return printed;
}
static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size)
{
	void *augmented_args = NULL;
	/*
	 * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter
	 * and there we get all 6 syscall args plus the tracepoint common fields
	 * that gets calculated at the start and the syscall_nr (another long).
	 * So we check if that is the case and if so don't look after the
	 * sc->args_size but always after the full raw_syscalls:sys_enter payload,
	 * which is fixed.
	 *
	 * We'll revisit this later to pass s->args_size to the BPF augmenter
	 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c, so that it
	 * copies only what we need for each syscall, like what happens when we
	 * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace
	 * traffic to just what is needed for each syscall.
	 */
	int args_size = raw_augmented_args_size ?: sc->args_size;

	*augmented_args_size = sample->raw_size - args_size;
	if (*augmented_args_size > 0)
		augmented_args = sample->raw_data + args_size;

	return augmented_args;
}
static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample)
{
	char *msg;
	void *args;
	int printed = 0;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	int augmented_args_size = 0;
	void *augmented_args = NULL;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	trace__fprintf_sample(trace, evsel, sample, thread);

	args = perf_evsel__sc_tp_ptr(evsel, args, sample);

	if (ttrace->entry_str == NULL) {
		ttrace->entry_str = malloc(trace__entry_str_size);
		if (!ttrace->entry_str)
			goto out_put;
	}

	if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
		trace__printf_interrupted_entry(trace);
	/*
	 * If this is raw_syscalls.sys_enter, then it always comes with the 6 possible
	 * arguments, even if the syscall being handled, say "openat", uses only 4 arguments
	 * this breaks syscall__augmented_args() check for augmented args, as we calculate
	 * syscall->args_size using each syscalls:sys_enter_NAME tracefs format file,
	 * so when handling, say the openat syscall, we end up getting 6 args for the
	 * raw_syscalls:sys_enter event, when we expected just 4, we end up mistakenly
	 * thinking that the extra 2 u64 args are the augmented filename, so just check
	 * here and avoid using augmented syscalls when the evsel is the raw_syscalls one.
	 */
	if (evsel != trace->syscalls.events.sys_enter)
		augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
	ttrace->entry_time = sample->time;
	msg = ttrace->entry_str;
	printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);

	printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
					   args, augmented_args, augmented_args_size, trace, thread);

	if (sc->is_exit) {
		if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
			int alignment = 0;

			trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
			printed = fprintf(trace->output, "%s)", ttrace->entry_str);
			if (trace->args_alignment > printed)
				alignment = trace->args_alignment - printed;
			fprintf(trace->output, "%*s= ?\n", alignment, " ");
		}
	} else {
		ttrace->entry_pending = true;
		/* See trace__vfs_getname & trace__sys_exit */
		ttrace->filename.pending_open = false;
	}

	if (trace->current != thread) {
		thread__put(trace->current);
		trace->current = thread__get(thread);
	}
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
*trace
, struct evsel
*evsel
,
2016 struct perf_sample
*sample
)
2018 struct thread_trace
*ttrace
;
2019 struct thread
*thread
;
2020 int id
= perf_evsel__sc_tp_uint(evsel
, id
, sample
), err
= -1;
2021 struct syscall
*sc
= trace__syscall_info(trace
, evsel
, id
);
2023 void *args
, *augmented_args
= NULL
;
2024 int augmented_args_size
;
2029 thread
= machine__findnew_thread(trace
->host
, sample
->pid
, sample
->tid
);
2030 ttrace
= thread__trace(thread
, trace
->output
);
2032 * We need to get ttrace just to make sure it is there when syscall__scnprintf_args()
2033 * and the rest of the beautifiers accessing it via struct syscall_arg touches it.
2038 args
= perf_evsel__sc_tp_ptr(evsel
, args
, sample
);
2039 augmented_args
= syscall__augmented_args(sc
, sample
, &augmented_args_size
, trace
->raw_augmented_syscalls_args_size
);
2040 syscall__scnprintf_args(sc
, msg
, sizeof(msg
), args
, augmented_args
, augmented_args_size
, trace
, thread
);
2041 fprintf(trace
->output
, "%s", msg
);
2044 thread__put(thread
);
2048 static int trace__resolve_callchain(struct trace
*trace
, struct evsel
*evsel
,
2049 struct perf_sample
*sample
,
2050 struct callchain_cursor
*cursor
)
2052 struct addr_location al
;
2053 int max_stack
= evsel
->core
.attr
.sample_max_stack
?
2054 evsel
->core
.attr
.sample_max_stack
:
2058 if (machine__resolve(trace
->host
, &al
, sample
) < 0)
2061 err
= thread__resolve_callchain(al
.thread
, cursor
, evsel
, sample
, NULL
, NULL
, max_stack
);
2062 addr_location__put(&al
);
static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
{
	/* TODO: user-configurable print_opts */
	const unsigned int print_opts = EVSEL__PRINT_SYM |
					EVSEL__PRINT_DSO |
					EVSEL__PRINT_UNKNOWN_AS_ADDR;

	return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, trace->output);
}
static const char *errno_to_name(struct evsel *evsel, int err)
{
	struct perf_env *env = perf_evsel__env(evsel);
	const char *arch_name = perf_env__arch(env);

	return arch_syscalls__strerrno(arch_name, err);
}
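/*
 * raw_syscalls:sys_exit handler: pairs the exit with the entry string saved
 * by trace__sys_enter(), computes the syscall duration, applies the duration
 * and failure filters and then pretty prints the return value according to
 * the syscall_fmt: errno name, hex, "0 (Timeout)", child comm for the pid
 * returning syscalls, etc.
 */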
static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
			   union perf_event *event __maybe_unused,
			   struct perf_sample *sample)
{
	long ret;
	u64 duration = 0;
	bool duration_calculated = false;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0;
	int alignment = trace->args_alignment;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	trace__fprintf_sample(trace, evsel, sample, thread);

	if (trace->summary)
		thread__update_stats(ttrace, id, sample);

	ret = perf_evsel__sc_tp_uint(evsel, ret, sample);

	if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
		trace__set_fd_pathname(thread, ret, ttrace->filename.name);
		ttrace->filename.pending_open = false;
		++trace->stats.vfs_getname;
	}

	if (ttrace->entry_time) {
		duration = sample->time - ttrace->entry_time;
		if (trace__filter_duration(trace, duration))
			goto out;
		duration_calculated = true;
	} else if (trace->duration_filter)
		goto out;

	if (sample->callchain) {
		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
		if (callchain_ret == 0) {
			if (callchain_cursor.nr < trace->min_stack)
				goto out;
			callchain_ret = 1;
		}
	}

	if (trace->summary_only || (ret >= 0 && trace->failure_only))
		goto out;

	trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);

	if (ttrace->entry_pending) {
		printed = fprintf(trace->output, "%s", ttrace->entry_str);
	} else {
		printed += fprintf(trace->output, " ... [");
		color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
		printed += 9;
		printed += fprintf(trace->output, "]: %s()", sc->name);
	}

	printed++; /* the closing ')' */

	if (alignment > printed)
		alignment -= printed;
	else
		alignment = 0;

	fprintf(trace->output, ")%*s= ", alignment, " ");

	if (sc->fmt == NULL) {
		if (ret < 0)
			goto errno_print;
signed_print:
		fprintf(trace->output, "%ld", ret);
	} else if (ret < 0) {
errno_print: {
		char bf[STRERR_BUFSIZE];
		const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
			   *e = errno_to_name(evsel, -ret);

		fprintf(trace->output, "-1 %s (%s)", e, emsg);
	}
	} else if (ret == 0 && sc->fmt->timeout)
		fprintf(trace->output, "0 (Timeout)");
	else if (ttrace->ret_scnprintf) {
		char bf[1024];
		struct syscall_arg arg = {
			.val	= ret,
			.thread	= thread,
			.trace	= trace,
		};
		ttrace->ret_scnprintf(bf, sizeof(bf), &arg);
		ttrace->ret_scnprintf = NULL;
		fprintf(trace->output, "%s", bf);
	} else if (sc->fmt->hexret)
		fprintf(trace->output, "%#lx", ret);
	else if (sc->fmt->errpid) {
		struct thread *child = machine__find_thread(trace->host, ret, ret);

		if (child != NULL) {
			fprintf(trace->output, "%ld", ret);
			if (child->comm_set)
				fprintf(trace->output, " (%s)", thread__comm_str(child));
			thread__put(child);
		}
	} else
		goto signed_print;

	fputc('\n', trace->output);

	/*
	 * We only consider an 'event' for the sake of --max-events a non-filtered
	 * sys_enter + sys_exit and other tracepoint events.
	 */
	if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
		interrupted = true;

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
out:
	ttrace->entry_pending = false;
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
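/*
 * probe:vfs_getname handler: caches the pathname being resolved so that
 * trace__sys_exit() can associate it with the fd being returned, and, if a
 * syscall entry line is still pending, splices the string into
 * ttrace->entry_str at the position remembered by the filename beautifier.
 */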
static int trace__vfs_getname(struct trace *trace, struct evsel *evsel,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	struct thread_trace *ttrace;
	size_t filename_len, entry_str_len, to_move;
	ssize_t remaining_space;
	char *pos;
	const char *filename = perf_evsel__rawptr(evsel, sample, "pathname");

	if (!thread)
		goto out;

	ttrace = thread__priv(thread);
	if (!ttrace)
		goto out_put;

	filename_len = strlen(filename);
	if (filename_len == 0)
		goto out_put;

	if (ttrace->filename.namelen < filename_len) {
		char *f = realloc(ttrace->filename.name, filename_len + 1);

		if (f == NULL)
			goto out_put;

		ttrace->filename.namelen = filename_len;
		ttrace->filename.name = f;
	}

	strcpy(ttrace->filename.name, filename);
	ttrace->filename.pending_open = true;

	if (!ttrace->filename.ptr)
		goto out_put;

	entry_str_len = strlen(ttrace->entry_str);
	remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
	if (remaining_space <= 0)
		goto out_put;

	if (filename_len > (size_t)remaining_space) {
		filename += filename_len - remaining_space;
		filename_len = remaining_space;
	}

	to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
	pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
	memmove(pos + filename_len, pos, to_move);
	memcpy(pos, filename, filename_len);

	ttrace->filename.ptr = 0;
	ttrace->filename.entry_str_pos = 0;
out_put:
	thread__put(thread);
out:
	return 0;
}
static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample)
{
	u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
	double runtime_ms = (double)runtime / NSEC_PER_MSEC;
	struct thread *thread = machine__findnew_thread(trace->host,
							sample->pid,
							sample->tid);
	struct thread_trace *ttrace = thread__trace(thread, trace->output);

	if (ttrace == NULL)
		goto out_dump;

	ttrace->runtime_ms += runtime_ms;
	trace->runtime_ms += runtime_ms;
out_put:
	thread__put(thread);
	return 0;

out_dump:
	fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
	       evsel->name,
	       perf_evsel__strval(evsel, sample, "comm"),
	       (pid_t)perf_evsel__intval(evsel, sample, "pid"),
	       runtime,
	       perf_evsel__intval(evsel, sample, "vruntime"));
	goto out_put;
}
static int bpf_output__printer(enum binary_printer_ops op,
			       unsigned int val, void *extra __maybe_unused, FILE *fp)
{
	unsigned char ch = (unsigned char)val;

	switch (op) {
	case BINARY_PRINT_CHAR_DATA:
		return fprintf(fp, "%c", isprint(ch) ? ch : '.');
	case BINARY_PRINT_DATA_BEGIN:
	case BINARY_PRINT_LINE_BEGIN:
	case BINARY_PRINT_ADDR:
	case BINARY_PRINT_NUM_DATA:
	case BINARY_PRINT_NUM_PAD:
	case BINARY_PRINT_SEP:
	case BINARY_PRINT_CHAR_PAD:
	case BINARY_PRINT_LINE_END:
	case BINARY_PRINT_DATA_END:
	default:
		break;
	}

	return 0;
}

static void bpf_output__fprintf(struct trace *trace,
				struct perf_sample *sample)
{
	binary__fprintf(sample->raw_data, sample->raw_size, 8,
			bpf_output__printer, NULL, trace->output);
	++trace->nr_events_printed;
}
static int trace__event_handler(struct trace *trace, struct evsel *evsel,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample)
{
	struct thread *thread;
	int callchain_ret = 0;
	/*
	 * Check if we called perf_evsel__disable(evsel) due to, for instance,
	 * this event's max_events having been hit and this is an entry coming
	 * from the ring buffer that we should discard, since the max events
	 * have already been considered/printed.
	 */
	if (evsel->disabled)
		return 0;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);

	if (sample->callchain) {
		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
		if (callchain_ret == 0) {
			if (callchain_cursor.nr < trace->min_stack)
				goto out;
			callchain_ret = 1;
		}
	}

	trace__printf_interrupted_entry(trace);
	trace__fprintf_tstamp(trace, sample->time, trace->output);

	if (trace->trace_syscalls && trace->show_duration)
		fprintf(trace->output, "(         ): ");

	if (thread)
		trace__fprintf_comm_tid(trace, thread, trace->output);

	if (evsel == trace->syscalls.events.augmented) {
		int id = perf_evsel__sc_tp_uint(evsel, id, sample);
		struct syscall *sc = trace__syscall_info(trace, evsel, id);

		if (sc) {
			fprintf(trace->output, "%s(", sc->name);
			trace__fprintf_sys_enter(trace, evsel, sample);
			fputc(')', trace->output);
			goto newline;
		}

		/*
		 * XXX: Not having the associated syscall info or not finding/adding
		 * 	the thread should never happen, but if it does...
		 * 	fall thru and print it as a bpf_output event.
		 */
	}

	fprintf(trace->output, "%s:", evsel->name);

	if (perf_evsel__is_bpf_output(evsel)) {
		bpf_output__fprintf(trace, sample);
	} else if (evsel->tp_format) {
		if (strncmp(evsel->tp_format->name, "sys_enter_", 10) ||
		    trace__fprintf_sys_enter(trace, evsel, sample)) {
			event_format__fprintf(evsel->tp_format, sample->cpu,
					      sample->raw_data, sample->raw_size,
					      trace->output);
			++trace->nr_events_printed;

			if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
				evsel__disable(evsel);
				evsel__close(evsel);
			}
		}
	}

newline:
	fprintf(trace->output, "\n");

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
out:
	thread__put(thread);
	return 0;
}
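/*
 * Print the DSO and symbol+offset for a resolved location, falling back to
 * the raw address; used by trace__pgfault() for both the faulting IP and the
 * target data address.
 */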
static void print_location(FILE *f, struct perf_sample *sample,
			   struct addr_location *al,
			   bool print_dso, bool print_sym)
{

	if ((verbose > 0 || print_dso) && al->map)
		fprintf(f, "%s@", al->map->dso->long_name);

	if ((verbose > 0 || print_sym) && al->sym)
		fprintf(f, "%s+0x%" PRIx64, al->sym->name,
			al->addr - al->sym->start);
	else if (al->map)
		fprintf(f, "0x%" PRIx64, al->addr);
	else
		fprintf(f, "0x%" PRIx64, sample->addr);
}
2442 static int trace__pgfault(struct trace
*trace
,
2443 struct evsel
*evsel
,
2444 union perf_event
*event __maybe_unused
,
2445 struct perf_sample
*sample
)
2447 struct thread
*thread
;
2448 struct addr_location al
;
2449 char map_type
= 'd';
2450 struct thread_trace
*ttrace
;
2452 int callchain_ret
= 0;
2454 thread
= machine__findnew_thread(trace
->host
, sample
->pid
, sample
->tid
);
2456 if (sample
->callchain
) {
2457 callchain_ret
= trace__resolve_callchain(trace
, evsel
, sample
, &callchain_cursor
);
2458 if (callchain_ret
== 0) {
2459 if (callchain_cursor
.nr
< trace
->min_stack
)
2465 ttrace
= thread__trace(thread
, trace
->output
);
2469 if (evsel
->core
.attr
.config
== PERF_COUNT_SW_PAGE_FAULTS_MAJ
)
2474 if (trace
->summary_only
)
2477 thread__find_symbol(thread
, sample
->cpumode
, sample
->ip
, &al
);
2479 trace__fprintf_entry_head(trace
, thread
, 0, true, sample
->time
, trace
->output
);
2481 fprintf(trace
->output
, "%sfault [",
2482 evsel
->core
.attr
.config
== PERF_COUNT_SW_PAGE_FAULTS_MAJ
?
2485 print_location(trace
->output
, sample
, &al
, false, true);
2487 fprintf(trace
->output
, "] => ");
2489 thread__find_symbol(thread
, sample
->cpumode
, sample
->addr
, &al
);
2492 thread__find_symbol(thread
, sample
->cpumode
, sample
->addr
, &al
);
2500 print_location(trace
->output
, sample
, &al
, true, false);
2502 fprintf(trace
->output
, " (%c%c)\n", map_type
, al
.level
);
2504 if (callchain_ret
> 0)
2505 trace__fprintf_callchain(trace
, sample
);
2506 else if (callchain_ret
< 0)
2507 pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel
));
2509 ++trace
->nr_events_printed
;
2513 thread__put(thread
);
static void trace__set_base_time(struct trace *trace,
				 struct evsel *evsel,
				 struct perf_sample *sample)
{
	/*
	 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust
	 * and don't use sample->time unconditionally, we may end up having
	 * some other event in the future without PERF_SAMPLE_TIME for good
	 * reason, i.e. we may not be interested in its timestamps, just in
	 * it taking place, picking some piece of information when it
	 * appears in our event stream (vfs_getname comes to mind).
	 */
	if (trace->base_time == 0 && !trace->full_time &&
	    (evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
		trace->base_time = sample->time;
}
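/*
 * perf_tool->sample callback used when processing a perf.data file in
 * trace__replay() ('perf trace -i'), dispatching each sample to the handler
 * set on the evsel (trace__sys_enter, trace__sys_exit, trace__pgfault, ...).
 */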
static int trace__process_sample(struct perf_tool *tool,
				 union perf_event *event,
				 struct perf_sample *sample,
				 struct evsel *evsel,
				 struct machine *machine __maybe_unused)
{
	struct trace *trace = container_of(tool, struct trace, tool);
	struct thread *thread;
	int err = 0;

	tracepoint_handler handler = evsel->handler;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	if (thread && thread__is_filtered(thread))
		goto out;

	trace__set_base_time(trace, evsel, sample);

	if (handler) {
		++trace->nr_events;
		handler(trace, evsel, event, sample);
	}
out:
	thread__put(thread);
	return err;
}
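/*
 * 'perf trace record': build a 'perf record' command line adding the
 * raw_syscalls:sys_{enter,exit} tracepoints (or syscalls:* on older kernels)
 * and the requested page fault software events, then hand it to cmd_record().
 */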
2561 static int trace__record(struct trace
*trace
, int argc
, const char **argv
)
2563 unsigned int rec_argc
, i
, j
;
2564 const char **rec_argv
;
2565 const char * const record_args
[] = {
2572 const char * const sc_args
[] = { "-e", };
2573 unsigned int sc_args_nr
= ARRAY_SIZE(sc_args
);
2574 const char * const majpf_args
[] = { "-e", "major-faults" };
2575 unsigned int majpf_args_nr
= ARRAY_SIZE(majpf_args
);
2576 const char * const minpf_args
[] = { "-e", "minor-faults" };
2577 unsigned int minpf_args_nr
= ARRAY_SIZE(minpf_args
);
2579 /* +1 is for the event string below */
2580 rec_argc
= ARRAY_SIZE(record_args
) + sc_args_nr
+ 1 +
2581 majpf_args_nr
+ minpf_args_nr
+ argc
;
2582 rec_argv
= calloc(rec_argc
+ 1, sizeof(char *));
2584 if (rec_argv
== NULL
)
2588 for (i
= 0; i
< ARRAY_SIZE(record_args
); i
++)
2589 rec_argv
[j
++] = record_args
[i
];
2591 if (trace
->trace_syscalls
) {
2592 for (i
= 0; i
< sc_args_nr
; i
++)
2593 rec_argv
[j
++] = sc_args
[i
];
2595 /* event string may be different for older kernels - e.g., RHEL6 */
2596 if (is_valid_tracepoint("raw_syscalls:sys_enter"))
2597 rec_argv
[j
++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
2598 else if (is_valid_tracepoint("syscalls:sys_enter"))
2599 rec_argv
[j
++] = "syscalls:sys_enter,syscalls:sys_exit";
2601 pr_err("Neither raw_syscalls nor syscalls events exist.\n");
2607 if (trace
->trace_pgfaults
& TRACE_PFMAJ
)
2608 for (i
= 0; i
< majpf_args_nr
; i
++)
2609 rec_argv
[j
++] = majpf_args
[i
];
2611 if (trace
->trace_pgfaults
& TRACE_PFMIN
)
2612 for (i
= 0; i
< minpf_args_nr
; i
++)
2613 rec_argv
[j
++] = minpf_args
[i
];
2615 for (i
= 0; i
< (unsigned int)argc
; i
++)
2616 rec_argv
[j
++] = argv
[i
];
2618 return cmd_record(j
, rec_argv
);
2621 static size_t trace__fprintf_thread_summary(struct trace
*trace
, FILE *fp
);
2623 static bool evlist__add_vfs_getname(struct evlist
*evlist
)
2626 struct evsel
*evsel
, *tmp
;
2627 struct parse_events_error err
= { .idx
= 0, };
2628 int ret
= parse_events(evlist
, "probe:vfs_getname*", &err
);
2633 evlist__for_each_entry_safe(evlist
, evsel
, tmp
) {
2634 if (!strstarts(perf_evsel__name(evsel
), "probe:vfs_getname"))
2637 if (perf_evsel__field(evsel
, "pathname")) {
2638 evsel
->handler
= trace__vfs_getname
;
2643 list_del_init(&evsel
->core
.node
);
2644 evsel
->evlist
= NULL
;
2645 evsel__delete(evsel
);
2651 static struct evsel
*perf_evsel__new_pgfault(u64 config
)
2653 struct evsel
*evsel
;
2654 struct perf_event_attr attr
= {
2655 .type
= PERF_TYPE_SOFTWARE
,
2659 attr
.config
= config
;
2660 attr
.sample_period
= 1;
2662 event_attr_init(&attr
);
2664 evsel
= evsel__new(&attr
);
2666 evsel
->handler
= trace__pgfault
;
2671 static void trace__handle_event(struct trace
*trace
, union perf_event
*event
, struct perf_sample
*sample
)
2673 const u32 type
= event
->header
.type
;
2674 struct evsel
*evsel
;
2676 if (type
!= PERF_RECORD_SAMPLE
) {
2677 trace__process_event(trace
, trace
->host
, event
, sample
);
2681 evsel
= perf_evlist__id2evsel(trace
->evlist
, sample
->id
);
2682 if (evsel
== NULL
) {
2683 fprintf(trace
->output
, "Unknown tp ID %" PRIu64
", skipping...\n", sample
->id
);
2687 if (evswitch__discard(&trace
->evswitch
, evsel
))
2690 trace__set_base_time(trace
, evsel
, sample
);
2692 if (evsel
->core
.attr
.type
== PERF_TYPE_TRACEPOINT
&&
2693 sample
->raw_data
== NULL
) {
2694 fprintf(trace
->output
, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
2695 perf_evsel__name(evsel
), sample
->tid
,
2696 sample
->cpu
, sample
->raw_size
);
2698 tracepoint_handler handler
= evsel
->handler
;
2699 handler(trace
, evsel
, event
, sample
);
2702 if (trace
->nr_events_printed
>= trace
->max_events
&& trace
->max_events
!= ULONG_MAX
)
2706 static int trace__add_syscall_newtp(struct trace
*trace
)
2709 struct evlist
*evlist
= trace
->evlist
;
2710 struct evsel
*sys_enter
, *sys_exit
;
2712 sys_enter
= perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter
);
2713 if (sys_enter
== NULL
)
2716 if (perf_evsel__init_sc_tp_ptr_field(sys_enter
, args
))
2717 goto out_delete_sys_enter
;
2719 sys_exit
= perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit
);
2720 if (sys_exit
== NULL
)
2721 goto out_delete_sys_enter
;
2723 if (perf_evsel__init_sc_tp_uint_field(sys_exit
, ret
))
2724 goto out_delete_sys_exit
;
2726 perf_evsel__config_callchain(sys_enter
, &trace
->opts
, &callchain_param
);
2727 perf_evsel__config_callchain(sys_exit
, &trace
->opts
, &callchain_param
);
2729 evlist__add(evlist
, sys_enter
);
2730 evlist__add(evlist
, sys_exit
);
2732 if (callchain_param
.enabled
&& !trace
->kernel_syscallchains
) {
2734 * We're interested only in the user space callchain
2735 * leading to the syscall, allow overriding that for
2736 * debugging reasons using --kernel_syscall_callchains
2738 sys_exit
->core
.attr
.exclude_callchain_kernel
= 1;
2741 trace
->syscalls
.events
.sys_enter
= sys_enter
;
2742 trace
->syscalls
.events
.sys_exit
= sys_exit
;
2748 out_delete_sys_exit
:
2749 evsel__delete_priv(sys_exit
);
2750 out_delete_sys_enter
:
2751 evsel__delete_priv(sys_enter
);
2755 static int trace__set_ev_qualifier_tp_filter(struct trace
*trace
)
2758 struct evsel
*sys_exit
;
2759 char *filter
= asprintf_expr_inout_ints("id", !trace
->not_ev_qualifier
,
2760 trace
->ev_qualifier_ids
.nr
,
2761 trace
->ev_qualifier_ids
.entries
);
2766 if (!perf_evsel__append_tp_filter(trace
->syscalls
.events
.sys_enter
,
2768 sys_exit
= trace
->syscalls
.events
.sys_exit
;
2769 err
= perf_evsel__append_tp_filter(sys_exit
, filter
);
2780 #ifdef HAVE_LIBBPF_SUPPORT
2781 static struct bpf_program
*trace__find_bpf_program_by_title(struct trace
*trace
, const char *name
)
2783 if (trace
->bpf_obj
== NULL
)
2786 return bpf_object__find_program_by_title(trace
->bpf_obj
, name
);
2789 static struct bpf_program
*trace__find_syscall_bpf_prog(struct trace
*trace
, struct syscall
*sc
,
2790 const char *prog_name
, const char *type
)
2792 struct bpf_program
*prog
;
2794 if (prog_name
== NULL
) {
2795 char default_prog_name
[256];
2796 scnprintf(default_prog_name
, sizeof(default_prog_name
), "!syscalls:sys_%s_%s", type
, sc
->name
);
2797 prog
= trace__find_bpf_program_by_title(trace
, default_prog_name
);
2800 if (sc
->fmt
&& sc
->fmt
->alias
) {
2801 scnprintf(default_prog_name
, sizeof(default_prog_name
), "!syscalls:sys_%s_%s", type
, sc
->fmt
->alias
);
2802 prog
= trace__find_bpf_program_by_title(trace
, default_prog_name
);
2806 goto out_unaugmented
;
2809 prog
= trace__find_bpf_program_by_title(trace
, prog_name
);
2816 pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n",
2817 prog_name
, type
, sc
->name
);
2819 return trace
->syscalls
.unaugmented_prog
;
2822 static void trace__init_syscall_bpf_progs(struct trace
*trace
, int id
)
2824 struct syscall
*sc
= trace__syscall_info(trace
, NULL
, id
);
2829 sc
->bpf_prog
.sys_enter
= trace__find_syscall_bpf_prog(trace
, sc
, sc
->fmt
? sc
->fmt
->bpf_prog_name
.sys_enter
: NULL
, "enter");
2830 sc
->bpf_prog
.sys_exit
= trace__find_syscall_bpf_prog(trace
, sc
, sc
->fmt
? sc
->fmt
->bpf_prog_name
.sys_exit
: NULL
, "exit");
2833 static int trace__bpf_prog_sys_enter_fd(struct trace
*trace
, int id
)
2835 struct syscall
*sc
= trace__syscall_info(trace
, NULL
, id
);
2836 return sc
? bpf_program__fd(sc
->bpf_prog
.sys_enter
) : bpf_program__fd(trace
->syscalls
.unaugmented_prog
);
2839 static int trace__bpf_prog_sys_exit_fd(struct trace
*trace
, int id
)
2841 struct syscall
*sc
= trace__syscall_info(trace
, NULL
, id
);
2842 return sc
? bpf_program__fd(sc
->bpf_prog
.sys_exit
) : bpf_program__fd(trace
->syscalls
.unaugmented_prog
);
2845 static void trace__init_bpf_map_syscall_args(struct trace
*trace
, int id
, struct bpf_map_syscall_entry
*entry
)
2847 struct syscall
*sc
= trace__syscall_info(trace
, NULL
, id
);
2853 for (; arg
< sc
->nr_args
; ++arg
) {
2854 entry
->string_args_len
[arg
] = 0;
2855 if (sc
->arg_fmt
[arg
].scnprintf
== SCA_FILENAME
) {
2856 /* Should be set like strace -s strsize */
2857 entry
->string_args_len
[arg
] = PATH_MAX
;
2861 for (; arg
< 6; ++arg
)
2862 entry
->string_args_len
[arg
] = 0;
2864 static int trace__set_ev_qualifier_bpf_filter(struct trace
*trace
)
2866 int fd
= bpf_map__fd(trace
->syscalls
.map
);
2867 struct bpf_map_syscall_entry value
= {
2868 .enabled
= !trace
->not_ev_qualifier
,
2873 for (i
= 0; i
< trace
->ev_qualifier_ids
.nr
; ++i
) {
2874 int key
= trace
->ev_qualifier_ids
.entries
[i
];
2876 if (value
.enabled
) {
2877 trace__init_bpf_map_syscall_args(trace
, key
, &value
);
2878 trace__init_syscall_bpf_progs(trace
, key
);
2881 err
= bpf_map_update_elem(fd
, &key
, &value
, BPF_EXIST
);
2889 static int __trace__init_syscalls_bpf_map(struct trace
*trace
, bool enabled
)
2891 int fd
= bpf_map__fd(trace
->syscalls
.map
);
2892 struct bpf_map_syscall_entry value
= {
2897 for (key
= 0; key
< trace
->sctbl
->syscalls
.nr_entries
; ++key
) {
2899 trace__init_bpf_map_syscall_args(trace
, key
, &value
);
2901 err
= bpf_map_update_elem(fd
, &key
, &value
, BPF_ANY
);
2909 static int trace__init_syscalls_bpf_map(struct trace
*trace
)
2911 bool enabled
= true;
2913 if (trace
->ev_qualifier_ids
.nr
)
2914 enabled
= trace
->not_ev_qualifier
;
2916 return __trace__init_syscalls_bpf_map(trace
, enabled
);
2919 static struct bpf_program
*trace__find_usable_bpf_prog_entry(struct trace
*trace
, struct syscall
*sc
)
2921 struct tep_format_field
*field
, *candidate_field
;
2925 * We're only interested in syscalls that have a pointer:
2927 for (field
= sc
->args
; field
; field
= field
->next
) {
2928 if (field
->flags
& TEP_FIELD_IS_POINTER
)
2929 goto try_to_find_pair
;
2935 for (id
= 0; id
< trace
->sctbl
->syscalls
.nr_entries
; ++id
) {
2936 struct syscall
*pair
= trace__syscall_info(trace
, NULL
, id
);
2937 struct bpf_program
*pair_prog
;
2938 bool is_candidate
= false;
2940 if (pair
== NULL
|| pair
== sc
||
2941 pair
->bpf_prog
.sys_enter
== trace
->syscalls
.unaugmented_prog
)
2944 for (field
= sc
->args
, candidate_field
= pair
->args
;
2945 field
&& candidate_field
; field
= field
->next
, candidate_field
= candidate_field
->next
) {
2946 bool is_pointer
= field
->flags
& TEP_FIELD_IS_POINTER
,
2947 candidate_is_pointer
= candidate_field
->flags
& TEP_FIELD_IS_POINTER
;
2950 if (!candidate_is_pointer
) {
2951 // The candidate just doesn't copies our pointer arg, might copy other pointers we want.
2955 if (candidate_is_pointer
) {
2956 // The candidate might copy a pointer we don't have, skip it.
2957 goto next_candidate
;
2962 if (strcmp(field
->type
, candidate_field
->type
))
2963 goto next_candidate
;
2965 is_candidate
= true;
2969 goto next_candidate
;
2972 * Check if the tentative pair syscall augmenter has more pointers, if it has,
2973 * then it may be collecting that and we then can't use it, as it would collect
2974 * more than what is common to the two syscalls.
2976 if (candidate_field
) {
2977 for (candidate_field
= candidate_field
->next
; candidate_field
; candidate_field
= candidate_field
->next
)
2978 if (candidate_field
->flags
& TEP_FIELD_IS_POINTER
)
2979 goto next_candidate
;
2982 pair_prog
= pair
->bpf_prog
.sys_enter
;
2984 * If the pair isn't enabled, then its bpf_prog.sys_enter will not
2985 * have been searched for, so search it here and if it returns the
2986 * unaugmented one, then ignore it, otherwise we'll reuse that BPF
2987 * program for a filtered syscall on a non-filtered one.
2989 * For instance, we have "!syscalls:sys_enter_renameat" and that is
2990 * useful for "renameat2".
2992 if (pair_prog
== NULL
) {
2993 pair_prog
= trace__find_syscall_bpf_prog(trace
, pair
, pair
->fmt
? pair
->fmt
->bpf_prog_name
.sys_enter
: NULL
, "enter");
2994 if (pair_prog
== trace
->syscalls
.unaugmented_prog
)
2995 goto next_candidate
;
2998 pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair
->name
, sc
->name
);
3007 static int trace__init_syscalls_bpf_prog_array_maps(struct trace
*trace
)
3009 int map_enter_fd
= bpf_map__fd(trace
->syscalls
.prog_array
.sys_enter
),
3010 map_exit_fd
= bpf_map__fd(trace
->syscalls
.prog_array
.sys_exit
);
3013 for (key
= 0; key
< trace
->sctbl
->syscalls
.nr_entries
; ++key
) {
3016 if (!trace__syscall_enabled(trace
, key
))
3019 trace__init_syscall_bpf_progs(trace
, key
);
3021 // It'll get at least the "!raw_syscalls:unaugmented"
3022 prog_fd
= trace__bpf_prog_sys_enter_fd(trace
, key
);
3023 err
= bpf_map_update_elem(map_enter_fd
, &key
, &prog_fd
, BPF_ANY
);
3026 prog_fd
= trace__bpf_prog_sys_exit_fd(trace
, key
);
3027 err
= bpf_map_update_elem(map_exit_fd
, &key
, &prog_fd
, BPF_ANY
);
3033 * Now lets do a second pass looking for enabled syscalls without
3034 * an augmenter that have a signature that is a superset of another
3035 * syscall with an augmenter so that we can auto-reuse it.
3037 * I.e. if we have an augmenter for the "open" syscall that has
3040 * int open(const char *pathname, int flags, mode_t mode);
3042 * I.e. that will collect just the first string argument, then we
3043 * can reuse it for the 'creat' syscall, that has this signature:
3045 * int creat(const char *pathname, mode_t mode);
3049 * int stat(const char *pathname, struct stat *statbuf);
3050 * int lstat(const char *pathname, struct stat *statbuf);
3052 * Because the 'open' augmenter will collect the first arg as a string,
3053 * and leave alone all the other args, which already helps with
3054 * beautifying 'stat' and 'lstat''s pathname arg.
3056 * Then, in time, when 'stat' gets an augmenter that collects both
3057 * first and second arg (this one on the raw_syscalls:sys_exit prog
3058 * array tail call, then that one will be used.
3060 for (key
= 0; key
< trace
->sctbl
->syscalls
.nr_entries
; ++key
) {
3061 struct syscall
*sc
= trace__syscall_info(trace
, NULL
, key
);
3062 struct bpf_program
*pair_prog
;
3065 if (sc
== NULL
|| sc
->bpf_prog
.sys_enter
== NULL
)
3069 * For now we're just reusing the sys_enter prog, and if it
3070 * already has an augmenter, we don't need to find one.
3072 if (sc
->bpf_prog
.sys_enter
!= trace
->syscalls
.unaugmented_prog
)
3076 * Look at all the other syscalls for one that has a signature
3077 * that is close enough that we can share:
3079 pair_prog
= trace__find_usable_bpf_prog_entry(trace
, sc
);
3080 if (pair_prog
== NULL
)
3083 sc
->bpf_prog
.sys_enter
= pair_prog
;
3086 * Update the BPF_MAP_TYPE_PROG_SHARED for raw_syscalls:sys_enter
3087 * with the fd for the program we're reusing:
3089 prog_fd
= bpf_program__fd(sc
->bpf_prog
.sys_enter
);
3090 err
= bpf_map_update_elem(map_enter_fd
, &key
, &prog_fd
, BPF_ANY
);
3099 static int trace__set_ev_qualifier_bpf_filter(struct trace
*trace __maybe_unused
)
3104 static int trace__init_syscalls_bpf_map(struct trace
*trace __maybe_unused
)
3109 static struct bpf_program
*trace__find_bpf_program_by_title(struct trace
*trace __maybe_unused
,
3110 const char *name __maybe_unused
)
3115 static int trace__init_syscalls_bpf_prog_array_maps(struct trace
*trace __maybe_unused
)
3119 #endif // HAVE_LIBBPF_SUPPORT
static int trace__set_ev_qualifier_filter(struct trace *trace)
{
	if (trace->syscalls.map)
		return trace__set_ev_qualifier_bpf_filter(trace);
	if (trace->syscalls.events.sys_enter)
		return trace__set_ev_qualifier_tp_filter(trace);
	return 0;
}
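/*
 * Fill the "pids_filtered" BPF map, when the augmenting BPF object provides
 * one, so that the BPF programs themselves drop samples for the filtered
 * pids, typically perf trace itself plus whatever was passed via
 * --filter-pids.
 */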
3130 static int bpf_map__set_filter_pids(struct bpf_map
*map __maybe_unused
,
3131 size_t npids __maybe_unused
, pid_t
*pids __maybe_unused
)
3134 #ifdef HAVE_LIBBPF_SUPPORT
3136 int map_fd
= bpf_map__fd(map
);
3139 for (i
= 0; i
< npids
; ++i
) {
3140 err
= bpf_map_update_elem(map_fd
, &pids
[i
], &value
, BPF_ANY
);
3148 static int trace__set_filter_loop_pids(struct trace
*trace
)
3150 unsigned int nr
= 1, err
;
3154 struct thread
*thread
= machine__find_thread(trace
->host
, pids
[0], pids
[0]);
3156 while (thread
&& nr
< ARRAY_SIZE(pids
)) {
3157 struct thread
*parent
= machine__find_thread(trace
->host
, thread
->ppid
, thread
->ppid
);
3162 if (!strcmp(thread__comm_str(parent
), "sshd") ||
3163 strstarts(thread__comm_str(parent
), "gnome-terminal")) {
3164 pids
[nr
++] = parent
->tid
;
3170 err
= perf_evlist__set_tp_filter_pids(trace
->evlist
, nr
, pids
);
3171 if (!err
&& trace
->filter_pids
.map
)
3172 err
= bpf_map__set_filter_pids(trace
->filter_pids
.map
, nr
, pids
);
3177 static int trace__set_filter_pids(struct trace
*trace
)
3181 * Better not use !target__has_task() here because we need to cover the
3182 * case where no threads were specified in the command line, but a
3183 * workload was, and in that case we will fill in the thread_map when
3184 * we fork the workload in perf_evlist__prepare_workload.
3186 if (trace
->filter_pids
.nr
> 0) {
3187 err
= perf_evlist__set_tp_filter_pids(trace
->evlist
, trace
->filter_pids
.nr
,
3188 trace
->filter_pids
.entries
);
3189 if (!err
&& trace
->filter_pids
.map
) {
3190 err
= bpf_map__set_filter_pids(trace
->filter_pids
.map
, trace
->filter_pids
.nr
,
3191 trace
->filter_pids
.entries
);
3193 } else if (perf_thread_map__pid(trace
->evlist
->core
.threads
, 0) == -1) {
3194 err
= trace__set_filter_loop_pids(trace
);
3200 static int __trace__deliver_event(struct trace
*trace
, union perf_event
*event
)
3202 struct evlist
*evlist
= trace
->evlist
;
3203 struct perf_sample sample
;
3206 err
= perf_evlist__parse_sample(evlist
, event
, &sample
);
3208 fprintf(trace
->output
, "Can't parse sample, err = %d, skipping...\n", err
);
3210 trace__handle_event(trace
, event
, &sample
);
3215 static int __trace__flush_events(struct trace
*trace
)
3217 u64 first
= ordered_events__first_time(&trace
->oe
.data
);
3218 u64 flush
= trace
->oe
.last
- NSEC_PER_SEC
;
3220 /* Is there some thing to flush.. */
3221 if (first
&& first
< flush
)
3222 return ordered_events__flush_time(&trace
->oe
.data
, flush
);
3227 static int trace__flush_events(struct trace
*trace
)
3229 return !trace
->sort_events
? 0 : __trace__flush_events(trace
);
3232 static int trace__deliver_event(struct trace
*trace
, union perf_event
*event
)
3236 if (!trace
->sort_events
)
3237 return __trace__deliver_event(trace
, event
);
3239 err
= perf_evlist__parse_sample_timestamp(trace
->evlist
, event
, &trace
->oe
.last
);
3240 if (err
&& err
!= -1)
3243 err
= ordered_events__queue(&trace
->oe
.data
, event
, trace
->oe
.last
, 0);
3247 return trace__flush_events(trace
);
3250 static int ordered_events__deliver_event(struct ordered_events
*oe
,
3251 struct ordered_event
*event
)
3253 struct trace
*trace
= container_of(oe
, struct trace
, oe
.data
);
3255 return __trace__deliver_event(trace
, event
->event
);
3258 static int trace__run(struct trace
*trace
, int argc
, const char **argv
)
3260 struct evlist
*evlist
= trace
->evlist
;
3261 struct evsel
*evsel
, *pgfault_maj
= NULL
, *pgfault_min
= NULL
;
3263 unsigned long before
;
3264 const bool forks
= argc
> 0;
3265 bool draining
= false;
3269 if (!trace
->raw_augmented_syscalls
) {
3270 if (trace
->trace_syscalls
&& trace__add_syscall_newtp(trace
))
3271 goto out_error_raw_syscalls
;
3273 if (trace
->trace_syscalls
)
3274 trace
->vfs_getname
= evlist__add_vfs_getname(evlist
);
3277 if ((trace
->trace_pgfaults
& TRACE_PFMAJ
)) {
3278 pgfault_maj
= perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ
);
3279 if (pgfault_maj
== NULL
)
3281 perf_evsel__config_callchain(pgfault_maj
, &trace
->opts
, &callchain_param
);
3282 evlist__add(evlist
, pgfault_maj
);
3285 if ((trace
->trace_pgfaults
& TRACE_PFMIN
)) {
3286 pgfault_min
= perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN
);
3287 if (pgfault_min
== NULL
)
3289 perf_evsel__config_callchain(pgfault_min
, &trace
->opts
, &callchain_param
);
3290 evlist__add(evlist
, pgfault_min
);
3294 perf_evlist__add_newtp(evlist
, "sched", "sched_stat_runtime",
3295 trace__sched_stat_runtime
))
3296 goto out_error_sched_stat_runtime
;
3299 * If a global cgroup was set, apply it to all the events without an
3300 * explicit cgroup. I.e.:
3302 * trace -G A -e sched:*switch
3304 * Will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc
3305 * _and_ sched:sched_switch to the 'A' cgroup, while:
3307 * trace -e sched:*switch -G A
3309 * will only set the sched:sched_switch event to the 'A' cgroup, all the
3310 * other events (raw_syscalls:sys_{enter,exit}, etc are left "without"
3311 * a cgroup (on the root cgroup, sys wide, etc).
3315 * trace -G A -e sched:*switch -G B
3317 * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes
3318 * to the 'B' cgroup.
3320 * evlist__set_default_cgroup() grabs a reference of the passed cgroup
3321 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL.
3324 evlist__set_default_cgroup(trace
->evlist
, trace
->cgroup
);
3326 err
= perf_evlist__create_maps(evlist
, &trace
->opts
.target
);
3328 fprintf(trace
->output
, "Problems parsing the target to trace, check your options!\n");
3329 goto out_delete_evlist
;
3332 err
= trace__symbols_init(trace
, evlist
);
3334 fprintf(trace
->output
, "Problems initializing symbol libraries!\n");
3335 goto out_delete_evlist
;
3338 perf_evlist__config(evlist
, &trace
->opts
, &callchain_param
);
3340 signal(SIGCHLD
, sig_handler
);
3341 signal(SIGINT
, sig_handler
);
3344 err
= perf_evlist__prepare_workload(evlist
, &trace
->opts
.target
,
3347 fprintf(trace
->output
, "Couldn't run the workload!\n");
3348 goto out_delete_evlist
;
3352 err
= evlist__open(evlist
);
3354 goto out_error_open
;
3356 err
= bpf__apply_obj_config();
3358 char errbuf
[BUFSIZ
];
3360 bpf__strerror_apply_obj_config(err
, errbuf
, sizeof(errbuf
));
3361 pr_err("ERROR: Apply config to BPF failed: %s\n",
3363 goto out_error_open
;
3366 err
= trace__set_filter_pids(trace
);
3370 if (trace
->syscalls
.map
)
3371 trace__init_syscalls_bpf_map(trace
);
3373 if (trace
->syscalls
.prog_array
.sys_enter
)
3374 trace__init_syscalls_bpf_prog_array_maps(trace
);
3376 if (trace
->ev_qualifier_ids
.nr
> 0) {
3377 err
= trace__set_ev_qualifier_filter(trace
);
3381 if (trace
->syscalls
.events
.sys_exit
) {
3382 pr_debug("event qualifier tracepoint filter: %s\n",
3383 trace
->syscalls
.events
.sys_exit
->filter
);
3388 * If the "close" syscall is not traced, then we will not have the
3389 * opportunity to, in syscall_arg__scnprintf_close_fd() invalidate the
3390 * fd->pathname table and were ending up showing the last value set by
3391 * syscalls opening a pathname and associating it with a descriptor or
3392 * reading it from /proc/pid/fd/ in cases where that doesn't make
3395 * So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is
3398 trace
->fd_path_disabled
= !trace__syscall_enabled(trace
, syscalltbl__id(trace
->sctbl
, "close"));
3400 err
= perf_evlist__apply_filters(evlist
, &evsel
);
3402 goto out_error_apply_filters
;
3404 if (trace
->dump
.map
)
3405 bpf_map__fprintf(trace
->dump
.map
, trace
->output
);
3407 err
= perf_evlist__mmap(evlist
, trace
->opts
.mmap_pages
);
3409 goto out_error_mmap
;
3411 if (!target__none(&trace
->opts
.target
) && !trace
->opts
.initial_delay
)
3412 evlist__enable(evlist
);
3415 perf_evlist__start_workload(evlist
);
3417 if (trace
->opts
.initial_delay
) {
3418 usleep(trace
->opts
.initial_delay
* 1000);
3419 evlist__enable(evlist
);
3422 trace
->multiple_threads
= perf_thread_map__pid(evlist
->core
.threads
, 0) == -1 ||
3423 evlist
->core
.threads
->nr
> 1 ||
3424 perf_evlist__first(evlist
)->core
.attr
.inherit
;
3427 * Now that we already used evsel->core.attr to ask the kernel to setup the
3428 * events, lets reuse evsel->core.attr.sample_max_stack as the limit in
3429 * trace__resolve_callchain(), allowing per-event max-stack settings
3430 * to override an explicitly set --max-stack global setting.
3432 evlist__for_each_entry(evlist
, evsel
) {
3433 if (evsel__has_callchain(evsel
) &&
3434 evsel
->core
.attr
.sample_max_stack
== 0)
3435 evsel
->core
.attr
.sample_max_stack
= trace
->max_stack
;
3438 before
= trace
->nr_events
;
3440 for (i
= 0; i
< evlist
->nr_mmaps
; i
++) {
3441 union perf_event
*event
;
3442 struct perf_mmap
*md
;
3444 md
= &evlist
->mmap
[i
];
3445 if (perf_mmap__read_init(md
) < 0)
3448 while ((event
= perf_mmap__read_event(md
)) != NULL
) {
3451 err
= trace__deliver_event(trace
, event
);
3455 perf_mmap__consume(md
);
3460 if (done
&& !draining
) {
3461 evlist__disable(evlist
);
3465 perf_mmap__read_done(md
);
3468 if (trace
->nr_events
== before
) {
3469 int timeout
= done
? 100 : -1;
3471 if (!draining
&& perf_evlist__poll(evlist
, timeout
) > 0) {
3472 if (perf_evlist__filter_pollfd(evlist
, POLLERR
| POLLHUP
| POLLNVAL
) == 0)
3477 if (trace__flush_events(trace
))
3485 thread__zput(trace
->current
);
3487 evlist__disable(evlist
);
3489 if (trace
->sort_events
)
3490 ordered_events__flush(&trace
->oe
.data
, OE_FLUSH__FINAL
);
3494 trace__fprintf_thread_summary(trace
, trace
->output
);
3496 if (trace
->show_tool_stats
) {
3497 fprintf(trace
->output
, "Stats:\n "
3498 " vfs_getname : %" PRIu64
"\n"
3499 " proc_getname: %" PRIu64
"\n",
3500 trace
->stats
.vfs_getname
,
3501 trace
->stats
.proc_getname
);
3506 trace__symbols__exit(trace
);
3508 evlist__delete(evlist
);
3509 cgroup__put(trace
->cgroup
);
3510 trace
->evlist
= NULL
;
3511 trace
->live
= false;
3514 char errbuf
[BUFSIZ
];
3516 out_error_sched_stat_runtime
:
3517 tracing_path__strerror_open_tp(errno
, errbuf
, sizeof(errbuf
), "sched", "sched_stat_runtime");
3520 out_error_raw_syscalls
:
3521 tracing_path__strerror_open_tp(errno
, errbuf
, sizeof(errbuf
), "raw_syscalls", "sys_(enter|exit)");
3525 perf_evlist__strerror_mmap(evlist
, errno
, errbuf
, sizeof(errbuf
));
3529 perf_evlist__strerror_open(evlist
, errno
, errbuf
, sizeof(errbuf
));
3532 fprintf(trace
->output
, "%s\n", errbuf
);
3533 goto out_delete_evlist
;
3535 out_error_apply_filters
:
3536 fprintf(trace
->output
,
3537 "Failed to set filter \"%s\" on event %s with %d (%s)\n",
3538 evsel
->filter
, perf_evsel__name(evsel
), errno
,
3539 str_error_r(errno
, errbuf
, sizeof(errbuf
)));
3540 goto out_delete_evlist
;
3543 fprintf(trace
->output
, "Not enough memory to run!\n");
3544 goto out_delete_evlist
;
3547 fprintf(trace
->output
, "errno=%d,%s\n", errno
, strerror(errno
));
3548 goto out_delete_evlist
;
3551 static int trace__replay(struct trace
*trace
)
3553 const struct evsel_str_handler handlers
[] = {
3554 { "probe:vfs_getname", trace__vfs_getname
, },
3556 struct perf_data data
= {
3558 .mode
= PERF_DATA_MODE_READ
,
3559 .force
= trace
->force
,
3561 struct perf_session
*session
;
3562 struct evsel
*evsel
;
3565 trace
->tool
.sample
= trace__process_sample
;
3566 trace
->tool
.mmap
= perf_event__process_mmap
;
3567 trace
->tool
.mmap2
= perf_event__process_mmap2
;
3568 trace
->tool
.comm
= perf_event__process_comm
;
3569 trace
->tool
.exit
= perf_event__process_exit
;
3570 trace
->tool
.fork
= perf_event__process_fork
;
3571 trace
->tool
.attr
= perf_event__process_attr
;
3572 trace
->tool
.tracing_data
= perf_event__process_tracing_data
;
3573 trace
->tool
.build_id
= perf_event__process_build_id
;
3574 trace
->tool
.namespaces
= perf_event__process_namespaces
;
3576 trace
->tool
.ordered_events
= true;
3577 trace
->tool
.ordering_requires_timestamps
= true;
3579 /* add tid to output */
3580 trace
->multiple_threads
= true;
3582 session
= perf_session__new(&data
, false, &trace
->tool
);
3583 if (session
== NULL
)
3586 if (trace
->opts
.target
.pid
)
3587 symbol_conf
.pid_list_str
= strdup(trace
->opts
.target
.pid
);
3589 if (trace
->opts
.target
.tid
)
3590 symbol_conf
.tid_list_str
= strdup(trace
->opts
.target
.tid
);
3592 if (symbol__init(&session
->header
.env
) < 0)
3595 trace
->host
= &session
->machines
.host
;
3597 err
= perf_session__set_tracepoints_handlers(session
, handlers
);
3601 evsel
= perf_evlist__find_tracepoint_by_name(session
->evlist
,
3602 "raw_syscalls:sys_enter");
3603 /* older kernels have syscalls tp versus raw_syscalls */
3605 evsel
= perf_evlist__find_tracepoint_by_name(session
->evlist
,
3606 "syscalls:sys_enter");
3609 (perf_evsel__init_raw_syscall_tp(evsel
, trace__sys_enter
) < 0 ||
3610 perf_evsel__init_sc_tp_ptr_field(evsel
, args
))) {
3611 pr_err("Error during initialize raw_syscalls:sys_enter event\n");
3615 evsel
= perf_evlist__find_tracepoint_by_name(session
->evlist
,
3616 "raw_syscalls:sys_exit");
3618 evsel
= perf_evlist__find_tracepoint_by_name(session
->evlist
,
3619 "syscalls:sys_exit");
3621 (perf_evsel__init_raw_syscall_tp(evsel
, trace__sys_exit
) < 0 ||
3622 perf_evsel__init_sc_tp_uint_field(evsel
, ret
))) {
3623 pr_err("Error during initialize raw_syscalls:sys_exit event\n");
3627 evlist__for_each_entry(session
->evlist
, evsel
) {
3628 if (evsel
->core
.attr
.type
== PERF_TYPE_SOFTWARE
&&
3629 (evsel
->core
.attr
.config
== PERF_COUNT_SW_PAGE_FAULTS_MAJ
||
3630 evsel
->core
.attr
.config
== PERF_COUNT_SW_PAGE_FAULTS_MIN
||
3631 evsel
->core
.attr
.config
== PERF_COUNT_SW_PAGE_FAULTS
))
3632 evsel
->handler
= trace__pgfault
;
3637 err
= perf_session__process_events(session
);
3639 pr_err("Failed to process events, error %d", err
);
3641 else if (trace
->summary
)
3642 trace__fprintf_thread_summary(trace
, trace
->output
);
3645 perf_session__delete(session
);
static size_t trace__fprintf_threads_header(FILE *fp)
{
	size_t printed;

	printed = fprintf(fp, "\n Summary of events:\n\n");

	return printed;
}
3659 DEFINE_RESORT_RB(syscall_stats
, a
->msecs
> b
->msecs
,
3660 struct stats
*stats
;
3665 struct int_node
*source
= rb_entry(nd
, struct int_node
, rb_node
);
3666 struct stats
*stats
= source
->priv
;
3668 entry
->syscall
= source
->i
;
3669 entry
->stats
= stats
;
3670 entry
->msecs
= stats
? (u64
)stats
->n
* (avg_stats(stats
) / NSEC_PER_MSEC
) : 0;
3673 static size_t thread__dump_stats(struct thread_trace
*ttrace
,
3674 struct trace
*trace
, FILE *fp
)
3679 DECLARE_RESORT_RB_INTLIST(syscall_stats
, ttrace
->syscall_stats
);
3681 if (syscall_stats
== NULL
)
3684 printed
+= fprintf(fp
, "\n");
3686 printed
+= fprintf(fp
, " syscall calls total min avg max stddev\n");
3687 printed
+= fprintf(fp
, " (msec) (msec) (msec) (msec) (%%)\n");
3688 printed
+= fprintf(fp
, " --------------- -------- --------- --------- --------- --------- ------\n");
3690 resort_rb__for_each_entry(nd
, syscall_stats
) {
3691 struct stats
*stats
= syscall_stats_entry
->stats
;
3693 double min
= (double)(stats
->min
) / NSEC_PER_MSEC
;
3694 double max
= (double)(stats
->max
) / NSEC_PER_MSEC
;
3695 double avg
= avg_stats(stats
);
3697 u64 n
= (u64
) stats
->n
;
3699 pct
= avg
? 100.0 * stddev_stats(stats
)/avg
: 0.0;
3700 avg
/= NSEC_PER_MSEC
;
3702 sc
= &trace
->syscalls
.table
[syscall_stats_entry
->syscall
];
3703 printed
+= fprintf(fp
, " %-15s", sc
->name
);
3704 printed
+= fprintf(fp
, " %8" PRIu64
" %9.3f %9.3f %9.3f",
3705 n
, syscall_stats_entry
->msecs
, min
, avg
);
3706 printed
+= fprintf(fp
, " %9.3f %9.2f%%\n", max
, pct
);
3710 resort_rb__delete(syscall_stats
);
3711 printed
+= fprintf(fp
, "\n\n");
3716 static size_t trace__fprintf_thread(FILE *fp
, struct thread
*thread
, struct trace
*trace
)
3719 struct thread_trace
*ttrace
= thread__priv(thread
);
3725 ratio
= (double)ttrace
->nr_events
/ trace
->nr_events
* 100.0;
3727 printed
+= fprintf(fp
, " %s (%d), ", thread__comm_str(thread
), thread
->tid
);
3728 printed
+= fprintf(fp
, "%lu events, ", ttrace
->nr_events
);
3729 printed
+= fprintf(fp
, "%.1f%%", ratio
);
3731 printed
+= fprintf(fp
, ", %lu majfaults", ttrace
->pfmaj
);
3733 printed
+= fprintf(fp
, ", %lu minfaults", ttrace
->pfmin
);
3735 printed
+= fprintf(fp
, ", %.3f msec\n", ttrace
->runtime_ms
);
3736 else if (fputc('\n', fp
) != EOF
)
3739 printed
+= thread__dump_stats(ttrace
, trace
, fp
);
3744 static unsigned long thread__nr_events(struct thread_trace
*ttrace
)
3746 return ttrace
? ttrace
->nr_events
: 0;
3749 DEFINE_RESORT_RB(threads
, (thread__nr_events(a
->thread
->priv
) < thread__nr_events(b
->thread
->priv
)),
3750 struct thread
*thread
;
3753 entry
->thread
= rb_entry(nd
, struct thread
, rb_node
);
3756 static size_t trace__fprintf_thread_summary(struct trace
*trace
, FILE *fp
)
3758 size_t printed
= trace__fprintf_threads_header(fp
);
3762 for (i
= 0; i
< THREADS__TABLE_SIZE
; i
++) {
3763 DECLARE_RESORT_RB_MACHINE_THREADS(threads
, trace
->host
, i
);
3765 if (threads
== NULL
) {
3766 fprintf(fp
, "%s", "Error sorting output by nr_events!\n");
3770 resort_rb__for_each_entry(nd
, threads
)
3771 printed
+= trace__fprintf_thread(fp
, threads_entry
->thread
, trace
);
3773 resort_rb__delete(threads
);
static int trace__set_duration(const struct option *opt, const char *str,
			       int unset __maybe_unused)
{
	struct trace *trace = opt->value;

	trace->duration_filter = atof(str);
	return 0;
}
3787 static int trace__set_filter_pids_from_option(const struct option
*opt
, const char *str
,
3788 int unset __maybe_unused
)
3792 struct trace
*trace
= opt
->value
;
3794 * FIXME: introduce a intarray class, plain parse csv and create a
3795 * { int nr, int entries[] } struct...
3797 struct intlist
*list
= intlist__new(str
);
3802 i
= trace
->filter_pids
.nr
= intlist__nr_entries(list
) + 1;
3803 trace
->filter_pids
.entries
= calloc(i
, sizeof(pid_t
));
3805 if (trace
->filter_pids
.entries
== NULL
)
3808 trace
->filter_pids
.entries
[0] = getpid();
3810 for (i
= 1; i
< trace
->filter_pids
.nr
; ++i
)
3811 trace
->filter_pids
.entries
[i
] = intlist__entry(list
, i
- 1)->i
;
3813 intlist__delete(list
);
static int trace__open_output(struct trace *trace, const char *filename)
{
	struct stat st;

	if (!stat(filename, &st) && st.st_size) {
		char oldname[PATH_MAX];

		scnprintf(oldname, sizeof(oldname), "%s.old", filename);
		unlink(oldname);
		rename(filename, oldname);
	}

	trace->output = fopen(filename, "w");

	return trace->output == NULL ? -errno : 0;
}
static int parse_pagefaults(const struct option *opt, const char *str,
			    int unset __maybe_unused)
{
	int *trace_pgfaults = opt->value;

	if (strcmp(str, "all") == 0)
		*trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
	else if (strcmp(str, "maj") == 0)
		*trace_pgfaults |= TRACE_PFMAJ;
	else if (strcmp(str, "min") == 0)
		*trace_pgfaults |= TRACE_PFMIN;
	else
		return -1;

	return 0;
}
static void evlist__set_evsel_handler(struct evlist *evlist, void *handler)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel->handler = handler;
}
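/*
 * For syscalls:sys_{enter,exit}_NAME tracepoints added with -e/--event,
 * initialize the id/args/ret tracepoint field offsets so the syscall arg
 * beautifiers can be used on them as well.
 */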
3861 static int evlist__set_syscall_tp_fields(struct evlist
*evlist
)
3863 struct evsel
*evsel
;
3865 evlist__for_each_entry(evlist
, evsel
) {
3866 if (evsel
->priv
|| !evsel
->tp_format
)
3869 if (strcmp(evsel
->tp_format
->system
, "syscalls"))
3872 if (perf_evsel__init_syscall_tp(evsel
))
3875 if (!strncmp(evsel
->tp_format
->name
, "sys_enter_", 10)) {
3876 struct syscall_tp
*sc
= evsel
->priv
;
3878 if (__tp_field__init_ptr(&sc
->args
, sc
->id
.offset
+ sizeof(u64
)))
3880 } else if (!strncmp(evsel
->tp_format
->name
, "sys_exit_", 9)) {
3881 struct syscall_tp
*sc
= evsel
->priv
;
3883 if (__tp_field__init_uint(&sc
->ret
, sizeof(u64
), sc
->id
.offset
+ sizeof(u64
), evsel
->needs_swap
))
3892 * XXX: Hackish, just splitting the combined -e+--event (syscalls
3893 * (raw_syscalls:{sys_{enter,exit}} + events (tracepoints, HW, SW, etc) to use
3894 * existing facilities unchanged (trace->ev_qualifier + parse_options()).
3896 * It'd be better to introduce a parse_options() variant that would return a
3897 * list with the terms it didn't match to an event...
3899 static int trace__parse_events_option(const struct option
*opt
, const char *str
,
3900 int unset __maybe_unused
)
3902 struct trace
*trace
= (struct trace
*)opt
->value
;
3903 const char *s
= str
;
3904 char *sep
= NULL
, *lists
[2] = { NULL
, NULL
, };
3905 int len
= strlen(str
) + 1, err
= -1, list
, idx
;
3906 char *strace_groups_dir
= system_path(STRACE_GROUPS_DIR
);
3907 char group_name
[PATH_MAX
];
3908 struct syscall_fmt
*fmt
;
3910 if (strace_groups_dir
== NULL
)
3915 trace
->not_ev_qualifier
= true;
3919 if ((sep
= strchr(s
, ',')) != NULL
)
3923 if (syscalltbl__id(trace
->sctbl
, s
) >= 0 ||
3924 syscalltbl__strglobmatch_first(trace
->sctbl
, s
, &idx
) >= 0) {
3929 fmt
= syscall_fmt__find_by_alias(s
);
3934 path__join(group_name
, sizeof(group_name
), strace_groups_dir
, s
);
3935 if (access(group_name
, R_OK
) == 0)
3940 sprintf(lists
[list
] + strlen(lists
[list
]), ",%s", s
);
3942 lists
[list
] = malloc(len
);
3943 if (lists
[list
] == NULL
)
3945 strcpy(lists
[list
], s
);
3955 if (lists
[1] != NULL
) {
3956 struct strlist_config slist_config
= {
3957 .dirname
= strace_groups_dir
,
3960 trace
->ev_qualifier
= strlist__new(lists
[1], &slist_config
);
3961 if (trace
->ev_qualifier
== NULL
) {
3962 fputs("Not enough memory to parse event qualifier", trace
->output
);
3966 if (trace__validate_ev_qualifier(trace
))
3968 trace
->trace_syscalls
= true;
3974 struct option o
= OPT_CALLBACK('e', "event", &trace
->evlist
, "event",
3975 "event selector. use 'perf list' to list available events",
3976 parse_events_option
);
3977 err
= parse_events_option(&o
, lists
[0], 0);
3986 static int trace__parse_cgroups(const struct option
*opt
, const char *str
, int unset
)
3988 struct trace
*trace
= opt
->value
;
3990 if (!list_empty(&trace
->evlist
->core
.entries
))
3991 return parse_cgroups(opt
, str
, unset
);
3993 trace
->cgroup
= evlist__findnew_cgroup(trace
->evlist
, str
);
static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name)
{
	if (trace->bpf_obj == NULL)
		return NULL;

	return bpf_object__find_map_by_name(trace->bpf_obj, name);
}

static void trace__set_bpf_map_filtered_pids(struct trace *trace)
{
	trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered");
}

static void trace__set_bpf_map_syscalls(struct trace *trace)
{
	trace->syscalls.map = trace__find_bpf_map_by_name(trace, "syscalls");
	trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter");
	trace->syscalls.prog_array.sys_exit  = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit");
}
4018 static int trace__config(const char *var
, const char *value
, void *arg
)
4020 struct trace
*trace
= arg
;
4023 if (!strcmp(var
, "trace.add_events")) {
4024 struct option o
= OPT_CALLBACK('e', "event", &trace
->evlist
, "event",
4025 "event selector. use 'perf list' to list available events",
4026 parse_events_option
);
4028 * We can't propagate parse_event_option() return, as it is 1
4029 * for failure while perf_config() expects -1.
4031 if (parse_events_option(&o
, value
, 0))
4033 } else if (!strcmp(var
, "trace.show_timestamp")) {
4034 trace
->show_tstamp
= perf_config_bool(var
, value
);
4035 } else if (!strcmp(var
, "trace.show_duration")) {
4036 trace
->show_duration
= perf_config_bool(var
, value
);
4037 } else if (!strcmp(var
, "trace.show_arg_names")) {
4038 trace
->show_arg_names
= perf_config_bool(var
, value
);
4039 if (!trace
->show_arg_names
)
4040 trace
->show_zeros
= true;
4041 } else if (!strcmp(var
, "trace.show_zeros")) {
4042 bool new_show_zeros
= perf_config_bool(var
, value
);
4043 if (!trace
->show_arg_names
&& !new_show_zeros
) {
4044 pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n");
4047 trace
->show_zeros
= new_show_zeros
;
4048 } else if (!strcmp(var
, "trace.show_prefix")) {
4049 trace
->show_string_prefix
= perf_config_bool(var
, value
);
4050 } else if (!strcmp(var
, "trace.no_inherit")) {
4051 trace
->opts
.no_inherit
= perf_config_bool(var
, value
);
4052 } else if (!strcmp(var
, "trace.args_alignment")) {
4053 int args_alignment
= 0;
4054 if (perf_config_int(&args_alignment
, var
, value
) == 0)
4055 trace
->args_alignment
= args_alignment
;
4061 int cmd_trace(int argc
, const char **argv
)
4063 const char *trace_usage
[] = {
4064 "perf trace [<options>] [<command>]",
4065 "perf trace [<options>] -- <command> [<options>]",
4066 "perf trace record [<options>] [<command>]",
4067 "perf trace record [<options>] -- <command> [<options>]",
4070 struct trace trace
= {
4076 .user_freq
= UINT_MAX
,
4077 .user_interval
= ULLONG_MAX
,
4078 .no_buffering
= true,
4079 .mmap_pages
= UINT_MAX
,
4083 .show_tstamp
= true,
4084 .show_duration
= true,
4085 .show_arg_names
= true,
4086 .args_alignment
= 70,
4087 .trace_syscalls
= false,
4088 .kernel_syscallchains
= false,
4089 .max_stack
= UINT_MAX
,
4090 .max_events
= ULONG_MAX
,
4092 const char *map_dump_str
= NULL
;
4093 const char *output_name
= NULL
;
	const struct option trace_options[] = {
	OPT_CALLBACK('e', "event", &trace, "event",
		     "event/syscall selector. use 'perf list' to list available events",
		     trace__parse_events_option),
	OPT_BOOLEAN(0, "comm", &trace.show_comm,
		    "show the thread COMM next to its id"),
	OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
	OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
		     trace__parse_events_option),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
	OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
		    "trace events on existing process id"),
	OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
		    "trace events on existing thread id"),
	OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
		     "pids to filter (by the kernel)", trace__set_filter_pids_from_option),
	OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
		   "user to profile"),
	OPT_CALLBACK(0, "duration", &trace, "float",
		     "show only events with duration > N.M ms",
		     trace__set_duration),
#ifdef HAVE_LIBBPF_SUPPORT
	OPT_STRING(0, "map-dump", &map_dump_str, "BPF map", "BPF map to periodically dump"),
#endif
	OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
	OPT_INCR('v', "verbose", &verbose, "be more verbose"),
	OPT_BOOLEAN('T', "time", &trace.full_time,
		    "Show full timestamp, not time relative to first start"),
	OPT_BOOLEAN(0, "failure", &trace.failure_only,
		    "Show only syscalls that failed"),
	OPT_BOOLEAN('s', "summary", &trace.summary_only,
		    "Show only syscall summary with statistics"),
	OPT_BOOLEAN('S', "with-summary", &trace.summary,
		    "Show all syscalls and summary with statistics"),
	OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
		     "Trace pagefaults", parse_pagefaults, "maj"),
	OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
	OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
	OPT_CALLBACK(0, "call-graph", &trace.opts,
		     "record_mode[,record_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
		    "Show the kernel callchains on the syscall exit path"),
	OPT_ULONG(0, "max-events", &trace.max_events,
		"Set the maximum number of events to print, exit after that is reached. "),
	OPT_UINTEGER(0, "min-stack", &trace.min_stack,
		     "Set the minimum stack depth when parsing the callchain, "
		     "anything below the specified depth will be ignored."),
	OPT_UINTEGER(0, "max-stack", &trace.max_stack,
		     "Set the maximum stack depth when parsing the callchain, "
		     "anything beyond the specified depth will be ignored. "
		     "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
	OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
		    "Sort batch of events before processing, use if getting out of order events"),
	OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
		    "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"),
	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
		     "per thread proc mmap processing timeout in ms"),
	OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
		     trace__parse_cgroups),
	OPT_UINTEGER('D', "delay", &trace.opts.initial_delay,
		     "ms to wait before starting measurement after program "
		     "start"),
	OPTS_EVSWITCH(&trace.evswitch),
	OPT_END()
	};
	bool __maybe_unused max_stack_user_set = true;
	bool mmap_pages_user_set = true;
	struct evsel *evsel;
	const char * const trace_subcommands[] = { "record", NULL };
	int err = -1;
	char bf[BUFSIZ];
	signal(SIGSEGV, sighandler_dump_stack);
	signal(SIGFPE, sighandler_dump_stack);

	trace.evlist = evlist__new();
	trace.sctbl = syscalltbl__new();

	if (trace.evlist == NULL || trace.sctbl == NULL) {
		pr_err("Not enough memory to run!\n");
		err = -ENOMEM;
		goto out;
	}

	/*
	 * Parsing .perfconfig may entail creating a BPF event, that may need
	 * to create BPF maps, so bump RLIM_MEMLOCK as the default 64K setting
	 * is too small. This affects just this process, not touching the
	 * global setting. If it fails we'll get something in 'perf trace -v'
	 * to help diagnose the problem.
	 */
	rlimit__bump_memlock();
	err = perf_config(trace__config, &trace);
	if (err)
		goto out;

	argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
					trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);

	if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
		usage_with_options_msg(trace_usage, trace_options,
				       "cgroup monitoring only available in system-wide mode");
	}
	evsel = bpf__setup_output_event(trace.evlist, "__augmented_syscalls__");
	if (IS_ERR(evsel)) {
		bpf__strerror_setup_output_event(trace.evlist, PTR_ERR(evsel), bf, sizeof(bf));
		pr_err("ERROR: Setup trace syscalls enter failed: %s\n", bf);
		goto out;
	}

	if (evsel) {
		trace.syscalls.events.augmented = evsel;

		evsel = perf_evlist__find_tracepoint_by_name(trace.evlist, "raw_syscalls:sys_enter");
		if (evsel == NULL) {
			pr_err("ERROR: raw_syscalls:sys_enter not found in the augmented BPF object\n");
			goto out;
		}

		if (evsel->bpf_obj == NULL) {
			pr_err("ERROR: raw_syscalls:sys_enter not associated to a BPF object\n");
			goto out;
		}

		trace.bpf_obj = evsel->bpf_obj;

		trace__set_bpf_map_filtered_pids(&trace);
		trace__set_bpf_map_syscalls(&trace);
		trace.syscalls.unaugmented_prog = trace__find_bpf_program_by_title(&trace, "!raw_syscalls:unaugmented");
	}
	err = bpf__setup_stdout(trace.evlist);
	if (err) {
		bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
		pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
		goto out;
	}

	err = -1;

	if (map_dump_str) {
		trace.dump.map = trace__find_bpf_map_by_name(&trace, map_dump_str);
		if (trace.dump.map == NULL) {
			pr_err("ERROR: BPF map \"%s\" not found\n", map_dump_str);
			goto out;
		}
	}
	if (trace.trace_pgfaults) {
		trace.opts.sample_address = true;
		trace.opts.sample_time = true;
	}

	if (trace.opts.mmap_pages == UINT_MAX)
		mmap_pages_user_set = false;
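
	/*
	 * No --max-stack given: when replaying from a perf.data file (-i) use
	 * the compile-time maximum, otherwise honour the running kernel's
	 * kernel.perf_event_max_stack sysctl.
	 */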
	if (trace.max_stack == UINT_MAX) {
		trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
		max_stack_user_set = false;
	}
#ifdef HAVE_DWARF_UNWIND_SUPPORT
	if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
		record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
	}
#endif
	if (callchain_param.enabled) {
		if (!mmap_pages_user_set && geteuid() == 0)
			trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;

		symbol_conf.use_callchain = true;
	}
	if (trace.evlist->core.nr_entries > 0) {
		evlist__set_evsel_handler(trace.evlist, trace__event_handler);
		if (evlist__set_syscall_tp_fields(trace.evlist)) {
			perror("failed to set syscalls:* tracepoint fields");
			goto out;
		}
	}

	if (trace.sort_events) {
		ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
		ordered_events__set_copy_on_queue(&trace.oe.data, true);
	}
	/*
	 * If we are augmenting syscalls, then combine what we put in the
	 * __augmented_syscalls__ BPF map with what is in the
	 * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF,
	 * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit.
	 *
	 * We'll switch to look at two BPF maps, one for sys_enter and the
	 * other for sys_exit when we start augmenting the sys_exit paths with
	 * buffers that are being copied from kernel to userspace, think 'read'
	 * syscall.
	 */
	if (trace.syscalls.events.augmented) {
		evlist__for_each_entry(trace.evlist, evsel) {
			bool raw_syscalls_sys_exit = strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_exit") == 0;

			if (raw_syscalls_sys_exit) {
				trace.raw_augmented_syscalls = true;
				goto init_augmented_syscall_tp;
			}

			if (trace.syscalls.events.augmented->priv == NULL &&
			    strstr(perf_evsel__name(evsel), "syscalls:sys_enter")) {
				struct evsel *augmented = trace.syscalls.events.augmented;
				if (perf_evsel__init_augmented_syscall_tp(augmented, evsel) ||
				    perf_evsel__init_augmented_syscall_tp_args(augmented))
					goto out;
				/*
				 * Augmented is __augmented_syscalls__ BPF_OUTPUT event
				 * Above we made sure we can get from the payload the tp fields
				 * that we get from syscalls:sys_enter tracefs format file.
				 */
				augmented->handler = trace__sys_enter;
				/*
				 * Now we do the same for the *syscalls:sys_enter event so that
				 * if we handle it directly, i.e. if the BPF prog returns 0 so
				 * as not to filter it, then we'll handle it just like we would
				 * for the BPF_OUTPUT one:
				 */
				if (perf_evsel__init_augmented_syscall_tp(evsel, evsel) ||
				    perf_evsel__init_augmented_syscall_tp_args(evsel))
					goto out;
				evsel->handler = trace__sys_enter;
			}

			if (strstarts(perf_evsel__name(evsel), "syscalls:sys_exit_")) {
				struct syscall_tp *sc;
init_augmented_syscall_tp:
				if (perf_evsel__init_augmented_syscall_tp(evsel, evsel))
					goto out;
				sc = evsel->priv;
				/*
				 * For now with BPF raw_augmented we hook into
				 * raw_syscalls:sys_enter and there we get all
				 * 6 syscall args plus the tracepoint common
				 * fields and the syscall_nr (another long).
				 * So we check if that is the case and if so
				 * don't look after the sc->args_size but
				 * always after the full raw_syscalls:sys_enter
				 * payload, which is fixed.
				 *
				 * We'll revisit this later to pass
				 * s->args_size to the BPF augmenter (now
				 * tools/perf/examples/bpf/augmented_raw_syscalls.c),
				 * so that it copies only what we need for each
				 * syscall, like what happens when we use
				 * syscalls:sys_enter_NAME, so that we reduce
				 * the kernel/userspace traffic to just what is
				 * needed for each syscall.
				 */
				if (trace.raw_augmented_syscalls)
					trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
				perf_evsel__init_augmented_syscall_tp_ret(evsel);
				evsel->handler = trace__sys_exit;
			}
		}
	}
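
	/*
	 * "perf trace record ..." is a shortcut: trace__record() builds a
	 * 'perf record' command line, adding the raw_syscalls and/or page
	 * fault events as needed, so the result can be analyzed later with
	 * "perf trace -i perf.data".
	 */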
	if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
		return trace__record(&trace, argc-1, &argv[1]);

	/* summary_only implies summary option, but don't overwrite summary if set */
	if (trace.summary_only)
		trace.summary = trace.summary_only;
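
	/*
	 * Nothing was asked for explicitly (no -e/--event, no --pf, no
	 * --syscalls), so fall back to the strace-like default of tracing
	 * all syscalls.
	 */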
	if (!trace.trace_syscalls && !trace.trace_pgfaults &&
	    trace.evlist->core.nr_entries == 0 /* Was --events used? */) {
		trace.trace_syscalls = true;
	}
	if (output_name != NULL) {
		err = trace__open_output(&trace, output_name);
		if (err < 0) {
			perror("failed to create output file");
			goto out;
		}
	}

	err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
	if (err)
		goto out_close;

	err = target__validate(&trace.opts.target);
	if (err) {
		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}

	err = target__parse_uid(&trace.opts.target);
	if (err) {
		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}

	if (!argc && target__none(&trace.opts.target))
		trace.opts.target.system_wide = true;
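
	/*
	 * With -i we replay a previously recorded session from a perf.data
	 * file, otherwise we set up the events and run the workload live.
	 */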
	if (input_name)
		err = trace__replay(&trace);
	else
		err = trace__run(&trace, argc, argv);

out_close:
	if (output_name != NULL)
		fclose(trace.output);
out:
	return err;
}