]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - tools/perf/builtin-trace.c
60053d49539bd98767f787796eba807eeba89576
[mirror_ubuntu-artful-kernel.git] / tools / perf / builtin-trace.c
1 /*
2 * builtin-trace.c
3 *
4 * Builtin 'trace' command:
5 *
6 * Display a continuously updated trace of any workload, CPU, specific PID,
7 * system wide, etc. Default format is loosely strace like, but any other
8 * event may be specified using --event.
9 *
10 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
11 *
12 * Initially based on the 'trace' prototype by Thomas Gleixner:
13 *
14 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
15 *
16 * Released under the GPL v2. (and only v2, not any later version)
17 */
18
19 #include <traceevent/event-parse.h>
20 #include <api/fs/tracing_path.h>
21 #include "builtin.h"
22 #include "util/color.h"
23 #include "util/debug.h"
24 #include "util/evlist.h"
25 #include <subcmd/exec-cmd.h>
26 #include "util/machine.h"
27 #include "util/session.h"
28 #include "util/thread.h"
29 #include <subcmd/parse-options.h>
30 #include "util/strlist.h"
31 #include "util/intlist.h"
32 #include "util/thread_map.h"
33 #include "util/stat.h"
34 #include "trace-event.h"
35 #include "util/parse-events.h"
36 #include "util/bpf-loader.h"
37 #include "callchain.h"
38 #include "syscalltbl.h"
39 #include "rb_resort.h"
40
41 #include <libaudit.h> /* FIXME: Still needed for audit_errno_to_name */
42 #include <stdlib.h>
43 #include <string.h>
44 #include <linux/err.h>
45 #include <linux/filter.h>
46 #include <linux/audit.h>
47 #include <linux/random.h>
48 #include <linux/stringify.h>
49 #include <linux/time64.h>
50
51 #ifndef O_CLOEXEC
52 # define O_CLOEXEC 02000000
53 #endif
54
/*
 * Global state for one 'perf trace' session: the perf_tool callbacks,
 * the syscall id -> descriptor table, the evsels hooked on the syscall
 * entry/exit tracepoints and all the user-selectable knobs.
 */
struct trace {
	struct perf_tool	tool;
	struct syscalltbl	*sctbl;
	struct {
		int		max;
		struct syscall  *table;
		struct {	/* evsels for the syscall entry/exit tracepoints */
			struct perf_evsel *sys_enter,
					  *sys_exit;
		}		events;
	} syscalls;
	struct record_opts	opts;
	struct perf_evlist	*evlist;
	struct machine		*host;
	struct thread		*current;
	u64			base_time;	/* subtracted from tstamps in trace__fprintf_tstamp() */
	FILE			*output;	/* where all the printing goes */
	unsigned long		nr_events;
	struct strlist		*ev_qualifier;
	struct {
		size_t		nr;
		int		*entries;
	}			ev_qualifier_ids;
	struct {
		size_t		nr;
		pid_t		*entries;
	}			filter_pids;
	double			duration_filter; /* in ms, see trace__filter_duration() */
	double			runtime_ms;
	struct {	/* tool self-stats, proc_getname bumped in thread__fd_path() */
		u64		vfs_getname,
				proc_getname;
	} stats;
	unsigned int		max_stack;
	unsigned int		min_stack;
	bool			not_ev_qualifier;
	bool			live;		/* live session: /proc may be consulted, see thread__fd_path() */
	bool			full_time;
	bool			sched;
	bool			multiple_threads;
	bool			summary;
	bool			summary_only;
	bool			show_comm;
	bool			show_tool_stats;
	bool			trace_syscalls;
	bool			kernel_syscallchains;
	bool			force;
	bool			vfs_getname;	/* vfs_getname hook in place, see syscall_arg__scnprintf_filename() */
	int			trace_pgfaults;
	int			open_id;
};
106
/*
 * Accessor for one field of a tracepoint payload: the byte offset into
 * the raw sample data plus a reader that returns the field either as an
 * integer or as a pointer into the payload (the two are exclusive).
 */
struct tp_field {
	int offset;
	union {
		u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
		void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
	};
};
114
/*
 * Generate tp_field__u{8,16,32,64}(): read a host-endian unsigned
 * integer of the given width from the raw payload at field->offset.
 * memcpy is used because the payload offset may be unaligned.
 */
#define TP_UINT_FIELD(bits) \
static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return value; \
}

TP_UINT_FIELD(8);
TP_UINT_FIELD(16);
TP_UINT_FIELD(32);
TP_UINT_FIELD(64);
127
/*
 * Generate tp_field__swapped_u{16,32,64}(): same as TP_UINT_FIELD but
 * byte-swapping the value, for samples recorded on a machine with the
 * opposite endianness (see evsel->needs_swap).
 */
#define TP_UINT_FIELD__SWAPPED(bits) \
static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return bswap_##bits(value);\
}

TP_UINT_FIELD__SWAPPED(16);
TP_UINT_FIELD__SWAPPED(32);
TP_UINT_FIELD__SWAPPED(64);
139
140 static int tp_field__init_uint(struct tp_field *field,
141 struct format_field *format_field,
142 bool needs_swap)
143 {
144 field->offset = format_field->offset;
145
146 switch (format_field->size) {
147 case 1:
148 field->integer = tp_field__u8;
149 break;
150 case 2:
151 field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
152 break;
153 case 4:
154 field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
155 break;
156 case 8:
157 field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
158 break;
159 default:
160 return -1;
161 }
162
163 return 0;
164 }
165
/* Return a pointer into the raw payload at the field's offset. */
static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
{
	return sample->raw_data + field->offset;
}

/* Wire up 'field' to be read as a pointer into the payload. */
static int tp_field__init_ptr(struct tp_field *field, struct format_field *format_field)
{
	field->offset = format_field->offset;
	field->pointer = tp_field__ptr;
	return 0;
}
177
/*
 * Fields of interest in the syscall entry/exit tracepoints: the syscall
 * id plus either its arguments (sys_enter) or its return value
 * (sys_exit) -- never both at once, hence the union.
 */
struct syscall_tp {
	struct tp_field id;
	union {
		struct tp_field args, ret;
	};
};
184
185 static int perf_evsel__init_tp_uint_field(struct perf_evsel *evsel,
186 struct tp_field *field,
187 const char *name)
188 {
189 struct format_field *format_field = perf_evsel__field(evsel, name);
190
191 if (format_field == NULL)
192 return -1;
193
194 return tp_field__init_uint(field, format_field, evsel->needs_swap);
195 }
196
/* Resolve tracepoint field 'name' into evsel->priv (syscall_tp) as an integer reader. */
#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
	({ struct syscall_tp *sc = evsel->priv;\
	   perf_evsel__init_tp_uint_field(evsel, &sc->name, #name); })
200
/*
 * Look up the tracepoint field 'name' in the evsel's format and set up
 * 'field' to read it as a pointer; -1 if the field is absent.
 */
static int perf_evsel__init_tp_ptr_field(struct perf_evsel *evsel,
					 struct tp_field *field,
					 const char *name)
{
	struct format_field *format_field = perf_evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_ptr(field, format_field);
}

/* Resolve tracepoint field 'name' into evsel->priv (syscall_tp) as a pointer reader. */
#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
	({ struct syscall_tp *sc = evsel->priv;\
	   perf_evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
216
/* Free the evsel's ->priv (its struct syscall_tp) before deleting the evsel. */
static void perf_evsel__delete_priv(struct perf_evsel *evsel)
{
	zfree(&evsel->priv);
	perf_evsel__delete(evsel);
}
222
223 static int perf_evsel__init_syscall_tp(struct perf_evsel *evsel, void *handler)
224 {
225 evsel->priv = malloc(sizeof(struct syscall_tp));
226 if (evsel->priv != NULL) {
227 if (perf_evsel__init_sc_tp_uint_field(evsel, id))
228 goto out_delete;
229
230 evsel->handler = handler;
231 return 0;
232 }
233
234 return -ENOMEM;
235
236 out_delete:
237 zfree(&evsel->priv);
238 return -ENOENT;
239 }
240
241 static struct perf_evsel *perf_evsel__syscall_newtp(const char *direction, void *handler)
242 {
243 struct perf_evsel *evsel = perf_evsel__newtp("raw_syscalls", direction);
244
245 /* older kernel (e.g., RHEL6) use syscalls:{enter,exit} */
246 if (IS_ERR(evsel))
247 evsel = perf_evsel__newtp("syscalls", direction);
248
249 if (IS_ERR(evsel))
250 return NULL;
251
252 if (perf_evsel__init_syscall_tp(evsel, handler))
253 goto out_delete;
254
255 return evsel;
256
257 out_delete:
258 perf_evsel__delete_priv(evsel);
259 return NULL;
260 }
261
/* Read the 'name' field of a syscall tracepoint sample as an integer. */
#define perf_evsel__sc_tp_uint(evsel, name, sample) \
	({ struct syscall_tp *fields = evsel->priv; \
	   fields->name.integer(&fields->name, sample); })

/* Read the 'name' field of a syscall tracepoint sample as a pointer. */
#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
	({ struct syscall_tp *fields = evsel->priv; \
	   fields->name.pointer(&fields->name, sample); })

/*
 * Context handed to the per-argument beautifiers (the SCA_* helpers):
 * the raw value, the thread/trace it belongs to and the opaque per-arg
 * parm (e.g. a strarray).  idx/mask presumably identify the arg slot
 * and args already consumed by a previous beautifier -- their users are
 * outside this chunk, confirm in the formatting code.
 */
struct syscall_arg {
	unsigned long val;
	struct thread *thread;
	struct trace  *trace;
	void	      *parm;
	u8	      idx;
	u8	      mask;
};
278
/*
 * Maps integer values to names via entries[val - offset]; 'offset'
 * supports arrays whose first meaningful value isn't 0 (e.g. the
 * epoll_ctl ops starting at 1, the tioctls starting at 0x5401).
 */
struct strarray {
	int offset;
	int nr_entries;
	const char **entries;
};

/* Define strarray__<array> wrapping a string table, offset 0. */
#define DEFINE_STRARRAY(array) struct strarray strarray__##array = { \
	.nr_entries = ARRAY_SIZE(array), \
	.entries = array, \
}

/* Same, for tables whose first entry maps value 'off'. */
#define DEFINE_STRARRAY_OFFSET(array, off) struct strarray strarray__##array = { \
	.offset = off, \
	.nr_entries = ARRAY_SIZE(array), \
	.entries = array, \
}
295
296 static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
297 const char *intfmt,
298 struct syscall_arg *arg)
299 {
300 struct strarray *sa = arg->parm;
301 int idx = arg->val - sa->offset;
302
303 if (idx < 0 || idx >= sa->nr_entries)
304 return scnprintf(bf, size, intfmt, arg->val);
305
306 return scnprintf(bf, size, "%s", sa->entries[idx]);
307 }
308
/* Beautify an argument via the strarray in ->parm, decimal fallback. */
static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
}

#define SCA_STRARRAY syscall_arg__scnprintf_strarray
316
#if defined(__i386__) || defined(__x86_64__)
/*
 * FIXME: Make this available to all arches as soon as the ioctl beautifier
 * gets rewritten to support all arches.
 */
/* Like syscall_arg__scnprintf_strarray() but with a hex fallback, for ioctl cmds. */
static size_t syscall_arg__scnprintf_strhexarray(char *bf, size_t size,
						 struct syscall_arg *arg)
{
	return __syscall_arg__scnprintf_strarray(bf, size, "%#x", arg);
}

#define SCA_STRHEXARRAY syscall_arg__scnprintf_strhexarray
#endif /* defined(__i386__) || defined(__x86_64__) */
330
/* Forward declaration: the definition needs thread__fd_path(), further down. */
static size_t syscall_arg__scnprintf_fd(char *bf, size_t size,
					struct syscall_arg *arg);

#define SCA_FD syscall_arg__scnprintf_fd

/* Fallback for libcs whose fcntl.h doesn't define it. */
#ifndef AT_FDCWD
#define AT_FDCWD	-100
#endif
339
340 static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
341 struct syscall_arg *arg)
342 {
343 int fd = arg->val;
344
345 if (fd == AT_FDCWD)
346 return scnprintf(bf, size, "CWD");
347
348 return syscall_arg__scnprintf_fd(bf, size, arg);
349 }
350
351 #define SCA_FDAT syscall_arg__scnprintf_fd_at
352
/* Forward declaration: the definition needs struct thread_trace, further down. */
static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg);

#define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd

/* Print an argument as a hex number (addresses, opaque values). */
static size_t syscall_arg__scnprintf_hex(char *bf, size_t size,
					 struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%#lx", arg->val);
}

#define SCA_HEX syscall_arg__scnprintf_hex
365
366 static size_t syscall_arg__scnprintf_int(char *bf, size_t size,
367 struct syscall_arg *arg)
368 {
369 return scnprintf(bf, size, "%d", arg->val);
370 }
371
372 #define SCA_INT syscall_arg__scnprintf_int
373
/* bpf(2) cmd argument: BPF_* commands in enum order. */
static const char *bpf_cmd[] = {
	"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
	"MAP_GET_NEXT_KEY", "PROG_LOAD",
};
static DEFINE_STRARRAY(bpf_cmd);

/* epoll_ctl(2) op argument: EPOLL_CTL_* starts at 1, hence the offset. */
static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, 1);

/* {get,set}itimer(2) which argument: ITIMER_*. */
static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
static DEFINE_STRARRAY(itimers);

/* keyctl(2) option argument: KEYCTL_*. */
static const char *keyctl_options[] = {
	"GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
	"SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
	"INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
	"ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
	"INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
};
static DEFINE_STRARRAY(keyctl_options);

/* lseek(2) whence argument: SEEK_* (last two only where the libc has them). */
static const char *whences[] = { "SET", "CUR", "END",
#ifdef SEEK_DATA
"DATA",
#endif
#ifdef SEEK_HOLE
"HOLE",
#endif
};
static DEFINE_STRARRAY(whences);

/* fcntl(2) cmd argument: F_*. */
static const char *fcntl_cmds[] = {
	"DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
	"SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "F_GETLK64",
	"F_SETLK64", "F_SETLKW64", "F_SETOWN_EX", "F_GETOWN_EX",
	"F_GETOWNER_UIDS",
};
static DEFINE_STRARRAY(fcntl_cmds);

/* {get,set}rlimit(2)/prlimit64(2) resource argument: RLIMIT_*. */
static const char *rlimit_resources[] = {
	"CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
	"MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
	"RTTIME",
};
static DEFINE_STRARRAY(rlimit_resources);

/* rt_sigprocmask(2) how argument: SIG_*. */
static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
static DEFINE_STRARRAY(sighow);

/* clock_gettime(2) clk_id argument: CLOCK_*. */
static const char *clockid[] = {
	"REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
	"MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
	"REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
};
static DEFINE_STRARRAY(clockid);

/* socket(2)/socketpair(2) family argument: AF_*. */
static const char *socket_families[] = {
	"UNSPEC", "LOCAL", "INET", "AX25", "IPX", "APPLETALK", "NETROM",
	"BRIDGE", "ATMPVC", "X25", "INET6", "ROSE", "DECnet", "NETBEUI",
	"SECURITY", "KEY", "NETLINK", "PACKET", "ASH", "ECONET", "ATMSVC",
	"RDS", "SNA", "IRDA", "PPPOX", "WANPIPE", "LLC", "IB", "CAN", "TIPC",
	"BLUETOOTH", "IUCV", "RXRPC", "ISDN", "PHONET", "IEEE802154", "CAIF",
	"ALG", "NFC", "VSOCK",
};
static DEFINE_STRARRAY(socket_families);
439
440 static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
441 struct syscall_arg *arg)
442 {
443 size_t printed = 0;
444 int mode = arg->val;
445
446 if (mode == F_OK) /* 0 */
447 return scnprintf(bf, size, "F");
448 #define P_MODE(n) \
449 if (mode & n##_OK) { \
450 printed += scnprintf(bf + printed, size - printed, "%s", #n); \
451 mode &= ~n##_OK; \
452 }
453
454 P_MODE(R);
455 P_MODE(W);
456 P_MODE(X);
457 #undef P_MODE
458
459 if (mode)
460 printed += scnprintf(bf + printed, size - printed, "|%#x", mode);
461
462 return printed;
463 }
464
465 #define SCA_ACCMODE syscall_arg__scnprintf_access_mode
466
/* Forward declaration: the definition needs thread__set_filename_pos(), further down. */
static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
					      struct syscall_arg *arg);

#define SCA_FILENAME syscall_arg__scnprintf_filename
471
472 static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
473 struct syscall_arg *arg)
474 {
475 int printed = 0, flags = arg->val;
476
477 #define P_FLAG(n) \
478 if (flags & O_##n) { \
479 printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
480 flags &= ~O_##n; \
481 }
482
483 P_FLAG(CLOEXEC);
484 P_FLAG(NONBLOCK);
485 #undef P_FLAG
486
487 if (flags)
488 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
489
490 return printed;
491 }
492
493 #define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
494
#if defined(__i386__) || defined(__x86_64__)
/*
 * FIXME: Make this available to all arches.
 */
#define TCGETS		0x5401

/*
 * ioctl(2) cmd names for the tty ioctl range; indexed from TCGETS
 * (0x5401) via the strarray offset below.  The designated [0x27],
 * [0x50] and [0x60] initializers skip over holes in the number space.
 */
static const char *tioctls[] = {
	"TCGETS", "TCSETS", "TCSETSW", "TCSETSF", "TCGETA", "TCSETA", "TCSETAW",
	"TCSETAF", "TCSBRK", "TCXONC", "TCFLSH", "TIOCEXCL", "TIOCNXCL",
	"TIOCSCTTY", "TIOCGPGRP", "TIOCSPGRP", "TIOCOUTQ", "TIOCSTI",
	"TIOCGWINSZ", "TIOCSWINSZ", "TIOCMGET", "TIOCMBIS", "TIOCMBIC",
	"TIOCMSET", "TIOCGSOFTCAR", "TIOCSSOFTCAR", "FIONREAD", "TIOCLINUX",
	"TIOCCONS", "TIOCGSERIAL", "TIOCSSERIAL", "TIOCPKT", "FIONBIO",
	"TIOCNOTTY", "TIOCSETD", "TIOCGETD", "TCSBRKP", [0x27] = "TIOCSBRK",
	"TIOCCBRK", "TIOCGSID", "TCGETS2", "TCSETS2", "TCSETSW2", "TCSETSF2",
	"TIOCGRS485", "TIOCSRS485", "TIOCGPTN", "TIOCSPTLCK",
	"TIOCGDEV||TCGETX", "TCSETX", "TCSETXF", "TCSETXW", "TIOCSIG",
	"TIOCVHANGUP", "TIOCGPKT", "TIOCGPTLCK", "TIOCGEXCL",
	[0x50] = "FIONCLEX", "FIOCLEX", "FIOASYNC", "TIOCSERCONFIG",
	"TIOCSERGWILD", "TIOCSERSWILD", "TIOCGLCKTRMIOS", "TIOCSLCKTRMIOS",
	"TIOCSERGSTRUCT", "TIOCSERGETLSR", "TIOCSERGETMULTI", "TIOCSERSETMULTI",
	"TIOCMIWAIT", "TIOCGICOUNT", [0x60] = "FIOQSIZE",
};

static DEFINE_STRARRAY_OFFSET(tioctls, 0x5401);
#endif /* defined(__i386__) || defined(__x86_64__) */
521
/* Fallbacks for toolchains whose linux/random.h predates getrandom(2). */
#ifndef GRND_NONBLOCK
#define GRND_NONBLOCK	0x0001
#endif
#ifndef GRND_RANDOM
#define GRND_RANDOM	0x0002
#endif
528
529 static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
530 struct syscall_arg *arg)
531 {
532 int printed = 0, flags = arg->val;
533
534 #define P_FLAG(n) \
535 if (flags & GRND_##n) { \
536 printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
537 flags &= ~GRND_##n; \
538 }
539
540 P_FLAG(RANDOM);
541 P_FLAG(NONBLOCK);
542 #undef P_FLAG
543
544 if (flags)
545 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
546
547 return printed;
548 }
549
550 #define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags
551
/* Initializer shorthand: beautify arg slot 'arg' via strarray__<array>. */
#define STRARRAY(arg, name, array) \
	  .arg_scnprintf = { [arg] = SCA_STRARRAY, }, \
	  .arg_parm	 = { [arg] = &strarray__##array, }
555
556 #include "trace/beauty/eventfd.c"
557 #include "trace/beauty/flock.c"
558 #include "trace/beauty/futex_op.c"
559 #include "trace/beauty/mmap.c"
560 #include "trace/beauty/mode_t.c"
561 #include "trace/beauty/msg_flags.c"
562 #include "trace/beauty/open_flags.c"
563 #include "trace/beauty/perf_event_open.c"
564 #include "trace/beauty/pid.c"
565 #include "trace/beauty/sched_policy.c"
566 #include "trace/beauty/seccomp.c"
567 #include "trace/beauty/signum.c"
568 #include "trace/beauty/socket_type.c"
569 #include "trace/beauty/waitid_options.c"
570
/*
 * Per-syscall pretty-printing descriptors.
 *
 * NOTE: syscall_fmt__find() looks entries up with bsearch(), so this
 * array must be kept sorted by ->name.
 *
 * The errmsg/errpid/timeout/hexret flags steer how the return value is
 * printed; their consumers are outside this chunk -- presumably errmsg
 * means "negative return is -errno", errpid "positive return is a pid",
 * hexret "print return in hex" (cf. mmap/brk/mremap) -- confirm in the
 * sys_exit handler.
 */
static struct syscall_fmt {
	const char *name;
	const char *alias;	/* alternate tracepoint name, e.g. "newstat" for stat */
	size_t	   (*arg_scnprintf[6])(char *bf, size_t size, struct syscall_arg *arg); /* per-arg beautifiers */
	void	   *arg_parm[6];	/* opaque parm for each beautifier, e.g. a strarray */
	bool	   errmsg;
	bool	   errpid;
	bool	   timeout;
	bool	   hexret;
} syscall_fmts[] = {
	{ .name	    = "access",	    .errmsg = true,
	  .arg_scnprintf = { [1] = SCA_ACCMODE,  /* mode */ }, },
	{ .name	    = "arch_prctl", .errmsg = true, .alias = "prctl", },
	{ .name	    = "bpf",	    .errmsg = true, STRARRAY(0, cmd, bpf_cmd), },
	{ .name	    = "brk",	    .hexret = true,
	  .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
	{ .name	    = "chdir",	    .errmsg = true, },
	{ .name	    = "chmod",	    .errmsg = true, },
	{ .name	    = "chroot",	    .errmsg = true, },
	{ .name     = "clock_gettime",  .errmsg = true, STRARRAY(0, clk_id, clockid), },
	{ .name	    = "clone",	    .errpid = true, },
	{ .name	    = "close",	    .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, },
	{ .name	    = "connect",    .errmsg = true, },
	{ .name	    = "creat",	    .errmsg = true, },
	{ .name	    = "dup",	    .errmsg = true, },
	{ .name	    = "dup2",	    .errmsg = true, },
	{ .name	    = "dup3",	    .errmsg = true, },
	{ .name	    = "epoll_ctl",  .errmsg = true, STRARRAY(1, op, epoll_ctl_ops), },
	{ .name	    = "eventfd2",   .errmsg = true,
	  .arg_scnprintf = { [1] = SCA_EFD_FLAGS, /* flags */ }, },
	{ .name	    = "faccessat",  .errmsg = true, },
	{ .name	    = "fadvise64",  .errmsg = true, },
	{ .name	    = "fallocate",  .errmsg = true, },
	{ .name	    = "fchdir",	    .errmsg = true, },
	{ .name	    = "fchmod",	    .errmsg = true, },
	{ .name	    = "fchmodat",   .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
	{ .name	    = "fchown",	    .errmsg = true, },
	{ .name	    = "fchownat",   .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
	{ .name	    = "fcntl",	    .errmsg = true,
	  .arg_scnprintf = { [1] = SCA_STRARRAY, /* cmd */ },
	  .arg_parm	 = { [1] = &strarray__fcntl_cmds, /* cmd */ }, },
	{ .name	    = "fdatasync",  .errmsg = true, },
	{ .name	    = "flock",	    .errmsg = true,
	  .arg_scnprintf = { [1] = SCA_FLOCK, /* cmd */ }, },
	{ .name	    = "fsetxattr",  .errmsg = true, },
	{ .name	    = "fstat",	    .errmsg = true, .alias = "newfstat", },
	{ .name	    = "fstatat",    .errmsg = true, .alias = "newfstatat", },
	{ .name	    = "fstatfs",    .errmsg = true, },
	{ .name	    = "fsync",	    .errmsg = true, },
	{ .name	    = "ftruncate",  .errmsg = true, },
	{ .name	    = "futex",	    .errmsg = true,
	  .arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, },
	{ .name	    = "futimesat",  .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
	{ .name	    = "getdents",   .errmsg = true, },
	{ .name	    = "getdents64", .errmsg = true, },
	{ .name	    = "getitimer",  .errmsg = true, STRARRAY(0, which, itimers), },
	{ .name	    = "getpid",	    .errpid = true, },
	{ .name	    = "getpgid",    .errpid = true, },
	{ .name	    = "getppid",    .errpid = true, },
	{ .name	    = "getrandom",  .errmsg = true,
	  .arg_scnprintf = { [2] = SCA_GETRANDOM_FLAGS, /* flags */ }, },
	{ .name	    = "getrlimit",  .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
	{ .name	    = "getxattr",   .errmsg = true, },
	{ .name	    = "inotify_add_watch",	    .errmsg = true, },
	{ .name	    = "ioctl",	    .errmsg = true,
	  .arg_scnprintf = {
#if defined(__i386__) || defined(__x86_64__)
/*
 * FIXME: Make this available to all arches.
 */
			     [1] = SCA_STRHEXARRAY, /* cmd */
			     [2] = SCA_HEX, /* arg */ },
	  .arg_parm	 = { [1] = &strarray__tioctls, /* cmd */ }, },
#else
			     [2] = SCA_HEX, /* arg */ }, },
#endif
	{ .name	    = "keyctl",	    .errmsg = true, STRARRAY(0, option, keyctl_options), },
	{ .name	    = "kill",	    .errmsg = true,
	  .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
	{ .name	    = "lchown",	    .errmsg = true, },
	{ .name	    = "lgetxattr",  .errmsg = true, },
	{ .name	    = "linkat",	    .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
	{ .name	    = "listxattr",  .errmsg = true, },
	{ .name	    = "llistxattr", .errmsg = true, },
	{ .name	    = "lremovexattr",  .errmsg = true, },
	{ .name	    = "lseek",	    .errmsg = true,
	  .arg_scnprintf = { [2] = SCA_STRARRAY, /* whence */ },
	  .arg_parm	 = { [2] = &strarray__whences, /* whence */ }, },
	{ .name	    = "lsetxattr",  .errmsg = true, },
	{ .name	    = "lstat",	    .errmsg = true, .alias = "newlstat", },
	{ .name	    = "lsxattr",    .errmsg = true, },
	{ .name     = "madvise",    .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_HEX,	 /* start */
			     [2] = SCA_MADV_BHV, /* behavior */ }, },
	{ .name	    = "mkdir",	    .errmsg = true, },
	{ .name	    = "mkdirat",    .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
	{ .name	    = "mknod",	    .errmsg = true, },
	{ .name	    = "mknodat",    .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
	{ .name	    = "mlock",	    .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
	{ .name	    = "mlockall",   .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
	{ .name	    = "mmap",	    .hexret = true,
	  .arg_scnprintf = { [0] = SCA_HEX,	  /* addr */
			     [2] = SCA_MMAP_PROT, /* prot */
			     [3] = SCA_MMAP_FLAGS, /* flags */ }, },
	{ .name	    = "mprotect",   .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_HEX, /* start */
			     [2] = SCA_MMAP_PROT, /* prot */ }, },
	{ .name	    = "mq_unlink", .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FILENAME, /* u_name */ }, },
	{ .name	    = "mremap",	    .hexret = true,
	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */
			     [3] = SCA_MREMAP_FLAGS, /* flags */
			     [4] = SCA_HEX, /* new_addr */ }, },
	{ .name	    = "munlock",    .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
	{ .name	    = "munmap",	    .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
	{ .name	    = "name_to_handle_at", .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
	{ .name	    = "newfstatat", .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
	{ .name	    = "open",	    .errmsg = true,
	  .arg_scnprintf = { [1] = SCA_OPEN_FLAGS, /* flags */ }, },
	{ .name	    = "open_by_handle_at", .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
			     [2] = SCA_OPEN_FLAGS, /* flags */ }, },
	{ .name	    = "openat",	    .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
			     [2] = SCA_OPEN_FLAGS, /* flags */ }, },
	{ .name	    = "perf_event_open", .errmsg = true,
	  .arg_scnprintf = { [2] = SCA_INT, /* cpu */
			     [3] = SCA_FD,  /* group_fd */
			     [4] = SCA_PERF_FLAGS, /* flags */ }, },
	{ .name	    = "pipe2",	    .errmsg = true,
	  .arg_scnprintf = { [1] = SCA_PIPE_FLAGS, /* flags */ }, },
	{ .name	    = "poll",	    .errmsg = true, .timeout = true, },
	{ .name	    = "ppoll",	    .errmsg = true, .timeout = true, },
	{ .name	    = "pread",	    .errmsg = true, .alias = "pread64", },
	{ .name	    = "preadv",	    .errmsg = true, .alias = "pread", },
	{ .name	    = "prlimit64",  .errmsg = true, STRARRAY(1, resource, rlimit_resources), },
	{ .name	    = "pwrite",	    .errmsg = true, .alias = "pwrite64", },
	{ .name	    = "pwritev",    .errmsg = true, },
	{ .name	    = "read",	    .errmsg = true, },
	{ .name	    = "readlink",   .errmsg = true, },
	{ .name	    = "readlinkat", .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
	{ .name	    = "readv",	    .errmsg = true, },
	{ .name	    = "recvfrom",   .errmsg = true,
	  .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
	{ .name	    = "recvmmsg",   .errmsg = true,
	  .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
	{ .name	    = "recvmsg",    .errmsg = true,
	  .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, },
	{ .name	    = "removexattr", .errmsg = true, },
	{ .name	    = "renameat",   .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
	{ .name	    = "rmdir",	    .errmsg = true, },
	{ .name	    = "rt_sigaction", .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_SIGNUM, /* sig */ }, },
	{ .name	    = "rt_sigprocmask",  .errmsg = true, STRARRAY(0, how, sighow), },
	{ .name	    = "rt_sigqueueinfo", .errmsg = true,
	  .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
	{ .name	    = "rt_tgsigqueueinfo", .errmsg = true,
	  .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
	{ .name	    = "sched_getattr",	      .errmsg = true, },
	{ .name	    = "sched_setattr",	      .errmsg = true, },
	{ .name	    = "sched_setscheduler",   .errmsg = true,
	  .arg_scnprintf = { [1] = SCA_SCHED_POLICY, /* policy */ }, },
	{ .name	    = "seccomp", .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_SECCOMP_OP, /* op */
			     [1] = SCA_SECCOMP_FLAGS, /* flags */ }, },
	{ .name	    = "select",	    .errmsg = true, .timeout = true, },
	{ .name	    = "sendmmsg",    .errmsg = true,
	  .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
	{ .name	    = "sendmsg",    .errmsg = true,
	  .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, },
	{ .name	    = "sendto",	    .errmsg = true,
	  .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
	{ .name	    = "set_tid_address", .errpid = true, },
	{ .name	    = "setitimer",  .errmsg = true, STRARRAY(0, which, itimers), },
	{ .name	    = "setpgid",    .errmsg = true, },
	{ .name	    = "setrlimit",  .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
	{ .name	    = "setxattr",   .errmsg = true, },
	{ .name	    = "shutdown",   .errmsg = true, },
	{ .name	    = "socket",	    .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
			     [1] = SCA_SK_TYPE, /* type */ },
	  .arg_parm	 = { [0] = &strarray__socket_families, /* family */ }, },
	{ .name	    = "socketpair", .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
			     [1] = SCA_SK_TYPE, /* type */ },
	  .arg_parm	 = { [0] = &strarray__socket_families, /* family */ }, },
	{ .name	    = "stat",	    .errmsg = true, .alias = "newstat", },
	{ .name	    = "statfs",	    .errmsg = true, },
	{ .name	    = "swapoff",    .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FILENAME, /* specialfile */ }, },
	{ .name	    = "swapon",	    .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FILENAME, /* specialfile */ }, },
	{ .name	    = "symlinkat",  .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
	{ .name	    = "tgkill",	    .errmsg = true,
	  .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
	{ .name	    = "tkill",	    .errmsg = true,
	  .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
	{ .name	    = "truncate",   .errmsg = true, },
	{ .name	    = "uname",	    .errmsg = true, .alias = "newuname", },
	{ .name	    = "unlinkat",   .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
	{ .name	    = "utime",	    .errmsg = true, },
	{ .name	    = "utimensat",  .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */ }, },
	{ .name	    = "utimes",	    .errmsg = true, },
	{ .name	    = "vmsplice",   .errmsg = true, },
	{ .name	    = "wait4",	    .errpid = true,
	  .arg_scnprintf = { [2] = SCA_WAITID_OPTIONS, /* options */ }, },
	{ .name	    = "waitid",	    .errpid = true,
	  .arg_scnprintf = { [3] = SCA_WAITID_OPTIONS, /* options */ }, },
	{ .name	    = "write",	    .errmsg = true, },
	{ .name	    = "writev",	    .errmsg = true, },
};
800
/* bsearch() comparator: key is the syscall name, element a syscall_fmt. */
static int syscall_fmt__cmp(const void *name, const void *fmtp)
{
	const struct syscall_fmt *fmt = fmtp;
	return strcmp(name, fmt->name);
}
806
807 static struct syscall_fmt *syscall_fmt__find(const char *name)
808 {
809 const int nmemb = ARRAY_SIZE(syscall_fmts);
810 return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
811 }
812
/*
 * Everything known about one syscall: its tracepoint format and
 * argument fields plus the optional pretty-printing hooks resolved
 * from syscall_fmts.
 */
struct syscall {
	struct event_format *tp_format;
	int		    nr_args;
	struct format_field *args;
	const char	    *name;
	bool		    is_exit;	/* presumably marks non-returning syscalls (exit/exit_group) -- confirm at init site */
	struct syscall_fmt  *fmt;
	size_t		    (**arg_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	void		    **arg_parm;
};
823
824 static size_t fprintf_duration(unsigned long t, FILE *fp)
825 {
826 double duration = (double)t / NSEC_PER_MSEC;
827 size_t printed = fprintf(fp, "(");
828
829 if (duration >= 1.0)
830 printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
831 else if (duration >= 0.01)
832 printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
833 else
834 printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
835 return printed + fprintf(fp, "): ");
836 }
837
/**
 * filename.ptr: The filename char pointer that will be vfs_getname'd
 * filename.entry_str_pos: Where to insert the string translated from
 * filename.ptr by the vfs_getname tracepoint/kprobe.
 */
/* Per-thread state, hung off thread->priv via thread__set_priv(). */
struct thread_trace {
	u64		  entry_time;
	bool		  entry_pending;
	unsigned long	  nr_events;	/* bumped on every sample, see thread__trace() */
	unsigned long	  pfmaj, pfmin;	/* major/minor page fault counts -- TODO confirm at the fault handler */
	char		  *entry_str;	/* staging buffer for the syscall entry line */
	double		  runtime_ms;
	struct {
		unsigned long	ptr;
		short int	entry_str_pos;
		bool		pending_open;
		unsigned int	namelen;
		char		*name;
	} filename;
	struct {	/* fd -> pathname cache, grown in trace__set_fd_pathname() */
		int	  max;	/* highest fd slot allocated, -1 when empty */
		char	  **table;
	} paths;

	struct intlist *syscall_stats;
};
864
865 static struct thread_trace *thread_trace__new(void)
866 {
867 struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));
868
869 if (ttrace)
870 ttrace->paths.max = -1;
871
872 ttrace->syscall_stats = intlist__new(NULL);
873
874 return ttrace;
875 }
876
877 static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
878 {
879 struct thread_trace *ttrace;
880
881 if (thread == NULL)
882 goto fail;
883
884 if (thread__priv(thread) == NULL)
885 thread__set_priv(thread, thread_trace__new());
886
887 if (thread__priv(thread) == NULL)
888 goto fail;
889
890 ttrace = thread__priv(thread);
891 ++ttrace->nr_events;
892
893 return ttrace;
894 fail:
895 color_fprintf(fp, PERF_COLOR_RED,
896 "WARNING: not enough memory, dropping samples!\n");
897 return NULL;
898 }
899
/* Bits for trace->trace_pgfaults: which page-fault flavors to trace. */
#define TRACE_PFMAJ		(1 << 0)
#define TRACE_PFMIN		(1 << 1)

/* Size of the per-thread buffer where the syscall entry line is staged. */
static const size_t trace__entry_str_size = 2048;
904
905 static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
906 {
907 struct thread_trace *ttrace = thread__priv(thread);
908
909 if (fd > ttrace->paths.max) {
910 char **npath = realloc(ttrace->paths.table, (fd + 1) * sizeof(char *));
911
912 if (npath == NULL)
913 return -1;
914
915 if (ttrace->paths.max != -1) {
916 memset(npath + ttrace->paths.max + 1, 0,
917 (fd - ttrace->paths.max) * sizeof(char *));
918 } else {
919 memset(npath, 0, (fd + 1) * sizeof(char *));
920 }
921
922 ttrace->paths.table = npath;
923 ttrace->paths.max = fd;
924 }
925
926 ttrace->paths.table[fd] = strdup(pathname);
927
928 return ttrace->paths.table[fd] != NULL ? 0 : -1;
929 }
930
931 static int thread__read_fd_path(struct thread *thread, int fd)
932 {
933 char linkname[PATH_MAX], pathname[PATH_MAX];
934 struct stat st;
935 int ret;
936
937 if (thread->pid_ == thread->tid) {
938 scnprintf(linkname, sizeof(linkname),
939 "/proc/%d/fd/%d", thread->pid_, fd);
940 } else {
941 scnprintf(linkname, sizeof(linkname),
942 "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
943 }
944
945 if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
946 return -1;
947
948 ret = readlink(linkname, pathname, sizeof(pathname));
949
950 if (ret < 0 || ret > st.st_size)
951 return -1;
952
953 pathname[ret] = '\0';
954 return trace__set_fd_pathname(thread, fd, pathname);
955 }
956
957 static const char *thread__fd_path(struct thread *thread, int fd,
958 struct trace *trace)
959 {
960 struct thread_trace *ttrace = thread__priv(thread);
961
962 if (ttrace == NULL)
963 return NULL;
964
965 if (fd < 0)
966 return NULL;
967
968 if ((fd > ttrace->paths.max || ttrace->paths.table[fd] == NULL)) {
969 if (!trace->live)
970 return NULL;
971 ++trace->stats.proc_getname;
972 if (thread__read_fd_path(thread, fd))
973 return NULL;
974 }
975
976 return ttrace->paths.table[fd];
977 }
978
979 static size_t syscall_arg__scnprintf_fd(char *bf, size_t size,
980 struct syscall_arg *arg)
981 {
982 int fd = arg->val;
983 size_t printed = scnprintf(bf, size, "%d", fd);
984 const char *path = thread__fd_path(arg->thread, fd, arg->trace);
985
986 if (path)
987 printed += scnprintf(bf + printed, size - printed, "<%s>", path);
988
989 return printed;
990 }
991
992 static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
993 struct syscall_arg *arg)
994 {
995 int fd = arg->val;
996 size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
997 struct thread_trace *ttrace = thread__priv(arg->thread);
998
999 if (ttrace && fd >= 0 && fd <= ttrace->paths.max)
1000 zfree(&ttrace->paths.table[fd]);
1001
1002 return printed;
1003 }
1004
1005 static void thread__set_filename_pos(struct thread *thread, const char *bf,
1006 unsigned long ptr)
1007 {
1008 struct thread_trace *ttrace = thread__priv(thread);
1009
1010 ttrace->filename.ptr = ptr;
1011 ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
1012 }
1013
1014 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
1015 struct syscall_arg *arg)
1016 {
1017 unsigned long ptr = arg->val;
1018
1019 if (!arg->trace->vfs_getname)
1020 return scnprintf(bf, size, "%#x", ptr);
1021
1022 thread__set_filename_pos(arg->thread, bf, ptr);
1023 return 0;
1024 }
1025
1026 static bool trace__filter_duration(struct trace *trace, double t)
1027 {
1028 return t < (trace->duration_filter * NSEC_PER_MSEC);
1029 }
1030
1031 static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1032 {
1033 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
1034
1035 return fprintf(fp, "%10.3f ", ts);
1036 }
1037
/*
 * Termination flags shared between the signal handler and the main loop.
 * 'volatile sig_atomic_t' is the only type the C standard guarantees may
 * be safely written from an async signal handler (CERT SIG31-C); plain
 * bool gave no such guarantee.
 */
static volatile sig_atomic_t done = false;
static volatile sig_atomic_t interrupted = false;

static void sig_handler(int sig)
{
	done = true;
	/* Remember whether the user hit ^C (SIGINT) vs. e.g. SIGTERM/SIGCHLD. */
	interrupted = sig == SIGINT;
}
1046
1047 static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
1048 u64 duration, u64 tstamp, FILE *fp)
1049 {
1050 size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
1051 printed += fprintf_duration(duration, fp);
1052
1053 if (trace->multiple_threads) {
1054 if (trace->show_comm)
1055 printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
1056 printed += fprintf(fp, "%d ", thread->tid);
1057 }
1058
1059 return printed;
1060 }
1061
1062 static int trace__process_event(struct trace *trace, struct machine *machine,
1063 union perf_event *event, struct perf_sample *sample)
1064 {
1065 int ret = 0;
1066
1067 switch (event->header.type) {
1068 case PERF_RECORD_LOST:
1069 color_fprintf(trace->output, PERF_COLOR_RED,
1070 "LOST %" PRIu64 " events!\n", event->lost.lost);
1071 ret = machine__process_lost_event(machine, event, sample);
1072 break;
1073 default:
1074 ret = machine__process_event(machine, event, sample);
1075 break;
1076 }
1077
1078 return ret;
1079 }
1080
1081 static int trace__tool_process(struct perf_tool *tool,
1082 union perf_event *event,
1083 struct perf_sample *sample,
1084 struct machine *machine)
1085 {
1086 struct trace *trace = container_of(tool, struct trace, tool);
1087 return trace__process_event(trace, machine, event, sample);
1088 }
1089
1090 static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
1091 {
1092 struct machine *machine = vmachine;
1093
1094 if (machine->kptr_restrict_warned)
1095 return NULL;
1096
1097 if (symbol_conf.kptr_restrict) {
1098 pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
1099 "Check /proc/sys/kernel/kptr_restrict.\n\n"
1100 "Kernel samples will not be resolved.\n");
1101 machine->kptr_restrict_warned = true;
1102 return NULL;
1103 }
1104
1105 return machine__resolve_kernel_addr(vmachine, addrp, modp);
1106 }
1107
1108 static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
1109 {
1110 int err = symbol__init(NULL);
1111
1112 if (err)
1113 return err;
1114
1115 trace->host = machine__new_host();
1116 if (trace->host == NULL)
1117 return -ENOMEM;
1118
1119 if (trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr) < 0)
1120 return -errno;
1121
1122 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
1123 evlist->threads, trace__tool_process, false,
1124 trace->opts.proc_map_timeout);
1125 if (err)
1126 symbol__exit();
1127
1128 return err;
1129 }
1130
/*
 * Choose a pretty-printer for each argument of syscall 'sc': first any
 * hand-written per-syscall formatter (sc->fmt), then heuristics based on
 * the tracepoint field's type and name. Returns 0 on success, -1 on
 * allocation failure.
 */
static int syscall__set_arg_fmts(struct syscall *sc)
{
	struct format_field *field;
	int idx = 0, len;

	sc->arg_scnprintf = calloc(sc->nr_args, sizeof(void *));
	if (sc->arg_scnprintf == NULL)
		return -1;

	if (sc->fmt)
		sc->arg_parm = sc->fmt->arg_parm;

	for (field = sc->args; field; field = field->next) {
		/* Explicit per-syscall formatter wins over the heuristics below. */
		if (sc->fmt && sc->fmt->arg_scnprintf[idx])
			sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx];
		/* String args named like paths get the vfs_getname treatment. */
		else if (strcmp(field->type, "const char *") == 0 &&
			 (strcmp(field->name, "filename") == 0 ||
			  strcmp(field->name, "path") == 0 ||
			  strcmp(field->name, "pathname") == 0))
			sc->arg_scnprintf[idx] = SCA_FILENAME;
		else if (field->flags & FIELD_IS_POINTER)
			sc->arg_scnprintf[idx] = syscall_arg__scnprintf_hex;
		else if (strcmp(field->type, "pid_t") == 0)
			sc->arg_scnprintf[idx] = SCA_PID;
		else if (strcmp(field->type, "umode_t") == 0)
			sc->arg_scnprintf[idx] = SCA_MODE_T;
		/* Integer fields whose name ends in "fd" are file descriptors. */
		else if ((strcmp(field->type, "int") == 0 ||
			  strcmp(field->type, "unsigned int") == 0 ||
			  strcmp(field->type, "long") == 0) &&
			 (len = strlen(field->name)) >= 2 &&
			 strcmp(field->name + len - 2, "fd") == 0) {
			/*
			 * /sys/kernel/tracing/events/syscalls/sys_enter*
			 * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
			 * 65 int
			 * 23 unsigned int
			 * 7 unsigned long
			 */
			sc->arg_scnprintf[idx] = SCA_FD;
		}
		++idx;
	}

	return 0;
}
1176
/*
 * Lazily fill trace->syscalls.table[id]: resolve the syscall name, its
 * per-syscall formatter entry and its tracepoint format, growing the table
 * as needed. Returns 0 on success, -1 on failure (unknown id, OOM, or
 * missing tracepoint format).
 */
static int trace__read_syscall_info(struct trace *trace, int id)
{
	char tp_name[128];
	struct syscall *sc;
	const char *name = syscalltbl__name(trace->sctbl, id);

	if (name == NULL)
		return -1;

	/* Grow the table up to 'id', zeroing only the newly allocated slots. */
	if (id > trace->syscalls.max) {
		struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));

		if (nsyscalls == NULL)
			return -1;

		if (trace->syscalls.max != -1) {
			memset(nsyscalls + trace->syscalls.max + 1, 0,
			       (id - trace->syscalls.max) * sizeof(*sc));
		} else {
			memset(nsyscalls, 0, (id + 1) * sizeof(*sc));
		}

		trace->syscalls.table = nsyscalls;
		trace->syscalls.max = id;
	}

	sc = trace->syscalls.table + id;
	sc->name = name;

	sc->fmt = syscall_fmt__find(sc->name);

	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
	sc->tp_format = trace_event__tp_format("syscalls", tp_name);

	/* Some syscalls are traced under an alias; retry with it. */
	if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
		sc->tp_format = trace_event__tp_format("syscalls", tp_name);
	}

	if (IS_ERR(sc->tp_format))
		return -1;

	sc->args = sc->tp_format->format.fields;
	sc->nr_args = sc->tp_format->format.nr_fields;
	/*
	 * We need to check and discard the first variable '__syscall_nr'
	 * or 'nr' that mean the syscall number. It is needless here.
	 * So drop '__syscall_nr' or 'nr' field but does not exist on older kernels.
	 */
	if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
		sc->args = sc->args->next;
		--sc->nr_args;
	}

	/* exit()/exit_group() never return, they are printed at sys_enter. */
	sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");

	return syscall__set_arg_fmts(sc);
}
1235
1236 static int trace__validate_ev_qualifier(struct trace *trace)
1237 {
1238 int err = 0, i;
1239 struct str_node *pos;
1240
1241 trace->ev_qualifier_ids.nr = strlist__nr_entries(trace->ev_qualifier);
1242 trace->ev_qualifier_ids.entries = malloc(trace->ev_qualifier_ids.nr *
1243 sizeof(trace->ev_qualifier_ids.entries[0]));
1244
1245 if (trace->ev_qualifier_ids.entries == NULL) {
1246 fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
1247 trace->output);
1248 err = -EINVAL;
1249 goto out;
1250 }
1251
1252 i = 0;
1253
1254 strlist__for_each_entry(pos, trace->ev_qualifier) {
1255 const char *sc = pos->s;
1256 int id = syscalltbl__id(trace->sctbl, sc);
1257
1258 if (id < 0) {
1259 if (err == 0) {
1260 fputs("Error:\tInvalid syscall ", trace->output);
1261 err = -EINVAL;
1262 } else {
1263 fputs(", ", trace->output);
1264 }
1265
1266 fputs(sc, trace->output);
1267 }
1268
1269 trace->ev_qualifier_ids.entries[i++] = id;
1270 }
1271
1272 if (err < 0) {
1273 fputs("\nHint:\ttry 'perf list syscalls:sys_enter_*'"
1274 "\nHint:\tand: 'man syscalls'\n", trace->output);
1275 zfree(&trace->ev_qualifier_ids.entries);
1276 trace->ev_qualifier_ids.nr = 0;
1277 }
1278 out:
1279 return err;
1280 }
1281
/*
 * args is to be interpreted as a series of longs but we need to handle
 * 8-byte unaligned accesses. args points to raw_data within the event
 * and raw_data is guaranteed to be 8-byte unaligned because it is
 * preceded by raw_size which is a u32. So we need to copy args to a temp
 * variable to read it. Most notably this avoids extended load instructions
 * on unaligned addresses
 */

/*
 * Format the syscall argument block 'args' into 'bf' using the per-argument
 * printers chosen by syscall__set_arg_fmts(), or as raw longs when the
 * tracepoint format could not be read. Returns characters printed.
 */
static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
				      unsigned char *args, struct trace *trace,
				      struct thread *thread)
{
	size_t printed = 0;
	unsigned char *p;
	unsigned long val;

	if (sc->args != NULL) {
		struct format_field *field;
		u8 bit = 1;
		struct syscall_arg arg = {
			.idx = 0,
			.mask = 0,
			.trace = trace,
			.thread = thread,
		};

		for (field = sc->args; field;
		     field = field->next, ++arg.idx, bit <<= 1) {
			/* Skip args already consumed by an earlier formatter. */
			if (arg.mask & bit)
				continue;

			/* special care for unaligned accesses */
			p = args + sizeof(unsigned long) * arg.idx;
			memcpy(&val, p, sizeof(val));

			/*
			 * Suppress this argument if its value is zero and
			 * we don't have a string associated in an
			 * strarray for it.
			 */
			if (val == 0 &&
			    !(sc->arg_scnprintf &&
			      sc->arg_scnprintf[arg.idx] == SCA_STRARRAY &&
			      sc->arg_parm[arg.idx]))
				continue;

			printed += scnprintf(bf + printed, size - printed,
					     "%s%s: ", printed ? ", " : "", field->name);
			if (sc->arg_scnprintf && sc->arg_scnprintf[arg.idx]) {
				arg.val = val;
				if (sc->arg_parm)
					arg.parm = sc->arg_parm[arg.idx];
				printed += sc->arg_scnprintf[arg.idx](bf + printed,
								      size - printed, &arg);
			} else {
				printed += scnprintf(bf + printed, size - printed,
						     "%ld", val);
			}
		}
	} else if (IS_ERR(sc->tp_format)) {
		/*
		 * If we managed to read the tracepoint /format file, then we
		 * may end up not having any args, like with gettid(), so only
		 * print the raw args when we didn't manage to read it.
		 */
		int i = 0;

		while (i < 6) {
			/* special care for unaligned accesses */
			p = args + sizeof(unsigned long) * i;
			memcpy(&val, p, sizeof(val));
			printed += scnprintf(bf + printed, size - printed,
					     "%sarg%d: %ld",
					     printed ? ", " : "", i, val);
			++i;
		}
	}

	return printed;
}
1363
/* Signature shared by all sample handlers wired up via evsel->handler. */
typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
				  union perf_event *event,
				  struct perf_sample *sample);
1367
/*
 * Return the syscall descriptor for 'id', lazily reading its info on first
 * use. Returns NULL (with a diagnostic at suitable verbosity levels) for
 * invalid ids or when the syscall info cannot be read.
 */
static struct syscall *trace__syscall_info(struct trace *trace,
					   struct perf_evsel *evsel, int id)
{

	if (id < 0) {

		/*
		 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
		 * before that, leaving at a higher verbosity level till that is
		 * explained. Reproduced with plain ftrace with:
		 *
		 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
		 * grep "NR -1 " /t/trace_pipe
		 *
		 * After generating some load on the machine.
		 */
		if (verbose > 1) {
			static u64 n;
			fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
				id, perf_evsel__name(evsel), ++n);
		}
		return NULL;
	}

	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
	    trace__read_syscall_info(trace, id))
		goto out_cant_read;

	/* Re-check: trace__read_syscall_info() may still have left it unset. */
	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))
		goto out_cant_read;

	return &trace->syscalls.table[id];

out_cant_read:
	if (verbose > 0) {
		fprintf(trace->output, "Problems reading syscall %d", id);
		if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL)
			fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
		fputs(" information\n", trace->output);
	}
	return NULL;
}
1410
1411 static void thread__update_stats(struct thread_trace *ttrace,
1412 int id, struct perf_sample *sample)
1413 {
1414 struct int_node *inode;
1415 struct stats *stats;
1416 u64 duration = 0;
1417
1418 inode = intlist__findnew(ttrace->syscall_stats, id);
1419 if (inode == NULL)
1420 return;
1421
1422 stats = inode->priv;
1423 if (stats == NULL) {
1424 stats = malloc(sizeof(struct stats));
1425 if (stats == NULL)
1426 return;
1427 init_stats(stats);
1428 inode->priv = stats;
1429 }
1430
1431 if (ttrace->entry_time && sample->time > ttrace->entry_time)
1432 duration = sample->time - ttrace->entry_time;
1433
1434 update_stats(stats, duration);
1435 }
1436
1437 static int trace__printf_interrupted_entry(struct trace *trace, struct perf_sample *sample)
1438 {
1439 struct thread_trace *ttrace;
1440 u64 duration;
1441 size_t printed;
1442
1443 if (trace->current == NULL)
1444 return 0;
1445
1446 ttrace = thread__priv(trace->current);
1447
1448 if (!ttrace->entry_pending)
1449 return 0;
1450
1451 duration = sample->time - ttrace->entry_time;
1452
1453 printed = trace__fprintf_entry_head(trace, trace->current, duration, ttrace->entry_time, trace->output);
1454 printed += fprintf(trace->output, "%-70s) ...\n", ttrace->entry_str);
1455 ttrace->entry_pending = false;
1456
1457 return printed;
1458 }
1459
/*
 * raw_syscalls:sys_enter handler: format the syscall name and arguments
 * into the per-thread entry string. Exit-like syscalls are printed right
 * away; everything else stays pending until the matching sys_exit.
 * Returns 0 on success, -1 on failure.
 */
static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample)
{
	char *msg;
	void *args;
	size_t printed = 0;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	args = perf_evsel__sc_tp_ptr(evsel, args, sample);

	/* Lazily allocate the buffer where the entry line is accumulated. */
	if (ttrace->entry_str == NULL) {
		ttrace->entry_str = malloc(trace__entry_str_size);
		if (!ttrace->entry_str)
			goto out_put;
	}

	if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
		trace__printf_interrupted_entry(trace, sample);

	ttrace->entry_time = sample->time;
	msg = ttrace->entry_str;
	printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);

	printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
					   args, trace, thread);

	if (sc->is_exit) {
		/* exit()/exit_group() never return: print the entry now. */
		if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) {
			trace__fprintf_entry_head(trace, thread, 1, ttrace->entry_time, trace->output);
			fprintf(trace->output, "%-70s)\n", ttrace->entry_str);
		}
	} else {
		ttrace->entry_pending = true;
		/* See trace__vfs_getname & trace__sys_exit */
		ttrace->filename.pending_open = false;
	}

	/* Track the last thread seen, for trace__printf_interrupted_entry(). */
	if (trace->current != thread) {
		thread__put(trace->current);
		trace->current = thread__get(thread);
	}
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
1518
1519 static int trace__resolve_callchain(struct trace *trace, struct perf_evsel *evsel,
1520 struct perf_sample *sample,
1521 struct callchain_cursor *cursor)
1522 {
1523 struct addr_location al;
1524
1525 if (machine__resolve(trace->host, &al, sample) < 0 ||
1526 thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, trace->max_stack))
1527 return -1;
1528
1529 return 0;
1530 }
1531
1532 static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
1533 {
1534 /* TODO: user-configurable print_opts */
1535 const unsigned int print_opts = EVSEL__PRINT_SYM |
1536 EVSEL__PRINT_DSO |
1537 EVSEL__PRINT_UNKNOWN_AS_ADDR;
1538
1539 return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, trace->output);
1540 }
1541
/*
 * raw_syscalls:sys_exit handler: compute the syscall duration, print the
 * pending entry (or a "continued" marker if it was flushed earlier) and
 * format the return value according to the per-syscall formatter (errno
 * name, pid+comm, hex, timeout...). Returns 0 on success, -1 on failure.
 */
static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
			   union perf_event *event __maybe_unused,
			   struct perf_sample *sample)
{
	long ret;
	u64 duration = 0;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	if (trace->summary)
		thread__update_stats(ttrace, id, sample);

	ret = perf_evsel__sc_tp_uint(evsel, ret, sample);

	/* Successful open-like syscall: cache the new fd -> pathname mapping. */
	if (id == trace->open_id && ret >= 0 && ttrace->filename.pending_open) {
		trace__set_fd_pathname(thread, ret, ttrace->filename.name);
		ttrace->filename.pending_open = false;
		++trace->stats.vfs_getname;
	}

	if (ttrace->entry_time) {
		duration = sample->time - ttrace->entry_time;
		if (trace__filter_duration(trace, duration))
			goto out;
	} else if (trace->duration_filter)
		goto out;

	if (sample->callchain) {
		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
		if (callchain_ret == 0) {
			/* Too shallow for --min-stack: skip printing. */
			if (callchain_cursor.nr < trace->min_stack)
				goto out;
			callchain_ret = 1;
		}
	}

	if (trace->summary_only)
		goto out;

	trace__fprintf_entry_head(trace, thread, duration, ttrace->entry_time, trace->output);

	if (ttrace->entry_pending) {
		fprintf(trace->output, "%-70s", ttrace->entry_str);
	} else {
		/* Entry was already flushed (interrupted by another event). */
		fprintf(trace->output, " ... [");
		color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
		fprintf(trace->output, "]: %s()", sc->name);
	}

	if (sc->fmt == NULL) {
signed_print:
		fprintf(trace->output, ") = %ld", ret);
	} else if (ret < 0 && (sc->fmt->errmsg || sc->fmt->errpid)) {
		/* Negative return: decode as "-1 ERRNAME (description)". */
		char bf[STRERR_BUFSIZE];
		const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
			   *e = audit_errno_to_name(-ret);

		fprintf(trace->output, ") = -1 %s %s", e, emsg);
	} else if (ret == 0 && sc->fmt->timeout)
		fprintf(trace->output, ") = 0 Timeout");
	else if (sc->fmt->hexret)
		fprintf(trace->output, ") = %#lx", ret);
	else if (sc->fmt->errpid) {
		/* Return value is a pid (e.g. clone/fork): show its comm too. */
		struct thread *child = machine__find_thread(trace->host, ret, ret);

		if (child != NULL) {
			fprintf(trace->output, ") = %ld", ret);
			if (child->comm_set)
				fprintf(trace->output, " (%s)", thread__comm_str(child));
			thread__put(child);
		}
	} else
		goto signed_print;

	fputc('\n', trace->output);

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
out:
	ttrace->entry_pending = false;
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
1639
1640 static int trace__vfs_getname(struct trace *trace, struct perf_evsel *evsel,
1641 union perf_event *event __maybe_unused,
1642 struct perf_sample *sample)
1643 {
1644 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
1645 struct thread_trace *ttrace;
1646 size_t filename_len, entry_str_len, to_move;
1647 ssize_t remaining_space;
1648 char *pos;
1649 const char *filename = perf_evsel__rawptr(evsel, sample, "pathname");
1650
1651 if (!thread)
1652 goto out;
1653
1654 ttrace = thread__priv(thread);
1655 if (!ttrace)
1656 goto out_put;
1657
1658 filename_len = strlen(filename);
1659 if (filename_len == 0)
1660 goto out_put;
1661
1662 if (ttrace->filename.namelen < filename_len) {
1663 char *f = realloc(ttrace->filename.name, filename_len + 1);
1664
1665 if (f == NULL)
1666 goto out_put;
1667
1668 ttrace->filename.namelen = filename_len;
1669 ttrace->filename.name = f;
1670 }
1671
1672 strcpy(ttrace->filename.name, filename);
1673 ttrace->filename.pending_open = true;
1674
1675 if (!ttrace->filename.ptr)
1676 goto out_put;
1677
1678 entry_str_len = strlen(ttrace->entry_str);
1679 remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
1680 if (remaining_space <= 0)
1681 goto out_put;
1682
1683 if (filename_len > (size_t)remaining_space) {
1684 filename += filename_len - remaining_space;
1685 filename_len = remaining_space;
1686 }
1687
1688 to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
1689 pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
1690 memmove(pos + filename_len, pos, to_move);
1691 memcpy(pos, filename, filename_len);
1692
1693 ttrace->filename.ptr = 0;
1694 ttrace->filename.entry_str_pos = 0;
1695 out_put:
1696 thread__put(thread);
1697 out:
1698 return 0;
1699 }
1700
1701 static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,
1702 union perf_event *event __maybe_unused,
1703 struct perf_sample *sample)
1704 {
1705 u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
1706 double runtime_ms = (double)runtime / NSEC_PER_MSEC;
1707 struct thread *thread = machine__findnew_thread(trace->host,
1708 sample->pid,
1709 sample->tid);
1710 struct thread_trace *ttrace = thread__trace(thread, trace->output);
1711
1712 if (ttrace == NULL)
1713 goto out_dump;
1714
1715 ttrace->runtime_ms += runtime_ms;
1716 trace->runtime_ms += runtime_ms;
1717 out_put:
1718 thread__put(thread);
1719 return 0;
1720
1721 out_dump:
1722 fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
1723 evsel->name,
1724 perf_evsel__strval(evsel, sample, "comm"),
1725 (pid_t)perf_evsel__intval(evsel, sample, "pid"),
1726 runtime,
1727 perf_evsel__intval(evsel, sample, "vruntime"));
1728 goto out_put;
1729 }
1730
1731 static void bpf_output__printer(enum binary_printer_ops op,
1732 unsigned int val, void *extra)
1733 {
1734 FILE *output = extra;
1735 unsigned char ch = (unsigned char)val;
1736
1737 switch (op) {
1738 case BINARY_PRINT_CHAR_DATA:
1739 fprintf(output, "%c", isprint(ch) ? ch : '.');
1740 break;
1741 case BINARY_PRINT_DATA_BEGIN:
1742 case BINARY_PRINT_LINE_BEGIN:
1743 case BINARY_PRINT_ADDR:
1744 case BINARY_PRINT_NUM_DATA:
1745 case BINARY_PRINT_NUM_PAD:
1746 case BINARY_PRINT_SEP:
1747 case BINARY_PRINT_CHAR_PAD:
1748 case BINARY_PRINT_LINE_END:
1749 case BINARY_PRINT_DATA_END:
1750 default:
1751 break;
1752 }
1753 }
1754
1755 static void bpf_output__fprintf(struct trace *trace,
1756 struct perf_sample *sample)
1757 {
1758 print_binary(sample->raw_data, sample->raw_size, 8,
1759 bpf_output__printer, trace->output);
1760 }
1761
/*
 * Generic handler for non-syscall events (--event, BPF output...): print a
 * timestamped line with the event's formatted payload and, when resolved,
 * its callchain. Always returns 0.
 */
static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample)
{
	int callchain_ret = 0;

	if (sample->callchain) {
		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
		if (callchain_ret == 0) {
			/* Too shallow for --min-stack: skip this event. */
			if (callchain_cursor.nr < trace->min_stack)
				goto out;
			callchain_ret = 1;
		}
	}

	trace__printf_interrupted_entry(trace, sample);
	trace__fprintf_tstamp(trace, sample->time, trace->output);

	/* Keep columns aligned with syscall lines, which print a duration. */
	if (trace->trace_syscalls)
		fprintf(trace->output, "( ): ");

	fprintf(trace->output, "%s:", evsel->name);

	if (perf_evsel__is_bpf_output(evsel)) {
		bpf_output__fprintf(trace, sample);
	} else if (evsel->tp_format) {
		event_format__fprintf(evsel->tp_format, sample->cpu,
				      sample->raw_data, sample->raw_size,
				      trace->output);
	}

	fprintf(trace->output, ")\n");

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
out:
	return 0;
}
1802
1803 static void print_location(FILE *f, struct perf_sample *sample,
1804 struct addr_location *al,
1805 bool print_dso, bool print_sym)
1806 {
1807
1808 if ((verbose > 0 || print_dso) && al->map)
1809 fprintf(f, "%s@", al->map->dso->long_name);
1810
1811 if ((verbose > 0 || print_sym) && al->sym)
1812 fprintf(f, "%s+0x%" PRIx64, al->sym->name,
1813 al->addr - al->sym->start);
1814 else if (al->map)
1815 fprintf(f, "0x%" PRIx64, al->addr);
1816 else
1817 fprintf(f, "0x%" PRIx64, sample->addr);
1818 }
1819
/*
 * Page-fault software event handler: count per-thread major/minor faults
 * and, unless --summary-only, print the faulting IP and the accessed
 * address resolved to symbols. Returns 0 on success, -1 on failure.
 */
static int trace__pgfault(struct trace *trace,
			  struct perf_evsel *evsel,
			  union perf_event *event __maybe_unused,
			  struct perf_sample *sample)
{
	struct thread *thread;
	struct addr_location al;
	char map_type = 'd';	/* default; may become 'x' or '?' below */
	struct thread_trace *ttrace;
	int err = -1;
	int callchain_ret = 0;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);

	if (sample->callchain) {
		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
		if (callchain_ret == 0) {
			/* Too shallow for --min-stack: skip this event. */
			if (callchain_cursor.nr < trace->min_stack)
				goto out_put;
			callchain_ret = 1;
		}
	}

	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	if (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
		ttrace->pfmaj++;
	else
		ttrace->pfmin++;

	if (trace->summary_only)
		goto out;

	/* Where did the fault happen? Resolve the instruction pointer. */
	thread__find_addr_location(thread, sample->cpumode, MAP__FUNCTION,
				   sample->ip, &al);

	trace__fprintf_entry_head(trace, thread, 0, sample->time, trace->output);

	fprintf(trace->output, "%sfault [",
		evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
		"maj" : "min");

	print_location(trace->output, sample, &al, false, true);

	fprintf(trace->output, "] => ");

	/* And what address was accessed? Try data maps first. */
	thread__find_addr_location(thread, sample->cpumode, MAP__VARIABLE,
				   sample->addr, &al);

	if (!al.map) {
		/* Not in a data map: maybe an executable mapping. */
		thread__find_addr_location(thread, sample->cpumode,
					   MAP__FUNCTION, sample->addr, &al);

		if (al.map)
			map_type = 'x';
		else
			map_type = '?';
	}

	print_location(trace->output, sample, &al, true, false);

	fprintf(trace->output, " (%c%c)\n", map_type, al.level);

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
out:
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
1895
1896 static void trace__set_base_time(struct trace *trace,
1897 struct perf_evsel *evsel,
1898 struct perf_sample *sample)
1899 {
1900 /*
1901 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust
1902 * and don't use sample->time unconditionally, we may end up having
1903 * some other event in the future without PERF_SAMPLE_TIME for good
1904 * reason, i.e. we may not be interested in its timestamps, just in
1905 * it taking place, picking some piece of information when it
1906 * appears in our event stream (vfs_getname comes to mind).
1907 */
1908 if (trace->base_time == 0 && !trace->full_time &&
1909 (evsel->attr.sample_type & PERF_SAMPLE_TIME))
1910 trace->base_time = sample->time;
1911 }
1912
1913 static int trace__process_sample(struct perf_tool *tool,
1914 union perf_event *event,
1915 struct perf_sample *sample,
1916 struct perf_evsel *evsel,
1917 struct machine *machine __maybe_unused)
1918 {
1919 struct trace *trace = container_of(tool, struct trace, tool);
1920 struct thread *thread;
1921 int err = 0;
1922
1923 tracepoint_handler handler = evsel->handler;
1924
1925 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
1926 if (thread && thread__is_filtered(thread))
1927 goto out;
1928
1929 trace__set_base_time(trace, evsel, sample);
1930
1931 if (handler) {
1932 ++trace->nr_events;
1933 handler(trace, evsel, event, sample);
1934 }
1935 out:
1936 thread__put(thread);
1937 return err;
1938 }
1939
1940 static int trace__record(struct trace *trace, int argc, const char **argv)
1941 {
1942 unsigned int rec_argc, i, j;
1943 const char **rec_argv;
1944 const char * const record_args[] = {
1945 "record",
1946 "-R",
1947 "-m", "1024",
1948 "-c", "1",
1949 };
1950
1951 const char * const sc_args[] = { "-e", };
1952 unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
1953 const char * const majpf_args[] = { "-e", "major-faults" };
1954 unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
1955 const char * const minpf_args[] = { "-e", "minor-faults" };
1956 unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
1957
1958 /* +1 is for the event string below */
1959 rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 1 +
1960 majpf_args_nr + minpf_args_nr + argc;
1961 rec_argv = calloc(rec_argc + 1, sizeof(char *));
1962
1963 if (rec_argv == NULL)
1964 return -ENOMEM;
1965
1966 j = 0;
1967 for (i = 0; i < ARRAY_SIZE(record_args); i++)
1968 rec_argv[j++] = record_args[i];
1969
1970 if (trace->trace_syscalls) {
1971 for (i = 0; i < sc_args_nr; i++)
1972 rec_argv[j++] = sc_args[i];
1973
1974 /* event string may be different for older kernels - e.g., RHEL6 */
1975 if (is_valid_tracepoint("raw_syscalls:sys_enter"))
1976 rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
1977 else if (is_valid_tracepoint("syscalls:sys_enter"))
1978 rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
1979 else {
1980 pr_err("Neither raw_syscalls nor syscalls events exist.\n");
1981 return -1;
1982 }
1983 }
1984
1985 if (trace->trace_pgfaults & TRACE_PFMAJ)
1986 for (i = 0; i < majpf_args_nr; i++)
1987 rec_argv[j++] = majpf_args[i];
1988
1989 if (trace->trace_pgfaults & TRACE_PFMIN)
1990 for (i = 0; i < minpf_args_nr; i++)
1991 rec_argv[j++] = minpf_args[i];
1992
1993 for (i = 0; i < (unsigned int)argc; i++)
1994 rec_argv[j++] = argv[i];
1995
1996 return cmd_record(j, rec_argv, NULL);
1997 }
1998
1999 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
2000
2001 static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
2002 {
2003 struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname");
2004
2005 if (IS_ERR(evsel))
2006 return false;
2007
2008 if (perf_evsel__field(evsel, "pathname") == NULL) {
2009 perf_evsel__delete(evsel);
2010 return false;
2011 }
2012
2013 evsel->handler = trace__vfs_getname;
2014 perf_evlist__add(evlist, evsel);
2015 return true;
2016 }
2017
2018 static struct perf_evsel *perf_evsel__new_pgfault(u64 config)
2019 {
2020 struct perf_evsel *evsel;
2021 struct perf_event_attr attr = {
2022 .type = PERF_TYPE_SOFTWARE,
2023 .mmap_data = 1,
2024 };
2025
2026 attr.config = config;
2027 attr.sample_period = 1;
2028
2029 event_attr_init(&attr);
2030
2031 evsel = perf_evsel__new(&attr);
2032 if (evsel)
2033 evsel->handler = trace__pgfault;
2034
2035 return evsel;
2036 }
2037
2038 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
2039 {
2040 const u32 type = event->header.type;
2041 struct perf_evsel *evsel;
2042
2043 if (type != PERF_RECORD_SAMPLE) {
2044 trace__process_event(trace, trace->host, event, sample);
2045 return;
2046 }
2047
2048 evsel = perf_evlist__id2evsel(trace->evlist, sample->id);
2049 if (evsel == NULL) {
2050 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
2051 return;
2052 }
2053
2054 trace__set_base_time(trace, evsel, sample);
2055
2056 if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
2057 sample->raw_data == NULL) {
2058 fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
2059 perf_evsel__name(evsel), sample->tid,
2060 sample->cpu, sample->raw_size);
2061 } else {
2062 tracepoint_handler handler = evsel->handler;
2063 handler(trace, evsel, event, sample);
2064 }
2065 }
2066
/*
 * Create the raw_syscalls:sys_enter/sys_exit tracepoint events, wire up
 * their handlers and add both to the session evlist.
 *
 * Returns 0 on success, -1 on failure; partially created events are
 * deleted on the error paths.
 */
static int trace__add_syscall_newtp(struct trace *trace)
{
	int ret = -1;
	struct perf_evlist *evlist = trace->evlist;
	struct perf_evsel *sys_enter, *sys_exit;

	sys_enter = perf_evsel__syscall_newtp("sys_enter", trace__sys_enter);
	if (sys_enter == NULL)
		goto out;

	/* 'args' is the sys_enter payload field with the syscall arguments */
	if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
		goto out_delete_sys_enter;

	sys_exit = perf_evsel__syscall_newtp("sys_exit", trace__sys_exit);
	if (sys_exit == NULL)
		goto out_delete_sys_enter;

	/* 'ret' is the sys_exit payload field with the syscall return value */
	if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
		goto out_delete_sys_exit;

	perf_evlist__add(evlist, sys_enter);
	perf_evlist__add(evlist, sys_exit);

	if (callchain_param.enabled && !trace->kernel_syscallchains) {
		/*
		 * We're interested only in the user space callchain
		 * leading to the syscall, allow overriding that for
		 * debugging reasons using --kernel_syscall_callchains
		 */
		sys_exit->attr.exclude_callchain_kernel = 1;
	}

	trace->syscalls.events.sys_enter = sys_enter;
	trace->syscalls.events.sys_exit = sys_exit;

	ret = 0;
out:
	return ret;

out_delete_sys_exit:
	perf_evsel__delete_priv(sys_exit);
out_delete_sys_enter:
	perf_evsel__delete_priv(sys_enter);
	goto out;
}
2112
2113 static int trace__set_ev_qualifier_filter(struct trace *trace)
2114 {
2115 int err = -1;
2116 struct perf_evsel *sys_exit;
2117 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
2118 trace->ev_qualifier_ids.nr,
2119 trace->ev_qualifier_ids.entries);
2120
2121 if (filter == NULL)
2122 goto out_enomem;
2123
2124 if (!perf_evsel__append_tp_filter(trace->syscalls.events.sys_enter,
2125 filter)) {
2126 sys_exit = trace->syscalls.events.sys_exit;
2127 err = perf_evsel__append_tp_filter(sys_exit, filter);
2128 }
2129
2130 free(filter);
2131 out:
2132 return err;
2133 out_enomem:
2134 errno = ENOMEM;
2135 goto out;
2136 }
2137
/*
 * Live tracing main loop: set up the syscall/pagefault/sched events,
 * optionally fork the workload, then mmap-read the ring buffers and
 * dispatch each record until interrupted or the workload exits.
 *
 * Returns 0 on success or a negative error; on every exit path the
 * evlist is deleted and trace->live is cleared.
 */
static int trace__run(struct trace *trace, int argc, const char **argv)
{
	struct perf_evlist *evlist = trace->evlist;
	struct perf_evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
	int err = -1, i;
	unsigned long before;
	const bool forks = argc > 0;
	bool draining = false;

	trace->live = true;

	if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
		goto out_error_raw_syscalls;

	if (trace->trace_syscalls)
		trace->vfs_getname = perf_evlist__add_vfs_getname(evlist);

	if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
		pgfault_maj = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
		if (pgfault_maj == NULL)
			goto out_error_mem;
		perf_evlist__add(evlist, pgfault_maj);
	}

	if ((trace->trace_pgfaults & TRACE_PFMIN)) {
		pgfault_min = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
		if (pgfault_min == NULL)
			goto out_error_mem;
		perf_evlist__add(evlist, pgfault_min);
	}

	if (trace->sched &&
	    perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
				   trace__sched_stat_runtime))
		goto out_error_sched_stat_runtime;

	err = perf_evlist__create_maps(evlist, &trace->opts.target);
	if (err < 0) {
		fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
		goto out_delete_evlist;
	}

	err = trace__symbols_init(trace, evlist);
	if (err < 0) {
		fprintf(trace->output, "Problems initializing symbol libraries!\n");
		goto out_delete_evlist;
	}

	perf_evlist__config(evlist, &trace->opts, NULL);

	if (callchain_param.enabled) {
		bool use_identifier = false;

		if (trace->syscalls.events.sys_exit) {
			perf_evsel__config_callchain(trace->syscalls.events.sys_exit,
						     &trace->opts, &callchain_param);
			use_identifier = true;
		}

		if (pgfault_maj) {
			perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
			use_identifier = true;
		}

		if (pgfault_min) {
			perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
			use_identifier = true;
		}

		if (use_identifier) {
		       /*
			* Now we have evsels with different sample_ids, use
			* PERF_SAMPLE_IDENTIFIER to map from sample to evsel
			* from a fixed position in each ring buffer record.
			*
			* As of this the changeset introducing this comment, this
			* isn't strictly needed, as the fields that can come before
			* PERF_SAMPLE_ID are all used, but we'll probably disable
			* some of those for things like copying the payload of
			* pointer syscall arguments, and for vfs_getname we don't
			* need PERF_SAMPLE_ADDR and PERF_SAMPLE_IP, so do this
			* here as a warning we need to use PERF_SAMPLE_IDENTIFIER.
			*/
			perf_evlist__set_sample_bit(evlist, IDENTIFIER);
			perf_evlist__reset_sample_bit(evlist, ID);
		}
	}

	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);

	if (forks) {
		/* Fork now, start (enable events) only after everything is set up. */
		err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
						    argv, false, NULL);
		if (err < 0) {
			fprintf(trace->output, "Couldn't run the workload!\n");
			goto out_delete_evlist;
		}
	}

	err = perf_evlist__open(evlist);
	if (err < 0)
		goto out_error_open;

	err = bpf__apply_obj_config();
	if (err) {
		char errbuf[BUFSIZ];

		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Apply config to BPF failed: %s\n",
			 errbuf);
		goto out_error_open;
	}

	/*
	 * Better not use !target__has_task() here because we need to cover the
	 * case where no threads were specified in the command line, but a
	 * workload was, and in that case we will fill in the thread_map when
	 * we fork the workload in perf_evlist__prepare_workload.
	 */
	if (trace->filter_pids.nr > 0)
		err = perf_evlist__set_filter_pids(evlist, trace->filter_pids.nr, trace->filter_pids.entries);
	else if (thread_map__pid(evlist->threads, 0) == -1)
		err = perf_evlist__set_filter_pid(evlist, getpid());

	if (err < 0)
		goto out_error_mem;

	if (trace->ev_qualifier_ids.nr > 0) {
		err = trace__set_ev_qualifier_filter(trace);
		if (err < 0)
			goto out_errno;

		pr_debug("event qualifier tracepoint filter: %s\n",
			 trace->syscalls.events.sys_exit->filter);
	}

	err = perf_evlist__apply_filters(evlist, &evsel);
	if (err < 0)
		goto out_error_apply_filters;

	err = perf_evlist__mmap(evlist, trace->opts.mmap_pages, false);
	if (err < 0)
		goto out_error_mmap;

	if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
		perf_evlist__enable(evlist);

	if (forks)
		perf_evlist__start_workload(evlist);

	if (trace->opts.initial_delay) {
		/* --delay: let the workload warm up before enabling the events */
		usleep(trace->opts.initial_delay * 1000);
		perf_evlist__enable(evlist);
	}

	trace->multiple_threads = thread_map__pid(evlist->threads, 0) == -1 ||
				  evlist->threads->nr > 1 ||
				  perf_evlist__first(evlist)->attr.inherit;
again:
	before = trace->nr_events;

	/* Drain every per-cpu mmap ring, one record at a time. */
	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;

		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			struct perf_sample sample;

			++trace->nr_events;

			err = perf_evlist__parse_sample(evlist, event, &sample);
			if (err) {
				fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
				goto next_event;
			}

			trace__handle_event(trace, event, &sample);
next_event:
			perf_evlist__mmap_consume(evlist, i);

			if (interrupted)
				goto out_disable;

			/*
			 * SIGCHLD arrived: stop producing new events but keep
			 * draining what is already in the ring buffers.
			 */
			if (done && !draining) {
				perf_evlist__disable(evlist);
				draining = true;
			}
		}
	}

	if (trace->nr_events == before) {
		/* Nothing new: poll, with a short timeout once we are done. */
		int timeout = done ? 100 : -1;

		if (!draining && perf_evlist__poll(evlist, timeout) > 0) {
			if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP) == 0)
				draining = true;

			goto again;
		}
	} else {
		goto again;
	}

out_disable:
	thread__zput(trace->current);

	perf_evlist__disable(evlist);

	if (!err) {
		if (trace->summary)
			trace__fprintf_thread_summary(trace, trace->output);

		if (trace->show_tool_stats) {
			fprintf(trace->output, "Stats:\n "
					       " vfs_getname : %" PRIu64 "\n"
					       " proc_getname: %" PRIu64 "\n",
					       trace->stats.vfs_getname,
					       trace->stats.proc_getname);
		}
	}

out_delete_evlist:
	perf_evlist__delete(evlist);
	trace->evlist = NULL;
	trace->live = false;
	return err;
/*
 * Unreachable fall-through brace: it only exists to give the error
 * labels below a shared errbuf without burdening the hot path above.
 * Control reaches them exclusively via the gotos.
 */
{
	char errbuf[BUFSIZ];

out_error_sched_stat_runtime:
	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
	goto out_error;

out_error_raw_syscalls:
	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
	goto out_error;

out_error_mmap:
	perf_evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
	goto out_error;

out_error_open:
	perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));

out_error:
	fprintf(trace->output, "%s\n", errbuf);
	goto out_delete_evlist;

out_error_apply_filters:
	fprintf(trace->output,
		"Failed to set filter \"%s\" on event %s with %d (%s)\n",
		evsel->filter, perf_evsel__name(evsel), errno,
		str_error_r(errno, errbuf, sizeof(errbuf)));
	goto out_delete_evlist;
}
out_error_mem:
	fprintf(trace->output, "Not enough memory to run!\n");
	goto out_delete_evlist;

out_errno:
	fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
	goto out_delete_evlist;
}
2401
/*
 * 'perf trace -i perf.data': process a previously recorded session
 * instead of tracing live, reusing the same syscall/pagefault handlers.
 *
 * Returns 0 on success or a negative error.
 */
static int trace__replay(struct trace *trace)
{
	const struct perf_evsel_str_handler handlers[] = {
		{ "probe:vfs_getname",	     trace__vfs_getname, },
	};
	struct perf_data_file file = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = trace->force,
	};
	struct perf_session *session;
	struct perf_evsel *evsel;
	int err = -1;

	/* Wire up the generic event processors so thread/map state is rebuilt. */
	trace->tool.sample	  = trace__process_sample;
	trace->tool.mmap	  = perf_event__process_mmap;
	trace->tool.mmap2	  = perf_event__process_mmap2;
	trace->tool.comm	  = perf_event__process_comm;
	trace->tool.exit	  = perf_event__process_exit;
	trace->tool.fork	  = perf_event__process_fork;
	trace->tool.attr	  = perf_event__process_attr;
	trace->tool.tracing_data = perf_event__process_tracing_data;
	trace->tool.build_id	  = perf_event__process_build_id;
	trace->tool.namespaces	  = perf_event__process_namespaces;

	/* Replay needs timestamp-ordered delivery to pair enter/exit. */
	trace->tool.ordered_events = true;
	trace->tool.ordering_requires_timestamps = true;

	/* add tid to output */
	trace->multiple_threads = true;

	session = perf_session__new(&file, false, &trace->tool);
	if (session == NULL)
		return -1;

	if (trace->opts.target.pid)
		symbol_conf.pid_list_str = strdup(trace->opts.target.pid);

	if (trace->opts.target.tid)
		symbol_conf.tid_list_str = strdup(trace->opts.target.tid);

	if (symbol__init(&session->header.env) < 0)
		goto out;

	trace->host = &session->machines.host;

	err = perf_session__set_tracepoints_handlers(session, handlers);
	if (err)
		goto out;

	evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
						     "raw_syscalls:sys_enter");
	/* older kernels have syscalls tp versus raw_syscalls */
	if (evsel == NULL)
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
							     "syscalls:sys_enter");

	if (evsel &&
	    (perf_evsel__init_syscall_tp(evsel, trace__sys_enter) < 0 ||
	    perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
		pr_err("Error during initialize raw_syscalls:sys_enter event\n");
		goto out;
	}

	evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
						     "raw_syscalls:sys_exit");
	if (evsel == NULL)
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
							     "syscalls:sys_exit");
	if (evsel &&
	    (perf_evsel__init_syscall_tp(evsel, trace__sys_exit) < 0 ||
	    perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
		pr_err("Error during initialize raw_syscalls:sys_exit event\n");
		goto out;
	}

	/* Route any recorded page-fault software events to the pgfault printer. */
	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_SOFTWARE &&
		    (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
		     evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
		     evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS))
			evsel->handler = trace__pgfault;
	}

	setup_pager();

	err = perf_session__process_events(session);
	if (err)
		pr_err("Failed to process events, error %d", err);

	else if (trace->summary)
		trace__fprintf_thread_summary(trace, trace->output);

out:
	perf_session__delete(session);

	return err;
}
2500
/* Emit the banner line that precedes the per-thread summary. */
static size_t trace__fprintf_threads_header(FILE *fp)
{
	return fprintf(fp, "\n Summary of events:\n\n");
}
2509
/*
 * Resorted view of a thread's per-syscall stats intlist, ordered by the
 * accumulated time spent in each syscall (a->msecs > b->msecs puts the
 * most expensive ones first).
 */
DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
	struct stats 	*stats;
	double		msecs;
	int		syscall;
)
{
	struct int_node *source = rb_entry(nd, struct int_node, rb_node);
	struct stats *stats = source->priv;

	/* source->i is the syscall id, source->priv its accumulated stats */
	entry->syscall = source->i;
	entry->stats = stats;
	/* total time = number of calls * average duration, in msecs */
	entry->msecs = stats ? (u64)stats->n * (avg_stats(stats) / NSEC_PER_MSEC) : 0;
}
2523
/*
 * Print the per-syscall statistics table (calls, total, min/avg/max,
 * stddev) for one thread, sorted by total time spent per syscall.
 * Returns the number of characters printed.
 */
static size_t thread__dump_stats(struct thread_trace *ttrace,
				 struct trace *trace, FILE *fp)
{
	size_t printed = 0;
	struct syscall *sc;
	struct rb_node *nd;
	DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);

	if (syscall_stats == NULL)
		return 0;

	printed += fprintf(fp, "\n");

	printed += fprintf(fp, "   syscall            calls    total       min       avg       max      stddev\n");
	printed += fprintf(fp, "                               (msec)    (msec)    (msec)    (msec)        (%%)\n");
	printed += fprintf(fp, "   --------------- -------- --------- --------- --------- ---------     ------\n");

	resort_rb__for_each_entry(nd, syscall_stats) {
		struct stats *stats = syscall_stats_entry->stats;
		if (stats) {
			/* stats are accumulated in nanoseconds, print msecs */
			double min = (double)(stats->min) / NSEC_PER_MSEC;
			double max = (double)(stats->max) / NSEC_PER_MSEC;
			double avg = avg_stats(stats);
			double pct;
			u64 n = (u64) stats->n;

			/* relative stddev, guarding the avg == 0 division */
			pct = avg ? 100.0 * stddev_stats(stats)/avg : 0.0;
			avg /= NSEC_PER_MSEC;

			sc = &trace->syscalls.table[syscall_stats_entry->syscall];
			printed += fprintf(fp, "   %-15s", sc->name);
			printed += fprintf(fp, " %8" PRIu64 " %9.3f %9.3f %9.3f",
					   n, syscall_stats_entry->msecs, min, avg);
			printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
		}
	}

	resort_rb__delete(syscall_stats);
	printed += fprintf(fp, "\n\n");

	return printed;
}
2566
2567 static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
2568 {
2569 size_t printed = 0;
2570 struct thread_trace *ttrace = thread__priv(thread);
2571 double ratio;
2572
2573 if (ttrace == NULL)
2574 return 0;
2575
2576 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
2577
2578 printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
2579 printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
2580 printed += fprintf(fp, "%.1f%%", ratio);
2581 if (ttrace->pfmaj)
2582 printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
2583 if (ttrace->pfmin)
2584 printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
2585 if (trace->sched)
2586 printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
2587 else if (fputc('\n', fp) != EOF)
2588 ++printed;
2589
2590 printed += thread__dump_stats(ttrace, trace, fp);
2591
2592 return printed;
2593 }
2594
2595 static unsigned long thread__nr_events(struct thread_trace *ttrace)
2596 {
2597 return ttrace ? ttrace->nr_events : 0;
2598 }
2599
/*
 * Resorted view of the machine's thread rbtree, ordered so that the
 * busiest threads (most events) come out last in the summary.
 */
DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_events(b->thread->priv)),
	struct thread *thread;
)
{
	entry->thread = rb_entry(nd, struct thread, rb_node);
}
2606
/*
 * Print the end-of-run summary: a header followed by one section per
 * thread, sorted by event count. Returns characters printed, or 0 when
 * the sort itself failed.
 */
static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
{
	DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host);
	size_t printed = trace__fprintf_threads_header(fp);
	struct rb_node *nd;

	if (threads == NULL) {
		fprintf(fp, "%s", "Error sorting output by nr_events!\n");
		return 0;
	}

	resort_rb__for_each_entry(nd, threads)
		printed += trace__fprintf_thread(fp, threads_entry->thread, trace);

	resort_rb__delete(threads);

	return printed;
}
2625
2626 static int trace__set_duration(const struct option *opt, const char *str,
2627 int unset __maybe_unused)
2628 {
2629 struct trace *trace = opt->value;
2630
2631 trace->duration_filter = atof(str);
2632 return 0;
2633 }
2634
2635 static int trace__set_filter_pids(const struct option *opt, const char *str,
2636 int unset __maybe_unused)
2637 {
2638 int ret = -1;
2639 size_t i;
2640 struct trace *trace = opt->value;
2641 /*
2642 * FIXME: introduce a intarray class, plain parse csv and create a
2643 * { int nr, int entries[] } struct...
2644 */
2645 struct intlist *list = intlist__new(str);
2646
2647 if (list == NULL)
2648 return -1;
2649
2650 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
2651 trace->filter_pids.entries = calloc(i, sizeof(pid_t));
2652
2653 if (trace->filter_pids.entries == NULL)
2654 goto out;
2655
2656 trace->filter_pids.entries[0] = getpid();
2657
2658 for (i = 1; i < trace->filter_pids.nr; ++i)
2659 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
2660
2661 intlist__delete(list);
2662 ret = 0;
2663 out:
2664 return ret;
2665 }
2666
2667 static int trace__open_output(struct trace *trace, const char *filename)
2668 {
2669 struct stat st;
2670
2671 if (!stat(filename, &st) && st.st_size) {
2672 char oldname[PATH_MAX];
2673
2674 scnprintf(oldname, sizeof(oldname), "%s.old", filename);
2675 unlink(oldname);
2676 rename(filename, oldname);
2677 }
2678
2679 trace->output = fopen(filename, "w");
2680
2681 return trace->output == NULL ? -errno : 0;
2682 }
2683
2684 static int parse_pagefaults(const struct option *opt, const char *str,
2685 int unset __maybe_unused)
2686 {
2687 int *trace_pgfaults = opt->value;
2688
2689 if (strcmp(str, "all") == 0)
2690 *trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
2691 else if (strcmp(str, "maj") == 0)
2692 *trace_pgfaults |= TRACE_PFMAJ;
2693 else if (strcmp(str, "min") == 0)
2694 *trace_pgfaults |= TRACE_PFMIN;
2695 else
2696 return -1;
2697
2698 return 0;
2699 }
2700
/* Install the same sample handler on every evsel parsed from --event. */
static void evlist__set_evsel_handler(struct perf_evlist *evlist, void *handler)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel->handler = handler;
}
2708
2709 /*
2710 * XXX: Hackish, just splitting the combined -e+--event (syscalls
2711 * (raw_syscalls:{sys_{enter,exit}} + events (tracepoints, HW, SW, etc) to use
2712 * existing facilities unchanged (trace->ev_qualifier + parse_options()).
2713 *
2714 * It'd be better to introduce a parse_options() variant that would return a
2715 * list with the terms it didn't match to an event...
2716 */
2717 static int trace__parse_events_option(const struct option *opt, const char *str,
2718 int unset __maybe_unused)
2719 {
2720 struct trace *trace = (struct trace *)opt->value;
2721 const char *s = str;
2722 char *sep = NULL, *lists[2] = { NULL, NULL, };
2723 int len = strlen(str), err = -1, list;
2724 char *strace_groups_dir = system_path(STRACE_GROUPS_DIR);
2725 char group_name[PATH_MAX];
2726
2727 if (strace_groups_dir == NULL)
2728 return -1;
2729
2730 if (*s == '!') {
2731 ++s;
2732 trace->not_ev_qualifier = true;
2733 }
2734
2735 while (1) {
2736 if ((sep = strchr(s, ',')) != NULL)
2737 *sep = '\0';
2738
2739 list = 0;
2740 if (syscalltbl__id(trace->sctbl, s) >= 0) {
2741 list = 1;
2742 } else {
2743 path__join(group_name, sizeof(group_name), strace_groups_dir, s);
2744 if (access(group_name, R_OK) == 0)
2745 list = 1;
2746 }
2747
2748 if (lists[list]) {
2749 sprintf(lists[list] + strlen(lists[list]), ",%s", s);
2750 } else {
2751 lists[list] = malloc(len);
2752 if (lists[list] == NULL)
2753 goto out;
2754 strcpy(lists[list], s);
2755 }
2756
2757 if (!sep)
2758 break;
2759
2760 *sep = ',';
2761 s = sep + 1;
2762 }
2763
2764 if (lists[1] != NULL) {
2765 struct strlist_config slist_config = {
2766 .dirname = strace_groups_dir,
2767 };
2768
2769 trace->ev_qualifier = strlist__new(lists[1], &slist_config);
2770 if (trace->ev_qualifier == NULL) {
2771 fputs("Not enough memory to parse event qualifier", trace->output);
2772 goto out;
2773 }
2774
2775 if (trace__validate_ev_qualifier(trace))
2776 goto out;
2777 }
2778
2779 err = 0;
2780
2781 if (lists[0]) {
2782 struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
2783 "event selector. use 'perf list' to list available events",
2784 parse_events_option);
2785 err = parse_events_option(&o, lists[0], 0);
2786 }
2787 out:
2788 if (sep)
2789 *sep = ',';
2790
2791 return err;
2792 }
2793
/*
 * Entry point for 'perf trace': parse options, decide between the
 * 'record', live-run and replay (-i) modes and dispatch accordingly.
 */
int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char *trace_usage[] = {
		"perf trace [<options>] [<command>]",
		"perf trace [<options>] -- <command> [<options>]",
		"perf trace record [<options>] [<command>]",
		"perf trace record [<options>] -- <command> [<options>]",
		NULL
	};
	struct trace trace = {
		.syscalls = {
			. max = -1,
		},
		.opts = {
			.target = {
				.uid	   = UINT_MAX,
				.uses_mmap = true,
			},
			.user_freq     = UINT_MAX,
			.user_interval = ULLONG_MAX,
			.no_buffering  = true,
			.mmap_pages    = UINT_MAX,
			.proc_map_timeout  = 500,
		},
		.output = stderr,
		.show_comm = true,
		.trace_syscalls = true,
		.kernel_syscallchains = false,
		.max_stack = UINT_MAX,
	};
	const char *output_name = NULL;
	const struct option trace_options[] = {
	OPT_CALLBACK('e', "event", &trace, "event",
		     "event/syscall selector. use 'perf list' to list available events",
		     trace__parse_events_option),
	OPT_BOOLEAN(0, "comm", &trace.show_comm,
		    "show the thread COMM next to its id"),
	OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
	OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
		     trace__parse_events_option),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
	OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
		    "trace events on existing process id"),
	OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
		    "trace events on existing thread id"),
	OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
		     "pids to filter (by the kernel)", trace__set_filter_pids),
	OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
		   "user to profile"),
	OPT_CALLBACK(0, "duration", &trace, "float",
		     "show only events with duration > N.M ms",
		     trace__set_duration),
	OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
	OPT_INCR('v', "verbose", &verbose, "be more verbose"),
	OPT_BOOLEAN('T', "time", &trace.full_time,
		    "Show full timestamp, not time relative to first start"),
	OPT_BOOLEAN('s', "summary", &trace.summary_only,
		    "Show only syscall summary with statistics"),
	OPT_BOOLEAN('S', "with-summary", &trace.summary,
		    "Show all syscalls and summary with statistics"),
	OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
		     "Trace pagefaults", parse_pagefaults, "maj"),
	OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
	OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
	OPT_CALLBACK(0, "call-graph", &trace.opts,
		     "record_mode[,record_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
		    "Show the kernel callchains on the syscall exit path"),
	OPT_UINTEGER(0, "min-stack", &trace.min_stack,
		     "Set the minimum stack depth when parsing the callchain, "
		     "anything below the specified depth will be ignored."),
	OPT_UINTEGER(0, "max-stack", &trace.max_stack,
		     "Set the maximum stack depth when parsing the callchain, "
		     "anything beyond the specified depth will be ignored. "
		     "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
	OPT_UINTEGER(0, "proc-map-timeout", &trace.opts.proc_map_timeout,
			"per thread proc mmap processing timeout in ms"),
	OPT_UINTEGER('D', "delay", &trace.opts.initial_delay,
		     "ms to wait before starting measurement after program "
		     "start"),
	OPT_END()
	};
	bool __maybe_unused max_stack_user_set = true;
	bool mmap_pages_user_set = true;
	const char * const trace_subcommands[] = { "record", NULL };
	int err;
	char bf[BUFSIZ];

	signal(SIGSEGV, sighandler_dump_stack);
	signal(SIGFPE, sighandler_dump_stack);

	trace.evlist = perf_evlist__new();
	trace.sctbl = syscalltbl__new();

	if (trace.evlist == NULL || trace.sctbl == NULL) {
		pr_err("Not enough memory to run!\n");
		err = -ENOMEM;
		goto out;
	}

	argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
				 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);

	err = bpf__setup_stdout(trace.evlist);
	if (err) {
		bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
		pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
		goto out;
	}

	err = -1;

	if (trace.trace_pgfaults) {
		/* page fault handlers need the address and the timestamp */
		trace.opts.sample_address = true;
		trace.opts.sample_time = true;
	}

	/* UINT_MAX means the user didn't override the defaults */
	if (trace.opts.mmap_pages == UINT_MAX)
		mmap_pages_user_set = false;

	if (trace.max_stack == UINT_MAX) {
		trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl_perf_event_max_stack;
		max_stack_user_set = false;
	}

#ifdef HAVE_DWARF_UNWIND_SUPPORT
	if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled && trace.trace_syscalls)
		record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
#endif

	if (callchain_param.enabled) {
		if (!mmap_pages_user_set && geteuid() == 0)
			trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;

		symbol_conf.use_callchain = true;
	}

	if (trace.evlist->nr_entries > 0)
		evlist__set_evsel_handler(trace.evlist, trace__event_handler);

	if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
		return trace__record(&trace, argc-1, &argv[1]);

	/* summary_only implies summary option, but don't overwrite summary if set */
	if (trace.summary_only)
		trace.summary = trace.summary_only;

	if (!trace.trace_syscalls && !trace.trace_pgfaults &&
	    trace.evlist->nr_entries == 0 /* Was --events used? */) {
		pr_err("Please specify something to trace.\n");
		return -1;
	}

	if (!trace.trace_syscalls && trace.ev_qualifier) {
		pr_err("The -e option can't be used with --no-syscalls.\n");
		goto out;
	}

	if (output_name != NULL) {
		err = trace__open_output(&trace, output_name);
		if (err < 0) {
			perror("failed to create output file");
			goto out;
		}
	}

	trace.open_id = syscalltbl__id(trace.sctbl, "open");

	err = target__validate(&trace.opts.target);
	if (err) {
		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}

	err = target__parse_uid(&trace.opts.target);
	if (err) {
		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}

	/* no target and no workload given: fall back to system wide */
	if (!argc && target__none(&trace.opts.target))
		trace.opts.target.system_wide = true;

	if (input_name)
		err = trace__replay(&trace);
	else
		err = trace__run(&trace, argc, argv);

out_close:
	if (output_name != NULL)
		fclose(trace.output);
out:
	return err;
}