]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame_incremental - tools/perf/builtin-trace.c
perf trace: Support ! in -e expressions
[mirror_ubuntu-artful-kernel.git] / tools / perf / builtin-trace.c
... / ...
CommitLineData
#include <traceevent/event-parse.h>
#include "builtin.h"
#include "util/color.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/thread.h"
#include "util/parse-options.h"
#include "util/strlist.h"
#include "util/thread_map.h"

#include <errno.h>
#include <libaudit.h>
#include <signal.h>
#include <stdlib.h>
13
/*
 * Per-syscall output formatting tweaks, looked up by name.
 *
 * NOTE: this table is searched with bsearch() in syscall_fmt__find(),
 * so entries MUST be kept sorted by ->name.
 */
static struct syscall_fmt {
	const char *name;	/* canonical syscall name */
	const char *alias;	/* tracepoint name when it differs, e.g. "newstat" */
	bool	   errmsg;	/* decode a negative return as errno name + message */
	bool	   timeout;	/* a zero return means the call timed out */
} syscall_fmts[] = {
	{ .name	    = "access",	    .errmsg = true, },
	{ .name	    = "arch_prctl", .errmsg = true, .alias = "prctl", },
	{ .name	    = "connect",    .errmsg = true, },
	{ .name	    = "fstat",	    .errmsg = true, .alias = "newfstat", },
	{ .name	    = "fstatat",    .errmsg = true, .alias = "newfstatat", },
	{ .name	    = "futex",	    .errmsg = true, },
	{ .name	    = "open",	    .errmsg = true, },
	{ .name	    = "poll",	    .errmsg = true, .timeout = true, },
	{ .name	    = "ppoll",	    .errmsg = true, .timeout = true, },
	{ .name	    = "read",	    .errmsg = true, },
	{ .name	    = "recvfrom",   .errmsg = true, },
	{ .name	    = "select",	    .errmsg = true, .timeout = true, },
	{ .name	    = "socket",	    .errmsg = true, },
	{ .name	    = "stat",	    .errmsg = true, .alias = "newstat", },
};
35
36static int syscall_fmt__cmp(const void *name, const void *fmtp)
37{
38 const struct syscall_fmt *fmt = fmtp;
39 return strcmp(name, fmt->name);
40}
41
42static struct syscall_fmt *syscall_fmt__find(const char *name)
43{
44 const int nmemb = ARRAY_SIZE(syscall_fmts);
45 return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
46}
47
/* Per-syscall-id info, lazily filled in by trace__read_syscall_info(). */
struct syscall {
	struct event_format *tp_format;	/* syscalls:sys_enter_<name> event format */
	const char	    *name;	/* from audit_syscall_to_name() */
	bool		    filtered;	/* excluded by the -e qualifier */
	struct syscall_fmt  *fmt;	/* optional output tweaks, may be NULL */
};
54
55static size_t fprintf_duration(unsigned long t, FILE *fp)
56{
57 double duration = (double)t / NSEC_PER_MSEC;
58 size_t printed = fprintf(fp, "(");
59
60 if (duration >= 1.0)
61 printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
62 else if (duration >= 0.01)
63 printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
64 else
65 printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
66 return printed + fprintf(fp, "): ");
67}
68
/* Per-thread state, hung off thread->priv by thread__trace(). */
struct thread_trace {
	u64		  entry_time;	/* timestamp of the last sys_enter */
	u64		  exit_time;	/* timestamp of the last sys_exit */
	bool		  entry_pending; /* entry line buffered, printed at exit */
	unsigned long	  nr_events;
	char		  *entry_str;	/* formatted "name(args" for the pending entry */
	double		  runtime_ms;	/* accumulated from sched_stat_runtime */
};
77
78static struct thread_trace *thread_trace__new(void)
79{
80 return zalloc(sizeof(struct thread_trace));
81}
82
83static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
84{
85 struct thread_trace *ttrace;
86
87 if (thread == NULL)
88 goto fail;
89
90 if (thread->priv == NULL)
91 thread->priv = thread_trace__new();
92
93 if (thread->priv == NULL)
94 goto fail;
95
96 ttrace = thread->priv;
97 ++ttrace->nr_events;
98
99 return ttrace;
100fail:
101 color_fprintf(fp, PERF_COLOR_RED,
102 "WARNING: not enough memory, dropping samples!\n");
103 return NULL;
104}
105
/* Global state for one 'perf trace' session. */
struct trace {
	struct perf_tool	tool;		/* must be first: container_of() in trace__tool_process() */
	int			audit_machine;	/* audit arch used for syscall-name lookups */
	struct {
		int		max;		/* highest id in table, -1 while empty */
		struct syscall  *table;		/* id-indexed, grown on demand */
	} syscalls;
	struct perf_record_opts	opts;
	struct machine		host;
	u64			base_time;	/* first event's timestamp; output is relative to it */
	FILE			*output;	/* stdout or the -o file */
	unsigned long		nr_events;
	struct strlist		*ev_qualifier;	/* -e list of syscall names, may be NULL */
	bool			not_ev_qualifier; /* -e !list: invert the match */
	bool			sched;		/* --sched: also handle sched_stat_runtime */
	bool			multiple_threads; /* prefix output lines with the tid */
	double			duration_filter; /* --duration threshold, in ms */
	double			runtime_ms;	/* total runtime accumulated over all threads */
};
125
126static bool trace__filter_duration(struct trace *trace, double t)
127{
128 return t < (trace->duration_filter * NSEC_PER_MSEC);
129}
130
131static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
132{
133 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
134
135 return fprintf(fp, "%10.3f ", ts);
136}
137
138static bool done = false;
139
140static void sig_handler(int sig __maybe_unused)
141{
142 done = true;
143}
144
145static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
146 u64 duration, u64 tstamp, FILE *fp)
147{
148 size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
149 printed += fprintf_duration(duration, fp);
150
151 if (trace->multiple_threads)
152 printed += fprintf(fp, "%d ", thread->tid);
153
154 return printed;
155}
156
157static int trace__process_event(struct trace *trace, struct machine *machine,
158 union perf_event *event)
159{
160 int ret = 0;
161
162 switch (event->header.type) {
163 case PERF_RECORD_LOST:
164 color_fprintf(trace->output, PERF_COLOR_RED,
165 "LOST %" PRIu64 " events!\n", event->lost.lost);
166 ret = machine__process_lost_event(machine, event);
167 default:
168 ret = machine__process_event(machine, event);
169 break;
170 }
171
172 return ret;
173}
174
175static int trace__tool_process(struct perf_tool *tool,
176 union perf_event *event,
177 struct perf_sample *sample __maybe_unused,
178 struct machine *machine)
179{
180 struct trace *trace = container_of(tool, struct trace, tool);
181 return trace__process_event(trace, machine, event);
182}
183
184static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
185{
186 int err = symbol__init();
187
188 if (err)
189 return err;
190
191 machine__init(&trace->host, "", HOST_KERNEL_ID);
192 machine__create_kernel_maps(&trace->host);
193
194 if (perf_target__has_task(&trace->opts.target)) {
195 err = perf_event__synthesize_thread_map(&trace->tool, evlist->threads,
196 trace__tool_process,
197 &trace->host);
198 } else {
199 err = perf_event__synthesize_threads(&trace->tool, trace__tool_process,
200 &trace->host);
201 }
202
203 if (err)
204 symbol__exit();
205
206 return err;
207}
208
/*
 * Fill in trace->syscalls.table[id]: name, formatting tweaks and the
 * sys_enter tracepoint format, growing the id-indexed table on demand.
 * Returns 0 on success (including the "filtered out" case), -1 on any
 * failure.
 */
static int trace__read_syscall_info(struct trace *trace, int id)
{
	char tp_name[128];
	struct syscall *sc;
	const char *name = audit_syscall_to_name(id, trace->audit_machine);

	if (name == NULL)
		return -1;

	/* Ids can arrive in any order; grow the table to cover this one. */
	if (id > trace->syscalls.max) {
		struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));

		if (nsyscalls == NULL)
			return -1;

		if (trace->syscalls.max != -1) {
			/* Zero only the newly appended slots (max+1 .. id). */
			memset(nsyscalls + trace->syscalls.max + 1, 0,
			       (id - trace->syscalls.max) * sizeof(*sc));
		} else {
			/* First allocation: zero the whole table. */
			memset(nsyscalls, 0, (id + 1) * sizeof(*sc));
		}

		trace->syscalls.table = nsyscalls;
		trace->syscalls.max = id;
	}

	sc = trace->syscalls.table + id;
	sc->name = name;

	if (trace->ev_qualifier) {
		/*
		 * not_ev_qualifier ("-e !list") inverts the match: keep the
		 * syscall only when it is NOT in the list.
		 */
		bool in = strlist__find(trace->ev_qualifier, name) != NULL;

		if (!(in ^ trace->not_ev_qualifier)) {
			sc->filtered = true;
			/*
			 * No need to do read tracepoint information since this will be
			 * filtered out.
			 */
			return 0;
		}
	}

	sc->fmt = syscall_fmt__find(sc->name);

	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
	sc->tp_format = event_format__new("syscalls", tp_name);

	/* Some syscalls' tracepoints use an alias, e.g. stat -> newstat. */
	if (sc->tp_format == NULL && sc->fmt && sc->fmt->alias) {
		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
		sc->tp_format = event_format__new("syscalls", tp_name);
	}

	return sc->tp_format != NULL ? 0 : -1;
}
263
264static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
265 unsigned long *args)
266{
267 int i = 0;
268 size_t printed = 0;
269
270 if (sc->tp_format != NULL) {
271 struct format_field *field;
272
273 for (field = sc->tp_format->format.fields->next; field; field = field->next) {
274 printed += scnprintf(bf + printed, size - printed,
275 "%s%s: %ld", printed ? ", " : "",
276 field->name, args[i++]);
277 }
278 } else {
279 while (i < 6) {
280 printed += scnprintf(bf + printed, size - printed,
281 "%sarg%d: %ld",
282 printed ? ", " : "", i, args[i]);
283 ++i;
284 }
285 }
286
287 return printed;
288}
289
/* Signature shared by the sys_enter/sys_exit/sched tracepoint handlers. */
typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
				  struct perf_sample *sample);
292
/*
 * Return the struct syscall for the sample's "id" field, lazily reading
 * its tracepoint info on first sight.  NULL on an invalid or unreadable
 * id (a diagnostic is printed to trace->output).
 */
static struct syscall *trace__syscall_info(struct trace *trace,
					   struct perf_evsel *evsel,
					   struct perf_sample *sample)
{
	int id = perf_evsel__intval(evsel, sample, "id");

	if (id < 0) {
		fprintf(trace->output, "Invalid syscall %d id, skipping...\n", id);
		return NULL;
	}

	/* First time this id is seen: read its name/format info. */
	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
	    trace__read_syscall_info(trace, id))
		goto out_cant_read;

	/*
	 * NOTE(review): this re-check looks redundant with the one above --
	 * presumably it guards against trace__read_syscall_info() returning 0
	 * without filling the slot; confirm before simplifying.
	 */
	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))
		goto out_cant_read;

	return &trace->syscalls.table[id];

out_cant_read:
	fprintf(trace->output, "Problems reading syscall %d", id);
	if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL)
		fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
	fputs(" information", trace->output);
	return NULL;
}
320
/*
 * raw_syscalls:sys_enter handler: format "name(args" into the thread's
 * entry buffer and hold it until the matching sys_exit arrives, so the
 * duration can be printed in front of it.  exit/exit_group never return,
 * so those are printed immediately.
 */
static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
			    struct perf_sample *sample)
{
	char *msg;
	void *args;
	size_t printed = 0;
	struct thread *thread;
	struct syscall *sc = trace__syscall_info(trace, evsel, sample);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	/* Excluded by the -e qualifier: silently ignore. */
	if (sc->filtered)
		return 0;

	thread = machine__findnew_thread(&trace->host, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		return -1;

	args = perf_evsel__rawptr(evsel, sample, "args");
	if (args == NULL) {
		fprintf(trace->output, "Problems reading syscall arguments\n");
		return -1;
	}

	ttrace = thread->priv;

	/* Lazily allocate the buffer that carries the entry line to sys_exit. */
	if (ttrace->entry_str == NULL) {
		ttrace->entry_str = malloc(1024);
		if (!ttrace->entry_str)
			return -1;
	}

	ttrace->entry_time = sample->time;
	msg = ttrace->entry_str;
	printed += scnprintf(msg + printed, 1024 - printed, "%s(", sc->name);

	printed += syscall__scnprintf_args(sc, msg + printed, 1024 - printed, args);

	if (!strcmp(sc->name, "exit_group") || !strcmp(sc->name, "exit")) {
		/* No sys_exit will come; print now unless --duration filters. */
		if (!trace->duration_filter) {
			trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
			fprintf(trace->output, "%-70s\n", ttrace->entry_str);
		}
	} else
		ttrace->entry_pending = true;

	return 0;
}
372
/*
 * raw_syscalls:sys_exit handler: print the buffered entry line (or a
 * "continued" marker if the entry was missed), the duration and the
 * decoded return value.  Events below --duration are dropped.
 */
static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
			   struct perf_sample *sample)
{
	int ret;
	u64 duration = 0;
	struct thread *thread;
	struct syscall *sc = trace__syscall_info(trace, evsel, sample);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	/* Excluded by the -e qualifier: silently ignore. */
	if (sc->filtered)
		return 0;

	thread = machine__findnew_thread(&trace->host, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		return -1;

	ret = perf_evsel__intval(evsel, sample, "ret");

	ttrace = thread->priv;

	ttrace->exit_time = sample->time;

	if (ttrace->entry_time) {
		duration = sample->time - ttrace->entry_time;
		if (trace__filter_duration(trace, duration))
			goto out;
	} else if (trace->duration_filter)
		/* No entry seen, so no duration: can't apply the filter. */
		goto out;

	trace__fprintf_entry_head(trace, thread, duration, sample->time, trace->output);

	if (ttrace->entry_pending) {
		fprintf(trace->output, "%-70s", ttrace->entry_str);
	} else {
		/* Entry was printed earlier (or missed): mark continuation. */
		fprintf(trace->output, " ... [");
		color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
		fprintf(trace->output, "]: %s()", sc->name);
	}

	if (ret < 0 && sc->fmt && sc->fmt->errmsg) {
		char bf[256];
		const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
			   *e = audit_errno_to_name(-ret);

		fprintf(trace->output, ") = -1 %s %s", e, emsg);
	} else if (ret == 0 && sc->fmt && sc->fmt->timeout)
		fprintf(trace->output, ") = 0 Timeout");
	else
		fprintf(trace->output, ") = %d", ret);

	fputc('\n', trace->output);
out:
	ttrace->entry_pending = false;

	return 0;
}
433
434static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,
435 struct perf_sample *sample)
436{
437 u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
438 double runtime_ms = (double)runtime / NSEC_PER_MSEC;
439 struct thread *thread = machine__findnew_thread(&trace->host, sample->tid);
440 struct thread_trace *ttrace = thread__trace(thread, trace->output);
441
442 if (ttrace == NULL)
443 goto out_dump;
444
445 ttrace->runtime_ms += runtime_ms;
446 trace->runtime_ms += runtime_ms;
447 return 0;
448
449out_dump:
450 fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
451 evsel->name,
452 perf_evsel__strval(evsel, sample, "comm"),
453 (pid_t)perf_evsel__intval(evsel, sample, "pid"),
454 runtime,
455 perf_evsel__intval(evsel, sample, "vruntime"));
456 return 0;
457}
458
/*
 * Set up the event list (raw_syscalls enter/exit, optionally
 * sched_stat_runtime), open and mmap it, optionally fork the workload,
 * then loop dispatching samples to their handlers until 'done' is set
 * by a signal and the buffers are drained.  Cleanup is via the goto
 * chain at the bottom.
 */
static int trace__run(struct trace *trace, int argc, const char **argv)
{
	struct perf_evlist *evlist = perf_evlist__new();
	struct perf_evsel *evsel;
	int err = -1, i;
	unsigned long before;
	const bool forks = argc > 0;

	if (evlist == NULL) {
		fprintf(trace->output, "Not enough memory to run!\n");
		goto out;
	}

	if (perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_enter", trace__sys_enter) ||
	    perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_exit", trace__sys_exit)) {
		fprintf(trace->output, "Couldn't read the raw_syscalls tracepoints information!\n");
		goto out_delete_evlist;
	}

	if (trace->sched &&
	    perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
				   trace__sched_stat_runtime)) {
		fprintf(trace->output, "Couldn't read the sched_stat_runtime tracepoint information!\n");
		goto out_delete_evlist;
	}

	err = perf_evlist__create_maps(evlist, &trace->opts.target);
	if (err < 0) {
		fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
		goto out_delete_evlist;
	}

	err = trace__symbols_init(trace, evlist);
	if (err < 0) {
		fprintf(trace->output, "Problems initializing symbol libraries!\n");
		goto out_delete_maps;
	}

	perf_evlist__config(evlist, &trace->opts);

	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);

	if (forks) {
		/* Workload is prepared stopped; started after the mmaps are up. */
		err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
						    argv, false, false);
		if (err < 0) {
			fprintf(trace->output, "Couldn't run the workload!\n");
			goto out_delete_maps;
		}
	}

	err = perf_evlist__open(evlist);
	if (err < 0) {
		fprintf(trace->output, "Couldn't create the events: %s\n", strerror(errno));
		goto out_delete_maps;
	}

	err = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (err < 0) {
		fprintf(trace->output, "Couldn't mmap the events: %s\n", strerror(errno));
		goto out_close_evlist;
	}

	perf_evlist__enable(evlist);

	if (forks)
		perf_evlist__start_workload(evlist);

	trace->multiple_threads = evlist->threads->map[0] == -1 || evlist->threads->nr > 1;
again:
	before = trace->nr_events;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;

		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			const u32 type = event->header.type;
			tracepoint_handler handler;
			struct perf_sample sample;

			++trace->nr_events;

			err = perf_evlist__parse_sample(evlist, event, &sample);
			if (err) {
				fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
				continue;
			}

			/* All later timestamps are printed relative to this one. */
			if (trace->base_time == 0)
				trace->base_time = sample.time;

			if (type != PERF_RECORD_SAMPLE) {
				trace__process_event(trace, &trace->host, event);
				continue;
			}

			evsel = perf_evlist__id2evsel(evlist, sample.id);
			if (evsel == NULL) {
				fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample.id);
				continue;
			}

			if (sample.raw_data == NULL) {
				fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
				       perf_evsel__name(evsel), sample.tid,
				       sample.cpu, sample.raw_size);
				continue;
			}

			/* Dispatch to the handler registered via add_newtp(). */
			handler = evsel->handler.func;
			handler(trace, evsel, &sample);
		}
	}

	/* Nothing new this pass: block until more events arrive or we're done. */
	if (trace->nr_events == before) {
		if (done)
			goto out_unmap_evlist;

		poll(evlist->pollfd, evlist->nr_fds, -1);
	}

	/* After a signal, disable and keep draining so no events are lost. */
	if (done)
		perf_evlist__disable(evlist);

	goto again;

out_unmap_evlist:
	perf_evlist__munmap(evlist);
out_close_evlist:
	perf_evlist__close(evlist);
out_delete_maps:
	perf_evlist__delete_maps(evlist);
out_delete_evlist:
	perf_evlist__delete(evlist);
out:
	return err;
}
597
/* Print the banner above the per-thread summary table. */
static size_t trace__fprintf_threads_header(FILE *fp)
{
	static const char * const header[] = {
		"\n _____________________________________________________________________\n",
		" __) Summary of events (__\n\n",
		" [ task - pid ] [ events ] [ ratio ] [ runtime ]\n",
		" _____________________________________________________________________\n\n",
	};
	size_t printed = 0;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(header); i++)
		printed += fprintf(fp, "%s", header[i]);

	return printed;
}
609
610static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
611{
612 size_t printed = trace__fprintf_threads_header(fp);
613 struct rb_node *nd;
614
615 for (nd = rb_first(&trace->host.threads); nd; nd = rb_next(nd)) {
616 struct thread *thread = rb_entry(nd, struct thread, rb_node);
617 struct thread_trace *ttrace = thread->priv;
618 const char *color;
619 double ratio;
620
621 if (ttrace == NULL)
622 continue;
623
624 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
625
626 color = PERF_COLOR_NORMAL;
627 if (ratio > 50.0)
628 color = PERF_COLOR_RED;
629 else if (ratio > 25.0)
630 color = PERF_COLOR_GREEN;
631 else if (ratio > 5.0)
632 color = PERF_COLOR_YELLOW;
633
634 printed += color_fprintf(fp, color, "%20s", thread->comm);
635 printed += fprintf(fp, " - %-5d :%11lu [", thread->tid, ttrace->nr_events);
636 printed += color_fprintf(fp, color, "%5.1f%%", ratio);
637 printed += fprintf(fp, " ] %10.3f ms\n", ttrace->runtime_ms);
638 }
639
640 return printed;
641}
642
643static int trace__set_duration(const struct option *opt, const char *str,
644 int unset __maybe_unused)
645{
646 struct trace *trace = opt->value;
647
648 trace->duration_filter = atof(str);
649 return 0;
650}
651
652static int trace__open_output(struct trace *trace, const char *filename)
653{
654 struct stat st;
655
656 if (!stat(filename, &st) && st.st_size) {
657 char oldname[PATH_MAX];
658
659 scnprintf(oldname, sizeof(oldname), "%s.old", filename);
660 unlink(oldname);
661 rename(filename, oldname);
662 }
663
664 trace->output = fopen(filename, "w");
665
666 return trace->output == NULL ? -errno : 0;
667}
668
/*
 * 'perf trace' entry point: parse options, set up the output file and
 * the -e event qualifier, validate the target, run the trace and, with
 * --sched, print the per-thread summary.
 */
int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char * const trace_usage[] = {
		"perf trace [<options>] [<command>]",
		"perf trace [<options>] -- <command> [<options>]",
		NULL
	};
	struct trace trace = {
		.audit_machine = audit_detect_machine(),
		.syscalls = {
			/* Empty until trace__read_syscall_info() grows the table. */
			. max = -1,
		},
		.opts = {
			.target = {
				.uid	   = UINT_MAX,
				.uses_mmap = true,
			},
			.user_freq     = UINT_MAX,
			.user_interval = ULLONG_MAX,
			.no_delay      = true,
			.mmap_pages    = 1024,
		},
		.output = stdout,
	};
	const char *output_name = NULL;
	const char *ev_qualifier_str = NULL;
	const struct option trace_options[] = {
	OPT_STRING('e', "expr", &ev_qualifier_str, "expr",
		    "list of events to trace"),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
		    "trace events on existing process id"),
	OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
		    "trace events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_BOOLEAN('i', "no-inherit", &trace.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_UINTEGER('m', "mmap-pages", &trace.opts.mmap_pages,
		     "number of mmap data pages"),
	OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
		   "user to profile"),
	OPT_CALLBACK(0, "duration", &trace, "float",
		     "show only events with duration > N.M ms",
		     trace__set_duration),
	OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
	OPT_END()
	};
	int err;
	char bf[BUFSIZ];

	argc = parse_options(argc, argv, trace_options, trace_usage, 0);

	if (output_name != NULL) {
		err = trace__open_output(&trace, output_name);
		if (err < 0) {
			perror("failed to create output file");
			goto out;
		}
	}

	if (ev_qualifier_str != NULL) {
		const char *s = ev_qualifier_str;

		/* A leading '!' inverts the qualifier: trace all BUT these. */
		trace.not_ev_qualifier = *s == '!';
		if (trace.not_ev_qualifier)
			++s;
		trace.ev_qualifier = strlist__new(true, s);
		if (trace.ev_qualifier == NULL) {
			fputs("Not enough memory to parse event qualifier",
			      trace.output);
			err = -ENOMEM;
			goto out_close;
		}
	}

	err = perf_target__validate(&trace.opts.target);
	if (err) {
		perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}

	err = perf_target__parse_uid(&trace.opts.target);
	if (err) {
		perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}

	/* No workload and no target specified: trace the whole system. */
	if (!argc && perf_target__none(&trace.opts.target))
		trace.opts.target.system_wide = true;

	err = trace__run(&trace, argc, argv);

	if (trace.sched && !err)
		trace__fprintf_thread_summary(&trace, trace.output);

out_close:
	if (output_name != NULL)
		fclose(trace.output);
out:
	return err;
}