git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blobdiff - tools/perf/builtin-trace.c
perf session: Return error code for perf_session__new() function on failure
[mirror_ubuntu-hirsute-kernel.git] / tools / perf / builtin-trace.c
index 0f7d1859a2d17e27d2c5fd44301424beb268bd21..a292658b4232223a13a43514618047e32c1314ad 100644 (file)
@@ -14,6 +14,7 @@
  * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
  */
 
+#include "util/record.h"
 #include <traceevent/event-parse.h>
 #include <api/fs/tracing_path.h>
 #include <bpf/bpf.h>
 #include "util/color.h"
 #include "util/config.h"
 #include "util/debug.h"
+#include "util/dso.h"
 #include "util/env.h"
 #include "util/event.h"
+#include "util/synthetic-events.h"
 #include "util/evlist.h"
+#include "util/evswitch.h"
+#include <subcmd/pager.h>
 #include <subcmd/exec-cmd.h>
 #include "util/machine.h"
 #include "util/map.h"
@@ -39,6 +44,8 @@
 #include "util/intlist.h"
 #include "util/thread_map.h"
 #include "util/stat.h"
+#include "util/tool.h"
+#include "util/util.h"
 #include "trace/beauty/beauty.h"
 #include "trace-event.h"
 #include "util/parse-events.h"
@@ -48,6 +55,7 @@
 #include "string2.h"
 #include "syscalltbl.h"
 #include "rb_resort.h"
+#include "../perf.h"
 
 #include <errno.h>
 #include <inttypes.h>
@@ -106,6 +114,7 @@ struct trace {
        unsigned long           nr_events;
        unsigned long           nr_events_printed;
        unsigned long           max_events;
+       struct evswitch         evswitch;
        struct strlist          *ev_qualifier;
        struct {
                size_t          nr;
@@ -1379,7 +1388,7 @@ static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long l
 
        if (symbol_conf.kptr_restrict) {
                pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
-                          "Check /proc/sys/kernel/kptr_restrict.\n\n"
+                          "Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
                           "Kernel samples will not be resolved.\n");
                machine->kptr_restrict_warned = true;
                return NULL;
@@ -1404,7 +1413,7 @@ static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
                goto out;
 
        err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
-                                           evlist->threads, trace__tool_process, false,
+                                           evlist->core.threads, trace__tool_process, false,
                                            1);
 out:
        if (err)
@@ -1492,7 +1501,7 @@ static int trace__read_syscall_info(struct trace *trace, int id)
        const char *name = syscalltbl__name(trace->sctbl, id);
 
        if (trace->syscalls.table == NULL) {
-               trace->syscalls.table = calloc(trace->sctbl->syscalls.nr_entries, sizeof(*sc));
+               trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
                if (trace->syscalls.table == NULL)
                        return -ENOMEM;
        }
@@ -2046,8 +2055,8 @@ static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel,
                                    struct callchain_cursor *cursor)
 {
        struct addr_location al;
-       int max_stack = evsel->attr.sample_max_stack ?
-                       evsel->attr.sample_max_stack :
+       int max_stack = evsel->core.attr.sample_max_stack ?
+                       evsel->core.attr.sample_max_stack :
                        trace->max_stack;
        int err;
 
@@ -2400,8 +2409,8 @@ static int trace__event_handler(struct trace *trace, struct evsel *evsel,
                        ++trace->nr_events_printed;
 
                        if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
-                               perf_evsel__disable(evsel);
-                               perf_evsel__close(evsel);
+                               evsel__disable(evsel);
+                               evsel__close(evsel);
                        }
                }
        }
@@ -2462,7 +2471,7 @@ static int trace__pgfault(struct trace *trace,
        if (ttrace == NULL)
                goto out_put;
 
-       if (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
+       if (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
                ttrace->pfmaj++;
        else
                ttrace->pfmin++;
@@ -2475,7 +2484,7 @@ static int trace__pgfault(struct trace *trace,
        trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);
 
        fprintf(trace->output, "%sfault [",
-               evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
+               evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
                "maj" : "min");
 
        print_location(trace->output, sample, &al, false, true);
@@ -2523,7 +2532,7 @@ static void trace__set_base_time(struct trace *trace,
         * appears in our event stream (vfs_getname comes to mind).
         */
        if (trace->base_time == 0 && !trace->full_time &&
-           (evsel->attr.sample_type & PERF_SAMPLE_TIME))
+           (evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
                trace->base_time = sample->time;
 }
 
@@ -2616,7 +2625,7 @@ static int trace__record(struct trace *trace, int argc, const char **argv)
 
 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
 
-static bool perf_evlist__add_vfs_getname(struct evlist *evlist)
+static bool evlist__add_vfs_getname(struct evlist *evlist)
 {
        bool found = false;
        struct evsel *evsel, *tmp;
@@ -2636,7 +2645,7 @@ static bool perf_evlist__add_vfs_getname(struct evlist *evlist)
                        continue;
                }
 
-               list_del_init(&evsel->node);
+               list_del_init(&evsel->core.node);
                evsel->evlist = NULL;
                evsel__delete(evsel);
        }
@@ -2657,7 +2666,7 @@ static struct evsel *perf_evsel__new_pgfault(u64 config)
 
        event_attr_init(&attr);
 
-       evsel = perf_evsel__new(&attr);
+       evsel = evsel__new(&attr);
        if (evsel)
                evsel->handler = trace__pgfault;
 
@@ -2680,9 +2689,12 @@ static void trace__handle_event(struct trace *trace, union perf_event *event, st
                return;
        }
 
+       if (evswitch__discard(&trace->evswitch, evsel))
+               return;
+
        trace__set_base_time(trace, evsel, sample);
 
-       if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
+       if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
            sample->raw_data == NULL) {
                fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
                       perf_evsel__name(evsel), sample->tid,
@@ -2719,8 +2731,8 @@ static int trace__add_syscall_newtp(struct trace *trace)
        perf_evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
        perf_evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
 
-       perf_evlist__add(evlist, sys_enter);
-       perf_evlist__add(evlist, sys_exit);
+       evlist__add(evlist, sys_enter);
+       evlist__add(evlist, sys_exit);
 
        if (callchain_param.enabled && !trace->kernel_syscallchains) {
                /*
@@ -2728,7 +2740,7 @@ static int trace__add_syscall_newtp(struct trace *trace)
                 * leading to the syscall, allow overriding that for
                 * debugging reasons using --kernel_syscall_callchains
                 */
-               sys_exit->attr.exclude_callchain_kernel = 1;
+               sys_exit->core.attr.exclude_callchain_kernel = 1;
        }
 
        trace->syscalls.events.sys_enter = sys_enter;
@@ -3183,7 +3195,7 @@ static int trace__set_filter_pids(struct trace *trace)
                        err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
                                                       trace->filter_pids.entries);
                }
-       } else if (thread_map__pid(trace->evlist->threads, 0) == -1) {
+       } else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
                err = trace__set_filter_loop_pids(trace);
        }
 
@@ -3264,7 +3276,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
                        goto out_error_raw_syscalls;
 
                if (trace->trace_syscalls)
-                       trace->vfs_getname = perf_evlist__add_vfs_getname(evlist);
+                       trace->vfs_getname = evlist__add_vfs_getname(evlist);
        }
 
        if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
@@ -3272,7 +3284,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
                if (pgfault_maj == NULL)
                        goto out_error_mem;
                perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
-               perf_evlist__add(evlist, pgfault_maj);
+               evlist__add(evlist, pgfault_maj);
        }
 
        if ((trace->trace_pgfaults & TRACE_PFMIN)) {
@@ -3280,7 +3292,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
                if (pgfault_min == NULL)
                        goto out_error_mem;
                perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
-               perf_evlist__add(evlist, pgfault_min);
+               evlist__add(evlist, pgfault_min);
        }
 
        if (trace->sched &&
@@ -3342,7 +3354,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
                }
        }
 
-       err = perf_evlist__open(evlist);
+       err = evlist__open(evlist);
        if (err < 0)
                goto out_error_open;
 
@@ -3402,30 +3414,30 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
                goto out_error_mmap;
 
        if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
-               perf_evlist__enable(evlist);
+               evlist__enable(evlist);
 
        if (forks)
                perf_evlist__start_workload(evlist);
 
        if (trace->opts.initial_delay) {
                usleep(trace->opts.initial_delay * 1000);
-               perf_evlist__enable(evlist);
+               evlist__enable(evlist);
        }
 
-       trace->multiple_threads = thread_map__pid(evlist->threads, 0) == -1 ||
-                                 evlist->threads->nr > 1 ||
-                                 perf_evlist__first(evlist)->attr.inherit;
+       trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
+                                 evlist->core.threads->nr > 1 ||
+                                 perf_evlist__first(evlist)->core.attr.inherit;
 
        /*
-        * Now that we already used evsel->attr to ask the kernel to setup the
-        * events, lets reuse evsel->attr.sample_max_stack as the limit in
+        * Now that we already used evsel->core.attr to ask the kernel to setup the
+        * events, lets reuse evsel->core.attr.sample_max_stack as the limit in
         * trace__resolve_callchain(), allowing per-event max-stack settings
         * to override an explicitly set --max-stack global setting.
         */
        evlist__for_each_entry(evlist, evsel) {
                if (evsel__has_callchain(evsel) &&
-                   evsel->attr.sample_max_stack == 0)
-                       evsel->attr.sample_max_stack = trace->max_stack;
+                   evsel->core.attr.sample_max_stack == 0)
+                       evsel->core.attr.sample_max_stack = trace->max_stack;
        }
 again:
        before = trace->nr_events;
@@ -3451,7 +3463,7 @@ again:
                                goto out_disable;
 
                        if (done && !draining) {
-                               perf_evlist__disable(evlist);
+                               evlist__disable(evlist);
                                draining = true;
                        }
                }
@@ -3477,7 +3489,7 @@ again:
 out_disable:
        thread__zput(trace->current);
 
-       perf_evlist__disable(evlist);
+       evlist__disable(evlist);
 
        if (trace->sort_events)
                ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
@@ -3573,8 +3585,8 @@ static int trace__replay(struct trace *trace)
        trace->multiple_threads = true;
 
        session = perf_session__new(&data, false, &trace->tool);
-       if (session == NULL)
-               return -1;
+       if (IS_ERR(session))
+               return PTR_ERR(session);
 
        if (trace->opts.target.pid)
                symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
@@ -3618,10 +3630,10 @@ static int trace__replay(struct trace *trace)
        }
 
        evlist__for_each_entry(session->evlist, evsel) {
-               if (evsel->attr.type == PERF_TYPE_SOFTWARE &&
-                   (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
-                    evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
-                    evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS))
+               if (evsel->core.attr.type == PERF_TYPE_SOFTWARE &&
+                   (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
+                    evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
+                    evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS))
                        evsel->handler = trace__pgfault;
        }
 
@@ -3980,7 +3992,7 @@ static int trace__parse_cgroups(const struct option *opt, const char *str, int u
 {
        struct trace *trace = opt->value;
 
-       if (!list_empty(&trace->evlist->entries))
+       if (!list_empty(&trace->evlist->core.entries))
                return parse_cgroups(opt, str, unset);
 
        trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
@@ -4157,6 +4169,7 @@ int cmd_trace(int argc, const char **argv)
        OPT_UINTEGER('D', "delay", &trace.opts.initial_delay,
                     "ms to wait before starting measurement after program "
                     "start"),
+       OPTS_EVSWITCH(&trace.evswitch),
        OPT_END()
        };
        bool __maybe_unused max_stack_user_set = true;
@@ -4270,7 +4283,7 @@ int cmd_trace(int argc, const char **argv)
                symbol_conf.use_callchain = true;
        }
 
-       if (trace.evlist->nr_entries > 0) {
+       if (trace.evlist->core.nr_entries > 0) {
                evlist__set_evsel_handler(trace.evlist, trace__event_handler);
                if (evlist__set_syscall_tp_fields(trace.evlist)) {
                        perror("failed to set syscalls:* tracepoint fields");
@@ -4368,7 +4381,7 @@ init_augmented_syscall_tp:
                trace.summary = trace.summary_only;
 
        if (!trace.trace_syscalls && !trace.trace_pgfaults &&
-           trace.evlist->nr_entries == 0 /* Was --events used? */) {
+           trace.evlist->core.nr_entries == 0 /* Was --events used? */) {
                trace.trace_syscalls = true;
        }
 
@@ -4380,6 +4393,10 @@ init_augmented_syscall_tp:
                }
        }
 
+       err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
+       if (err)
+               goto out_close;
+
        err = target__validate(&trace.opts.target);
        if (err) {
                target__strerror(&trace.opts.target, err, bf, sizeof(bf));