perf evsel: Provide meaningful warning when trying to use 'aux_output' on older kernels

diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 44421bbebd640d6a0821f25b717fdcedc1ba4091..0a33f7322eccbd338ac129b59a185ff0baf4b920 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -22,6 +22,7 @@
 #include <sys/resource.h>
 #include <sys/types.h>
 #include <dirent.h>
+#include <perf/evsel.h>
 #include "asm/bug.h"
 #include "callchain.h"
 #include "cgroup.h"
@@ -89,7 +90,7 @@ set_methods:
        return 0;
 }
 
-#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
+#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
 
 int __perf_evsel__sample_size(u64 sample_type)
 {
@@ -169,15 +170,15 @@ static int __perf_evsel__calc_is_pos(u64 sample_type)
 
 void perf_evsel__calc_id_pos(struct evsel *evsel)
 {
-       evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
-       evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
+       evsel->id_pos = __perf_evsel__calc_id_pos(evsel->core.attr.sample_type);
+       evsel->is_pos = __perf_evsel__calc_is_pos(evsel->core.attr.sample_type);
 }
 
 void __perf_evsel__set_sample_bit(struct evsel *evsel,
                                  enum perf_event_sample_format bit)
 {
-       if (!(evsel->attr.sample_type & bit)) {
-               evsel->attr.sample_type |= bit;
+       if (!(evsel->core.attr.sample_type & bit)) {
+               evsel->core.attr.sample_type |= bit;
                evsel->sample_size += sizeof(u64);
                perf_evsel__calc_id_pos(evsel);
        }
@@ -186,8 +187,8 @@ void __perf_evsel__set_sample_bit(struct evsel *evsel,
 void __perf_evsel__reset_sample_bit(struct evsel *evsel,
                                    enum perf_event_sample_format bit)
 {
-       if (evsel->attr.sample_type & bit) {
-               evsel->attr.sample_type &= ~bit;
+       if (evsel->core.attr.sample_type & bit) {
+               evsel->core.attr.sample_type &= ~bit;
                evsel->sample_size -= sizeof(u64);
                perf_evsel__calc_id_pos(evsel);
        }
@@ -202,7 +203,7 @@ void perf_evsel__set_sample_id(struct evsel *evsel,
        } else {
                perf_evsel__set_sample_bit(evsel, ID);
        }
-       evsel->attr.read_format |= PERF_FORMAT_ID;
+       evsel->core.attr.read_format |= PERF_FORMAT_ID;
 }
 
 /**
@@ -223,12 +224,12 @@ bool perf_evsel__is_function_event(struct evsel *evsel)
 #undef FUNCTION_EVENT
 }
 
-void perf_evsel__init(struct evsel *evsel,
-                     struct perf_event_attr *attr, int idx)
+void evsel__init(struct evsel *evsel,
+                struct perf_event_attr *attr, int idx)
 {
+       perf_evsel__init(&evsel->core, attr);
        evsel->idx         = idx;
        evsel->tracking    = !idx;
-       evsel->attr        = *attr;
        evsel->leader      = evsel;
        evsel->unit        = "";
        evsel->scale       = 1.0;
@@ -236,7 +237,6 @@ void perf_evsel__init(struct evsel *evsel,
        evsel->evlist      = NULL;
        evsel->bpf_obj     = NULL;
        evsel->bpf_fd      = -1;
-       INIT_LIST_HEAD(&evsel->node);
        INIT_LIST_HEAD(&evsel->config_terms);
        perf_evsel__object.init(evsel);
        evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
@@ -255,12 +255,12 @@ struct evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
 
        if (!evsel)
                return NULL;
-       perf_evsel__init(evsel, attr, idx);
+       evsel__init(evsel, attr, idx);
 
        if (perf_evsel__is_bpf_output(evsel)) {
-               evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
+               evsel->core.attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
                                            PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
-               evsel->attr.sample_period = 1;
+               evsel->core.attr.sample_period = 1;
        }
 
        if (perf_evsel__is_clock(evsel)) {
@@ -301,7 +301,7 @@ struct evsel *perf_evsel__new_cycles(bool precise)
         * to kick in when we return and before perf_evsel__open() is called.
         */
 new_event:
-       evsel = perf_evsel__new(&attr);
+       evsel = evsel__new(&attr);
        if (evsel == NULL)
                goto out;
 
@@ -316,7 +316,7 @@ new_event:
 out:
        return evsel;
 error_free:
-       perf_evsel__delete(evsel);
+       evsel__delete(evsel);
        evsel = NULL;
        goto out;
 }
@@ -350,7 +350,7 @@ struct evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
                event_attr_init(&attr);
                attr.config = evsel->tp_format->id;
                attr.sample_period = 1;
-               perf_evsel__init(evsel, &attr, idx);
+               evsel__init(evsel, &attr, idx);
        }
 
        return evsel;
@@ -386,7 +386,7 @@ static const char *__perf_evsel__hw_name(u64 config)
 static int perf_evsel__add_modifiers(struct evsel *evsel, char *bf, size_t size)
 {
        int colon = 0, r = 0;
-       struct perf_event_attr *attr = &evsel->attr;
+       struct perf_event_attr *attr = &evsel->core.attr;
        bool exclude_guest_default = false;
 
 #define MOD_PRINT(context, mod)        do {                                    \
@@ -421,7 +421,7 @@ static int perf_evsel__add_modifiers(struct evsel *evsel, char *bf, size_t size)
 
 static int perf_evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
 {
-       int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
+       int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->core.attr.config));
        return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
 }
 
@@ -447,7 +447,7 @@ static const char *__perf_evsel__sw_name(u64 config)
 
 static int perf_evsel__sw_name(struct evsel *evsel, char *bf, size_t size)
 {
-       int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
+       int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->core.attr.config));
        return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
 }
 
@@ -471,7 +471,7 @@ static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
 
 static int perf_evsel__bp_name(struct evsel *evsel, char *bf, size_t size)
 {
-       struct perf_event_attr *attr = &evsel->attr;
+       struct perf_event_attr *attr = &evsel->core.attr;
        int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
        return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
 }
@@ -571,13 +571,13 @@ out_err:
 
 static int perf_evsel__hw_cache_name(struct evsel *evsel, char *bf, size_t size)
 {
-       int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
+       int ret = __perf_evsel__hw_cache_name(evsel->core.attr.config, bf, size);
        return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
 }
 
 static int perf_evsel__raw_name(struct evsel *evsel, char *bf, size_t size)
 {
-       int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
+       int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->core.attr.config);
        return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
 }
 
@@ -597,7 +597,7 @@ const char *perf_evsel__name(struct evsel *evsel)
        if (evsel->name)
                return evsel->name;
 
-       switch (evsel->attr.type) {
+       switch (evsel->core.attr.type) {
        case PERF_TYPE_RAW:
                perf_evsel__raw_name(evsel, bf, sizeof(bf));
                break;
@@ -627,7 +627,7 @@ const char *perf_evsel__name(struct evsel *evsel)
 
        default:
                scnprintf(bf, sizeof(bf), "unknown attr type: %d",
-                         evsel->attr.type);
+                         evsel->core.attr.type);
                break;
        }
 
@@ -681,7 +681,7 @@ static void __perf_evsel__config_callchain(struct evsel *evsel,
                                           struct callchain_param *param)
 {
        bool function = perf_evsel__is_function_event(evsel);
-       struct perf_event_attr *attr = &evsel->attr;
+       struct perf_event_attr *attr = &evsel->core.attr;
 
        perf_evsel__set_sample_bit(evsel, CALLCHAIN);
 
@@ -747,7 +747,7 @@ static void
 perf_evsel__reset_callgraph(struct evsel *evsel,
                            struct callchain_param *param)
 {
-       struct perf_event_attr *attr = &evsel->attr;
+       struct perf_event_attr *attr = &evsel->core.attr;
 
        perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
        if (param->record_mode == CALLCHAIN_LBR) {
@@ -766,7 +766,7 @@ static void apply_config_terms(struct evsel *evsel,
 {
        struct perf_evsel_config_term *term;
        struct list_head *config_terms = &evsel->config_terms;
-       struct perf_event_attr *attr = &evsel->attr;
+       struct perf_event_attr *attr = &evsel->core.attr;
        /* callgraph default */
        struct callchain_param param = {
                .record_mode = callchain_param.record_mode,
@@ -833,6 +833,9 @@ static void apply_config_terms(struct evsel *evsel,
                        break;
                case PERF_EVSEL__CONFIG_TERM_PERCORE:
                        break;
+               case PERF_EVSEL__CONFIG_TERM_AUX_OUTPUT:
+                       attr->aux_output = term->val.aux_output ? 1 : 0;
+                       break;
                default:
                        break;
                }
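
A brief aside, not part of the patch: the new PERF_EVSEL__CONFIG_TERM_AUX_OUTPUT case above consumes a config term queued on evsel->config_terms before apply_config_terms() runs. A minimal sketch of how such a term could be queued is shown below, written as if it lived in this file and relied on its existing includes; the helper name is hypothetical, and the 'type' and 'list' member names are assumptions based on the companion evsel.h change in this series (only 'val.aux_output' is visible in this hunk).

	/* Hypothetical helper: request aux_output via a config term. */
	static int evsel__request_aux_output(struct evsel *evsel)
	{
		struct perf_evsel_config_term *term = zalloc(sizeof(*term));

		if (!term)
			return -ENOMEM;

		term->type = PERF_EVSEL__CONFIG_TERM_AUX_OUTPUT; /* assumed tag member name */
		term->val.aux_output = true;
		list_add_tail(&term->list, &evsel->config_terms); /* picked up by the case above */
		return 0;
	}
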
@@ -879,7 +882,7 @@ static void apply_config_terms(struct evsel *evsel,
                        if (sample_address) {
                                perf_evsel__set_sample_bit(evsel, ADDR);
                                perf_evsel__set_sample_bit(evsel, DATA_SRC);
-                               evsel->attr.mmap_data = track;
+                               evsel->core.attr.mmap_data = track;
                        }
                        perf_evsel__config_callchain(evsel, opts, &param);
                }
@@ -888,8 +891,8 @@ static void apply_config_terms(struct evsel *evsel,
 
 static bool is_dummy_event(struct evsel *evsel)
 {
-       return (evsel->attr.type == PERF_TYPE_SOFTWARE) &&
-              (evsel->attr.config == PERF_COUNT_SW_DUMMY);
+       return (evsel->core.attr.type == PERF_TYPE_SOFTWARE) &&
+              (evsel->core.attr.config == PERF_COUNT_SW_DUMMY);
 }
 
 /*
@@ -924,7 +927,7 @@ void perf_evsel__config(struct evsel *evsel, struct record_opts *opts,
                        struct callchain_param *callchain)
 {
        struct evsel *leader = evsel->leader;
-       struct perf_event_attr *attr = &evsel->attr;
+       struct perf_event_attr *attr = &evsel->core.attr;
        int track = evsel->tracking;
        bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;
 
@@ -948,7 +951,7 @@ void perf_evsel__config(struct evsel *evsel, struct record_opts *opts,
                 * Apply group format only if we belong to group
                 * with more than one members.
                 */
-               if (leader->nr_members > 1) {
+               if (leader->core.nr_members > 1) {
                        attr->read_format |= PERF_FORMAT_GROUP;
                        attr->inherit = 0;
                }
@@ -985,14 +988,14 @@ void perf_evsel__config(struct evsel *evsel, struct record_opts *opts,
                 * event to follow the master sample_type to ease up
                 * report.
                 */
-               attr->sample_type = leader->attr.sample_type;
+               attr->sample_type = leader->core.attr.sample_type;
        }
 
        if (opts->no_samples)
                attr->sample_freq = 0;
 
        if (opts->inherit_stat) {
-               evsel->attr.read_format |=
+               evsel->core.attr.read_format |=
                        PERF_FORMAT_TOTAL_TIME_ENABLED |
                        PERF_FORMAT_TOTAL_TIME_RUNNING |
                        PERF_FORMAT_ID;
@@ -1010,7 +1013,7 @@ void perf_evsel__config(struct evsel *evsel, struct record_opts *opts,
         * fault handler and its overall trickiness nature.
         */
        if (perf_evsel__is_function_event(evsel))
-               evsel->attr.exclude_callchain_user = 1;
+               evsel->core.attr.exclude_callchain_user = 1;
 
        if (callchain && callchain->enabled && !evsel->no_aux_samples)
                perf_evsel__config_callchain(evsel, opts, callchain);
@@ -1079,7 +1082,7 @@ void perf_evsel__config(struct evsel *evsel, struct record_opts *opts,
                perf_evsel__set_sample_bit(evsel, TRANSACTION);
 
        if (opts->running_time) {
-               evsel->attr.read_format |=
+               evsel->core.attr.read_format |=
                        PERF_FORMAT_TOTAL_TIME_ENABLED |
                        PERF_FORMAT_TOTAL_TIME_RUNNING;
        }
@@ -1125,8 +1128,8 @@ void perf_evsel__config(struct evsel *evsel, struct record_opts *opts,
                attr->exclude_user   = 1;
        }
 
-       if (evsel->own_cpus || evsel->unit)
-               evsel->attr.read_format |= PERF_FORMAT_ID;
+       if (evsel->core.own_cpus || evsel->unit)
+               evsel->core.attr.read_format |= PERF_FORMAT_ID;
 
        /*
         * Apply event specific term settings,
@@ -1153,47 +1156,6 @@ void perf_evsel__config(struct evsel *evsel, struct record_opts *opts,
                perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
 }
 
-static int perf_evsel__alloc_fd(struct evsel *evsel, int ncpus, int nthreads)
-{
-       evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
-
-       if (evsel->fd) {
-               int cpu, thread;
-               for (cpu = 0; cpu < ncpus; cpu++) {
-                       for (thread = 0; thread < nthreads; thread++) {
-                               FD(evsel, cpu, thread) = -1;
-                       }
-               }
-       }
-
-       return evsel->fd != NULL ? 0 : -ENOMEM;
-}
-
-static int perf_evsel__run_ioctl(struct evsel *evsel,
-                         int ioc,  void *arg)
-{
-       int cpu, thread;
-
-       for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
-               for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
-                       int fd = FD(evsel, cpu, thread),
-                           err = ioctl(fd, ioc, arg);
-
-                       if (err)
-                               return err;
-               }
-       }
-
-       return 0;
-}
-
-int perf_evsel__apply_filter(struct evsel *evsel, const char *filter)
-{
-       return perf_evsel__run_ioctl(evsel,
-                                    PERF_EVENT_IOC_SET_FILTER,
-                                    (void *)filter);
-}
-
 int perf_evsel__set_filter(struct evsel *evsel, const char *filter)
 {
        char *new_filter = strdup(filter);
@@ -1234,9 +1196,9 @@ int perf_evsel__append_addr_filter(struct evsel *evsel, const char *filter)
        return perf_evsel__append_filter(evsel, "%s,%s", filter);
 }
 
-int perf_evsel__enable(struct evsel *evsel)
+int evsel__enable(struct evsel *evsel)
 {
-       int err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, 0);
+       int err = perf_evsel__enable(&evsel->core);
 
        if (!err)
                evsel->disabled = false;
@@ -1244,9 +1206,9 @@ int perf_evsel__enable(struct evsel *evsel)
        return err;
 }
 
-int perf_evsel__disable(struct evsel *evsel)
+int evsel__disable(struct evsel *evsel)
 {
-       int err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, 0);
+       int err = perf_evsel__disable(&evsel->core);
        /*
         * We mark it disabled here so that tools that disable a event can
         * ignore events after they disable it. I.e. the ring buffer may have
@@ -1281,12 +1243,6 @@ int perf_evsel__alloc_id(struct evsel *evsel, int ncpus, int nthreads)
        return 0;
 }
 
-static void perf_evsel__free_fd(struct evsel *evsel)
-{
-       xyarray__delete(evsel->fd);
-       evsel->fd = NULL;
-}
-
 static void perf_evsel__free_id(struct evsel *evsel)
 {
        xyarray__delete(evsel->sample_id);
@@ -1305,35 +1261,24 @@ static void perf_evsel__free_config_terms(struct evsel *evsel)
        }
 }
 
-void perf_evsel__close_fd(struct evsel *evsel)
-{
-       int cpu, thread;
-
-       for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
-               for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
-                       close(FD(evsel, cpu, thread));
-                       FD(evsel, cpu, thread) = -1;
-               }
-}
-
 void perf_evsel__exit(struct evsel *evsel)
 {
-       assert(list_empty(&evsel->node));
+       assert(list_empty(&evsel->core.node));
        assert(evsel->evlist == NULL);
        perf_evsel__free_counts(evsel);
-       perf_evsel__free_fd(evsel);
+       perf_evsel__free_fd(&evsel->core);
        perf_evsel__free_id(evsel);
        perf_evsel__free_config_terms(evsel);
        cgroup__put(evsel->cgrp);
-       cpu_map__put(evsel->cpus);
-       cpu_map__put(evsel->own_cpus);
-       thread_map__put(evsel->threads);
+       perf_cpu_map__put(evsel->core.cpus);
+       perf_cpu_map__put(evsel->core.own_cpus);
+       perf_thread_map__put(evsel->core.threads);
        zfree(&evsel->group_name);
        zfree(&evsel->name);
        perf_evsel__object.fini(evsel);
 }
 
-void perf_evsel__delete(struct evsel *evsel)
+void evsel__delete(struct evsel *evsel)
 {
        perf_evsel__exit(evsel);
        free(evsel);
@@ -1379,53 +1324,12 @@ void perf_counts_values__scale(struct perf_counts_values *count,
                *pscaled = scaled;
 }
 
-static int perf_evsel__read_size(struct evsel *evsel)
-{
-       u64 read_format = evsel->attr.read_format;
-       int entry = sizeof(u64); /* value */
-       int size = 0;
-       int nr = 1;
-
-       if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
-               size += sizeof(u64);
-
-       if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
-               size += sizeof(u64);
-
-       if (read_format & PERF_FORMAT_ID)
-               entry += sizeof(u64);
-
-       if (read_format & PERF_FORMAT_GROUP) {
-               nr = evsel->nr_members;
-               size += sizeof(u64);
-       }
-
-       size += entry * nr;
-       return size;
-}
-
-int perf_evsel__read(struct evsel *evsel, int cpu, int thread,
-                    struct perf_counts_values *count)
-{
-       size_t size = perf_evsel__read_size(evsel);
-
-       memset(count, 0, sizeof(*count));
-
-       if (FD(evsel, cpu, thread) < 0)
-               return -EINVAL;
-
-       if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
-               return -errno;
-
-       return 0;
-}
-
 static int
 perf_evsel__read_one(struct evsel *evsel, int cpu, int thread)
 {
        struct perf_counts_values *count = perf_counts(evsel->counts, cpu, thread);
 
-       return perf_evsel__read(evsel, cpu, thread, count);
+       return perf_evsel__read(&evsel->core, cpu, thread, count);
 }
 
 static void
@@ -1447,13 +1351,13 @@ static int
 perf_evsel__process_group_data(struct evsel *leader,
                               int cpu, int thread, u64 *data)
 {
-       u64 read_format = leader->attr.read_format;
+       u64 read_format = leader->core.attr.read_format;
        struct sample_read_value *v;
        u64 nr, ena = 0, run = 0, i;
 
        nr = *data++;
 
-       if (nr != (u64) leader->nr_members)
+       if (nr != (u64) leader->core.nr_members)
                return -EINVAL;
 
        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
@@ -1485,8 +1389,8 @@ static int
 perf_evsel__read_group(struct evsel *leader, int cpu, int thread)
 {
        struct perf_stat_evsel *ps = leader->stats;
-       u64 read_format = leader->attr.read_format;
-       int size = perf_evsel__read_size(leader);
+       u64 read_format = leader->core.attr.read_format;
+       int size = perf_evsel__read_size(&leader->core);
        u64 *data = ps->group_data;
 
        if (!(read_format & PERF_FORMAT_ID))
@@ -1514,7 +1418,7 @@ perf_evsel__read_group(struct evsel *leader, int cpu, int thread)
 
 int perf_evsel__read_counter(struct evsel *evsel, int cpu, int thread)
 {
-       u64 read_format = evsel->attr.read_format;
+       u64 read_format = evsel->core.attr.read_format;
 
        if (read_format & PERF_FORMAT_GROUP)
                return perf_evsel__read_group(evsel, cpu, thread);
@@ -1555,7 +1459,7 @@ static int get_group_fd(struct evsel *evsel, int cpu, int thread)
         * Leader must be already processed/open,
         * if not it's a bug.
         */
-       BUG_ON(!leader->fd);
+       BUG_ON(!leader->core.fd);
 
        fd = FD(leader, cpu, thread);
        BUG_ON(fd == -1);
@@ -1686,6 +1590,7 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
        PRINT_ATTRf(namespaces, p_unsigned);
        PRINT_ATTRf(ksymbol, p_unsigned);
        PRINT_ATTRf(bpf_event, p_unsigned);
+       PRINT_ATTRf(aux_output, p_unsigned);
 
        PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
        PRINT_ATTRf(bp_type, p_unsigned);
@@ -1792,14 +1697,14 @@ static int perf_event_open(struct evsel *evsel,
                           pid_t pid, int cpu, int group_fd,
                           unsigned long flags)
 {
-       int precise_ip = evsel->attr.precise_ip;
+       int precise_ip = evsel->core.attr.precise_ip;
        int fd;
 
        while (1) {
                pr_debug2("sys_perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx",
                          pid, cpu, group_fd, flags);
 
-               fd = sys_perf_event_open(&evsel->attr, pid, cpu, group_fd, flags);
+               fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, group_fd, flags);
                if (fd >= 0)
                        break;
 
@@ -1811,36 +1716,37 @@ static int perf_event_open(struct evsel *evsel,
                 * We tried all the precise_ip values, and it's
                 * still failing, so leave it to standard fallback.
                 */
-               if (!evsel->attr.precise_ip) {
-                       evsel->attr.precise_ip = precise_ip;
+               if (!evsel->core.attr.precise_ip) {
+                       evsel->core.attr.precise_ip = precise_ip;
                        break;
                }
 
                pr_debug2("\nsys_perf_event_open failed, error %d\n", -ENOTSUP);
-               evsel->attr.precise_ip--;
-               pr_debug2("decreasing precise_ip by one (%d)\n", evsel->attr.precise_ip);
-               display_attr(&evsel->attr);
+               evsel->core.attr.precise_ip--;
+               pr_debug2("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip);
+               display_attr(&evsel->core.attr);
        }
 
        return fd;
 }
 
-int perf_evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
-                    struct perf_thread_map *threads)
+int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
+               struct perf_thread_map *threads)
 {
        int cpu, thread, nthreads;
        unsigned long flags = PERF_FLAG_FD_CLOEXEC;
        int pid = -1, err;
        enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
 
-       if (perf_missing_features.write_backward && evsel->attr.write_backward)
+       if ((perf_missing_features.write_backward && evsel->core.attr.write_backward) ||
+           (perf_missing_features.aux_output     && evsel->core.attr.aux_output))
                return -EINVAL;
 
        if (cpus == NULL) {
                static struct perf_cpu_map *empty_cpu_map;
 
                if (empty_cpu_map == NULL) {
-                       empty_cpu_map = cpu_map__dummy_new();
+                       empty_cpu_map = perf_cpu_map__dummy_new();
                        if (empty_cpu_map == NULL)
                                return -ENOMEM;
                }
@@ -1865,8 +1771,8 @@ int perf_evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
        else
                nthreads = threads->nr;
 
-       if (evsel->fd == NULL &&
-           perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0)
+       if (evsel->core.fd == NULL &&
+           perf_evsel__alloc_fd(&evsel->core, cpus->nr, nthreads) < 0)
                return -ENOMEM;
 
        if (evsel->cgrp) {
@@ -1876,31 +1782,31 @@ int perf_evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
 
 fallback_missing_features:
        if (perf_missing_features.clockid_wrong)
-               evsel->attr.clockid = CLOCK_MONOTONIC; /* should always work */
+               evsel->core.attr.clockid = CLOCK_MONOTONIC; /* should always work */
        if (perf_missing_features.clockid) {
-               evsel->attr.use_clockid = 0;
-               evsel->attr.clockid = 0;
+               evsel->core.attr.use_clockid = 0;
+               evsel->core.attr.clockid = 0;
        }
        if (perf_missing_features.cloexec)
                flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
        if (perf_missing_features.mmap2)
-               evsel->attr.mmap2 = 0;
+               evsel->core.attr.mmap2 = 0;
        if (perf_missing_features.exclude_guest)
-               evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
+               evsel->core.attr.exclude_guest = evsel->core.attr.exclude_host = 0;
        if (perf_missing_features.lbr_flags)
-               evsel->attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
+               evsel->core.attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
                                     PERF_SAMPLE_BRANCH_NO_CYCLES);
-       if (perf_missing_features.group_read && evsel->attr.inherit)
-               evsel->attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID);
+       if (perf_missing_features.group_read && evsel->core.attr.inherit)
+               evsel->core.attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID);
        if (perf_missing_features.ksymbol)
-               evsel->attr.ksymbol = 0;
+               evsel->core.attr.ksymbol = 0;
        if (perf_missing_features.bpf_event)
-               evsel->attr.bpf_event = 0;
+               evsel->core.attr.bpf_event = 0;
 retry_sample_id:
        if (perf_missing_features.sample_id_all)
-               evsel->attr.sample_id_all = 0;
+               evsel->core.attr.sample_id_all = 0;
 
-       display_attr(&evsel->attr);
+       display_attr(&evsel->core.attr);
 
        for (cpu = 0; cpu < cpus->nr; cpu++) {
 
@@ -2007,23 +1913,27 @@ try_fallback:
         * Must probe features in the order they were added to the
         * perf_event_attr interface.
         */
-       if (!perf_missing_features.bpf_event && evsel->attr.bpf_event) {
+       if (!perf_missing_features.aux_output && evsel->core.attr.aux_output) {
+               perf_missing_features.aux_output = true;
+               pr_debug2("Kernel has no attr.aux_output support, bailing out\n");
+               goto out_close;
+       } else if (!perf_missing_features.bpf_event && evsel->core.attr.bpf_event) {
                perf_missing_features.bpf_event = true;
                pr_debug2("switching off bpf_event\n");
                goto fallback_missing_features;
-       } else if (!perf_missing_features.ksymbol && evsel->attr.ksymbol) {
+       } else if (!perf_missing_features.ksymbol && evsel->core.attr.ksymbol) {
                perf_missing_features.ksymbol = true;
                pr_debug2("switching off ksymbol\n");
                goto fallback_missing_features;
-       } else if (!perf_missing_features.write_backward && evsel->attr.write_backward) {
+       } else if (!perf_missing_features.write_backward && evsel->core.attr.write_backward) {
                perf_missing_features.write_backward = true;
                pr_debug2("switching off write_backward\n");
                goto out_close;
-       } else if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
+       } else if (!perf_missing_features.clockid_wrong && evsel->core.attr.use_clockid) {
                perf_missing_features.clockid_wrong = true;
                pr_debug2("switching off clockid\n");
                goto fallback_missing_features;
-       } else if (!perf_missing_features.clockid && evsel->attr.use_clockid) {
+       } else if (!perf_missing_features.clockid && evsel->core.attr.use_clockid) {
                perf_missing_features.clockid = true;
                pr_debug2("switching off use_clockid\n");
                goto fallback_missing_features;
@@ -2031,12 +1941,12 @@ try_fallback:
                perf_missing_features.cloexec = true;
                pr_debug2("switching off cloexec flag\n");
                goto fallback_missing_features;
-       } else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
+       } else if (!perf_missing_features.mmap2 && evsel->core.attr.mmap2) {
                perf_missing_features.mmap2 = true;
                pr_debug2("switching off mmap2\n");
                goto fallback_missing_features;
        } else if (!perf_missing_features.exclude_guest &&
-                  (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
+                  (evsel->core.attr.exclude_guest || evsel->core.attr.exclude_host)) {
                perf_missing_features.exclude_guest = true;
                pr_debug2("switching off exclude_guest, exclude_host\n");
                goto fallback_missing_features;
@@ -2045,15 +1955,15 @@ try_fallback:
                pr_debug2("switching off sample_id_all\n");
                goto retry_sample_id;
        } else if (!perf_missing_features.lbr_flags &&
-                       (evsel->attr.branch_sample_type &
+                       (evsel->core.attr.branch_sample_type &
                         (PERF_SAMPLE_BRANCH_NO_CYCLES |
                          PERF_SAMPLE_BRANCH_NO_FLAGS))) {
                perf_missing_features.lbr_flags = true;
                pr_debug2("switching off branch sample type no (cycles/flags)\n");
                goto fallback_missing_features;
        } else if (!perf_missing_features.group_read &&
-                   evsel->attr.inherit &&
-                  (evsel->attr.read_format & PERF_FORMAT_GROUP) &&
+                   evsel->core.attr.inherit &&
+                  (evsel->core.attr.read_format & PERF_FORMAT_GROUP) &&
                   perf_evsel__is_group_leader(evsel)) {
                perf_missing_features.group_read = true;
                pr_debug2("switching off group read\n");
@@ -2073,33 +1983,29 @@ out_close:
        return err;
 }
 
-void perf_evsel__close(struct evsel *evsel)
+void evsel__close(struct evsel *evsel)
 {
-       if (evsel->fd == NULL)
-               return;
-
-       perf_evsel__close_fd(evsel);
-       perf_evsel__free_fd(evsel);
+       perf_evsel__close(&evsel->core);
        perf_evsel__free_id(evsel);
 }
 
 int perf_evsel__open_per_cpu(struct evsel *evsel,
                             struct perf_cpu_map *cpus)
 {
-       return perf_evsel__open(evsel, cpus, NULL);
+       return evsel__open(evsel, cpus, NULL);
 }
 
 int perf_evsel__open_per_thread(struct evsel *evsel,
                                struct perf_thread_map *threads)
 {
-       return perf_evsel__open(evsel, NULL, threads);
+       return evsel__open(evsel, NULL, threads);
 }
 
 static int perf_evsel__parse_id_sample(const struct evsel *evsel,
                                       const union perf_event *event,
                                       struct perf_sample *sample)
 {
-       u64 type = evsel->attr.sample_type;
+       u64 type = evsel->core.attr.sample_type;
        const u64 *array = event->sample.array;
        bool swapped = evsel->needs_swap;
        union u64_swap u;
@@ -2188,7 +2094,7 @@ perf_event__check_size(union perf_event *event, unsigned int sample_size)
 int perf_evsel__parse_sample(struct evsel *evsel, union perf_event *event,
                             struct perf_sample *data)
 {
-       u64 type = evsel->attr.sample_type;
+       u64 type = evsel->core.attr.sample_type;
        bool swapped = evsel->needs_swap;
        const u64 *array;
        u16 max_size = event->header.size;
@@ -2204,14 +2110,14 @@ int perf_evsel__parse_sample(struct evsel *evsel, union perf_event *event,
        memset(data, 0, sizeof(*data));
        data->cpu = data->pid = data->tid = -1;
        data->stream_id = data->id = data->time = -1ULL;
-       data->period = evsel->attr.sample_period;
+       data->period = evsel->core.attr.sample_period;
        data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
        data->misc    = event->header.misc;
        data->id = -1ULL;
        data->data_src = PERF_MEM_DATA_SRC_NONE;
 
        if (event->header.type != PERF_RECORD_SAMPLE) {
-               if (!evsel->attr.sample_id_all)
+               if (!evsel->core.attr.sample_id_all)
                        return 0;
                return perf_evsel__parse_id_sample(evsel, event, data);
        }
@@ -2284,7 +2190,7 @@ int perf_evsel__parse_sample(struct evsel *evsel, union perf_event *event,
        }
 
        if (type & PERF_SAMPLE_READ) {
-               u64 read_format = evsel->attr.read_format;
+               u64 read_format = evsel->core.attr.read_format;
 
                OVERFLOW_CHECK_u64(array);
                if (read_format & PERF_FORMAT_GROUP)
@@ -2389,7 +2295,7 @@ int perf_evsel__parse_sample(struct evsel *evsel, union perf_event *event,
                array++;
 
                if (data->user_regs.abi) {
-                       u64 mask = evsel->attr.sample_regs_user;
+                       u64 mask = evsel->core.attr.sample_regs_user;
 
                        sz = hweight64(mask) * sizeof(u64);
                        OVERFLOW_CHECK(array, sz, max_size);
@@ -2445,7 +2351,7 @@ int perf_evsel__parse_sample(struct evsel *evsel, union perf_event *event,
                array++;
 
                if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
-                       u64 mask = evsel->attr.sample_regs_intr;
+                       u64 mask = evsel->core.attr.sample_regs_intr;
 
                        sz = hweight64(mask) * sizeof(u64);
                        OVERFLOW_CHECK(array, sz, max_size);
@@ -2468,7 +2374,7 @@ int perf_evsel__parse_sample_timestamp(struct evsel *evsel,
                                       union perf_event *event,
                                       u64 *timestamp)
 {
-       u64 type = evsel->attr.sample_type;
+       u64 type = evsel->core.attr.sample_type;
        const u64 *array;
 
        if (!(type & PERF_SAMPLE_TIME))
@@ -2479,7 +2385,7 @@ int perf_evsel__parse_sample_timestamp(struct evsel *evsel,
                        .time = -1ULL,
                };
 
-               if (!evsel->attr.sample_id_all)
+               if (!evsel->core.attr.sample_id_all)
                        return -1;
                if (perf_evsel__parse_id_sample(evsel, event, &data))
                        return -1;
@@ -2865,8 +2771,8 @@ bool perf_evsel__fallback(struct evsel *evsel, int err,
        int paranoid;
 
        if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
-           evsel->attr.type   == PERF_TYPE_HARDWARE &&
-           evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
+           evsel->core.attr.type   == PERF_TYPE_HARDWARE &&
+           evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES) {
                /*
                 * If it's cycles then fall back to hrtimer based
                 * cpu-clock-tick sw counter, which is always available even if
@@ -2878,12 +2784,12 @@ bool perf_evsel__fallback(struct evsel *evsel, int err,
                scnprintf(msg, msgsize, "%s",
 "The cycles event is not supported, trying to fall back to cpu-clock-ticks");
 
-               evsel->attr.type   = PERF_TYPE_SOFTWARE;
-               evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;
+               evsel->core.attr.type   = PERF_TYPE_SOFTWARE;
+               evsel->core.attr.config = PERF_COUNT_SW_CPU_CLOCK;
 
                zfree(&evsel->name);
                return true;
-       } else if (err == EACCES && !evsel->attr.exclude_kernel &&
+       } else if (err == EACCES && !evsel->core.attr.exclude_kernel &&
                   (paranoid = perf_event_paranoid()) > 1) {
                const char *name = perf_evsel__name(evsel);
                char *new_name;
@@ -2902,7 +2808,7 @@ bool perf_evsel__fallback(struct evsel *evsel, int err,
                evsel->name = new_name;
                scnprintf(msg, msgsize,
 "kernel.perf_event_paranoid=%d, trying to fall back to excluding kernel samples", paranoid);
-               evsel->attr.exclude_kernel = 1;
+               evsel->core.attr.exclude_kernel = 1;
 
                return true;
        }
@@ -2999,15 +2905,15 @@ int perf_evsel__open_strerror(struct evsel *evsel, struct target *target,
         "No such device - did you specify an out-of-range profile CPU?");
                break;
        case EOPNOTSUPP:
-               if (evsel->attr.sample_period != 0)
+               if (evsel->core.attr.sample_period != 0)
                        return scnprintf(msg, size,
        "%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'",
                                         perf_evsel__name(evsel));
-               if (evsel->attr.precise_ip)
+               if (evsel->core.attr.precise_ip)
                        return scnprintf(msg, size, "%s",
        "\'precise\' request may not be supported. Try removing 'p' modifier.");
 #if defined(__i386__) || defined(__x86_64__)
-               if (evsel->attr.type == PERF_TYPE_HARDWARE)
+               if (evsel->core.attr.type == PERF_TYPE_HARDWARE)
                        return scnprintf(msg, size, "%s",
        "No hardware sampling interrupt available.\n");
 #endif
@@ -3019,12 +2925,14 @@ int perf_evsel__open_strerror(struct evsel *evsel, struct target *target,
        "We found oprofile daemon running, please stop it and try again.");
                break;
        case EINVAL:
-               if (evsel->attr.write_backward && perf_missing_features.write_backward)
+               if (evsel->core.attr.write_backward && perf_missing_features.write_backward)
                        return scnprintf(msg, size, "Reading from overwrite event is not supported by this kernel.");
                if (perf_missing_features.clockid)
                        return scnprintf(msg, size, "clockid feature not supported.");
                if (perf_missing_features.clockid_wrong)
                        return scnprintf(msg, size, "wrong clockid (%d).", clockid);
+               if (perf_missing_features.aux_output)
+                       return scnprintf(msg, size, "The 'aux_output' feature is not supported, update the kernel.");
                break;
        default:
                break;
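
A sketch for context, not part of this patch: on an older kernel the first open of an event asking for aux_output fails with EINVAL, the probe added in try_fallback sets perf_missing_features.aux_output and evsel__open() bails out, and a tool-side caller can then turn errno into the message added here. The helper below is illustrative only, loosely following the pattern used by the record/top code; ui__error() and the target pointer are assumptions, the other calls appear in this file.

	/* Illustrative caller: open an event and surface the warning on failure. */
	static int open_and_report(struct evsel *evsel, struct target *target)
	{
		char msg[512];

		if (evsel__open(evsel, evsel->core.cpus, evsel->core.threads) < 0) {
			perf_evsel__open_strerror(evsel, target, errno, msg, sizeof(msg));
			ui__error("%s\n", msg); /* e.g. "The 'aux_output' feature is not supported, update the kernel." */
			return -1;
		}
		return 0;
	}
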
@@ -3044,12 +2952,12 @@ struct perf_env *perf_evsel__env(struct evsel *evsel)
        return NULL;
 }
 
-static int store_evsel_ids(struct evsel *evsel, struct perf_evlist *evlist)
+static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist)
 {
        int cpu, thread;
 
-       for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
-               for (thread = 0; thread < xyarray__max_y(evsel->fd);
+       for (cpu = 0; cpu < xyarray__max_x(evsel->core.fd); cpu++) {
+               for (thread = 0; thread < xyarray__max_y(evsel->core.fd);
                     thread++) {
                        int fd = FD(evsel, cpu, thread);
 
@@ -3062,10 +2970,10 @@ static int store_evsel_ids(struct evsel *evsel, struct perf_evlist *evlist)
        return 0;
 }
 
-int perf_evsel__store_ids(struct evsel *evsel, struct perf_evlist *evlist)
+int perf_evsel__store_ids(struct evsel *evsel, struct evlist *evlist)
 {
-       struct perf_cpu_map *cpus = evsel->cpus;
-       struct perf_thread_map *threads = evsel->threads;
+       struct perf_cpu_map *cpus = evsel->core.cpus;
+       struct perf_thread_map *threads = evsel->core.threads;
 
        if (perf_evsel__alloc_id(evsel, cpus->nr, threads->nr))
                return -ENOMEM;