/* tools/perf/util/evlist.c */

/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include "debugfs.h"
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "evlist.h"
#include "evsel.h"

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

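/*
 * FD() fetches the fd that perf_evsel__open() stored for an evsel's
 * (cpu, thread) pair; SID() fetches the matching perf_sample_id slot,
 * used to map sample IDs back to the evsel that produced them.
 */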
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
}

struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
				     struct thread_map *threads)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, cpus, threads);

	return evlist;
}

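/*
 * Apply the record options to every evsel; when more than one event is in
 * the list, PERF_SAMPLE_ID is forced on so that samples can be routed back
 * to the evsel that generated them (see perf_evlist__id2evsel() below).
 */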
void perf_evlist__config_attrs(struct perf_evlist *evlist,
			       struct perf_record_opts *opts)
{
	struct perf_evsel *evsel;

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__config(evsel, opts);

		if (evlist->nr_entries > 1)
			evsel->attr.sample_type |= PERF_SAMPLE_ID;
	}
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

static void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
					  struct list_head *list,
					  int nr_entries)
{
	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);

	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

int perf_evlist__add_attrs(struct perf_evlist *evlist,
			   struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

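/*
 * Look up a tracepoint's numeric id under debugfs: for "sched:sched_switch"
 * this reads <tracing_events_path>/sched/sched_switch/id, the ':' separator
 * being rewritten to '/'.
 */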
static int trace_event__id(const char *evname)
{
	char *filename, *colon;
	int err = -1, fd;

	if (asprintf(&filename, "%s/%s/id", tracing_events_path, evname) < 0)
		return -1;

	colon = strrchr(filename, ':');
	if (colon != NULL)
		*colon = '/';

	fd = open(filename, O_RDONLY);
	if (fd >= 0) {
		char id[16];
		if (read(fd, id, sizeof(id)) > 0)
			err = atoi(id);
		close(fd);
	}

	free(filename);
	return err;
}

int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
				 const char *tracepoints[],
				 size_t nr_tracepoints)
{
	int err;
	size_t i;
	struct perf_event_attr *attrs = zalloc(nr_tracepoints * sizeof(*attrs));

	if (attrs == NULL)
		return -1;

	for (i = 0; i < nr_tracepoints; i++) {
		err = trace_event__id(tracepoints[i]);

		if (err < 0)
			goto out_free_attrs;

		attrs[i].type = PERF_TYPE_TRACEPOINT;
		attrs[i].config = err;
		attrs[i].sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU);
		attrs[i].sample_period = 1;
	}

	err = perf_evlist__add_attrs(evlist, attrs, nr_tracepoints);
out_free_attrs:
	free(attrs);
	return err;
}

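/*
 * Stop/restart counting on every open counter fd without tearing the
 * events down; the ioctl return values are not checked.
 */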
void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE);
		}
	}
}

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

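/*
 * With PERF_FORMAT_ID set, a read() on a counter fd returns
 * { value, [time_enabled], [time_running], id }, so where the id sits
 * depends on which PERF_FORMAT_TOTAL_TIME_* bits are also set.
 */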
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return list_entry(evlist->entries.next, struct perf_evsel, node);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;
	return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}

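/*
 * A minimal consumer sketch (not part of this file's API): drain all ring
 * buffers after poll() flags data, assuming process_event() is a
 * caller-supplied handler:
 *
 *	union perf_event *event;
 *	int i;
 *
 *	for (i = 0; i < evlist->nr_mmaps; i++)
 *		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL)
 *			process_event(event);
 */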
void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		if (evlist->mmap[i].base != NULL) {
			munmap(evlist->mmap[i].base, evlist->mmap_len);
			evlist->mmap[i].base = NULL;
		}
	}

	free(evlist->mmap);
	evlist->mmap = NULL;
}

int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = evlist->cpus->nr;
	if (evlist->cpus->map[0] == -1)
		evlist->nr_mmaps = evlist->threads->nr;
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED)
		return -1;

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

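/*
 * Only the first event on each cpu (or thread) gets its own mmap; the
 * others are redirected into that ring via PERF_EVENT_IOC_SET_OUTPUT,
 * so all events share one buffer per mmap index.
 */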
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		int output = -1;

		for (thread = 0; thread < evlist->threads->nr; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;

	for (thread = 0; thread < evlist->threads->nr; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < evlist->threads->nr; thread++) {
		if (evlist->mmap[thread].base != NULL) {
			munmap(evlist->mmap[thread].base, evlist->mmap_len);
			evlist->mmap[thread].base = NULL;
		}
	}
	return -1;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 * struct perf_mmap *m = &evlist->mmap[cpu];
 * unsigned int head = perf_mmap__read_head(m);
 *
 * perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__mmap_read does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	int mask = pages * page_size - 1;
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
			return -ENOMEM;
	}

	if (evlist->cpus->map[0] == -1)
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

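/*
 * Build the thread and cpu maps from the target options: with no cpu list
 * and a specific tid to trace, a dummy cpu map is used so the events follow
 * the thread instead of being bound to cpus.
 */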
int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
			     pid_t target_tid, const char *cpu_list)
{
	evlist->threads = thread_map__new(target_pid, target_tid);

	if (evlist->threads == NULL)
		return -1;

	if (cpu_list == NULL && target_tid != -1)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}

int perf_evlist__set_filters(struct perf_evlist *evlist)
{
	const struct thread_map *threads = evlist->threads;
	const struct cpu_map *cpus = evlist->cpus;
	struct perf_evsel *evsel;
	char *filter;
	int thread;
	int cpu;
	int err;
	int fd;

	list_for_each_entry(evsel, &evlist->entries, node) {
		filter = evsel->filter;
		if (!filter)
			continue;
		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				fd = FD(evsel, cpu, thread);
				err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
				if (err)
					return err;
			}
		}
	}

	return 0;
}

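/*
 * The helpers below report the first evsel's sample_type/sample_id_all;
 * the valid_* variants check that every other entry in the list agrees,
 * since consumers decode all samples with a single layout.
 */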
bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}

u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_type;
}

bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

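/*
 * When group is true, the first evsel becomes the group leader: its fd
 * table is handed to perf_evsel__open() so the remaining events are opened
 * with the leader's fd as the group_fd argument of sys_perf_event_open().
 */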
int perf_evlist__open(struct perf_evlist *evlist, bool group)
{
	struct perf_evsel *evsel, *first;
	int err, ncpus, nthreads;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(evsel, &evlist->entries, node) {
		struct xyarray *group_fd = NULL;

		if (group && evsel != first)
			group_fd = first->fd;

		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads,
				       group, group_fd);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	ncpus = evlist->cpus ? evlist->cpus->nr : 1;
	nthreads = evlist->threads ? evlist->threads->nr : 1;

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);

	return err;
}