/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#define _FILE_OFFSET_BITS 64

#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>

enum write_mode_t {
        WRITE_FORCE,
        WRITE_APPEND
};

static struct perf_record_opts record_opts = {
        .target_pid          = -1,
        .target_tid          = -1,
        .user_freq           = UINT_MAX,
        .user_interval       = ULLONG_MAX,
        .freq                = 1000,
        .sample_id_all_avail = true,
};

static unsigned int page_size;
static unsigned int mmap_pages = UINT_MAX;
static int output;
static const char *output_name = NULL;
static bool group = false;
static int realtime_prio = 0;
static enum write_mode_t write_mode = WRITE_FORCE;
static bool no_buildid = false;
static bool no_buildid_cache = false;
static struct perf_evlist *evsel_list;

static long samples = 0;
static u64 bytes_written = 0;

static int file_new = 1;
static off_t post_processing_offset;

static struct perf_session *session;
static const char *progname;

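/*
 * Account for data that helpers wrote to the output file themselves
 * (e.g. tracing data), so the data size recorded in the header stays
 * accurate.
 */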
static void advance_output(size_t size)
{
        bytes_written += size;
}

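/*
 * Write a buffer to the output file, retrying on short writes so that
 * either the whole buffer lands on disk or we die.
 */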
static void write_output(void *buf, size_t size)
{
        while (size) {
                int ret = write(output, buf, size);

                if (ret < 0)
                        die("failed to write");

                size -= ret;
                buf += ret;

                bytes_written += ret;
        }
}

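/*
 * Events synthesized by the tool itself (mmaps, comms, threads, etc.) are
 * simply appended to the output file.
 */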
static int process_synthesized_event(union perf_event *event,
                                     struct perf_sample *sample __used,
                                     struct perf_session *self __used)
{
        write_output(event, event->header.size);
        return 0;
}

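/*
 * Drain one mmap'ed ring buffer: copy everything between the old tail and
 * the current head to the output file, splitting the copy in two when the
 * data wraps around the end of the buffer, then advance the tail so the
 * kernel can reuse the space.
 */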
static void mmap_read(struct perf_mmap *md)
{
        unsigned int head = perf_mmap__read_head(md);
        unsigned int old = md->prev;
        unsigned char *data = md->base + page_size;
        unsigned long size;
        void *buf;

        if (old == head)
                return;

        samples++;

        size = head - old;

        if ((old & md->mask) + size != (head & md->mask)) {
                buf = &data[old & md->mask];
                size = md->mask + 1 - (old & md->mask);
                old += size;

                write_output(buf, size);
        }

        buf = &data[old & md->mask];
        size = head - old;
        old += size;

        write_output(buf, size);

        md->prev = old;
        perf_mmap__write_tail(md, old);
}

static volatile int done = 0;
static volatile int signr = -1;
static volatile int child_finished = 0;

static void sig_handler(int sig)
{
        if (sig == SIGCHLD)
                child_finished = 1;

        done = 1;
        signr = sig;
}

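/*
 * atexit handler: reap (or terminate) the forked workload and, if we are
 * exiting because of a signal, re-raise it with the default disposition so
 * our parent sees the real exit reason.
 */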
static void sig_atexit(void)
{
        int status;

        if (evsel_list->workload.pid > 0) {
                if (!child_finished)
                        kill(evsel_list->workload.pid, SIGTERM);

                wait(&status);
                if (WIFSIGNALED(status))
                        psignal(WTERMSIG(status), progname);
        }

        if (signr == -1 || signr == SIGUSR1)
                return;

        signal(signr, SIG_DFL);
        kill(getpid(), signr);
}

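/*
 * Compare two event lists attribute by attribute; used to make sure an
 * append records the same events as the existing perf.data file.
 */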
static bool perf_evlist__equal(struct perf_evlist *evlist,
                               struct perf_evlist *other)
{
        struct perf_evsel *pos, *pair;

        if (evlist->nr_entries != other->nr_entries)
                return false;

        pair = list_entry(other->entries.next, struct perf_evsel, node);

        list_for_each_entry(pos, &evlist->entries, node) {
                if (memcmp(&pos->attr, &pair->attr, sizeof(pos->attr)) != 0)
                        return false;
                pair = list_entry(pair->node.next, struct perf_evsel, node);
        }

        return true;
}

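/*
 * Configure and open one counter per event/cpu/thread, falling back to
 * older ABIs (no sample_id_all) or to the software cpu-clock event when
 * the kernel or hardware does not support what was asked for, then set
 * event filters and mmap the ring buffers.
 */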
static void open_counters(struct perf_evlist *evlist)
{
        struct perf_evsel *pos, *first;

        first = list_entry(evlist->entries.next, struct perf_evsel, node);

        perf_evlist__config_attrs(evlist, &record_opts);

        list_for_each_entry(pos, &evlist->entries, node) {
                struct perf_event_attr *attr = &pos->attr;
                struct xyarray *group_fd = NULL;
                /*
                 * Check if parse_single_tracepoint_event has already asked for
                 * PERF_SAMPLE_TIME.
                 *
                 * XXX this is kludgy but short term fix for problems introduced by
                 * eac23d1c that broke 'perf script' by having different sample_types
                 * when using multiple tracepoint events when we use a perf binary
                 * that tries to use sample_id_all on an older kernel.
                 *
                 * We need to move counter creation to perf_session, support
                 * different sample_types, etc.
                 */
                bool time_needed = attr->sample_type & PERF_SAMPLE_TIME;

                if (group && pos != first)
                        group_fd = first->fd;
retry_sample_id:
                attr->sample_id_all = record_opts.sample_id_all_avail ? 1 : 0;
try_again:
                if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group,
                                     group_fd) < 0) {
                        int err = errno;

                        if (err == EPERM || err == EACCES) {
                                ui__error_paranoid();
                                exit(EXIT_FAILURE);
                        } else if (err == ENODEV && record_opts.cpu_list) {
                                die("No such device - did you specify"
                                    " an out-of-range profile CPU?\n");
                        } else if (err == EINVAL && record_opts.sample_id_all_avail) {
                                /*
                                 * Old kernel, no attr->sample_id_type_all field
                                 */
                                record_opts.sample_id_all_avail = false;
                                if (!record_opts.sample_time && !record_opts.raw_samples && !time_needed)
                                        attr->sample_type &= ~PERF_SAMPLE_TIME;

                                goto retry_sample_id;
                        }

                        /*
                         * If it's cycles then fall back to hrtimer
                         * based cpu-clock-tick sw counter, which
                         * is always available even if no PMU support:
                         */
                        if (attr->type == PERF_TYPE_HARDWARE
                            && attr->config == PERF_COUNT_HW_CPU_CYCLES) {

                                if (verbose)
                                        ui__warning("The cycles event is not supported, "
                                                    "trying to fall back to cpu-clock-ticks\n");
                                attr->type = PERF_TYPE_SOFTWARE;
                                attr->config = PERF_COUNT_SW_CPU_CLOCK;
                                goto try_again;
                        }

                        if (err == ENOENT) {
                                ui__warning("The %s event is not supported.\n",
                                            event_name(pos));
                                exit(EXIT_FAILURE);
                        }

                        printf("\n");
                        error("sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information.\n",
                              err, strerror(err));

#if defined(__i386__) || defined(__x86_64__)
                        if (attr->type == PERF_TYPE_HARDWARE && err == EOPNOTSUPP)
                                die("No hardware sampling interrupt available."
                                    " No APIC? If so then you can boot the kernel"
                                    " with the \"lapic\" boot parameter to"
                                    " force-enable it.\n");
#endif

                        die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
                }
        }

        if (perf_evlist__set_filters(evlist)) {
                error("failed to set filter with %d (%s)\n", errno,
                      strerror(errno));
                exit(-1);
        }

        if (perf_evlist__mmap(evlist, mmap_pages, false) < 0)
                die("failed to mmap with %d (%s)\n", errno, strerror(errno));

        if (file_new)
                session->evlist = evlist;
        else {
                if (!perf_evlist__equal(session->evlist, evlist)) {
                        fprintf(stderr, "incompatible append\n");
                        exit(-1);
                }
        }

        perf_session__update_sample_type(session);
}

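/*
 * Re-read the events we just wrote and mark the DSOs that got hits, so
 * only the build-ids that matter end up in the header.
 */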
static int process_buildids(void)
{
        u64 size = lseek(output, 0, SEEK_CUR);

        if (size == 0)
                return 0;

        session->fd = output;
        return __perf_session__process_events(session, post_processing_offset,
                                              size - post_processing_offset,
                                              size, &build_id__mark_dso_hit_ops);
}

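/*
 * atexit handler: finalize the perf.data header (data size, build-ids) and
 * release the session/evlist resources. Skipped when piping to stdout.
 */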
static void atexit_header(void)
{
        if (!record_opts.pipe_output) {
                session->header.data_size += bytes_written;

                if (!no_buildid)
                        process_buildids();
                perf_session__write_header(session, evsel_list, output, true);
                perf_session__delete(session);
                perf_evlist__delete(evsel_list);
                symbol__exit();
        }
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
        int err;
        struct perf_session *psession = data;

        if (machine__is_host(machine))
                return;

        /*
         * For the guest kernel, when processing the record & report
         * subcommands, we arrange the module mmaps before the guest kernel
         * mmap and trigger a dso preload, because by default guest module
         * symbols are loaded from guest kallsyms instead of from
         * /lib/modules/XXX/XXX. This avoids missing symbols when the first
         * address falls in a module instead of in the guest kernel.
         */
        err = perf_event__synthesize_modules(process_synthesized_event,
                                             psession, machine);
        if (err < 0)
                pr_err("Couldn't record guest kernel [%d]'s reference"
                       " relocation symbol.\n", machine->pid);

        /*
         * We use _stext for the guest kernel because the guest kernel's
         * /proc/kallsyms sometimes has no _text.
         */
        err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
                                                 psession, machine, "_text");
        if (err < 0)
                err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
                                                         psession, machine,
                                                         "_stext");
        if (err < 0)
                pr_err("Couldn't record guest kernel [%d]'s reference"
                       " relocation symbol.\n", machine->pid);
}

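/*
 * Marker written after each pass over all mmap buffers; the session layer
 * uses it to know when buffered events can be flushed and re-ordered.
 */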
static struct perf_event_header finished_round_event = {
        .size = sizeof(struct perf_event_header),
        .type = PERF_RECORD_FINISHED_ROUND,
};

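/*
 * Drain every mmap'ed ring buffer once, then emit a FINISHED_ROUND marker
 * when tracing data is being recorded.
 */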
static void mmap_read_all(void)
{
        int i;

        for (i = 0; i < evsel_list->nr_mmaps; i++) {
                if (evsel_list->mmap[i].base)
                        mmap_read(&evsel_list->mmap[i]);
        }

        if (perf_header__has_feat(&session->header, HEADER_TRACE_INFO))
                write_output(&finished_round_event, sizeof(finished_round_event));
}

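/*
 * The record main loop: set up the output file and session, fork the
 * workload if one was given, open the counters, synthesize the initial
 * kernel/module/thread events, then poll the mmap buffers until the
 * workload exits or we get interrupted.
 */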
static int __cmd_record(int argc, const char **argv)
{
        struct stat st;
        int flags;
        int err;
        unsigned long waking = 0;
        const bool forks = argc > 0;
        struct machine *machine;

        progname = argv[0];

        page_size = sysconf(_SC_PAGE_SIZE);

        atexit(sig_atexit);
        signal(SIGCHLD, sig_handler);
        signal(SIGINT, sig_handler);
        signal(SIGUSR1, sig_handler);

        if (!output_name) {
                if (!fstat(STDOUT_FILENO, &st) && S_ISFIFO(st.st_mode))
                        record_opts.pipe_output = true;
                else
                        output_name = "perf.data";
        }
        if (output_name) {
                if (!strcmp(output_name, "-"))
                        record_opts.pipe_output = true;
                else if (!stat(output_name, &st) && st.st_size) {
                        if (write_mode == WRITE_FORCE) {
                                char oldname[PATH_MAX];
                                snprintf(oldname, sizeof(oldname), "%s.old",
                                         output_name);
                                unlink(oldname);
                                rename(output_name, oldname);
                        }
                } else if (write_mode == WRITE_APPEND) {
                        write_mode = WRITE_FORCE;
                }
        }

        flags = O_CREAT|O_RDWR;
        if (write_mode == WRITE_APPEND)
                file_new = 0;
        else
                flags |= O_TRUNC;

        if (record_opts.pipe_output)
                output = STDOUT_FILENO;
        else
                output = open(output_name, flags, S_IRUSR | S_IWUSR);
        if (output < 0) {
                perror("failed to create output file");
                exit(-1);
        }

        session = perf_session__new(output_name, O_WRONLY,
                                    write_mode == WRITE_FORCE, false, NULL);
        if (session == NULL) {
                pr_err("Not enough memory for reading perf file header\n");
                return -1;
        }

        if (!no_buildid)
                perf_header__set_feat(&session->header, HEADER_BUILD_ID);

        if (!file_new) {
                err = perf_session__read_header(session, output);
                if (err < 0)
                        goto out_delete_session;
        }

        if (have_tracepoints(&evsel_list->entries))
                perf_header__set_feat(&session->header, HEADER_TRACE_INFO);

        perf_header__set_feat(&session->header, HEADER_HOSTNAME);
        perf_header__set_feat(&session->header, HEADER_OSRELEASE);
        perf_header__set_feat(&session->header, HEADER_ARCH);
        perf_header__set_feat(&session->header, HEADER_CPUDESC);
        perf_header__set_feat(&session->header, HEADER_NRCPUS);
        perf_header__set_feat(&session->header, HEADER_EVENT_DESC);
        perf_header__set_feat(&session->header, HEADER_CMDLINE);
        perf_header__set_feat(&session->header, HEADER_VERSION);
        perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY);
        perf_header__set_feat(&session->header, HEADER_TOTAL_MEM);
        perf_header__set_feat(&session->header, HEADER_NUMA_TOPOLOGY);
        perf_header__set_feat(&session->header, HEADER_CPUID);

        if (forks) {
                err = perf_evlist__prepare_workload(evsel_list, &record_opts, argv);
                if (err < 0) {
                        pr_err("Couldn't run the workload!\n");
                        goto out_delete_session;
                }
        }

        open_counters(evsel_list);

        /*
         * perf_session__delete(session) will be called at atexit_header()
         */
        atexit(atexit_header);

        if (record_opts.pipe_output) {
                err = perf_header__write_pipe(output);
                if (err < 0)
                        return err;
        } else if (file_new) {
                err = perf_session__write_header(session, evsel_list,
                                                 output, false);
                if (err < 0)
                        return err;
        }

        post_processing_offset = lseek(output, 0, SEEK_CUR);

        if (record_opts.pipe_output) {
                err = perf_session__synthesize_attrs(session,
                                                     process_synthesized_event);
                if (err < 0) {
                        pr_err("Couldn't synthesize attrs.\n");
                        return err;
                }

                err = perf_event__synthesize_event_types(process_synthesized_event,
                                                         session);
                if (err < 0) {
                        pr_err("Couldn't synthesize event_types.\n");
                        return err;
                }

                if (have_tracepoints(&evsel_list->entries)) {
                        /*
                         * FIXME err <= 0 here actually means that
                         * there were no tracepoints so its not really
                         * an error, just that we don't need to
                         * synthesize anything. We really have to
                         * return this more properly and also
                         * propagate errors that now are calling die()
                         */
                        err = perf_event__synthesize_tracing_data(output, evsel_list,
                                                                  process_synthesized_event,
                                                                  session);
                        if (err <= 0) {
                                pr_err("Couldn't record tracing data.\n");
                                return err;
                        }
                        advance_output(err);
                }
        }

        machine = perf_session__find_host_machine(session);
        if (!machine) {
                pr_err("Couldn't find native kernel information.\n");
                return -1;
        }

        err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
                                                 session, machine, "_text");
        if (err < 0)
                err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
                                                         session, machine, "_stext");
        if (err < 0)
                pr_err("Couldn't record kernel reference relocation symbol\n"
                       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
                       "Check /proc/kallsyms permission or run as root.\n");

        err = perf_event__synthesize_modules(process_synthesized_event,
                                             session, machine);
        if (err < 0)
                pr_err("Couldn't record kernel module information.\n"
                       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
                       "Check /proc/modules permission or run as root.\n");

        if (perf_guest)
                perf_session__process_machines(session,
                                               perf_event__synthesize_guest_os);

        if (!record_opts.system_wide)
                perf_event__synthesize_thread_map(evsel_list->threads,
                                                  process_synthesized_event,
                                                  session);
        else
                perf_event__synthesize_threads(process_synthesized_event,
                                               session);

        if (realtime_prio) {
                struct sched_param param;

                param.sched_priority = realtime_prio;
                if (sched_setscheduler(0, SCHED_FIFO, &param)) {
                        pr_err("Could not set realtime priority.\n");
                        exit(-1);
                }
        }

        perf_evlist__enable(evsel_list);

        /*
         * Let the child rip
         */
        if (forks)
                perf_evlist__start_workload(evsel_list);

        for (;;) {
                int hits = samples;

                mmap_read_all();

                if (hits == samples) {
                        if (done)
                                break;
                        err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
                        waking++;
                }

                if (done)
                        perf_evlist__disable(evsel_list);
        }

        if (quiet || signr == SIGUSR1)
                return 0;

        fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

        /*
         * Approximate RIP event size: 24 bytes.
         */
        fprintf(stderr,
                "[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
                (double)bytes_written / 1024.0 / 1024.0,
                output_name,
                bytes_written / 24);

        return 0;

out_delete_session:
        perf_session__delete(session);
        return err;
}

static const char * const record_usage[] = {
        "perf record [<options>] [<command>]",
        "perf record [<options>] -- <command> [<options>]",
        NULL
};

static bool force, append_file;

const struct option record_options[] = {
        OPT_CALLBACK('e', "event", &evsel_list, "event",
                     "event selector. use 'perf list' to list available events",
                     parse_events_option),
        OPT_CALLBACK(0, "filter", &evsel_list, "filter",
                     "event filter", parse_filter),
        OPT_INTEGER('p', "pid", &record_opts.target_pid,
                    "record events on existing process id"),
        OPT_INTEGER('t', "tid", &record_opts.target_tid,
                    "record events on existing thread id"),
        OPT_INTEGER('r', "realtime", &realtime_prio,
                    "collect data with this RT SCHED_FIFO priority"),
        OPT_BOOLEAN('D', "no-delay", &record_opts.no_delay,
                    "collect data without buffering"),
        OPT_BOOLEAN('R', "raw-samples", &record_opts.raw_samples,
                    "collect raw sample records from all opened counters"),
        OPT_BOOLEAN('a', "all-cpus", &record_opts.system_wide,
                    "system-wide collection from all CPUs"),
        OPT_BOOLEAN('A', "append", &append_file,
                    "append to the output file to do incremental profiling"),
        OPT_STRING('C', "cpu", &record_opts.cpu_list, "cpu",
                   "list of cpus to monitor"),
        OPT_BOOLEAN('f', "force", &force,
                    "overwrite existing data file (deprecated)"),
        OPT_U64('c', "count", &record_opts.user_interval, "event period to sample"),
        OPT_STRING('o', "output", &output_name, "file",
                   "output file name"),
        OPT_BOOLEAN('i', "no-inherit", &record_opts.no_inherit,
                    "child tasks do not inherit counters"),
        OPT_UINTEGER('F', "freq", &record_opts.user_freq, "profile at this frequency"),
        OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"),
        OPT_BOOLEAN(0, "group", &group,
                    "put the counters into a counter group"),
        OPT_BOOLEAN('g', "call-graph", &record_opts.call_graph,
                    "do call-graph (stack chain/backtrace) recording"),
        OPT_INCR('v', "verbose", &verbose,
                 "be more verbose (show counter open errors, etc)"),
        OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
        OPT_BOOLEAN('s', "stat", &record_opts.inherit_stat,
                    "per thread counts"),
        OPT_BOOLEAN('d', "data", &record_opts.sample_address,
                    "Sample addresses"),
        OPT_BOOLEAN('T', "timestamp", &record_opts.sample_time, "Sample timestamps"),
        OPT_BOOLEAN('n', "no-samples", &record_opts.no_samples,
                    "don't sample"),
        OPT_BOOLEAN('N', "no-buildid-cache", &no_buildid_cache,
                    "do not update the buildid cache"),
        OPT_BOOLEAN('B', "no-buildid", &no_buildid,
                    "do not collect buildids in perf.data"),
        OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
                     "monitor event in cgroup name only",
                     parse_cgroups),
        OPT_END()
};

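/*
 * Entry point for 'perf record': parse the options, build the event list
 * and cpu/thread maps, resolve the sampling frequency/period and hand off
 * to __cmd_record().
 */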
int cmd_record(int argc, const char **argv, const char *prefix __used)
{
        int err = -ENOMEM;
        struct perf_evsel *pos;

        perf_header__set_cmdline(argc, argv);

        evsel_list = perf_evlist__new(NULL, NULL);
        if (evsel_list == NULL)
                return -ENOMEM;

        argc = parse_options(argc, argv, record_options, record_usage,
                             PARSE_OPT_STOP_AT_NON_OPTION);
        if (!argc && record_opts.target_pid == -1 && record_opts.target_tid == -1 &&
            !record_opts.system_wide && !record_opts.cpu_list)
                usage_with_options(record_usage, record_options);

        if (force && append_file) {
                fprintf(stderr, "Can't overwrite and append at the same time."
                        " You need to choose between -f and -A");
                usage_with_options(record_usage, record_options);
        } else if (append_file) {
                write_mode = WRITE_APPEND;
        } else {
                write_mode = WRITE_FORCE;
        }

        if (nr_cgroups && !record_opts.system_wide) {
                fprintf(stderr, "cgroup monitoring only available in"
                        " system-wide mode\n");
                usage_with_options(record_usage, record_options);
        }

        symbol__init();

        if (symbol_conf.kptr_restrict)
                pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

        if (no_buildid_cache || no_buildid)
                disable_buildid_cache();

        if (evsel_list->nr_entries == 0 &&
            perf_evlist__add_default(evsel_list) < 0) {
                pr_err("Not enough memory for event selector list\n");
                goto out_symbol_exit;
        }

        if (record_opts.target_pid != -1)
                record_opts.target_tid = record_opts.target_pid;

        if (perf_evlist__create_maps(evsel_list, record_opts.target_pid,
                                     record_opts.target_tid, record_opts.cpu_list) < 0)
                usage_with_options(record_usage, record_options);

        list_for_each_entry(pos, &evsel_list->entries, node) {
                if (perf_evsel__alloc_fd(pos, evsel_list->cpus->nr,
                                         evsel_list->threads->nr) < 0)
                        goto out_free_fd;
                if (perf_header__push_event(pos->attr.config, event_name(pos)))
                        goto out_free_fd;
        }

        if (perf_evlist__alloc_pollfd(evsel_list) < 0)
                goto out_free_fd;

        if (record_opts.user_interval != ULLONG_MAX)
                record_opts.default_interval = record_opts.user_interval;
        if (record_opts.user_freq != UINT_MAX)
                record_opts.freq = record_opts.user_freq;

        /*
         * User specified count overrides default frequency.
         */
        if (record_opts.default_interval)
                record_opts.freq = 0;
        else if (record_opts.freq) {
                record_opts.default_interval = record_opts.freq;
        } else {
                fprintf(stderr, "frequency and count are zero, aborting\n");
                err = -EINVAL;
                goto out_free_fd;
        }

        err = __cmd_record(argc, argv);
out_free_fd:
        perf_evlist__delete_maps(evsel_list);
out_symbol_exit:
        symbol__exit();
        return err;
}