]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blob - tools/perf/builtin-record.c
perf evlist: Introduce {prepare,start}_workload refactored from 'perf record'
[mirror_ubuntu-eoan-kernel.git] / tools / perf / builtin-record.c
1 /*
2 * builtin-record.c
3 *
4 * Builtin record command: Record the profile of a workload
5 * (or a CPU, or a PID) into the perf.data output file - for
6 * later analysis via perf report.
7 */
8 #define _FILE_OFFSET_BITS 64
9
10 #include "builtin.h"
11
12 #include "perf.h"
13
14 #include "util/build-id.h"
15 #include "util/util.h"
16 #include "util/parse-options.h"
17 #include "util/parse-events.h"
18
19 #include "util/header.h"
20 #include "util/event.h"
21 #include "util/evlist.h"
22 #include "util/evsel.h"
23 #include "util/debug.h"
24 #include "util/session.h"
25 #include "util/symbol.h"
26 #include "util/cpumap.h"
27 #include "util/thread_map.h"
28
29 #include <unistd.h>
30 #include <sched.h>
31 #include <sys/mman.h>
32
/* How to treat a pre-existing output file (see -f / -A handling in cmd_record()). */
enum write_mode_t {
	WRITE_FORCE,	/* overwrite; old data is renamed to <name>.old */
	WRITE_APPEND	/* append for incremental profiling (-A) */
};
37
/*
 * Recording options, seeded with "unset" sentinels: -1 means no target
 * pid/tid given; UINT_MAX/ULLONG_MAX mean the user did not override
 * freq/period (resolved into defaults in cmd_record()).
 */
static struct perf_record_opts record_opts = {
	.target_pid	     = -1,
	.target_tid	     = -1,
	.user_freq	     = UINT_MAX,
	.user_interval	     = ULLONG_MAX,
	.freq		     = 1000,
	.sample_id_all_avail = true,
};
46
static unsigned int page_size;			/* sysconf(_SC_PAGE_SIZE), set in __cmd_record() */
static unsigned int mmap_pages = UINT_MAX;	/* -m; UINT_MAX = use 512 KiB default */
static int output;				/* output file descriptor (file or stdout pipe) */
static const char *output_name = NULL;		/* -o; NULL = "perf.data" or pipe mode */
static bool group = false;			/* --group: put counters in one group */
static int realtime_prio = 0;			/* -r: SCHED_FIFO priority for ourselves */
static enum write_mode_t write_mode = WRITE_FORCE;
static bool no_buildid = false;			/* -B: don't collect build-ids */
static bool no_buildid_cache = false;		/* -N: don't update the buildid cache */
static struct perf_evlist *evsel_list;		/* the events being recorded */

static long samples = 0;			/* # of mmap_read() calls that found data */
static u64 bytes_written = 0;			/* payload bytes written after the header */

static int file_new = 1;			/* 0 when appending to an existing file (-A) */
static off_t post_processing_offset;		/* file offset where event data begins */

static struct perf_session *session;
static const char *progname;			/* argv[0], for psignal() in sig_atexit() */
66
/*
 * Account for 'size' bytes written to the output fd by someone other
 * than write_output() (e.g. perf_event__synthesize_tracing_data() in
 * __cmd_record()), so the header's data_size stays correct.
 */
static void advance_output(size_t size)
{
	bytes_written += size;
}
71
72 static void write_output(void *buf, size_t size)
73 {
74 while (size) {
75 int ret = write(output, buf, size);
76
77 if (ret < 0)
78 die("failed to write");
79
80 size -= ret;
81 buf += ret;
82
83 bytes_written += ret;
84 }
85 }
86
/*
 * Callback for the perf_event__synthesize_*() routines: append the
 * synthesized event verbatim to the output file.  Always returns 0.
 */
static int process_synthesized_event(union perf_event *event,
				     struct perf_sample *sample __used,
				     struct perf_session *self __used)
{
	write_output(event, event->header.size);
	return 0;
}
94
/*
 * Drain all new data from one mmap'ed ring buffer into the output file.
 *
 * Copies everything between our last position (md->prev) and the
 * kernel's current head.  If the region wraps past the end of the ring
 * buffer it is written in two chunks.  Finally the tail is published so
 * the kernel can reuse the space.
 */
static void mmap_read(struct perf_mmap *md)
{
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;	/* data starts after the control page */
	unsigned long size;
	void *buf;

	/* Nothing new since the last call. */
	if (old == head)
		return;

	samples++;

	size = head - old;

	/* Region wraps around: write the chunk up to the buffer end first. */
	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		write_output(buf, size);
	}

	/* Write the (remaining) contiguous chunk. */
	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	write_output(buf, size);

	md->prev = old;
	/* Tell the kernel we consumed the data. */
	perf_mmap__write_tail(md, old);
}
127
/* Flags set by sig_handler() and polled by the loop in __cmd_record(). */
static volatile int done = 0;		/* request the record loop to stop */
static volatile int signr = -1;		/* terminating signal, or -1 for a normal exit */
static volatile int child_finished = 0;	/* the forked workload exited (SIGCHLD seen) */
131
/*
 * Handler for SIGCHLD/SIGINT/SIGUSR1: remember which signal fired and
 * ask the main loop in __cmd_record() to wind down.
 */
static void sig_handler(int sig)
{
	/* SIGCHLD means the workload is done and need not be killed at exit. */
	if (sig == SIGCHLD)
		child_finished = 1;

	done = 1;
	signr = sig;
}
140
/*
 * atexit hook: reap the forked workload (terminating it first if it is
 * still running), then re-raise the fatal signal with default
 * disposition so our exit status reflects how we died.
 */
static void sig_atexit(void)
{
	int status;

	if (evsel_list->workload.pid > 0) {
		if (!child_finished)
			kill(evsel_list->workload.pid, SIGTERM);

		wait(&status);
		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), progname);
	}

	/* Normal exit, or SIGUSR1 (treated as a clean stop, see __cmd_record). */
	if (signr == -1 || signr == SIGUSR1)
		return;

	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}
160
161 static bool perf_evlist__equal(struct perf_evlist *evlist,
162 struct perf_evlist *other)
163 {
164 struct perf_evsel *pos, *pair;
165
166 if (evlist->nr_entries != other->nr_entries)
167 return false;
168
169 pair = list_entry(other->entries.next, struct perf_evsel, node);
170
171 list_for_each_entry(pos, &evlist->entries, node) {
172 if (memcmp(&pos->attr, &pair->attr, sizeof(pos->attr) != 0))
173 return false;
174 pair = list_entry(pair->node.next, struct perf_evsel, node);
175 }
176
177 return true;
178 }
179
/*
 * Configure, open and mmap every counter in 'evlist'.
 *
 * Retries with progressively degraded attrs for older kernels: first
 * dropping sample_id_all (and PERF_SAMPLE_TIME when nobody needs it),
 * then falling back from hardware cycles to the cpu-clock software
 * event.  Unrecoverable errors terminate the process via die()/exit().
 */
static void open_counters(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);

	perf_evlist__config_attrs(evlist, &record_opts);

	list_for_each_entry(pos, &evlist->entries, node) {
		struct perf_event_attr *attr = &pos->attr;
		struct xyarray *group_fd = NULL;
		/*
		 * Check if parse_single_tracepoint_event has already asked for
		 * PERF_SAMPLE_TIME.
		 *
		 * XXX this is kludgy but short term fix for problems introduced by
		 * eac23d1c that broke 'perf script' by having different sample_types
		 * when using multiple tracepoint events when we use a perf binary
		 * that tries to use sample_id_all on an older kernel.
		 *
		 * We need to move counter creation to perf_session, support
		 * different sample_types, etc.
		 */
		bool time_needed = attr->sample_type & PERF_SAMPLE_TIME;

		/* With --group, all non-leader events attach to the leader's fds. */
		if (group && pos != first)
			group_fd = first->fd;
retry_sample_id:
		attr->sample_id_all = record_opts.sample_id_all_avail ? 1 : 0;
try_again:
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group,
				     group_fd) < 0) {
			int err = errno;

			if (err == EPERM || err == EACCES) {
				/* perf_event_paranoid forbids this; explain and bail. */
				ui__error_paranoid();
				exit(EXIT_FAILURE);
			} else if (err == ENODEV && record_opts.cpu_list) {
				die("No such device - did you specify"
					" an out-of-range profile CPU?\n");
			} else if (err == EINVAL && record_opts.sample_id_all_avail) {
				/*
				 * Old kernel, no attr->sample_id_type_all field
				 */
				record_opts.sample_id_all_avail = false;
				if (!record_opts.sample_time && !record_opts.raw_samples && !time_needed)
					attr->sample_type &= ~PERF_SAMPLE_TIME;

				goto retry_sample_id;
			}

			/*
			 * If it's cycles then fall back to hrtimer
			 * based cpu-clock-tick sw counter, which
			 * is always available even if no PMU support:
			 */
			if (attr->type == PERF_TYPE_HARDWARE
					&& attr->config == PERF_COUNT_HW_CPU_CYCLES) {

				if (verbose)
					ui__warning("The cycles event is not supported, "
						    "trying to fall back to cpu-clock-ticks\n");
				attr->type = PERF_TYPE_SOFTWARE;
				attr->config = PERF_COUNT_SW_CPU_CLOCK;
				goto try_again;
			}

			if (err == ENOENT) {
				ui__warning("The %s event is not supported.\n",
					    event_name(pos));
				exit(EXIT_FAILURE);
			}

			printf("\n");
			error("sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information.\n",
			      err, strerror(err));

#if defined(__i386__) || defined(__x86_64__)
			if (attr->type == PERF_TYPE_HARDWARE && err == EOPNOTSUPP)
				die("No hardware sampling interrupt available."
				    " No APIC? If so then you can boot the kernel"
				    " with the \"lapic\" boot parameter to"
				    " force-enable it.\n");
#endif

			die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
		}
	}

	if (perf_evlist__set_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror(errno));
		exit(-1);
	}

	if (perf_evlist__mmap(evlist, mmap_pages, false) < 0)
		die("failed to mmap with %d (%s)\n", errno, strerror(errno));

	/* Fresh file: record our evlist; append: it must match the file's. */
	if (file_new)
		session->evlist = evlist;
	else {
		if (!perf_evlist__equal(session->evlist, evlist)) {
			fprintf(stderr, "incompatible append\n");
			exit(-1);
		}
	}

	perf_session__update_sample_type(session);
}
289
/*
 * Re-scan the event data written so far (from post_processing_offset to
 * the current file offset) with build_id__mark_dso_hit_ops, so only the
 * DSOs that were actually hit get their build-ids stored in the header.
 */
static int process_buildids(void)
{
	u64 size = lseek(output, 0, SEEK_CUR);

	/* Empty file: nothing to post-process. */
	if (size == 0)
		return 0;

	session->fd = output;
	return __perf_session__process_events(session, post_processing_offset,
					      size - post_processing_offset,
					      size, &build_id__mark_dso_hit_ops);
}
302
/*
 * atexit hook: finalize the on-disk header (data size, build-ids) and
 * tear down the session/evlist.  A pipe has no seekable header, so in
 * pipe mode there is nothing to rewrite.
 */
static void atexit_header(void)
{
	if (!record_opts.pipe_output) {
		session->header.data_size += bytes_written;

		if (!no_buildid)
			process_buildids();
		perf_session__write_header(session, evsel_list, output, true);
		perf_session__delete(session);
		perf_evlist__delete(evsel_list);
		symbol__exit();
	}
}
316
/*
 * perf_session__process_machines() callback: synthesize module and
 * kernel mmap events for each guest machine.  The host is skipped —
 * its events are synthesized directly in __cmd_record().
 */
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_session *psession = data;

	if (machine__is_host(machine))
		return;

	/*
	 * As for guest kernel when processing subcommand record&report,
	 * we arrange module mmap prior to guest kernel mmap and trigger
	 * a preload dso because default guest module symbols are loaded
	 * from guest kallsyms instead of /lib/modules/XXX/XXX.  This
	 * method is used to avoid symbol missing when the first addr is
	 * in module instead of in guest kernel.
	 */
	err = perf_event__synthesize_modules(process_synthesized_event,
					     psession, machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
	 * have no _text sometimes.
	 */
	err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
						 psession, machine, "_text");
	if (err < 0)
		err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
							 psession, machine,
							 "_stext");
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}
353
/*
 * Header-only marker event emitted after each full pass over the ring
 * buffers, letting report-time processing sort/flush everything before
 * it (only written when tracepoint data is being recorded).
 */
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};
358
359 static void mmap_read_all(void)
360 {
361 int i;
362
363 for (i = 0; i < evsel_list->nr_mmaps; i++) {
364 if (evsel_list->mmap[i].base)
365 mmap_read(&evsel_list->mmap[i]);
366 }
367
368 if (perf_header__has_feat(&session->header, HEADER_TRACE_INFO))
369 write_output(&finished_round_event, sizeof(finished_round_event));
370 }
371
/*
 * The main recording routine: set up the output file/session and
 * counters, synthesize the initial metadata events (kernel/module/
 * thread mmaps), then loop draining the ring buffers until told to
 * stop, and finally print a summary.  argv, if non-empty, is the
 * workload command to fork and profile.
 */
static int __cmd_record(int argc, const char **argv)
{
	struct stat st;
	int flags;
	int err;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;

	progname = argv[0];

	page_size = sysconf(_SC_PAGE_SIZE);

	atexit(sig_atexit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);

	/*
	 * Pick the output: no -o with stdout being a FIFO, or -o "-",
	 * means pipe mode; otherwise default to "perf.data".
	 */
	if (!output_name) {
		if (!fstat(STDOUT_FILENO, &st) && S_ISFIFO(st.st_mode))
			record_opts.pipe_output = true;
		else
			output_name = "perf.data";
	}
	if (output_name) {
		if (!strcmp(output_name, "-"))
			record_opts.pipe_output = true;
		else if (!stat(output_name, &st) && st.st_size) {
			if (write_mode == WRITE_FORCE) {
				/* Preserve the previous data as <name>.old. */
				char oldname[PATH_MAX];
				snprintf(oldname, sizeof(oldname), "%s.old",
					 output_name);
				unlink(oldname);
				rename(output_name, oldname);
			}
		} else if (write_mode == WRITE_APPEND) {
			/* No existing data to append to: write fresh. */
			write_mode = WRITE_FORCE;
		}
	}

	flags = O_CREAT|O_RDWR;
	if (write_mode == WRITE_APPEND)
		file_new = 0;
	else
		flags |= O_TRUNC;

	if (record_opts.pipe_output)
		output = STDOUT_FILENO;
	else
		output = open(output_name, flags, S_IRUSR | S_IWUSR);
	if (output < 0) {
		perror("failed to create output file");
		exit(-1);
	}

	session = perf_session__new(output_name, O_WRONLY,
				    write_mode == WRITE_FORCE, false, NULL);
	if (session == NULL) {
		pr_err("Not enough memory for reading perf file header\n");
		return -1;
	}

	if (!no_buildid)
		perf_header__set_feat(&session->header, HEADER_BUILD_ID);

	/* Appending: read back the existing header first. */
	if (!file_new) {
		err = perf_session__read_header(session, output);
		if (err < 0)
			goto out_delete_session;
	}

	if (have_tracepoints(&evsel_list->entries))
		perf_header__set_feat(&session->header, HEADER_TRACE_INFO);

	/* Record the environment the data was collected in. */
	perf_header__set_feat(&session->header, HEADER_HOSTNAME);
	perf_header__set_feat(&session->header, HEADER_OSRELEASE);
	perf_header__set_feat(&session->header, HEADER_ARCH);
	perf_header__set_feat(&session->header, HEADER_CPUDESC);
	perf_header__set_feat(&session->header, HEADER_NRCPUS);
	perf_header__set_feat(&session->header, HEADER_EVENT_DESC);
	perf_header__set_feat(&session->header, HEADER_CMDLINE);
	perf_header__set_feat(&session->header, HEADER_VERSION);
	perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY);
	perf_header__set_feat(&session->header, HEADER_TOTAL_MEM);
	perf_header__set_feat(&session->header, HEADER_NUMA_TOPOLOGY);
	perf_header__set_feat(&session->header, HEADER_CPUID);

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (mmap_pages == UINT_MAX)
		mmap_pages = (512 * 1024) / page_size;

	/* Fork the workload now; it won't exec until start_workload(). */
	if (forks) {
		err = perf_evlist__prepare_workload(evsel_list, &record_opts, argv);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			goto out_delete_session;
		}
	}

	open_counters(evsel_list);

	/*
	 * perf_session__delete(session) will be called at atexit_header()
	 */
	atexit(atexit_header);

	if (record_opts.pipe_output) {
		err = perf_header__write_pipe(output);
		if (err < 0)
			return err;
	} else if (file_new) {
		err = perf_session__write_header(session, evsel_list,
						 output, false);
		if (err < 0)
			return err;
	}

	post_processing_offset = lseek(output, 0, SEEK_CUR);

	/*
	 * A pipe has no header to read back later, so synthesize the
	 * attr/event-type/tracing metadata as in-stream events instead.
	 */
	if (record_opts.pipe_output) {
		err = perf_session__synthesize_attrs(session,
						     process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			return err;
		}

		err = perf_event__synthesize_event_types(process_synthesized_event,
							 session);
		if (err < 0) {
			pr_err("Couldn't synthesize event_types.\n");
			return err;
		}

		if (have_tracepoints(&evsel_list->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so its not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(output, evsel_list,
								  process_synthesized_event,
								  session);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				return err;
			}
			/* Tracing data was written directly; account for it. */
			advance_output(err);
		}
	}

	machine = perf_session__find_host_machine(session);
	if (!machine) {
		pr_err("Couldn't find native kernel information.\n");
		return -1;
	}

	/* Kernel mmap event: try _text, fall back to _stext. */
	err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
						 session, machine, "_text");
	if (err < 0)
		err = perf_event__synthesize_kernel_mmap(process_synthesized_event,
							 session, machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(process_synthesized_event,
					     session, machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest)
		perf_session__process_machines(session,
					       perf_event__synthesize_guest_os);

	/* Synthesize mmap/comm events for already-running target threads. */
	if (!record_opts.system_wide)
		perf_event__synthesize_thread_map(evsel_list->threads,
						  process_synthesized_event,
						  session);
	else
		perf_event__synthesize_threads(process_synthesized_event,
					       session);

	if (realtime_prio) {
		struct sched_param param;

		param.sched_priority = realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			exit(-1);
		}
	}

	perf_evlist__enable(evsel_list);

	/*
	 * Let the child rip
	 */
	if (forks)
		perf_evlist__start_workload(evsel_list);

	/* Main loop: drain the buffers, sleeping in poll() when idle. */
	for (;;) {
		int hits = samples;

		mmap_read_all();

		/* Nothing arrived this pass: stop if asked to, else wait. */
		if (hits == samples) {
			if (done)
				break;
			err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
			waking++;
		}

		/*
		 * Signal received: disable the counters but keep looping
		 * until the buffers are fully drained.
		 */
		if (done)
			perf_evlist__disable(evsel_list);
	}

	if (quiet || signr == SIGUSR1)
		return 0;

	fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	/*
	 * Approximate RIP event size: 24 bytes.
	 */
	fprintf(stderr,
		"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
		(double)bytes_written / 1024.0 / 1024.0,
		output_name,
		bytes_written / 24);

	return 0;

out_delete_session:
	perf_session__delete(session);
	return err;
}
615
/* Usage strings shown by usage_with_options(). */
static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};

/* -f / -A flags, reconciled into write_mode in cmd_record(). */
static bool force, append_file;
623
/* Command line option table for 'perf record'. */
const struct option record_options[] = {
	OPT_CALLBACK('e', "event", &evsel_list, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &evsel_list, "filter",
		     "event filter", parse_filter),
	OPT_INTEGER('p', "pid", &record_opts.target_pid,
		    "record events on existing process id"),
	OPT_INTEGER('t', "tid", &record_opts.target_tid,
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN('D', "no-delay", &record_opts.no_delay,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record_opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record_opts.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_BOOLEAN('A', "append", &append_file,
		    "append to the output file to do incremental profiling"),
	OPT_STRING('C', "cpu", &record_opts.cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_BOOLEAN('f', "force", &force,
		    "overwrite existing data file (deprecated)"),
	OPT_U64('c', "count", &record_opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &output_name, "file",
		   "output file name"),
	OPT_BOOLEAN('i', "no-inherit", &record_opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record_opts.user_freq, "profile at this frequency"),
	OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"),
	OPT_BOOLEAN(0, "group", &group,
		    "put the counters into a counter group"),
	OPT_BOOLEAN('g', "call-graph", &record_opts.call_graph,
		    "do call-graph (stack chain/backtrace) recording"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record_opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record_opts.sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('T', "timestamp", &record_opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('n', "no-samples", &record_opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_END()
};
678
/*
 * Entry point for 'perf record': parse the command line, validate the
 * option combinations, build the evlist and cpu/thread maps, resolve
 * frequency/period defaults, then hand off to __cmd_record().
 * Returns 0 on success or a negative error code.
 */
int cmd_record(int argc, const char **argv, const char *prefix __used)
{
	int err = -ENOMEM;
	struct perf_evsel *pos;

	perf_header__set_cmdline(argc, argv);

	evsel_list = perf_evlist__new(NULL, NULL);
	if (evsel_list == NULL)
		return -ENOMEM;

	argc = parse_options(argc, argv, record_options, record_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	/* Must have something to record: a command, a pid/tid, or CPUs. */
	if (!argc && record_opts.target_pid == -1 && record_opts.target_tid == -1 &&
	    !record_opts.system_wide && !record_opts.cpu_list)
		usage_with_options(record_usage, record_options);

	/* -f and -A are mutually exclusive; -A switches to append mode. */
	if (force && append_file) {
		fprintf(stderr, "Can't overwrite and append at the same time."
				" You need to choose between -f and -A");
		usage_with_options(record_usage, record_options);
	} else if (append_file) {
		write_mode = WRITE_APPEND;
	} else {
		write_mode = WRITE_FORCE;
	}

	if (nr_cgroups && !record_opts.system_wide) {
		fprintf(stderr, "cgroup monitoring only available in"
			" system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init();

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (no_buildid_cache || no_buildid)
		disable_buildid_cache();

	/* No -e given: fall back to the default event. */
	if (evsel_list->nr_entries == 0 &&
	    perf_evlist__add_default(evsel_list) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	/* -p implies monitoring that process's main thread. */
	if (record_opts.target_pid != -1)
		record_opts.target_tid = record_opts.target_pid;

	if (perf_evlist__create_maps(evsel_list, record_opts.target_pid,
				     record_opts.target_tid, record_opts.cpu_list) < 0)
		usage_with_options(record_usage, record_options);

	list_for_each_entry(pos, &evsel_list->entries, node) {
		if (perf_evsel__alloc_fd(pos, evsel_list->cpus->nr,
					 evsel_list->threads->nr) < 0)
			goto out_free_fd;
		if (perf_header__push_event(pos->attr.config, event_name(pos)))
			goto out_free_fd;
	}

	if (perf_evlist__alloc_pollfd(evsel_list) < 0)
		goto out_free_fd;

	/* Apply user overrides (-c / -F) on top of the defaults. */
	if (record_opts.user_interval != ULLONG_MAX)
		record_opts.default_interval = record_opts.user_interval;
	if (record_opts.user_freq != UINT_MAX)
		record_opts.freq = record_opts.user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (record_opts.default_interval)
		record_opts.freq = 0;
	else if (record_opts.freq) {
		record_opts.default_interval = record_opts.freq;
	} else {
		fprintf(stderr, "frequency and count are zero, aborting\n");
		err = -EINVAL;
		goto out_free_fd;
	}

	err = __cmd_record(argc, argv);
out_free_fd:
	perf_evlist__delete_maps(evsel_list);
out_symbol_exit:
	symbol__exit();
	return err;
}
775 }