/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/config.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/drv_configs.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
#include "asm/bug.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>
#include <asm/bug.h>
#include <linux/time64.h>

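/*
 * State for the --switch-output option: output files can be rotated on
 * SIGUSR2 ("signal"), when the written data crosses a size threshold
 * (e.g. --switch-output=100M), or periodically after a time threshold
 * (e.g. --switch-output=30s).  See switch_output_setup() for how ->str
 * is parsed into these fields.
 */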
struct switch_output {
	bool		 enabled;
	bool		 signal;
	unsigned long	 size;
	unsigned long	 time;
	const char	*str;
	bool		 set;
};

struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data_file	file;
	struct auxtrace_record	*itr;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_set;
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;
	bool			timestamp_filename;
	struct switch_output	switch_output;
	unsigned long long	samples;
};

static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);

static bool switch_output_signal(struct record *rec)
{
	return rec->switch_output.signal &&
	       trigger_is_ready(&switch_output_trigger);
}

static bool switch_output_size(struct record *rec)
{
	return rec->switch_output.size &&
	       trigger_is_ready(&switch_output_trigger) &&
	       (rec->bytes_written >= rec->switch_output.size);
}

static bool switch_output_time(struct record *rec)
{
	return rec->switch_output.time &&
	       trigger_is_ready(&switch_output_trigger);
}

static int record__write(struct record *rec, void *bf, size_t size)
{
	if (perf_data_file__write(rec->session->file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;

	if (switch_output_size(rec))
		trigger_hit(&switch_output_trigger);

	return 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, event, event->header.size);
}

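/*
 * For a backward (overwritable) ring buffer the kernel keeps writing at
 * 'head' while older records get overwritten, so the valid region is found
 * by walking event headers forward from 'head' until either a full buffer's
 * worth has been covered (rewind) or a zero-sized header marks the end of
 * the written data.
 */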
static int
backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = head;
	int size = mask + 1;

	pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
	pheader = (struct perf_event_header *)(buf + (head & mask));
	*start = head;
	while (true) {
		if (evt_head - head >= (unsigned int)size) {
			pr_debug("Finished reading backward ring buffer: rewind\n");
			if (evt_head - head > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading backward ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}

static int
rb_find_range(void *data, int mask, u64 head, u64 old,
	      u64 *start, u64 *end, bool backward)
{
	if (!backward) {
		*start = old;
		*end = head;
		return 0;
	}

	return backward_rb_find_range(data, mask, head, start, end);
}

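/*
 * Flush the data between 'start' and 'end' from one mmap'ed ring buffer
 * into the output file.  If the region wraps past the end of the buffer
 * it is written in two chunks: from 'start' to the end of the buffer,
 * then from the beginning of the buffer up to 'end'.
 */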
static int
record__mmap_read(struct record *rec, struct perf_mmap *md,
		  bool overwrite, bool backward)
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	u64 end = head, start = old;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (rb_find_range(data, md->mask, head,
			  old, &start, &end, backward))
		return -1;

	if (start == end)
		return 0;

	rec->samples++;

	size = end - start;
	if (size > (unsigned long)(md->mask) + 1) {
		WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

		md->prev = head;
		perf_mmap__consume(md, overwrite || backward);
		return 0;
	}

	if ((start & md->mask) + size != (end & md->mask)) {
		buf = &data[start & md->mask];
		size = md->mask + 1 - (start & md->mask);
		start += size;

		if (record__write(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[start & md->mask];
	size = end - start;
	start += size;

	if (record__write(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = head;
	perf_mmap__consume(md, overwrite || backward);
out:
	return rc;
}

static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

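/*
 * AUX area tracing support: when perf is built without it, the helpers
 * below are replaced by the no-op stubs in the #else branch so that the
 * main record loop does not need any #ifdefs.
 */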
#ifdef HAVE_AUXTRACE_SUPPORT

static int record__process_auxtrace(struct perf_tool *tool,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data_file *file = &rec->file;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data_file__is_pipe(file)) {
		off_t file_offset;
		int fd = perf_data_file__fd(file);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, event, event->header.size);
	record__write(rec, data1, len1);
	if (len2)
		record__write(rec, data2, len2);
	record__write(rec, &pad, padding);

	return 0;
}

static int record__auxtrace_mmap_read(struct record *rec,
				      struct auxtrace_mmap *mm)
{
	int ret;

	ret = auxtrace_mmap__read(mm, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct auxtrace_mmap *mm)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(mm, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_read_snapshot_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		struct auxtrace_mmap *mm =
				&rec->evlist->mmap[i].auxtrace_mmap;

		if (!mm->base)
			continue;

		if (record__auxtrace_mmap_read_snapshot(rec, mm) != 0) {
			rc = -1;
			goto out;
		}
	}
out:
	return rc;
}

static void record__read_auxtrace_snapshot(struct record *rec)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		trigger_error(&auxtrace_snapshot_trigger);
	} else {
		if (auxtrace_record__snapshot_finish(rec->itr))
			trigger_error(&auxtrace_snapshot_trigger);
		else
			trigger_ready(&auxtrace_snapshot_trigger);
	}
}

#else

static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct auxtrace_mmap *mm __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

#endif

static int record__mmap_evlist(struct record *rec,
			       struct perf_evlist *evlist)
{
	struct record_opts *opts = &rec->opts;
	char msg[512];

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, false,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				str_error_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}
	return 0;
}

static int record__mmap(struct record *rec)
{
	return record__mmap_evlist(rec, rec->evlist);
}

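/*
 * Open all events in the evlist on their configured CPUs/threads.  If an
 * open fails with an error that perf_evsel__fallback() can work around,
 * the open is retried via the 'try_again' label; otherwise the error is
 * reported and record__open() fails.
 */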
static int record__open(struct record *rec)
{
	char msg[BUFSIZ];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	struct perf_evsel_config_term *err_term;
	int rc = 0;

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		error("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	if (perf_evlist__apply_drv_configs(evlist, &pos, &err_term)) {
		error("failed to set config \"%s\" on event %s with %d (%s)\n",
		      err_term->val.drv_cfg, perf_evsel__name(pos), errno,
		      str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	rec->samples++;

	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}

static int process_buildids(struct record *rec)
{
	struct perf_data_file *file = &rec->file;
	struct perf_session *session = rec->session;

	if (file->size == 0)
		return 0;

	/*
	 * During this process, it'll load kernel map and replace the
	 * dso->long_name to a real pathname it found.  In this case
	 * we prefer the vmlinux path like
	 *   /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than build-id path (in debug directory).
	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSO regardless of hits,
	 * so no need to process samples.
	 */
	if (rec->buildid_all)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for the guest kernel, when processing the record & report
	 * subcommands we arrange the module mmaps prior to the guest kernel
	 * mmap and trigger a preload of the dso, because by default guest
	 * module symbols are loaded from guest kallsyms instead of
	 * /lib/modules/XXX/XXX.  This avoids missing symbols when the first
	 * address falls in a module instead of in the guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

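/*
 * PERF_RECORD_FINISHED_ROUND is a synthetic event written after each pass
 * over the mmap buffers; the reporting side can use it as a point up to
 * which buffered events may safely be sorted and flushed.
 */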
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
				    bool backward)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;
	struct perf_mmap *maps;

	if (!evlist)
		return 0;

	maps = backward ? evlist->backward_mmap : evlist->mmap;
	if (!maps)
		return 0;

	if (backward && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
		return 0;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct auxtrace_mmap *mm = &maps[i].auxtrace_mmap;

		if (maps[i].base) {
			if (record__mmap_read(rec, &maps[i],
					      evlist->overwrite, backward) != 0) {
				rc = -1;
				goto out;
			}
		}

		if (mm->base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, mm) != 0) {
			rc = -1;
			goto out;
		}
	}

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));

	if (backward)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
	return rc;
}

static int record__mmap_read_all(struct record *rec)
{
	int err;

	err = record__mmap_read_evlist(rec, rec->evlist, false);
	if (err)
		return err;

	return record__mmap_read_evlist(rec, rec->evlist, true);
}

8c6f45a7 610static void record__init_features(struct record *rec)
57706abc 611{
57706abc
DA
612 struct perf_session *session = rec->session;
613 int feat;
614
615 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
616 perf_header__set_feat(&session->header, feat);
617
618 if (rec->no_buildid)
619 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
620
3e2be2da 621 if (!have_tracepoints(&rec->evlist->entries))
57706abc
DA
622 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
623
624 if (!rec->opts.branch_stack)
625 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
ef149c25
AH
626
627 if (!rec->opts.full_auxtrace)
628 perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
ffa517ad
JO
629
630 perf_header__clear_feat(&session->header, HEADER_STAT);
57706abc
DA
631}
632
e1ab48ba
WN
633static void
634record__finish_output(struct record *rec)
635{
636 struct perf_data_file *file = &rec->file;
637 int fd = perf_data_file__fd(file);
638
639 if (file->is_pipe)
640 return;
641
642 rec->session->header.data_size += rec->bytes_written;
643 file->size = lseek(perf_data_file__fd(file), 0, SEEK_CUR);
644
645 if (!rec->no_buildid) {
646 process_buildids(rec);
647
648 if (rec->buildid_all)
649 dsos__hit_all(rec->session);
650 }
651 perf_session__write_header(rec->session, rec->evlist, fd, true);
652
653 return;
654}
655
4ea648ae 656static int record__synthesize_workload(struct record *rec, bool tail)
be7b0c9e 657{
9d6aae72
ACM
658 int err;
659 struct thread_map *thread_map;
be7b0c9e 660
4ea648ae
WN
661 if (rec->opts.tail_synthesize != tail)
662 return 0;
663
9d6aae72
ACM
664 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
665 if (thread_map == NULL)
666 return -1;
667
668 err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
be7b0c9e
WN
669 process_synthesized_event,
670 &rec->session->machines.host,
671 rec->opts.sample_address,
672 rec->opts.proc_map_timeout);
9d6aae72
ACM
673 thread_map__put(thread_map);
674 return err;
be7b0c9e
WN
675}
676
4ea648ae 677static int record__synthesize(struct record *rec, bool tail);
3c1cb7e3 678
ecfd7a9c
WN
679static int
680record__switch_output(struct record *rec, bool at_exit)
681{
682 struct perf_data_file *file = &rec->file;
683 int fd, err;
684
685 /* Same Size: "2015122520103046"*/
686 char timestamp[] = "InvalidTimestamp";
687
4ea648ae
WN
688 record__synthesize(rec, true);
689 if (target__none(&rec->opts.target))
690 record__synthesize_workload(rec, true);
691
ecfd7a9c
WN
692 rec->samples = 0;
693 record__finish_output(rec);
694 err = fetch_current_timestamp(timestamp, sizeof(timestamp));
695 if (err) {
696 pr_err("Failed to get current timestamp\n");
697 return -EINVAL;
698 }
699
700 fd = perf_data_file__switch(file, timestamp,
701 rec->session->header.data_offset,
702 at_exit);
703 if (fd >= 0 && !at_exit) {
704 rec->bytes_written = 0;
705 rec->session->header.data_size = 0;
706 }
707
708 if (!quiet)
709 fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
710 file->path, timestamp);
3c1cb7e3
WN
711
712 /* Output tracking events */
be7b0c9e 713 if (!at_exit) {
4ea648ae 714 record__synthesize(rec, false);
3c1cb7e3 715
be7b0c9e
WN
716 /*
717 * In 'perf record --switch-output' without -a,
718 * record__synthesize() in record__switch_output() won't
719 * generate tracking events because there's no thread_map
720 * in evlist. Which causes newly created perf.data doesn't
721 * contain map and comm information.
722 * Create a fake thread_map and directly call
723 * perf_event__synthesize_thread_map() for those events.
724 */
725 if (target__none(&rec->opts.target))
4ea648ae 726 record__synthesize_workload(rec, false);
be7b0c9e 727 }
ecfd7a9c
WN
728 return fd;
729}
730
f33cbe72
ACM
731static volatile int workload_exec_errno;
732
733/*
734 * perf_evlist__prepare_workload will send a SIGUSR1
735 * if the fork fails, since we asked by setting its
736 * want_signal to true.
737 */
45604710
NK
738static void workload_exec_failed_signal(int signo __maybe_unused,
739 siginfo_t *info,
f33cbe72
ACM
740 void *ucontext __maybe_unused)
741{
742 workload_exec_errno = info->si_value.sival_int;
743 done = 1;
f33cbe72
ACM
744 child_finished = 1;
745}
746
2dd6d8a1 747static void snapshot_sig_handler(int sig);
bfacbe3b 748static void alarm_sig_handler(int sig);
2dd6d8a1 749
46bc29b9
AH
750int __weak
751perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
752 struct perf_tool *tool __maybe_unused,
753 perf_event__handler_t process __maybe_unused,
754 struct machine *machine __maybe_unused)
755{
756 return 0;
757}
758
ee667f94
WN
759static const struct perf_event_mmap_page *
760perf_evlist__pick_pc(struct perf_evlist *evlist)
761{
b2cb615d
WN
762 if (evlist) {
763 if (evlist->mmap && evlist->mmap[0].base)
764 return evlist->mmap[0].base;
765 if (evlist->backward_mmap && evlist->backward_mmap[0].base)
766 return evlist->backward_mmap[0].base;
767 }
ee667f94
WN
768 return NULL;
769}
770
c45628b0
WN
771static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
772{
ee667f94
WN
773 const struct perf_event_mmap_page *pc;
774
775 pc = perf_evlist__pick_pc(rec->evlist);
776 if (pc)
777 return pc;
c45628b0
WN
778 return NULL;
779}
780
4ea648ae 781static int record__synthesize(struct record *rec, bool tail)
c45c86eb
WN
782{
783 struct perf_session *session = rec->session;
784 struct machine *machine = &session->machines.host;
785 struct perf_data_file *file = &rec->file;
786 struct record_opts *opts = &rec->opts;
787 struct perf_tool *tool = &rec->tool;
788 int fd = perf_data_file__fd(file);
789 int err = 0;
790
4ea648ae
WN
791 if (rec->opts.tail_synthesize != tail)
792 return 0;
793
c45c86eb
WN
794 if (file->is_pipe) {
795 err = perf_event__synthesize_attrs(tool, session,
796 process_synthesized_event);
797 if (err < 0) {
798 pr_err("Couldn't synthesize attrs.\n");
799 goto out;
800 }
801
802 if (have_tracepoints(&rec->evlist->entries)) {
803 /*
804 * FIXME err <= 0 here actually means that
805 * there were no tracepoints so its not really
806 * an error, just that we don't need to
807 * synthesize anything. We really have to
808 * return this more properly and also
809 * propagate errors that now are calling die()
810 */
811 err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
812 process_synthesized_event);
813 if (err <= 0) {
814 pr_err("Couldn't record tracing data.\n");
815 goto out;
816 }
817 rec->bytes_written += err;
818 }
819 }
820
c45628b0 821 err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
46bc29b9
AH
822 process_synthesized_event, machine);
823 if (err)
824 goto out;
825
c45c86eb
WN
826 if (rec->opts.full_auxtrace) {
827 err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
828 session, process_synthesized_event);
829 if (err)
830 goto out;
831 }
832
833 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
834 machine);
835 WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
836 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
837 "Check /proc/kallsyms permission or run as root.\n");
838
839 err = perf_event__synthesize_modules(tool, process_synthesized_event,
840 machine);
841 WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
842 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
843 "Check /proc/modules permission or run as root.\n");
844
845 if (perf_guest) {
846 machines__process_guests(&session->machines,
847 perf_event__synthesize_guest_os, tool);
848 }
849
850 err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
851 process_synthesized_event, opts->sample_address,
852 opts->proc_map_timeout);
853out:
854 return err;
855}
856
8c6f45a7 857static int __cmd_record(struct record *rec, int argc, const char **argv)
16c8a109 858{
57706abc 859 int err;
45604710 860 int status = 0;
8b412664 861 unsigned long waking = 0;
46be604b 862 const bool forks = argc > 0;
23346f21 863 struct machine *machine;
45694aa7 864 struct perf_tool *tool = &rec->tool;
b4006796 865 struct record_opts *opts = &rec->opts;
f5fc1412 866 struct perf_data_file *file = &rec->file;
d20deb64 867 struct perf_session *session;
6dcf45ef 868 bool disabled = false, draining = false;
42aa276f 869 int fd;
de9ac07b 870
d20deb64 871 rec->progname = argv[0];
33e49ea7 872
45604710 873 atexit(record__sig_exit);
f5970550
PZ
874 signal(SIGCHLD, sig_handler);
875 signal(SIGINT, sig_handler);
804f7ac7 876 signal(SIGTERM, sig_handler);
a074865e 877 signal(SIGSEGV, sigsegv_handler);
c0bdc1c4 878
f3b3614a
HB
879 if (rec->opts.record_namespaces)
880 tool->namespace_events = true;
881
dc0c6127 882 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
2dd6d8a1 883 signal(SIGUSR2, snapshot_sig_handler);
3c1cb7e3
WN
884 if (rec->opts.auxtrace_snapshot_mode)
885 trigger_on(&auxtrace_snapshot_trigger);
dc0c6127 886 if (rec->switch_output.enabled)
3c1cb7e3 887 trigger_on(&switch_output_trigger);
c0bdc1c4 888 } else {
2dd6d8a1 889 signal(SIGUSR2, SIG_IGN);
c0bdc1c4 890 }
f5970550 891
b7b61cbe 892 session = perf_session__new(file, false, tool);
94c744b6 893 if (session == NULL) {
ffa91880 894 pr_err("Perf session creation failed.\n");
a9a70bbc
ACM
895 return -1;
896 }
897
42aa276f 898 fd = perf_data_file__fd(file);
d20deb64
ACM
899 rec->session = session;
900
8c6f45a7 901 record__init_features(rec);
330aa675 902
d4db3f16 903 if (forks) {
3e2be2da 904 err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
f5fc1412 905 argv, file->is_pipe,
735f7e0b 906 workload_exec_failed_signal);
35b9d88e
ACM
907 if (err < 0) {
908 pr_err("Couldn't run the workload!\n");
45604710 909 status = err;
35b9d88e 910 goto out_delete_session;
856e9660 911 }
856e9660
PZ
912 }
913
8c6f45a7 914 if (record__open(rec) != 0) {
8d3eca20 915 err = -1;
45604710 916 goto out_child;
8d3eca20 917 }
de9ac07b 918
8690a2a7
WN
919 err = bpf__apply_obj_config();
920 if (err) {
921 char errbuf[BUFSIZ];
922
923 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
924 pr_err("ERROR: Apply config to BPF failed: %s\n",
925 errbuf);
926 goto out_child;
927 }
928
cca8482c
AH
929 /*
930 * Normally perf_session__new would do this, but it doesn't have the
931 * evlist.
932 */
933 if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
934 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
935 rec->tool.ordered_events = false;
936 }
937
3e2be2da 938 if (!rec->evlist->nr_groups)
a8bb559b
NK
939 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
940
f5fc1412 941 if (file->is_pipe) {
42aa276f 942 err = perf_header__write_pipe(fd);
529870e3 943 if (err < 0)
45604710 944 goto out_child;
563aecb2 945 } else {
42aa276f 946 err = perf_session__write_header(session, rec->evlist, fd, false);
d5eed904 947 if (err < 0)
45604710 948 goto out_child;
56b03f3c
ACM
949 }
950
d3665498 951 if (!rec->no_buildid
e20960c0 952 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
d3665498 953 pr_err("Couldn't generate buildids. "
e20960c0 954 "Use --no-buildid to profile anyway.\n");
8d3eca20 955 err = -1;
45604710 956 goto out_child;
e20960c0
RR
957 }
958
34ba5122 959 machine = &session->machines.host;
743eb868 960
4ea648ae 961 err = record__synthesize(rec, false);
c45c86eb 962 if (err < 0)
45604710 963 goto out_child;
8d3eca20 964
d20deb64 965 if (rec->realtime_prio) {
de9ac07b
PZ
966 struct sched_param param;
967
d20deb64 968 param.sched_priority = rec->realtime_prio;
de9ac07b 969 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
6beba7ad 970 pr_err("Could not set realtime priority.\n");
8d3eca20 971 err = -1;
45604710 972 goto out_child;
de9ac07b
PZ
973 }
974 }
975
774cb499
JO
976 /*
977 * When perf is starting the traced process, all the events
978 * (apart from group members) have enable_on_exec=1 set,
979 * so don't spoil it by prematurely enabling them.
980 */
6619a53e 981 if (!target__none(&opts->target) && !opts->initial_delay)
3e2be2da 982 perf_evlist__enable(rec->evlist);
764e16a3 983
856e9660
PZ
984 /*
985 * Let the child rip
986 */
e803cf97 987 if (forks) {
e5bed564 988 union perf_event *event;
e907caf3 989 pid_t tgid;
e5bed564
NK
990
991 event = malloc(sizeof(event->comm) + machine->id_hdr_size);
992 if (event == NULL) {
993 err = -ENOMEM;
994 goto out_child;
995 }
996
e803cf97
NK
997 /*
998 * Some H/W events are generated before COMM event
999 * which is emitted during exec(), so perf script
1000 * cannot see a correct process name for those events.
1001 * Synthesize COMM event to prevent it.
1002 */
e907caf3
HB
1003 tgid = perf_event__synthesize_comm(tool, event,
1004 rec->evlist->workload.pid,
1005 process_synthesized_event,
1006 machine);
1007 free(event);
1008
1009 if (tgid == -1)
1010 goto out_child;
1011
1012 event = malloc(sizeof(event->namespaces) +
1013 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
1014 machine->id_hdr_size);
1015 if (event == NULL) {
1016 err = -ENOMEM;
1017 goto out_child;
1018 }
1019
1020 /*
1021 * Synthesize NAMESPACES event for the command specified.
1022 */
1023 perf_event__synthesize_namespaces(tool, event,
1024 rec->evlist->workload.pid,
1025 tgid, process_synthesized_event,
1026 machine);
e5bed564 1027 free(event);
e803cf97 1028
3e2be2da 1029 perf_evlist__start_workload(rec->evlist);
e803cf97 1030 }
856e9660 1031
6619a53e 1032 if (opts->initial_delay) {
0693e680 1033 usleep(opts->initial_delay * USEC_PER_MSEC);
6619a53e
AK
1034 perf_evlist__enable(rec->evlist);
1035 }
1036
5f9cf599 1037 trigger_ready(&auxtrace_snapshot_trigger);
3c1cb7e3 1038 trigger_ready(&switch_output_trigger);
a074865e 1039 perf_hooks__invoke_record_start();
649c48a9 1040 for (;;) {
9f065194 1041 unsigned long long hits = rec->samples;
de9ac07b 1042
05737464
WN
1043 /*
1044 * rec->evlist->bkw_mmap_state is possible to be
1045 * BKW_MMAP_EMPTY here: when done == true and
1046 * hits != rec->samples in previous round.
1047 *
1048 * perf_evlist__toggle_bkw_mmap ensure we never
1049 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
1050 */
1051 if (trigger_is_hit(&switch_output_trigger) || done || draining)
1052 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
1053
8c6f45a7 1054 if (record__mmap_read_all(rec) < 0) {
5f9cf599 1055 trigger_error(&auxtrace_snapshot_trigger);
3c1cb7e3 1056 trigger_error(&switch_output_trigger);
8d3eca20 1057 err = -1;
45604710 1058 goto out_child;
8d3eca20 1059 }
de9ac07b 1060
2dd6d8a1
AH
1061 if (auxtrace_record__snapshot_started) {
1062 auxtrace_record__snapshot_started = 0;
5f9cf599 1063 if (!trigger_is_error(&auxtrace_snapshot_trigger))
2dd6d8a1 1064 record__read_auxtrace_snapshot(rec);
5f9cf599 1065 if (trigger_is_error(&auxtrace_snapshot_trigger)) {
2dd6d8a1
AH
1066 pr_err("AUX area tracing snapshot failed\n");
1067 err = -1;
1068 goto out_child;
1069 }
1070 }
1071
3c1cb7e3 1072 if (trigger_is_hit(&switch_output_trigger)) {
05737464
WN
1073 /*
1074 * If switch_output_trigger is hit, the data in
1075 * overwritable ring buffer should have been collected,
1076 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
1077 *
1078 * If SIGUSR2 raise after or during record__mmap_read_all(),
1079 * record__mmap_read_all() didn't collect data from
1080 * overwritable ring buffer. Read again.
1081 */
1082 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
1083 continue;
3c1cb7e3
WN
1084 trigger_ready(&switch_output_trigger);
1085
05737464
WN
1086 /*
1087 * Reenable events in overwrite ring buffer after
1088 * record__mmap_read_all(): we should have collected
1089 * data from it.
1090 */
1091 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
1092
3c1cb7e3
WN
1093 if (!quiet)
1094 fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
1095 waking);
1096 waking = 0;
1097 fd = record__switch_output(rec, false);
1098 if (fd < 0) {
1099 pr_err("Failed to switch to new file\n");
1100 trigger_error(&switch_output_trigger);
1101 err = fd;
1102 goto out_child;
1103 }
bfacbe3b
JO
1104
1105 /* re-arm the alarm */
1106 if (rec->switch_output.time)
1107 alarm(rec->switch_output.time);
3c1cb7e3
WN
1108 }
1109
d20deb64 1110 if (hits == rec->samples) {
6dcf45ef 1111 if (done || draining)
649c48a9 1112 break;
f66a889d 1113 err = perf_evlist__poll(rec->evlist, -1);
a515114f
JO
1114 /*
1115 * Propagate error, only if there's any. Ignore positive
1116 * number of returned events and interrupt error.
1117 */
1118 if (err > 0 || (err < 0 && errno == EINTR))
45604710 1119 err = 0;
8b412664 1120 waking++;
6dcf45ef
ACM
1121
1122 if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
1123 draining = true;
8b412664
PZ
1124 }
1125
774cb499
JO
1126 /*
1127 * When perf is starting the traced process, at the end events
1128 * die with the process and we wait for that. Thus no need to
1129 * disable events in this case.
1130 */
602ad878 1131 if (done && !disabled && !target__none(&opts->target)) {
5f9cf599 1132 trigger_off(&auxtrace_snapshot_trigger);
3e2be2da 1133 perf_evlist__disable(rec->evlist);
2711926a
JO
1134 disabled = true;
1135 }
de9ac07b 1136 }
5f9cf599 1137 trigger_off(&auxtrace_snapshot_trigger);
3c1cb7e3 1138 trigger_off(&switch_output_trigger);
de9ac07b 1139
f33cbe72 1140 if (forks && workload_exec_errno) {
35550da3 1141 char msg[STRERR_BUFSIZE];
c8b5f2c9 1142 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
f33cbe72
ACM
1143 pr_err("Workload failed: %s\n", emsg);
1144 err = -1;
45604710 1145 goto out_child;
f33cbe72
ACM
1146 }
1147
e3d59112 1148 if (!quiet)
45604710 1149 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
b44308f5 1150
4ea648ae
WN
1151 if (target__none(&rec->opts.target))
1152 record__synthesize_workload(rec, true);
1153
45604710
NK
1154out_child:
1155 if (forks) {
1156 int exit_status;
addc2785 1157
45604710
NK
1158 if (!child_finished)
1159 kill(rec->evlist->workload.pid, SIGTERM);
1160
1161 wait(&exit_status);
1162
1163 if (err < 0)
1164 status = err;
1165 else if (WIFEXITED(exit_status))
1166 status = WEXITSTATUS(exit_status);
1167 else if (WIFSIGNALED(exit_status))
1168 signr = WTERMSIG(exit_status);
1169 } else
1170 status = err;
1171
4ea648ae 1172 record__synthesize(rec, true);
e3d59112
NK
1173 /* this will be recalculated during process_buildids() */
1174 rec->samples = 0;
1175
ecfd7a9c
WN
1176 if (!err) {
1177 if (!rec->timestamp_filename) {
1178 record__finish_output(rec);
1179 } else {
1180 fd = record__switch_output(rec, true);
1181 if (fd < 0) {
1182 status = fd;
1183 goto out_delete_session;
1184 }
1185 }
1186 }
39d17dac 1187
a074865e
WN
1188 perf_hooks__invoke_record_end();
1189
e3d59112
NK
1190 if (!err && !quiet) {
1191 char samples[128];
ecfd7a9c
WN
1192 const char *postfix = rec->timestamp_filename ?
1193 ".<timestamp>" : "";
e3d59112 1194
ef149c25 1195 if (rec->samples && !rec->opts.full_auxtrace)
e3d59112
NK
1196 scnprintf(samples, sizeof(samples),
1197 " (%" PRIu64 " samples)", rec->samples);
1198 else
1199 samples[0] = '\0';
1200
ecfd7a9c 1201 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
e3d59112 1202 perf_data_file__size(file) / 1024.0 / 1024.0,
ecfd7a9c 1203 file->path, postfix, samples);
e3d59112
NK
1204 }
1205
39d17dac
ACM
1206out_delete_session:
1207 perf_session__delete(session);
45604710 1208 return status;
de9ac07b 1209}
0e9b20b8 1210
0883e820 1211static void callchain_debug(struct callchain_param *callchain)
09b0fd45 1212{
aad2b21c 1213 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
a601fdff 1214
0883e820 1215 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
26d33022 1216
0883e820 1217 if (callchain->record_mode == CALLCHAIN_DWARF)
09b0fd45 1218 pr_debug("callchain: stack dump size %d\n",
0883e820 1219 callchain->dump_size);
09b0fd45
JO
1220}
1221
0883e820
ACM
1222int record_opts__parse_callchain(struct record_opts *record,
1223 struct callchain_param *callchain,
1224 const char *arg, bool unset)
09b0fd45 1225{
09b0fd45 1226 int ret;
0883e820 1227 callchain->enabled = !unset;
eb853e80 1228
09b0fd45
JO
1229 /* --no-call-graph */
1230 if (unset) {
0883e820 1231 callchain->record_mode = CALLCHAIN_NONE;
09b0fd45
JO
1232 pr_debug("callchain: disabled\n");
1233 return 0;
1234 }
1235
0883e820 1236 ret = parse_callchain_record_opt(arg, callchain);
5c0cf224
JO
1237 if (!ret) {
1238 /* Enable data address sampling for DWARF unwind. */
0883e820 1239 if (callchain->record_mode == CALLCHAIN_DWARF)
5c0cf224 1240 record->sample_address = true;
0883e820 1241 callchain_debug(callchain);
5c0cf224 1242 }
26d33022
JO
1243
1244 return ret;
1245}
1246
0883e820
ACM
1247int record_parse_callchain_opt(const struct option *opt,
1248 const char *arg,
1249 int unset)
1250{
1251 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
1252}
1253
c421e80b 1254int record_callchain_opt(const struct option *opt,
09b0fd45
JO
1255 const char *arg __maybe_unused,
1256 int unset __maybe_unused)
1257{
2ddd5c04 1258 struct callchain_param *callchain = opt->value;
c421e80b 1259
2ddd5c04 1260 callchain->enabled = true;
09b0fd45 1261
2ddd5c04
ACM
1262 if (callchain->record_mode == CALLCHAIN_NONE)
1263 callchain->record_mode = CALLCHAIN_FP;
eb853e80 1264
2ddd5c04 1265 callchain_debug(callchain);
09b0fd45
JO
1266 return 0;
1267}
1268
eb853e80
JO
1269static int perf_record_config(const char *var, const char *value, void *cb)
1270{
7a29c087
NK
1271 struct record *rec = cb;
1272
1273 if (!strcmp(var, "record.build-id")) {
1274 if (!strcmp(value, "cache"))
1275 rec->no_buildid_cache = false;
1276 else if (!strcmp(value, "no-cache"))
1277 rec->no_buildid_cache = true;
1278 else if (!strcmp(value, "skip"))
1279 rec->no_buildid = true;
1280 else
1281 return -1;
1282 return 0;
1283 }
eb853e80 1284 if (!strcmp(var, "record.call-graph"))
5a2e5e85 1285 var = "call-graph.record-mode"; /* fall-through */
eb853e80
JO
1286
1287 return perf_default_config(var, value, cb);
1288}
1289
814c8c38
PZ
1290struct clockid_map {
1291 const char *name;
1292 int clockid;
1293};
1294
1295#define CLOCKID_MAP(n, c) \
1296 { .name = n, .clockid = (c), }
1297
1298#define CLOCKID_END { .name = NULL, }
1299
1300
1301/*
1302 * Add the missing ones, we need to build on many distros...
1303 */
1304#ifndef CLOCK_MONOTONIC_RAW
1305#define CLOCK_MONOTONIC_RAW 4
1306#endif
1307#ifndef CLOCK_BOOTTIME
1308#define CLOCK_BOOTTIME 7
1309#endif
1310#ifndef CLOCK_TAI
1311#define CLOCK_TAI 11
1312#endif
1313
1314static const struct clockid_map clockids[] = {
1315 /* available for all events, NMI safe */
1316 CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
1317 CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),
1318
1319 /* available for some events */
1320 CLOCKID_MAP("realtime", CLOCK_REALTIME),
1321 CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
1322 CLOCKID_MAP("tai", CLOCK_TAI),
1323
1324 /* available for the lazy */
1325 CLOCKID_MAP("mono", CLOCK_MONOTONIC),
1326 CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
1327 CLOCKID_MAP("real", CLOCK_REALTIME),
1328 CLOCKID_MAP("boot", CLOCK_BOOTTIME),
1329
1330 CLOCKID_END,
1331};
1332
1333static int parse_clockid(const struct option *opt, const char *str, int unset)
1334{
1335 struct record_opts *opts = (struct record_opts *)opt->value;
1336 const struct clockid_map *cm;
1337 const char *ostr = str;
1338
1339 if (unset) {
1340 opts->use_clockid = 0;
1341 return 0;
1342 }
1343
1344 /* no arg passed */
1345 if (!str)
1346 return 0;
1347
1348 /* no setting it twice */
1349 if (opts->use_clockid)
1350 return -1;
1351
1352 opts->use_clockid = true;
1353
1354 /* if its a number, we're done */
1355 if (sscanf(str, "%d", &opts->clockid) == 1)
1356 return 0;
1357
1358 /* allow a "CLOCK_" prefix to the name */
1359 if (!strncasecmp(str, "CLOCK_", 6))
1360 str += 6;
1361
1362 for (cm = clockids; cm->name; cm++) {
1363 if (!strcasecmp(str, cm->name)) {
1364 opts->clockid = cm->clockid;
1365 return 0;
1366 }
1367 }
1368
1369 opts->use_clockid = false;
1370 ui__warning("unknown clockid %s, check man page\n", ostr);
1371 return -1;
1372}
1373
e9db1310
AH
1374static int record__parse_mmap_pages(const struct option *opt,
1375 const char *str,
1376 int unset __maybe_unused)
1377{
1378 struct record_opts *opts = opt->value;
1379 char *s, *p;
1380 unsigned int mmap_pages;
1381 int ret;
1382
1383 if (!str)
1384 return -EINVAL;
1385
1386 s = strdup(str);
1387 if (!s)
1388 return -ENOMEM;
1389
1390 p = strchr(s, ',');
1391 if (p)
1392 *p = '\0';
1393
1394 if (*s) {
1395 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1396 if (ret)
1397 goto out_free;
1398 opts->mmap_pages = mmap_pages;
1399 }
1400
1401 if (!p) {
1402 ret = 0;
1403 goto out_free;
1404 }
1405
1406 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1407 if (ret)
1408 goto out_free;
1409
1410 opts->auxtrace_mmap_pages = mmap_pages;
1411
1412out_free:
1413 free(s);
1414 return ret;
1415}
1416
0c582449
JO
1417static void switch_output_size_warn(struct record *rec)
1418{
1419 u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
1420 struct switch_output *s = &rec->switch_output;
1421
1422 wakeup_size /= 2;
1423
1424 if (s->size < wakeup_size) {
1425 char buf[100];
1426
1427 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
1428 pr_warning("WARNING: switch-output data size lower than "
1429 "wakeup kernel buffer size (%s) "
1430 "expect bigger perf.data sizes\n", buf);
1431 }
1432}
1433
cb4e1ebb
JO
1434static int switch_output_setup(struct record *rec)
1435{
1436 struct switch_output *s = &rec->switch_output;
dc0c6127
JO
1437 static struct parse_tag tags_size[] = {
1438 { .tag = 'B', .mult = 1 },
1439 { .tag = 'K', .mult = 1 << 10 },
1440 { .tag = 'M', .mult = 1 << 20 },
1441 { .tag = 'G', .mult = 1 << 30 },
1442 { .tag = 0 },
1443 };
bfacbe3b
JO
1444 static struct parse_tag tags_time[] = {
1445 { .tag = 's', .mult = 1 },
1446 { .tag = 'm', .mult = 60 },
1447 { .tag = 'h', .mult = 60*60 },
1448 { .tag = 'd', .mult = 60*60*24 },
1449 { .tag = 0 },
1450 };
dc0c6127 1451 unsigned long val;
cb4e1ebb
JO
1452
1453 if (!s->set)
1454 return 0;
1455
1456 if (!strcmp(s->str, "signal")) {
1457 s->signal = true;
1458 pr_debug("switch-output with SIGUSR2 signal\n");
dc0c6127
JO
1459 goto enabled;
1460 }
1461
1462 val = parse_tag_value(s->str, tags_size);
1463 if (val != (unsigned long) -1) {
1464 s->size = val;
1465 pr_debug("switch-output with %s size threshold\n", s->str);
1466 goto enabled;
cb4e1ebb
JO
1467 }
1468
bfacbe3b
JO
1469 val = parse_tag_value(s->str, tags_time);
1470 if (val != (unsigned long) -1) {
1471 s->time = val;
1472 pr_debug("switch-output with %s time threshold (%lu seconds)\n",
1473 s->str, s->time);
1474 goto enabled;
1475 }
1476
cb4e1ebb 1477 return -1;
dc0c6127
JO
1478
1479enabled:
1480 rec->timestamp_filename = true;
1481 s->enabled = true;
0c582449
JO
1482
1483 if (s->size && !rec->opts.no_buffering)
1484 switch_output_size_warn(rec);
1485
dc0c6127 1486 return 0;
cb4e1ebb
JO
1487}
1488
e5b2c207 1489static const char * const __record_usage[] = {
9e096753
MG
1490 "perf record [<options>] [<command>]",
1491 "perf record [<options>] -- <command> [<options>]",
0e9b20b8
IM
1492 NULL
1493};
e5b2c207 1494const char * const *record_usage = __record_usage;
0e9b20b8 1495
d20deb64 1496/*
8c6f45a7
ACM
1497 * XXX Ideally would be local to cmd_record() and passed to a record__new
1498 * because we need to have access to it in record__exit, that is called
d20deb64
ACM
1499 * after cmd_record() exits, but since record_options need to be accessible to
1500 * builtin-script, leave it here.
1501 *
1502 * At least we don't ouch it in all the other functions here directly.
1503 *
1504 * Just say no to tons of global variables, sigh.
1505 */
8c6f45a7 1506static struct record record = {
d20deb64 1507 .opts = {
8affc2b8 1508 .sample_time = true,
d20deb64
ACM
1509 .mmap_pages = UINT_MAX,
1510 .user_freq = UINT_MAX,
1511 .user_interval = ULLONG_MAX,
447a6013 1512 .freq = 4000,
d1cb9fce
NK
1513 .target = {
1514 .uses_mmap = true,
3aa5939d 1515 .default_per_cpu = true,
d1cb9fce 1516 },
9d9cad76 1517 .proc_map_timeout = 500,
d20deb64 1518 },
e3d59112
NK
1519 .tool = {
1520 .sample = process_sample_event,
1521 .fork = perf_event__process_fork,
cca8482c 1522 .exit = perf_event__process_exit,
e3d59112 1523 .comm = perf_event__process_comm,
f3b3614a 1524 .namespaces = perf_event__process_namespaces,
e3d59112
NK
1525 .mmap = perf_event__process_mmap,
1526 .mmap2 = perf_event__process_mmap2,
cca8482c 1527 .ordered_events = true,
e3d59112 1528 },
d20deb64 1529};
7865e817 1530
76a26549
NK
1531const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
1532 "\n\t\t\t\tDefault: fp";
61eaa3be 1533
0aab2136
WN
1534static bool dry_run;
1535
d20deb64
ACM
1536/*
1537 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
1538 * with it and switch to use the library functions in perf_evlist that came
b4006796 1539 * from builtin-record.c, i.e. use record_opts,
d20deb64
ACM
1540 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
1541 * using pipes, etc.
1542 */
efd21307 1543static struct option __record_options[] = {
d20deb64 1544 OPT_CALLBACK('e', "event", &record.evlist, "event",
86847b62 1545 "event selector. use 'perf list' to list available events",
f120f9d5 1546 parse_events_option),
d20deb64 1547 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
c171b552 1548 "event filter", parse_filter),
4ba1faa1
WN
1549 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
1550 NULL, "don't record events from perf itself",
1551 exclude_perf),
bea03405 1552 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
d6d901c2 1553 "record events on existing process id"),
bea03405 1554 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
d6d901c2 1555 "record events on existing thread id"),
d20deb64 1556 OPT_INTEGER('r', "realtime", &record.realtime_prio,
0e9b20b8 1557 "collect data with this RT SCHED_FIFO priority"),
509051ea 1558 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
acac03fa 1559 "collect data without buffering"),
d20deb64 1560 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
daac07b2 1561 "collect raw sample records from all opened counters"),
bea03405 1562 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
0e9b20b8 1563 "system-wide collection from all CPUs"),
bea03405 1564 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
c45c6ea2 1565 "list of cpus to monitor"),
d20deb64 1566 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
f5fc1412 1567 OPT_STRING('o', "output", &record.file.path, "file",
abaff32a 1568 "output file name"),
69e7e5b0
AH
1569 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
1570 &record.opts.no_inherit_set,
1571 "child tasks do not inherit counters"),
4ea648ae
WN
1572 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
1573 "synthesize non-sample events at the end of output"),
626a6b78 1574 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
d20deb64 1575 OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
e9db1310
AH
1576 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
1577 "number of mmap data pages and AUX area tracing mmap pages",
1578 record__parse_mmap_pages),
d20deb64 1579 OPT_BOOLEAN(0, "group", &record.opts.group,
43bece79 1580 "put the counters into a counter group"),
2ddd5c04 1581 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
09b0fd45
JO
1582 NULL, "enables call-graph recording" ,
1583 &record_callchain_opt),
1584 OPT_CALLBACK(0, "call-graph", &record.opts,
76a26549 1585 "record_mode[,record_size]", record_callchain_help,
09b0fd45 1586 &record_parse_callchain_opt),
c0555642 1587 OPT_INCR('v', "verbose", &verbose,
3da297a6 1588 "be more verbose (show counter open errors, etc)"),
b44308f5 1589 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
d20deb64 1590 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
649c48a9 1591 "per thread counts"),
56100321 1592 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
b6f35ed7 1593 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
3abebc55
AH
1594 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
1595 &record.opts.sample_time_set,
1596 "Record the sample timestamps"),
56100321 1597 OPT_BOOLEAN('P', "period", &record.opts.period, "Record the sample period"),
d20deb64 1598 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
649c48a9 1599 "don't sample"),
d2db9a98
WN
1600 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
1601 &record.no_buildid_cache_set,
1602 "do not update the buildid cache"),
1603 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
1604 &record.no_buildid_set,
1605 "do not collect buildids in perf.data"),
d20deb64 1606 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
023695d9
SE
1607 "monitor event in cgroup name only",
1608 parse_cgroups),
a6205a35 1609 OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
6619a53e 1610 "ms to wait before starting measurement after program start"),
bea03405
NK
1611 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
1612 "user to profile"),
a5aabdac
SE
1613
1614 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
1615 "branch any", "sample any taken branches",
1616 parse_branch_stack),
1617
1618 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
1619 "branch filter mask", "branch stack filter modes",
bdfebd84 1620 parse_branch_stack),
05484298
AK
1621 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
1622 "sample by weight (on special events only)"),
475eeab9
AK
1623 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
1624 "sample transaction flags (special events only)"),
3aa5939d
AH
1625 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
1626 "use per-thread mmaps"),
bcc84ec6
SE
1627 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
1628 "sample selected machine registers on interrupt,"
1629 " use -I ? to list register names", parse_regs),
85c273d2
AK
1630 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
1631 "Record running/enabled time of read (:S) events"),
814c8c38
PZ
1632 OPT_CALLBACK('k', "clockid", &record.opts,
1633 "clockid", "clockid to use for events, see clock_gettime()",
1634 parse_clockid),
2dd6d8a1
AH
1635 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
1636 "opts", "AUX area tracing Snapshot Mode", ""),
9d9cad76
KL
1637 OPT_UINTEGER(0, "proc-map-timeout", &record.opts.proc_map_timeout,
1638 "per thread proc mmap processing timeout in ms"),
f3b3614a
HB
1639 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
1640 "Record namespaces events"),
b757bb09
AH
1641 OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
1642 "Record context switch events"),
85723885
JO
1643 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
1644 "Configure all used events to run in kernel space.",
1645 PARSE_OPT_EXCLUSIVE),
1646 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
1647 "Configure all used events to run in user space.",
1648 PARSE_OPT_EXCLUSIVE),
71dc2326
WN
1649 OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
1650 "clang binary to use for compiling BPF scriptlets"),
1651 OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
1652 "options passed to clang when compiling BPF scriptlets"),
7efe0e03
HK
1653 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
1654 "file", "vmlinux pathname"),
6156681b
NK
1655 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
1656 "Record build-id of all DSOs regardless of hits"),
ecfd7a9c
WN
1657 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
1658 "append timestamp to output filename"),
cb4e1ebb 1659 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
bfacbe3b
JO
1660 &record.switch_output.set, "signal,size,time",
1661 "Switch output when receive SIGUSR2 or cross size,time threshold",
dc0c6127 1662 "signal"),
0aab2136
WN
1663 OPT_BOOLEAN(0, "dry-run", &dry_run,
1664 "Parse options then exit"),
0e9b20b8
IM
1665 OPT_END()
1666};
1667
e5b2c207
NK
1668struct option *record_options = __record_options;
1669
b0ad8ea6 1670int cmd_record(int argc, const char **argv)
0e9b20b8 1671{
ef149c25 1672 int err;
8c6f45a7 1673 struct record *rec = &record;
16ad2ffb 1674 char errbuf[BUFSIZ];
0e9b20b8 1675
48e1cab1
WN
1676#ifndef HAVE_LIBBPF_SUPPORT
1677# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
1678 set_nobuild('\0', "clang-path", true);
1679 set_nobuild('\0', "clang-opt", true);
1680# undef set_nobuild
7efe0e03
HK
1681#endif
1682
1683#ifndef HAVE_BPF_PROLOGUE
1684# if !defined (HAVE_DWARF_SUPPORT)
1685# define REASON "NO_DWARF=1"
1686# elif !defined (HAVE_LIBBPF_SUPPORT)
1687# define REASON "NO_LIBBPF=1"
1688# else
1689# define REASON "this architecture doesn't support BPF prologue"
1690# endif
1691# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
1692 set_nobuild('\0', "vmlinux", true);
1693# undef set_nobuild
1694# undef REASON
48e1cab1
WN
1695#endif
1696
3e2be2da
ACM
1697 rec->evlist = perf_evlist__new();
1698 if (rec->evlist == NULL)
361c99a6
ACM
1699 return -ENOMEM;
1700
ecc4c561
ACM
1701 err = perf_config(perf_record_config, rec);
1702 if (err)
1703 return err;
eb853e80 1704
bca647aa 1705 argc = parse_options(argc, argv, record_options, record_usage,
655000e7 1706 PARSE_OPT_STOP_AT_NON_OPTION);
68ba3235
NK
1707 if (quiet)
1708 perf_quiet_option();
483635a9
JO
1709
1710 /* Make system wide (-a) the default target. */
602ad878 1711 if (!argc && target__none(&rec->opts.target))
483635a9 1712 rec->opts.target.system_wide = true;
0e9b20b8 1713
bea03405 1714 if (nr_cgroups && !rec->opts.target.system_wide) {
c7118369
NK
1715 usage_with_options_msg(record_usage, record_options,
1716 "cgroup monitoring only available in system-wide mode");
1717
023695d9 1718 }
b757bb09
AH
1719 if (rec->opts.record_switch_events &&
1720 !perf_can_record_switch_events()) {
c7118369
NK
1721 ui__error("kernel does not support recording context switch events\n");
1722 parse_options_usage(record_usage, record_options, "switch-events", 0);
1723 return -EINVAL;
b757bb09 1724 }
023695d9 1725
cb4e1ebb
JO
1726 if (switch_output_setup(rec)) {
1727 parse_options_usage(record_usage, record_options, "switch-output", 0);
1728 return -EINVAL;
1729 }
1730
bfacbe3b
JO
1731 if (rec->switch_output.time) {
1732 signal(SIGALRM, alarm_sig_handler);
1733 alarm(rec->switch_output.time);
1734 }
1735
ef149c25
AH
1736 if (!rec->itr) {
1737 rec->itr = auxtrace_record__init(rec->evlist, &err);
1738 if (err)
5c01ad60 1739 goto out;
ef149c25
AH
1740 }
1741
2dd6d8a1
AH
1742 err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
1743 rec->opts.auxtrace_snapshot_opts);
1744 if (err)
5c01ad60 1745 goto out;
2dd6d8a1 1746
1b36c03e
AH
1747 /*
1748 * Allow aliases to facilitate the lookup of symbols for address
1749 * filters. Refer to auxtrace_parse_filters().
1750 */
1751 symbol_conf.allow_aliases = true;
1752
1753 symbol__init(NULL);
1754
1755 err = auxtrace_parse_filters(rec->evlist);
1756 if (err)
1757 goto out;
1758
0aab2136 1759 if (dry_run)
5c01ad60 1760 goto out;
0aab2136 1761
d7888573
WN
1762 err = bpf__setup_stdout(rec->evlist);
1763 if (err) {
1764 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
1765 pr_err("ERROR: Setup BPF stdout failed: %s\n",
1766 errbuf);
5c01ad60 1767 goto out;
d7888573
WN
1768 }
1769
ef149c25
AH
1770 err = -ENOMEM;
1771
ec80fde7 1772 if (symbol_conf.kptr_restrict)
646aaea6
ACM
1773 pr_warning(
1774"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
1775"check /proc/sys/kernel/kptr_restrict.\n\n"
1776"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
1777"file is not found in the buildid cache or in the vmlinux path.\n\n"
1778"Samples in kernel modules won't be resolved at all.\n\n"
1779"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
1780"even with a suitable vmlinux or kallsyms file.\n\n");
ec80fde7 1781
0c1d46a8 1782 if (rec->no_buildid_cache || rec->no_buildid) {
a1ac1d3c 1783 disable_buildid_cache();
dc0c6127 1784 } else if (rec->switch_output.enabled) {
0c1d46a8
WN
1785 /*
1786 * In 'perf record --switch-output', disable buildid
1787 * generation by default to reduce data file switching
1788 * overhead. Still generate buildid if they are required
1789 * explicitly using
1790 *
60437ac0 1791 * perf record --switch-output --no-no-buildid \
0c1d46a8
WN
1792 * --no-no-buildid-cache
1793 *
1794 * Following code equals to:
1795 *
1796 * if ((rec->no_buildid || !rec->no_buildid_set) &&
1797 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
1798 * disable_buildid_cache();
1799 */
1800 bool disable = true;
1801
1802 if (rec->no_buildid_set && !rec->no_buildid)
1803 disable = false;
1804 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
1805 disable = false;
1806 if (disable) {
1807 rec->no_buildid = true;
1808 rec->no_buildid_cache = true;
1809 disable_buildid_cache();
1810 }
1811 }
655000e7 1812
4ea648ae
WN
1813 if (record.opts.overwrite)
1814 record.opts.tail_synthesize = true;
1815
3e2be2da
ACM
1816 if (rec->evlist->nr_entries == 0 &&
1817 perf_evlist__add_default(rec->evlist) < 0) {
69aad6f1 1818 pr_err("Not enough memory for event selector list\n");
394c01ed 1819 goto out;
bbd36e5e 1820 }
0e9b20b8 1821
69e7e5b0
AH
1822 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
1823 rec->opts.no_inherit = true;
1824
602ad878 1825 err = target__validate(&rec->opts.target);
16ad2ffb 1826 if (err) {
602ad878 1827 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
16ad2ffb
NK
1828 ui__warning("%s", errbuf);
1829 }
1830
602ad878 1831 err = target__parse_uid(&rec->opts.target);
16ad2ffb
NK
1832 if (err) {
1833 int saved_errno = errno;
4bd0f2d2 1834
602ad878 1835 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
3780f488 1836 ui__error("%s", errbuf);
16ad2ffb
NK
1837
1838 err = -saved_errno;
394c01ed 1839 goto out;
16ad2ffb 1840 }
0d37aa34 1841
23dc4f15
JO
1842 /* Enable ignoring missing threads when -u option is defined. */
1843 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX;
1844
16ad2ffb 1845 err = -ENOMEM;
3e2be2da 1846 if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
dd7927f4 1847 usage_with_options(record_usage, record_options);
69aad6f1 1848
ef149c25
AH
1849 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
1850 if (err)
394c01ed 1851 goto out;
ef149c25 1852
6156681b
NK
1853 /*
1854 * We take all buildids when the file contains
1855 * AUX area tracing data because we do not decode the
1856 * trace because it would take too long.
1857 */
1858 if (rec->opts.full_auxtrace)
1859 rec->buildid_all = true;
1860
b4006796 1861 if (record_opts__config(&rec->opts)) {
39d17dac 1862 err = -EINVAL;
394c01ed 1863 goto out;
7e4ff9e3
MG
1864 }
1865
d20deb64 1866 err = __cmd_record(&record, argc, argv);
394c01ed 1867out:
45604710 1868 perf_evlist__delete(rec->evlist);
d65a458b 1869 symbol__exit();
ef149c25 1870 auxtrace_record__free(rec->itr);
39d17dac 1871 return err;
0e9b20b8 1872}
2dd6d8a1
AH
1873
1874static void snapshot_sig_handler(int sig __maybe_unused)
1875{
dc0c6127
JO
1876 struct record *rec = &record;
1877
5f9cf599
WN
1878 if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
1879 trigger_hit(&auxtrace_snapshot_trigger);
1880 auxtrace_record__snapshot_started = 1;
1881 if (auxtrace_record__snapshot_start(record.itr))
1882 trigger_error(&auxtrace_snapshot_trigger);
1883 }
3c1cb7e3 1884
dc0c6127 1885 if (switch_output_signal(rec))
3c1cb7e3 1886 trigger_hit(&switch_output_trigger);
2dd6d8a1 1887}
bfacbe3b
JO
1888
1889static void alarm_sig_handler(int sig __maybe_unused)
1890{
1891 struct record *rec = &record;
1892
1893 if (switch_output_time(rec))
1894 trigger_hit(&switch_output_trigger);
1895}