// SPDX-License-Identifier: GPL-2.0-only
/*
 * builtin-ftrace.c
 *
 * Copyright (c) 2013 LG Electronics, Namhyung Kim <namhyung@kernel.org>
 * Copyright (c) 2020 Changbin Du <changbin.du@gmail.com>, significant enhancement.
 */
8 | ||
9 | #include "builtin.h" | |
10 | ||
11 | #include <errno.h> | |
12 | #include <unistd.h> | |
13 | #include <signal.h> | |
14 | #include <stdlib.h> | |
15 | #include <fcntl.h> | |
16 | #include <poll.h> | |
17 | #include <linux/capability.h> | |
18 | #include <linux/string.h> | |
19 | ||
20 | #include "debug.h" | |
21 | #include <subcmd/pager.h> | |
22 | #include <subcmd/parse-options.h> | |
23 | #include <api/fs/tracing_path.h> | |
24 | #include "evlist.h" | |
25 | #include "target.h" | |
26 | #include "cpumap.h" | |
27 | #include "thread_map.h" | |
28 | #include "strfilter.h" | |
29 | #include "util/cap.h" | |
30 | #include "util/config.h" | |
31 | #include "util/units.h" | |
32 | #include "util/parse-sublevel-options.h" | |
33 | ||
34 | #define DEFAULT_TRACER "function_graph" | |
35 | ||
/* All state for one 'perf ftrace' invocation. */
struct perf_ftrace {
	struct evlist		*evlist;		/* provides thread/cpu maps for the target */
	struct target		target;			/* pid/tid/cpu/system-wide selection */
	const char		*tracer;		/* written to tracefs current_tracer */
	struct list_head	filters;		/* -T: entries for set_ftrace_filter */
	struct list_head	notrace;		/* -N: entries for set_ftrace_notrace */
	struct list_head	graph_funcs;		/* -G: entries for set_graph_function */
	struct list_head	nograph_funcs;		/* -g: entries for set_graph_notrace */
	int			graph_depth;		/* written to max_graph_depth (0 = unset) */
	unsigned long		percpu_buffer_size;	/* bytes; written to buffer_size_kb / 1024 */
	bool			inherit;		/* enable the function-fork option */
	int			func_stack_trace;	/* enable the func_stack_trace option */
	int			func_irq_info;		/* enable the irq-info option */
	int			graph_nosleep_time;	/* clear the sleep-time option */
	int			graph_noirqs;		/* clear the funcgraph-irqs option */
	int			graph_verbose;		/* funcgraph-proc/abstime + latency-format */
	int			graph_thresh;		/* written to tracing_thresh (0 = unset) */
	unsigned int		initial_delay;		/* ms to wait before enabling tracing_on */
};
55 | ||
/* One function-name pattern, linked into one of the filter lists above. */
struct filter_entry {
	struct list_head list;
	char name[];	/* flexible array member; sized at allocation time */
};
60 | ||
static volatile int workload_exec_errno;
/*
 * Set from asynchronous signal handlers and polled by the main trace
 * loop; volatile sig_atomic_t is the only type the C standard guarantees
 * can be safely written from a signal handler (C11 5.1.2.3).
 */
static volatile sig_atomic_t done;
63 | ||
/* Generic termination handler: just ask the main loop to stop. */
static void sig_handler(int sig __maybe_unused)
{
	done = true;
}
68 | ||
69 | /* | |
70 | * evlist__prepare_workload will send a SIGUSR1 if the fork fails, since | |
71 | * we asked by setting its exec_error to the function below, | |
72 | * ftrace__workload_exec_failed_signal. | |
73 | * | |
74 | * XXX We need to handle this more appropriately, emitting an error, etc. | |
75 | */ | |
76 | static void ftrace__workload_exec_failed_signal(int signo __maybe_unused, | |
77 | siginfo_t *info __maybe_unused, | |
78 | void *ucontext __maybe_unused) | |
79 | { | |
80 | workload_exec_errno = info->si_value.sival_int; | |
81 | done = true; | |
82 | } | |
83 | ||
/*
 * Write @val (plus a trailing newline) to tracefs file @name,
 * either truncating or appending depending on @append.
 * Returns 0 on success, -1 on any failure.
 */
static int __write_tracing_file(const char *name, const char *val, bool append)
{
	char errbuf[512];
	char *path, *buf;
	ssize_t len = strlen(val);
	int flags = O_WRONLY | (append ? O_APPEND : O_TRUNC);
	int fd, ret = -1;

	path = get_tracing_file(name);
	if (!path) {
		pr_debug("cannot get tracing file: %s\n", name);
		return -1;
	}

	fd = open(path, flags);
	if (fd < 0) {
		pr_debug("cannot open tracing file: %s: %s\n",
			 name, str_error_r(errno, errbuf, sizeof(errbuf)));
		goto out;
	}

	/*
	 * Copy the original value and append a '\n'. Without this,
	 * the kernel can hide possible errors.
	 */
	buf = strdup(val);
	if (!buf)
		goto out_close;
	buf[len] = '\n';	/* overwrites strdup()'s NUL terminator */

	if (write(fd, buf, len + 1) == len + 1)
		ret = 0;
	else
		pr_debug("write '%s' to tracing/%s failed: %s\n",
			 val, name, str_error_r(errno, errbuf, sizeof(errbuf)));

	free(buf);
out_close:
	close(fd);
out:
	put_tracing_file(path);
	return ret;
}
133 | ||
134 | static int write_tracing_file(const char *name, const char *val) | |
135 | { | |
136 | return __write_tracing_file(name, val, false); | |
137 | } | |
138 | ||
139 | static int append_tracing_file(const char *name, const char *val) | |
140 | { | |
141 | return __write_tracing_file(name, val, true); | |
142 | } | |
143 | ||
/*
 * Copy the whole contents of tracefs file @name to stdout.
 * Returns 0 on success, -1 on open/read/write failure.
 */
static int read_tracing_file_to_stdout(const char *name)
{
	char buf[4096];
	char *path;
	int fd, ret = -1;

	path = get_tracing_file(name);
	if (!path) {
		pr_debug("cannot get tracing file: %s\n", name);
		return -1;
	}

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		pr_debug("cannot open tracing file: %s: %s\n",
			 name, str_error_r(errno, buf, sizeof(buf)));
		goto out;
	}

	/* pump the file to stdout until EOF or an error */
	for (;;) {
		int n = read(fd, buf, sizeof(buf));

		if (n == 0) {
			ret = 0;	/* clean EOF */
			break;
		}
		if (n < 0 || fwrite(buf, n, 1, stdout) != 1)
			break;
	}

	close(fd);
out:
	put_tracing_file(path);
	return ret;
}
183 | ||
/*
 * Read tracefs file @name line by line, invoking @cb with each line
 * (newline included, as produced by getline()) and @cb_arg.
 *
 * Returns 0 on success, -1 if the file cannot be resolved or opened.
 */
static int read_tracing_file_by_line(const char *name,
				     void (*cb)(char *str, void *arg),
				     void *cb_arg)
{
	char *line = NULL;
	size_t len = 0;
	char *file;
	FILE *fp;

	file = get_tracing_file(name);
	if (!file) {
		pr_debug("cannot get tracing file: %s\n", name);
		return -1;
	}

	fp = fopen(file, "r");
	if (fp == NULL) {
		pr_debug("cannot open tracing file: %s\n", name);
		put_tracing_file(file);
		return -1;
	}

	while (getline(&line, &len, fp) != -1)
		cb(line, cb_arg);

	/* free(NULL) is a no-op, so the getline() buffer needs no guard. */
	free(line);

	fclose(fp);
	put_tracing_file(file);
	return 0;
}
217 | ||
/* Format @value as decimal text and overwrite tracefs file @name with it. */
static int write_tracing_file_int(const char *name, int value)
{
	char buf[16];

	snprintf(buf, sizeof(buf), "%d", value);
	return write_tracing_file(name, buf) < 0 ? -1 : 0;
}
228 | ||
229 | static int write_tracing_option_file(const char *name, const char *val) | |
230 | { | |
231 | char *file; | |
232 | int ret; | |
233 | ||
234 | if (asprintf(&file, "options/%s", name) < 0) | |
235 | return -1; | |
236 | ||
237 | ret = __write_tracing_file(file, val, false); | |
238 | free(file); | |
239 | return ret; | |
240 | } | |
241 | ||
242 | static int reset_tracing_cpu(void); | |
243 | static void reset_tracing_filters(void); | |
244 | ||
245 | static void reset_tracing_options(struct perf_ftrace *ftrace __maybe_unused) | |
246 | { | |
247 | write_tracing_option_file("function-fork", "0"); | |
248 | write_tracing_option_file("func_stack_trace", "0"); | |
249 | write_tracing_option_file("sleep-time", "1"); | |
250 | write_tracing_option_file("funcgraph-irqs", "1"); | |
251 | write_tracing_option_file("funcgraph-proc", "0"); | |
252 | write_tracing_option_file("funcgraph-abstime", "0"); | |
253 | write_tracing_option_file("latency-format", "0"); | |
254 | write_tracing_option_file("irq-info", "0"); | |
255 | } | |
256 | ||
257 | static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused) | |
258 | { | |
259 | if (write_tracing_file("tracing_on", "0") < 0) | |
260 | return -1; | |
261 | ||
262 | if (write_tracing_file("current_tracer", "nop") < 0) | |
263 | return -1; | |
264 | ||
265 | if (write_tracing_file("set_ftrace_pid", " ") < 0) | |
266 | return -1; | |
267 | ||
268 | if (reset_tracing_cpu() < 0) | |
269 | return -1; | |
270 | ||
271 | if (write_tracing_file("max_graph_depth", "0") < 0) | |
272 | return -1; | |
273 | ||
274 | if (write_tracing_file("tracing_thresh", "0") < 0) | |
275 | return -1; | |
276 | ||
277 | reset_tracing_filters(); | |
278 | reset_tracing_options(ftrace); | |
279 | return 0; | |
280 | } | |
281 | ||
282 | static int set_tracing_pid(struct perf_ftrace *ftrace) | |
283 | { | |
284 | int i; | |
285 | char buf[16]; | |
286 | ||
287 | if (target__has_cpu(&ftrace->target)) | |
288 | return 0; | |
289 | ||
290 | for (i = 0; i < perf_thread_map__nr(ftrace->evlist->core.threads); i++) { | |
291 | scnprintf(buf, sizeof(buf), "%d", | |
292 | perf_thread_map__pid(ftrace->evlist->core.threads, i)); | |
293 | if (append_tracing_file("set_ftrace_pid", buf) < 0) | |
294 | return -1; | |
295 | } | |
296 | return 0; | |
297 | } | |
298 | ||
299 | static int set_tracing_cpumask(struct perf_cpu_map *cpumap) | |
300 | { | |
301 | char *cpumask; | |
302 | size_t mask_size; | |
303 | int ret; | |
304 | int last_cpu; | |
305 | ||
306 | last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1); | |
307 | mask_size = last_cpu / 4 + 2; /* one more byte for EOS */ | |
308 | mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */ | |
309 | ||
310 | cpumask = malloc(mask_size); | |
311 | if (cpumask == NULL) { | |
312 | pr_debug("failed to allocate cpu mask\n"); | |
313 | return -1; | |
314 | } | |
315 | ||
316 | cpu_map__snprint_mask(cpumap, cpumask, mask_size); | |
317 | ||
318 | ret = write_tracing_file("tracing_cpumask", cpumask); | |
319 | ||
320 | free(cpumask); | |
321 | return ret; | |
322 | } | |
323 | ||
324 | static int set_tracing_cpu(struct perf_ftrace *ftrace) | |
325 | { | |
326 | struct perf_cpu_map *cpumap = ftrace->evlist->core.cpus; | |
327 | ||
328 | if (!target__has_cpu(&ftrace->target)) | |
329 | return 0; | |
330 | ||
331 | return set_tracing_cpumask(cpumap); | |
332 | } | |
333 | ||
334 | static int set_tracing_func_stack_trace(struct perf_ftrace *ftrace) | |
335 | { | |
336 | if (!ftrace->func_stack_trace) | |
337 | return 0; | |
338 | ||
339 | if (write_tracing_option_file("func_stack_trace", "1") < 0) | |
340 | return -1; | |
341 | ||
342 | return 0; | |
343 | } | |
344 | ||
345 | static int set_tracing_func_irqinfo(struct perf_ftrace *ftrace) | |
346 | { | |
347 | if (!ftrace->func_irq_info) | |
348 | return 0; | |
349 | ||
350 | if (write_tracing_option_file("irq-info", "1") < 0) | |
351 | return -1; | |
352 | ||
353 | return 0; | |
354 | } | |
355 | ||
/*
 * Restore tracing_cpumask so tracing covers all CPUs again.
 * Returns 0 on success, -1 on failure.
 */
static int reset_tracing_cpu(void)
{
	struct perf_cpu_map *cpumap = perf_cpu_map__new(NULL);
	int ret;

	/*
	 * perf_cpu_map__new() allocates and can fail; set_tracing_cpumask()
	 * dereferences the map unconditionally, so bail out early on NULL.
	 */
	if (cpumap == NULL)
		return -1;

	ret = set_tracing_cpumask(cpumap);
	perf_cpu_map__put(cpumap);
	return ret;
}
365 | ||
366 | static int __set_tracing_filter(const char *filter_file, struct list_head *funcs) | |
367 | { | |
368 | struct filter_entry *pos; | |
369 | ||
370 | list_for_each_entry(pos, funcs, list) { | |
371 | if (append_tracing_file(filter_file, pos->name) < 0) | |
372 | return -1; | |
373 | } | |
374 | ||
375 | return 0; | |
376 | } | |
377 | ||
378 | static int set_tracing_filters(struct perf_ftrace *ftrace) | |
379 | { | |
380 | int ret; | |
381 | ||
382 | ret = __set_tracing_filter("set_ftrace_filter", &ftrace->filters); | |
383 | if (ret < 0) | |
384 | return ret; | |
385 | ||
386 | ret = __set_tracing_filter("set_ftrace_notrace", &ftrace->notrace); | |
387 | if (ret < 0) | |
388 | return ret; | |
389 | ||
390 | ret = __set_tracing_filter("set_graph_function", &ftrace->graph_funcs); | |
391 | if (ret < 0) | |
392 | return ret; | |
393 | ||
394 | /* old kernels do not have this filter */ | |
395 | __set_tracing_filter("set_graph_notrace", &ftrace->nograph_funcs); | |
396 | ||
397 | return ret; | |
398 | } | |
399 | ||
/* Clear all filter files; writing a blank string empties the list. */
static void reset_tracing_filters(void)
{
	static const char * const filter_files[] = {
		"set_ftrace_filter",
		"set_ftrace_notrace",
		"set_graph_function",
		"set_graph_notrace",
	};
	size_t i;

	for (i = 0; i < sizeof(filter_files) / sizeof(filter_files[0]); i++)
		write_tracing_file(filter_files[i], " ");
}
407 | ||
408 | static int set_tracing_depth(struct perf_ftrace *ftrace) | |
409 | { | |
410 | if (ftrace->graph_depth == 0) | |
411 | return 0; | |
412 | ||
413 | if (ftrace->graph_depth < 0) { | |
414 | pr_err("invalid graph depth: %d\n", ftrace->graph_depth); | |
415 | return -1; | |
416 | } | |
417 | ||
418 | if (write_tracing_file_int("max_graph_depth", ftrace->graph_depth) < 0) | |
419 | return -1; | |
420 | ||
421 | return 0; | |
422 | } | |
423 | ||
424 | static int set_tracing_percpu_buffer_size(struct perf_ftrace *ftrace) | |
425 | { | |
426 | int ret; | |
427 | ||
428 | if (ftrace->percpu_buffer_size == 0) | |
429 | return 0; | |
430 | ||
431 | ret = write_tracing_file_int("buffer_size_kb", | |
432 | ftrace->percpu_buffer_size / 1024); | |
433 | if (ret < 0) | |
434 | return ret; | |
435 | ||
436 | return 0; | |
437 | } | |
438 | ||
439 | static int set_tracing_trace_inherit(struct perf_ftrace *ftrace) | |
440 | { | |
441 | if (!ftrace->inherit) | |
442 | return 0; | |
443 | ||
444 | if (write_tracing_option_file("function-fork", "1") < 0) | |
445 | return -1; | |
446 | ||
447 | return 0; | |
448 | } | |
449 | ||
450 | static int set_tracing_sleep_time(struct perf_ftrace *ftrace) | |
451 | { | |
452 | if (!ftrace->graph_nosleep_time) | |
453 | return 0; | |
454 | ||
455 | if (write_tracing_option_file("sleep-time", "0") < 0) | |
456 | return -1; | |
457 | ||
458 | return 0; | |
459 | } | |
460 | ||
461 | static int set_tracing_funcgraph_irqs(struct perf_ftrace *ftrace) | |
462 | { | |
463 | if (!ftrace->graph_noirqs) | |
464 | return 0; | |
465 | ||
466 | if (write_tracing_option_file("funcgraph-irqs", "0") < 0) | |
467 | return -1; | |
468 | ||
469 | return 0; | |
470 | } | |
471 | ||
472 | static int set_tracing_funcgraph_verbose(struct perf_ftrace *ftrace) | |
473 | { | |
474 | if (!ftrace->graph_verbose) | |
475 | return 0; | |
476 | ||
477 | if (write_tracing_option_file("funcgraph-proc", "1") < 0) | |
478 | return -1; | |
479 | ||
480 | if (write_tracing_option_file("funcgraph-abstime", "1") < 0) | |
481 | return -1; | |
482 | ||
483 | if (write_tracing_option_file("latency-format", "1") < 0) | |
484 | return -1; | |
485 | ||
486 | return 0; | |
487 | } | |
488 | ||
489 | static int set_tracing_thresh(struct perf_ftrace *ftrace) | |
490 | { | |
491 | int ret; | |
492 | ||
493 | if (ftrace->graph_thresh == 0) | |
494 | return 0; | |
495 | ||
496 | ret = write_tracing_file_int("tracing_thresh", ftrace->graph_thresh); | |
497 | if (ret < 0) | |
498 | return ret; | |
499 | ||
500 | return 0; | |
501 | } | |
502 | ||
503 | static int set_tracing_options(struct perf_ftrace *ftrace) | |
504 | { | |
505 | if (set_tracing_pid(ftrace) < 0) { | |
506 | pr_err("failed to set ftrace pid\n"); | |
507 | return -1; | |
508 | } | |
509 | ||
510 | if (set_tracing_cpu(ftrace) < 0) { | |
511 | pr_err("failed to set tracing cpumask\n"); | |
512 | return -1; | |
513 | } | |
514 | ||
515 | if (set_tracing_func_stack_trace(ftrace) < 0) { | |
516 | pr_err("failed to set tracing option func_stack_trace\n"); | |
517 | return -1; | |
518 | } | |
519 | ||
520 | if (set_tracing_func_irqinfo(ftrace) < 0) { | |
521 | pr_err("failed to set tracing option irq-info\n"); | |
522 | return -1; | |
523 | } | |
524 | ||
525 | if (set_tracing_filters(ftrace) < 0) { | |
526 | pr_err("failed to set tracing filters\n"); | |
527 | return -1; | |
528 | } | |
529 | ||
530 | if (set_tracing_depth(ftrace) < 0) { | |
531 | pr_err("failed to set graph depth\n"); | |
532 | return -1; | |
533 | } | |
534 | ||
535 | if (set_tracing_percpu_buffer_size(ftrace) < 0) { | |
536 | pr_err("failed to set tracing per-cpu buffer size\n"); | |
537 | return -1; | |
538 | } | |
539 | ||
540 | if (set_tracing_trace_inherit(ftrace) < 0) { | |
541 | pr_err("failed to set tracing option function-fork\n"); | |
542 | return -1; | |
543 | } | |
544 | ||
545 | if (set_tracing_sleep_time(ftrace) < 0) { | |
546 | pr_err("failed to set tracing option sleep-time\n"); | |
547 | return -1; | |
548 | } | |
549 | ||
550 | if (set_tracing_funcgraph_irqs(ftrace) < 0) { | |
551 | pr_err("failed to set tracing option funcgraph-irqs\n"); | |
552 | return -1; | |
553 | } | |
554 | ||
555 | if (set_tracing_funcgraph_verbose(ftrace) < 0) { | |
556 | pr_err("failed to set tracing option funcgraph-proc/funcgraph-abstime\n"); | |
557 | return -1; | |
558 | } | |
559 | ||
560 | if (set_tracing_thresh(ftrace) < 0) { | |
561 | pr_err("failed to set tracing thresh\n"); | |
562 | return -1; | |
563 | } | |
564 | ||
565 | return 0; | |
566 | } | |
567 | ||
/*
 * Run one ftrace session: reset tracefs, configure it from @ftrace,
 * optionally fork the workload in @argv, then stream trace_pipe to
 * stdout until a signal (or workload exit via SIGCHLD) stops us.
 * Returns 0 on a clean stop, -1 on any error.
 */
static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
{
	char *trace_file;
	int trace_fd;
	char buf[4096];
	struct pollfd pollfd = {
		.events = POLLIN,
	};

	/* Writing tracefs knobs needs CAP_PERFMON/CAP_SYS_ADMIN (or root). */
	if (!(perf_cap__capable(CAP_PERFMON) ||
	      perf_cap__capable(CAP_SYS_ADMIN))) {
		pr_err("ftrace only works for %s!\n",
#ifdef HAVE_LIBCAP_SUPPORT
		"users with the CAP_PERFMON or CAP_SYS_ADMIN capability"
#else
		"root"
#endif
		);
		return -1;
	}

	/* All of these handlers just set 'done' to end the loop below. */
	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);
	signal(SIGCHLD, sig_handler);
	signal(SIGPIPE, sig_handler);

	if (reset_tracing_files(ftrace) < 0) {
		pr_err("failed to reset ftrace\n");
		goto out;
	}

	/* reset ftrace buffer */
	if (write_tracing_file("trace", "0") < 0)
		goto out;

	/* Fork the workload; it blocks until evlist__start_workload(). */
	if (argc && evlist__prepare_workload(ftrace->evlist, &ftrace->target, argv, false,
				ftrace__workload_exec_failed_signal) < 0) {
		goto out;
	}

	if (set_tracing_options(ftrace) < 0)
		goto out_reset;

	if (write_tracing_file("current_tracer", ftrace->tracer) < 0) {
		pr_err("failed to set current_tracer to %s\n", ftrace->tracer);
		goto out_reset;
	}

	setup_pager();

	trace_file = get_tracing_file("trace_pipe");
	if (!trace_file) {
		pr_err("failed to open trace_pipe\n");
		goto out_reset;
	}

	trace_fd = open(trace_file, O_RDONLY);

	put_tracing_file(trace_file);

	if (trace_fd < 0) {
		pr_err("failed to open trace_pipe\n");
		goto out_reset;
	}

	/* Non-blocking reads keep the poll() loop responsive to 'done'. */
	fcntl(trace_fd, F_SETFL, O_NONBLOCK);
	pollfd.fd = trace_fd;

	/* display column headers */
	read_tracing_file_to_stdout("trace");

	if (!ftrace->initial_delay) {
		if (write_tracing_file("tracing_on", "1") < 0) {
			pr_err("can't enable tracing\n");
			goto out_close_fd;
		}
	}

	evlist__start_workload(ftrace->evlist);

	/* With -D/--delay, enable tracing only after the delay elapses. */
	if (ftrace->initial_delay) {
		usleep(ftrace->initial_delay * 1000);
		if (write_tracing_file("tracing_on", "1") < 0) {
			pr_err("can't enable tracing\n");
			goto out_close_fd;
		}
	}

	/* Stream trace_pipe to stdout until a signal sets 'done'. */
	while (!done) {
		if (poll(&pollfd, 1, -1) < 0)
			break;

		if (pollfd.revents & POLLIN) {
			int n = read(trace_fd, buf, sizeof(buf));
			if (n < 0)
				break;
			if (fwrite(buf, n, 1, stdout) != 1)
				break;
		}
	}

	write_tracing_file("tracing_on", "0");

	if (workload_exec_errno) {
		const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf));
		/* flush stdout first so below error msg appears at the end. */
		fflush(stdout);
		pr_err("workload failed: %s\n", emsg);
		goto out_close_fd;
	}

	/* read remaining buffer contents */
	while (true) {
		int n = read(trace_fd, buf, sizeof(buf));
		if (n <= 0)
			break;
		if (fwrite(buf, n, 1, stdout) != 1)
			break;
	}

out_close_fd:
	close(trace_fd);
out_reset:
	reset_tracing_files(ftrace);
out:
	return (done && !workload_exec_errno) ? 0 : -1;
}
695 | ||
696 | static int perf_ftrace_config(const char *var, const char *value, void *cb) | |
697 | { | |
698 | struct perf_ftrace *ftrace = cb; | |
699 | ||
700 | if (!strstarts(var, "ftrace.")) | |
701 | return 0; | |
702 | ||
703 | if (strcmp(var, "ftrace.tracer")) | |
704 | return -1; | |
705 | ||
706 | if (!strcmp(value, "function_graph") || | |
707 | !strcmp(value, "function")) { | |
708 | ftrace->tracer = value; | |
709 | return 0; | |
710 | } | |
711 | ||
712 | pr_err("Please select \"function_graph\" (default) or \"function\"\n"); | |
713 | return -1; | |
714 | } | |
715 | ||
/* Line callback for -F: print the function name if it matches the filter. */
static void list_function_cb(char *str, void *arg)
{
	struct strfilter *filter = arg;

	if (strfilter__compare(filter, str))
		printf("%s", str);
}
723 | ||
/*
 * -F/--funcs handler: print all available_filter_functions entries
 * matching the given filter pattern, then exit(0) -- listing is a
 * terminal action, normal tracing never runs afterwards.
 */
static int opt_list_avail_functions(const struct option *opt __maybe_unused,
				    const char *str, int unset)
{
	struct strfilter *filter;
	const char *err = NULL;
	int ret;

	if (unset || !str)
		return -1;

	filter = strfilter__new(str, &err);
	if (!filter)
		return err ? -EINVAL : -ENOMEM;

	/*
	 * NOTE(review): 'str' was already used to build the filter above;
	 * OR-ing the same pattern in again looks redundant (x || x == x) --
	 * confirm against the strfilter API before simplifying.
	 */
	ret = strfilter__or(filter, str, &err);
	if (ret == -EINVAL) {
		/* on parse error, 'err' points into 'str' at the bad char */
		pr_err("Filter parse error at %td.\n", err - str + 1);
		pr_err("Source: \"%s\"\n", str);
		pr_err(" %*c\n", (int)(err - str + 1), '^');
		strfilter__delete(filter);
		return ret;
	}

	ret = read_tracing_file_by_line("available_filter_functions",
					list_function_cb, filter);
	strfilter__delete(filter);
	if (ret < 0)
		return ret;

	exit(0);
}
755 | ||
756 | static int parse_filter_func(const struct option *opt, const char *str, | |
757 | int unset __maybe_unused) | |
758 | { | |
759 | struct list_head *head = opt->value; | |
760 | struct filter_entry *entry; | |
761 | ||
762 | entry = malloc(sizeof(*entry) + strlen(str) + 1); | |
763 | if (entry == NULL) | |
764 | return -ENOMEM; | |
765 | ||
766 | strcpy(entry->name, str); | |
767 | list_add_tail(&entry->list, head); | |
768 | ||
769 | return 0; | |
770 | } | |
771 | ||
772 | static void delete_filter_func(struct list_head *head) | |
773 | { | |
774 | struct filter_entry *pos, *tmp; | |
775 | ||
776 | list_for_each_entry_safe(pos, tmp, head, list) { | |
777 | list_del_init(&pos->list); | |
778 | free(pos); | |
779 | } | |
780 | } | |
781 | ||
782 | static int parse_buffer_size(const struct option *opt, | |
783 | const char *str, int unset) | |
784 | { | |
785 | unsigned long *s = (unsigned long *)opt->value; | |
786 | static struct parse_tag tags_size[] = { | |
787 | { .tag = 'B', .mult = 1 }, | |
788 | { .tag = 'K', .mult = 1 << 10 }, | |
789 | { .tag = 'M', .mult = 1 << 20 }, | |
790 | { .tag = 'G', .mult = 1 << 30 }, | |
791 | { .tag = 0 }, | |
792 | }; | |
793 | unsigned long val; | |
794 | ||
795 | if (unset) { | |
796 | *s = 0; | |
797 | return 0; | |
798 | } | |
799 | ||
800 | val = parse_tag_value(str, tags_size); | |
801 | if (val != (unsigned long) -1) { | |
802 | if (val < 1024) { | |
803 | pr_err("buffer size too small, must larger than 1KB."); | |
804 | return -1; | |
805 | } | |
806 | *s = val; | |
807 | return 0; | |
808 | } | |
809 | ||
810 | return -1; | |
811 | } | |
812 | ||
813 | static int parse_func_tracer_opts(const struct option *opt, | |
814 | const char *str, int unset) | |
815 | { | |
816 | int ret; | |
817 | struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value; | |
818 | struct sublevel_option func_tracer_opts[] = { | |
819 | { .name = "call-graph", .value_ptr = &ftrace->func_stack_trace }, | |
820 | { .name = "irq-info", .value_ptr = &ftrace->func_irq_info }, | |
821 | { .name = NULL, } | |
822 | }; | |
823 | ||
824 | if (unset) | |
825 | return 0; | |
826 | ||
827 | ret = perf_parse_sublevel_options(str, func_tracer_opts); | |
828 | if (ret) | |
829 | return ret; | |
830 | ||
831 | return 0; | |
832 | } | |
833 | ||
834 | static int parse_graph_tracer_opts(const struct option *opt, | |
835 | const char *str, int unset) | |
836 | { | |
837 | int ret; | |
838 | struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value; | |
839 | struct sublevel_option graph_tracer_opts[] = { | |
840 | { .name = "nosleep-time", .value_ptr = &ftrace->graph_nosleep_time }, | |
841 | { .name = "noirqs", .value_ptr = &ftrace->graph_noirqs }, | |
842 | { .name = "verbose", .value_ptr = &ftrace->graph_verbose }, | |
843 | { .name = "thresh", .value_ptr = &ftrace->graph_thresh }, | |
844 | { .name = "depth", .value_ptr = &ftrace->graph_depth }, | |
845 | { .name = NULL, } | |
846 | }; | |
847 | ||
848 | if (unset) | |
849 | return 0; | |
850 | ||
851 | ret = perf_parse_sublevel_options(str, graph_tracer_opts); | |
852 | if (ret) | |
853 | return ret; | |
854 | ||
855 | return 0; | |
856 | } | |
857 | ||
858 | static void select_tracer(struct perf_ftrace *ftrace) | |
859 | { | |
860 | bool graph = !list_empty(&ftrace->graph_funcs) || | |
861 | !list_empty(&ftrace->nograph_funcs); | |
862 | bool func = !list_empty(&ftrace->filters) || | |
863 | !list_empty(&ftrace->notrace); | |
864 | ||
865 | /* The function_graph has priority over function tracer. */ | |
866 | if (graph) | |
867 | ftrace->tracer = "function_graph"; | |
868 | else if (func) | |
869 | ftrace->tracer = "function"; | |
870 | /* Otherwise, the default tracer is used. */ | |
871 | ||
872 | pr_debug("%s tracer is used\n", ftrace->tracer); | |
873 | } | |
874 | ||
/*
 * Entry point for 'perf ftrace': parse options and config, validate the
 * target, build the evlist maps, run the session and clean everything up.
 */
int cmd_ftrace(int argc, const char **argv)
{
	int ret;
	struct perf_ftrace ftrace = {
		.tracer = DEFAULT_TRACER,
		.target = { .uid = UINT_MAX, },	/* UINT_MAX = no uid filter */
	};
	const char * const ftrace_usage[] = {
		"perf ftrace [<options>] [<command>]",
		"perf ftrace [<options>] -- <command> [<options>]",
		NULL
	};
	const struct option ftrace_options[] = {
	OPT_STRING('t', "tracer", &ftrace.tracer, "tracer",
		   "Tracer to use: function_graph(default) or function"),
	OPT_CALLBACK_DEFAULT('F', "funcs", NULL, "[FILTER]",
			     "Show available functions to filter",
			     opt_list_avail_functions, "*"),
	OPT_STRING('p', "pid", &ftrace.target.pid, "pid",
		   "Trace on existing process id"),
	/* TODO: Add short option -t after -t/--tracer can be removed. */
	OPT_STRING(0, "tid", &ftrace.target.tid, "tid",
		   "Trace on existing thread id (exclusive to --pid)"),
	OPT_INCR('v', "verbose", &verbose,
		 "Be more verbose"),
	OPT_BOOLEAN('a', "all-cpus", &ftrace.target.system_wide,
		    "System-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &ftrace.target.cpu_list, "cpu",
		   "List of cpus to monitor"),
	OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
		     "Trace given functions using function tracer",
		     parse_filter_func),
	OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func",
		     "Do not trace given functions", parse_filter_func),
	OPT_CALLBACK(0, "func-opts", &ftrace, "options",
		     "Function tracer options, available options: call-graph,irq-info",
		     parse_func_tracer_opts),
	OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func",
		     "Trace given functions using function_graph tracer",
		     parse_filter_func),
	OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func",
		     "Set nograph filter on given functions", parse_filter_func),
	OPT_CALLBACK(0, "graph-opts", &ftrace, "options",
		     "Graph tracer options, available options: nosleep-time,noirqs,verbose,thresh=<n>,depth=<n>",
		     parse_graph_tracer_opts),
	OPT_CALLBACK('m', "buffer-size", &ftrace.percpu_buffer_size, "size",
		     "Size of per cpu buffer, needs to use a B, K, M or G suffix.", parse_buffer_size),
	OPT_BOOLEAN(0, "inherit", &ftrace.inherit,
		    "Trace children processes"),
	OPT_UINTEGER('D', "delay", &ftrace.initial_delay,
		     "Number of milliseconds to wait before starting tracing after program start"),
	OPT_END()
	};

	/* the filter callbacks above append to these lists */
	INIT_LIST_HEAD(&ftrace.filters);
	INIT_LIST_HEAD(&ftrace.notrace);
	INIT_LIST_HEAD(&ftrace.graph_funcs);
	INIT_LIST_HEAD(&ftrace.nograph_funcs);

	ret = perf_config(perf_ftrace_config, &ftrace);
	if (ret < 0)
		return -1;

	argc = parse_options(argc, argv, ftrace_options, ftrace_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	/* no workload and no target given: trace the whole system */
	if (!argc && target__none(&ftrace.target))
		ftrace.target.system_wide = true;

	select_tracer(&ftrace);

	ret = target__validate(&ftrace.target);
	if (ret) {
		char errbuf[512];

		target__strerror(&ftrace.target, ret, errbuf, 512);
		pr_err("%s\n", errbuf);
		goto out_delete_filters;
	}

	ftrace.evlist = evlist__new();
	if (ftrace.evlist == NULL) {
		ret = -ENOMEM;
		goto out_delete_filters;
	}

	ret = evlist__create_maps(ftrace.evlist, &ftrace.target);
	if (ret < 0)
		goto out_delete_evlist;

	ret = __cmd_ftrace(&ftrace, argc, argv);

out_delete_evlist:
	evlist__delete(ftrace.evlist);

out_delete_filters:
	delete_filter_func(&ftrace.filters);
	delete_filter_func(&ftrace.notrace);
	delete_filter_func(&ftrace.graph_funcs);
	delete_filter_func(&ftrace.nograph_funcs);

	return ret;
}
976 | } |