1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/hw_breakpoint.h>
12 #include <subcmd/parse-options.h>
13 #include "parse-events.h"
14 #include <subcmd/exec-cmd.h>
20 #include "bpf-loader.h"
22 #include <api/fs/tracing_path.h>
23 #include "parse-events-bison.h"
24 #define YY_EXTRA_TYPE int
25 #include "parse-events-flex.h"
27 #include "thread_map.h"
29 #include "probe-file.h"
31 #include "util/parse-branch-options.h"
32 #include "metricgroup.h"
34 #define MAX_NAME_LEN 100
37 extern int parse_events_debug
;
39 int parse_events_parse(void *parse_state
, void *scanner
);
40 static int get_config_terms(struct list_head
*head_config
,
41 struct list_head
*head_terms __maybe_unused
);
43 static struct perf_pmu_event_symbol
*perf_pmu_events_list
;
45 * The variable indicates the number of supported pmu event symbols.
46 * 0 means not initialized and ready to init
47 * -1 means failed to init, don't try anymore
48 * >0 is the number of supported pmu event symbols
50 static int perf_pmu_events_list_num
;
/*
 * Symbolic names for the generic PERF_TYPE_HARDWARE events, indexed by
 * perf_hw_id.  The parser matches user strings against .symbol (and
 * .alias where one exists, e.g. "idle-cycles-frontend").
 * NOTE(review): several entries' .alias fields are not visible in this
 * chunk — confirm against the full file before relying on them.
 */
52 struct event_symbol event_symbols_hw
[PERF_COUNT_HW_MAX
] = {
53 [PERF_COUNT_HW_CPU_CYCLES
] = {
54 .symbol
= "cpu-cycles",
57 [PERF_COUNT_HW_INSTRUCTIONS
] = {
58 .symbol
= "instructions",
61 [PERF_COUNT_HW_CACHE_REFERENCES
] = {
62 .symbol
= "cache-references",
65 [PERF_COUNT_HW_CACHE_MISSES
] = {
66 .symbol
= "cache-misses",
69 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS
] = {
70 .symbol
= "branch-instructions",
73 [PERF_COUNT_HW_BRANCH_MISSES
] = {
74 .symbol
= "branch-misses",
77 [PERF_COUNT_HW_BUS_CYCLES
] = {
78 .symbol
= "bus-cycles",
81 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
] = {
82 .symbol
= "stalled-cycles-frontend",
83 .alias
= "idle-cycles-frontend",
85 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND
] = {
86 .symbol
= "stalled-cycles-backend",
87 .alias
= "idle-cycles-backend",
89 [PERF_COUNT_HW_REF_CPU_CYCLES
] = {
90 .symbol
= "ref-cycles",
95 struct event_symbol event_symbols_sw
[PERF_COUNT_SW_MAX
] = {
96 [PERF_COUNT_SW_CPU_CLOCK
] = {
97 .symbol
= "cpu-clock",
100 [PERF_COUNT_SW_TASK_CLOCK
] = {
101 .symbol
= "task-clock",
104 [PERF_COUNT_SW_PAGE_FAULTS
] = {
105 .symbol
= "page-faults",
108 [PERF_COUNT_SW_CONTEXT_SWITCHES
] = {
109 .symbol
= "context-switches",
112 [PERF_COUNT_SW_CPU_MIGRATIONS
] = {
113 .symbol
= "cpu-migrations",
114 .alias
= "migrations",
116 [PERF_COUNT_SW_PAGE_FAULTS_MIN
] = {
117 .symbol
= "minor-faults",
120 [PERF_COUNT_SW_PAGE_FAULTS_MAJ
] = {
121 .symbol
= "major-faults",
124 [PERF_COUNT_SW_ALIGNMENT_FAULTS
] = {
125 .symbol
= "alignment-faults",
128 [PERF_COUNT_SW_EMULATION_FAULTS
] = {
129 .symbol
= "emulation-faults",
132 [PERF_COUNT_SW_DUMMY
] = {
136 [PERF_COUNT_SW_BPF_OUTPUT
] = {
137 .symbol
= "bpf-output",
142 #define __PERF_EVENT_FIELD(config, name) \
143 ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT)
145 #define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW)
146 #define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG)
147 #define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
148 #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT)
150 #define for_each_subsystem(sys_dir, sys_dirent) \
151 while ((sys_dirent = readdir(sys_dir)) != NULL) \
152 if (sys_dirent->d_type == DT_DIR && \
153 (strcmp(sys_dirent->d_name, ".")) && \
154 (strcmp(sys_dirent->d_name, "..")))
156 static int tp_event_has_id(struct dirent
*sys_dir
, struct dirent
*evt_dir
)
158 char evt_path
[MAXPATHLEN
];
161 snprintf(evt_path
, MAXPATHLEN
, "%s/%s/%s/id", tracing_events_path
,
162 sys_dir
->d_name
, evt_dir
->d_name
);
163 fd
= open(evt_path
, O_RDONLY
);
171 #define for_each_event(sys_dirent, evt_dir, evt_dirent) \
172 while ((evt_dirent = readdir(evt_dir)) != NULL) \
173 if (evt_dirent->d_type == DT_DIR && \
174 (strcmp(evt_dirent->d_name, ".")) && \
175 (strcmp(evt_dirent->d_name, "..")) && \
176 (!tp_event_has_id(sys_dirent, evt_dirent)))
178 #define MAX_EVENT_LENGTH 512
181 struct tracepoint_path
*tracepoint_id_to_path(u64 config
)
183 struct tracepoint_path
*path
= NULL
;
184 DIR *sys_dir
, *evt_dir
;
185 struct dirent
*sys_dirent
, *evt_dirent
;
189 char evt_path
[MAXPATHLEN
];
190 char dir_path
[MAXPATHLEN
];
192 sys_dir
= opendir(tracing_events_path
);
196 for_each_subsystem(sys_dir
, sys_dirent
) {
198 snprintf(dir_path
, MAXPATHLEN
, "%s/%s", tracing_events_path
,
200 evt_dir
= opendir(dir_path
);
204 for_each_event(sys_dirent
, evt_dir
, evt_dirent
) {
206 snprintf(evt_path
, MAXPATHLEN
, "%s/%s/id", dir_path
,
208 fd
= open(evt_path
, O_RDONLY
);
211 if (read(fd
, id_buf
, sizeof(id_buf
)) < 0) {
220 path
= zalloc(sizeof(*path
));
223 path
->system
= malloc(MAX_EVENT_LENGTH
);
228 path
->name
= malloc(MAX_EVENT_LENGTH
);
230 zfree(&path
->system
);
234 strncpy(path
->system
, sys_dirent
->d_name
,
236 strncpy(path
->name
, evt_dirent
->d_name
,
248 struct tracepoint_path
*tracepoint_name_to_path(const char *name
)
250 struct tracepoint_path
*path
= zalloc(sizeof(*path
));
251 char *str
= strchr(name
, ':');
253 if (path
== NULL
|| str
== NULL
) {
258 path
->system
= strndup(name
, str
- name
);
259 path
->name
= strdup(str
+1);
261 if (path
->system
== NULL
|| path
->name
== NULL
) {
262 zfree(&path
->system
);
270 const char *event_type(int type
)
273 case PERF_TYPE_HARDWARE
:
276 case PERF_TYPE_SOFTWARE
:
279 case PERF_TYPE_TRACEPOINT
:
282 case PERF_TYPE_HW_CACHE
:
283 return "hardware-cache";
292 static int parse_events__is_name_term(struct parse_events_term
*term
)
294 return term
->type_term
== PARSE_EVENTS__TERM_TYPE_NAME
;
/*
 * Return the string value of the first "name=..." term in @head_terms,
 * used to override the synthesized event name.
 * NOTE(review): the empty/NULL-list guard and the fall-through return
 * are not visible in this chunk; confirm against the full file.
 */
297 static char *get_config_name(struct list_head
*head_terms
)
299 struct parse_events_term
*term
;
/* First 'name' term found wins. */
304 list_for_each_entry(term
, head_terms
, list
)
305 if (parse_events__is_name_term(term
))
306 return term
->val
.str
;
311 static struct perf_evsel
*
312 __add_event(struct list_head
*list
, int *idx
,
313 struct perf_event_attr
*attr
,
314 char *name
, struct perf_pmu
*pmu
,
315 struct list_head
*config_terms
, bool auto_merge_stats
)
317 struct perf_evsel
*evsel
;
318 struct cpu_map
*cpus
= pmu
? pmu
->cpus
: NULL
;
320 event_attr_init(attr
);
322 evsel
= perf_evsel__new_idx(attr
, *idx
);
327 evsel
->cpus
= cpu_map__get(cpus
);
328 evsel
->own_cpus
= cpu_map__get(cpus
);
329 evsel
->system_wide
= pmu
? pmu
->is_uncore
: false;
330 evsel
->auto_merge_stats
= auto_merge_stats
;
333 evsel
->name
= strdup(name
);
336 list_splice(config_terms
, &evsel
->config_terms
);
338 list_add_tail(&evsel
->node
, list
);
342 static int add_event(struct list_head
*list
, int *idx
,
343 struct perf_event_attr
*attr
, char *name
,
344 struct list_head
*config_terms
)
346 return __add_event(list
, idx
, attr
, name
, NULL
, config_terms
, false) ? 0 : -ENOMEM
;
349 static int parse_aliases(char *str
, const char *names
[][PERF_EVSEL__MAX_ALIASES
], int size
)
354 for (i
= 0; i
< size
; i
++) {
355 for (j
= 0; j
< PERF_EVSEL__MAX_ALIASES
&& names
[i
][j
]; j
++) {
356 n
= strlen(names
[i
][j
]);
357 if (n
> longest
&& !strncasecmp(str
, names
[i
][j
], n
))
367 typedef int config_term_func_t(struct perf_event_attr
*attr
,
368 struct parse_events_term
*term
,
369 struct parse_events_error
*err
);
370 static int config_term_common(struct perf_event_attr
*attr
,
371 struct parse_events_term
*term
,
372 struct parse_events_error
*err
);
373 static int config_attr(struct perf_event_attr
*attr
,
374 struct list_head
*head
,
375 struct parse_events_error
*err
,
376 config_term_func_t config_term
);
378 int parse_events_add_cache(struct list_head
*list
, int *idx
,
379 char *type
, char *op_result1
, char *op_result2
,
380 struct parse_events_error
*err
,
381 struct list_head
*head_config
)
383 struct perf_event_attr attr
;
384 LIST_HEAD(config_terms
);
385 char name
[MAX_NAME_LEN
], *config_name
;
386 int cache_type
= -1, cache_op
= -1, cache_result
= -1;
387 char *op_result
[2] = { op_result1
, op_result2
};
391 * No fallback - if we cannot get a clear cache type
394 cache_type
= parse_aliases(type
, perf_evsel__hw_cache
,
395 PERF_COUNT_HW_CACHE_MAX
);
396 if (cache_type
== -1)
399 config_name
= get_config_name(head_config
);
400 n
= snprintf(name
, MAX_NAME_LEN
, "%s", type
);
402 for (i
= 0; (i
< 2) && (op_result
[i
]); i
++) {
403 char *str
= op_result
[i
];
405 n
+= snprintf(name
+ n
, MAX_NAME_LEN
- n
, "-%s", str
);
407 if (cache_op
== -1) {
408 cache_op
= parse_aliases(str
, perf_evsel__hw_cache_op
,
409 PERF_COUNT_HW_CACHE_OP_MAX
);
411 if (!perf_evsel__is_cache_op_valid(cache_type
, cache_op
))
417 if (cache_result
== -1) {
418 cache_result
= parse_aliases(str
, perf_evsel__hw_cache_result
,
419 PERF_COUNT_HW_CACHE_RESULT_MAX
);
420 if (cache_result
>= 0)
426 * Fall back to reads:
429 cache_op
= PERF_COUNT_HW_CACHE_OP_READ
;
432 * Fall back to accesses:
434 if (cache_result
== -1)
435 cache_result
= PERF_COUNT_HW_CACHE_RESULT_ACCESS
;
437 memset(&attr
, 0, sizeof(attr
));
438 attr
.config
= cache_type
| (cache_op
<< 8) | (cache_result
<< 16);
439 attr
.type
= PERF_TYPE_HW_CACHE
;
442 if (config_attr(&attr
, head_config
, err
,
446 if (get_config_terms(head_config
, &config_terms
))
449 return add_event(list
, idx
, &attr
, config_name
? : name
, &config_terms
);
452 static void tracepoint_error(struct parse_events_error
*e
, int err
,
453 const char *sys
, const char *name
)
461 * We get error directly from syscall errno ( > 0),
462 * or from encoded pointer's error ( < 0).
468 e
->str
= strdup("can't access trace events");
471 e
->str
= strdup("unknown tracepoint");
474 e
->str
= strdup("failed to add tracepoint");
478 tracing_path__strerror_open_tp(err
, help
, sizeof(help
), sys
, name
);
479 e
->help
= strdup(help
);
482 static int add_tracepoint(struct list_head
*list
, int *idx
,
483 const char *sys_name
, const char *evt_name
,
484 struct parse_events_error
*err
,
485 struct list_head
*head_config
)
487 struct perf_evsel
*evsel
;
489 evsel
= perf_evsel__newtp_idx(sys_name
, evt_name
, (*idx
)++);
491 tracepoint_error(err
, PTR_ERR(evsel
), sys_name
, evt_name
);
492 return PTR_ERR(evsel
);
496 LIST_HEAD(config_terms
);
498 if (get_config_terms(head_config
, &config_terms
))
500 list_splice(&config_terms
, &evsel
->config_terms
);
503 list_add_tail(&evsel
->node
, list
);
507 static int add_tracepoint_multi_event(struct list_head
*list
, int *idx
,
508 const char *sys_name
, const char *evt_name
,
509 struct parse_events_error
*err
,
510 struct list_head
*head_config
)
512 char evt_path
[MAXPATHLEN
];
513 struct dirent
*evt_ent
;
515 int ret
= 0, found
= 0;
517 snprintf(evt_path
, MAXPATHLEN
, "%s/%s", tracing_events_path
, sys_name
);
518 evt_dir
= opendir(evt_path
);
520 tracepoint_error(err
, errno
, sys_name
, evt_name
);
524 while (!ret
&& (evt_ent
= readdir(evt_dir
))) {
525 if (!strcmp(evt_ent
->d_name
, ".")
526 || !strcmp(evt_ent
->d_name
, "..")
527 || !strcmp(evt_ent
->d_name
, "enable")
528 || !strcmp(evt_ent
->d_name
, "filter"))
531 if (!strglobmatch(evt_ent
->d_name
, evt_name
))
536 ret
= add_tracepoint(list
, idx
, sys_name
, evt_ent
->d_name
,
541 tracepoint_error(err
, ENOENT
, sys_name
, evt_name
);
549 static int add_tracepoint_event(struct list_head
*list
, int *idx
,
550 const char *sys_name
, const char *evt_name
,
551 struct parse_events_error
*err
,
552 struct list_head
*head_config
)
554 return strpbrk(evt_name
, "*?") ?
555 add_tracepoint_multi_event(list
, idx
, sys_name
, evt_name
,
557 add_tracepoint(list
, idx
, sys_name
, evt_name
,
561 static int add_tracepoint_multi_sys(struct list_head
*list
, int *idx
,
562 const char *sys_name
, const char *evt_name
,
563 struct parse_events_error
*err
,
564 struct list_head
*head_config
)
566 struct dirent
*events_ent
;
570 events_dir
= opendir(tracing_events_path
);
572 tracepoint_error(err
, errno
, sys_name
, evt_name
);
576 while (!ret
&& (events_ent
= readdir(events_dir
))) {
577 if (!strcmp(events_ent
->d_name
, ".")
578 || !strcmp(events_ent
->d_name
, "..")
579 || !strcmp(events_ent
->d_name
, "enable")
580 || !strcmp(events_ent
->d_name
, "header_event")
581 || !strcmp(events_ent
->d_name
, "header_page"))
584 if (!strglobmatch(events_ent
->d_name
, sys_name
))
587 ret
= add_tracepoint_event(list
, idx
, events_ent
->d_name
,
588 evt_name
, err
, head_config
);
591 closedir(events_dir
);
595 struct __add_bpf_event_param
{
596 struct parse_events_state
*parse_state
;
597 struct list_head
*list
;
598 struct list_head
*head_config
;
601 static int add_bpf_event(const char *group
, const char *event
, int fd
,
604 LIST_HEAD(new_evsels
);
605 struct __add_bpf_event_param
*param
= _param
;
606 struct parse_events_state
*parse_state
= param
->parse_state
;
607 struct list_head
*list
= param
->list
;
608 struct perf_evsel
*pos
;
611 pr_debug("add bpf event %s:%s and attach bpf program %d\n",
614 err
= parse_events_add_tracepoint(&new_evsels
, &parse_state
->idx
, group
,
615 event
, parse_state
->error
,
618 struct perf_evsel
*evsel
, *tmp
;
620 pr_debug("Failed to add BPF event %s:%s\n",
622 list_for_each_entry_safe(evsel
, tmp
, &new_evsels
, node
) {
623 list_del(&evsel
->node
);
624 perf_evsel__delete(evsel
);
628 pr_debug("adding %s:%s\n", group
, event
);
630 list_for_each_entry(pos
, &new_evsels
, node
) {
631 pr_debug("adding %s:%s to %p\n",
635 list_splice(&new_evsels
, list
);
639 int parse_events_load_bpf_obj(struct parse_events_state
*parse_state
,
640 struct list_head
*list
,
641 struct bpf_object
*obj
,
642 struct list_head
*head_config
)
646 struct __add_bpf_event_param param
= {parse_state
, list
, head_config
};
647 static bool registered_unprobe_atexit
= false;
649 if (IS_ERR(obj
) || !obj
) {
650 snprintf(errbuf
, sizeof(errbuf
),
651 "Internal error: load bpf obj with NULL");
657 * Register atexit handler before calling bpf__probe() so
658 * bpf__probe() don't need to unprobe probe points its already
659 * created when failure.
661 if (!registered_unprobe_atexit
) {
663 registered_unprobe_atexit
= true;
666 err
= bpf__probe(obj
);
668 bpf__strerror_probe(obj
, err
, errbuf
, sizeof(errbuf
));
672 err
= bpf__load(obj
);
674 bpf__strerror_load(obj
, err
, errbuf
, sizeof(errbuf
));
678 err
= bpf__foreach_event(obj
, add_bpf_event
, ¶m
);
680 snprintf(errbuf
, sizeof(errbuf
),
681 "Attach events in BPF object failed");
687 parse_state
->error
->help
= strdup("(add -v to see detail)");
688 parse_state
->error
->str
= strdup(errbuf
);
693 parse_events_config_bpf(struct parse_events_state
*parse_state
,
694 struct bpf_object
*obj
,
695 struct list_head
*head_config
)
697 struct parse_events_term
*term
;
700 if (!head_config
|| list_empty(head_config
))
703 list_for_each_entry(term
, head_config
, list
) {
707 if (term
->type_term
!= PARSE_EVENTS__TERM_TYPE_USER
) {
708 snprintf(errbuf
, sizeof(errbuf
),
709 "Invalid config term for BPF object");
710 errbuf
[BUFSIZ
- 1] = '\0';
712 parse_state
->error
->idx
= term
->err_term
;
713 parse_state
->error
->str
= strdup(errbuf
);
717 err
= bpf__config_obj(obj
, term
, parse_state
->evlist
, &error_pos
);
719 bpf__strerror_config_obj(obj
, term
, parse_state
->evlist
,
720 &error_pos
, err
, errbuf
,
722 parse_state
->error
->help
= strdup(
723 "Hint:\tValid config terms:\n"
724 " \tmap:[<arraymap>].value<indices>=[value]\n"
725 " \tmap:[<eventmap>].event<indices>=[event]\n"
727 " \twhere <indices> is something like [0,3...5] or [all]\n"
728 " \t(add -v to see detail)");
729 parse_state
->error
->str
= strdup(errbuf
);
730 if (err
== -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE
)
731 parse_state
->error
->idx
= term
->err_val
;
733 parse_state
->error
->idx
= term
->err_term
+ error_pos
;
741 * Split config terms:
742 * perf record -e bpf.c/call-graph=fp,map:array.value[0]=1/ ...
743 * 'call-graph=fp' is 'evt config', should be applied to each
745 * 'map:array.value[0]=1' is 'obj config', should be processed
746 * with parse_events_config_bpf.
748 * Move object config terms from the first list to obj_head_config.
751 split_bpf_config_terms(struct list_head
*evt_head_config
,
752 struct list_head
*obj_head_config
)
754 struct parse_events_term
*term
, *temp
;
757 * Currently, all possible user config terms
758 * belong to bpf object. parse_events__is_hardcoded_term()
759 * happens to be a good flag.
761 * See parse_events_config_bpf() and
762 * config_term_tracepoint().
764 list_for_each_entry_safe(term
, temp
, evt_head_config
, list
)
765 if (!parse_events__is_hardcoded_term(term
))
766 list_move_tail(&term
->list
, obj_head_config
);
769 int parse_events_load_bpf(struct parse_events_state
*parse_state
,
770 struct list_head
*list
,
773 struct list_head
*head_config
)
776 struct bpf_object
*obj
;
777 LIST_HEAD(obj_head_config
);
780 split_bpf_config_terms(head_config
, &obj_head_config
);
782 obj
= bpf__prepare_load(bpf_file_name
, source
);
789 snprintf(errbuf
, sizeof(errbuf
),
790 "BPF support is not compiled");
792 bpf__strerror_prepare_load(bpf_file_name
,
797 parse_state
->error
->help
= strdup("(add -v to see detail)");
798 parse_state
->error
->str
= strdup(errbuf
);
802 err
= parse_events_load_bpf_obj(parse_state
, list
, obj
, head_config
);
805 err
= parse_events_config_bpf(parse_state
, obj
, &obj_head_config
);
808 * Caller doesn't know anything about obj_head_config,
809 * so combine them together again before returning.
812 list_splice_tail(&obj_head_config
, head_config
);
817 parse_breakpoint_type(const char *type
, struct perf_event_attr
*attr
)
821 for (i
= 0; i
< 3; i
++) {
822 if (!type
|| !type
[i
])
825 #define CHECK_SET_TYPE(bit) \
827 if (attr->bp_type & bit) \
830 attr->bp_type |= bit; \
835 CHECK_SET_TYPE(HW_BREAKPOINT_R
);
838 CHECK_SET_TYPE(HW_BREAKPOINT_W
);
841 CHECK_SET_TYPE(HW_BREAKPOINT_X
);
848 #undef CHECK_SET_TYPE
850 if (!attr
->bp_type
) /* Default */
851 attr
->bp_type
= HW_BREAKPOINT_R
| HW_BREAKPOINT_W
;
856 int parse_events_add_breakpoint(struct list_head
*list
, int *idx
,
857 void *ptr
, char *type
, u64 len
)
859 struct perf_event_attr attr
;
861 memset(&attr
, 0, sizeof(attr
));
862 attr
.bp_addr
= (unsigned long) ptr
;
864 if (parse_breakpoint_type(type
, &attr
))
867 /* Provide some defaults if len is not specified */
869 if (attr
.bp_type
== HW_BREAKPOINT_X
)
872 len
= HW_BREAKPOINT_LEN_4
;
877 attr
.type
= PERF_TYPE_BREAKPOINT
;
878 attr
.sample_period
= 1;
880 return add_event(list
, idx
, &attr
, NULL
, NULL
);
883 static int check_type_val(struct parse_events_term
*term
,
884 struct parse_events_error
*err
,
887 if (type
== term
->type_val
)
891 err
->idx
= term
->err_val
;
892 if (type
== PARSE_EVENTS__TERM_TYPE_NUM
)
893 err
->str
= strdup("expected numeric value");
895 err
->str
= strdup("expected string value");
901 * Update according to parse-events.l
903 static const char *config_term_names
[__PARSE_EVENTS__TERM_TYPE_NR
] = {
904 [PARSE_EVENTS__TERM_TYPE_USER
] = "<sysfs term>",
905 [PARSE_EVENTS__TERM_TYPE_CONFIG
] = "config",
906 [PARSE_EVENTS__TERM_TYPE_CONFIG1
] = "config1",
907 [PARSE_EVENTS__TERM_TYPE_CONFIG2
] = "config2",
908 [PARSE_EVENTS__TERM_TYPE_NAME
] = "name",
909 [PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD
] = "period",
910 [PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ
] = "freq",
911 [PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE
] = "branch_type",
912 [PARSE_EVENTS__TERM_TYPE_TIME
] = "time",
913 [PARSE_EVENTS__TERM_TYPE_CALLGRAPH
] = "call-graph",
914 [PARSE_EVENTS__TERM_TYPE_STACKSIZE
] = "stack-size",
915 [PARSE_EVENTS__TERM_TYPE_NOINHERIT
] = "no-inherit",
916 [PARSE_EVENTS__TERM_TYPE_INHERIT
] = "inherit",
917 [PARSE_EVENTS__TERM_TYPE_MAX_STACK
] = "max-stack",
918 [PARSE_EVENTS__TERM_TYPE_OVERWRITE
] = "overwrite",
919 [PARSE_EVENTS__TERM_TYPE_NOOVERWRITE
] = "no-overwrite",
920 [PARSE_EVENTS__TERM_TYPE_DRV_CFG
] = "driver-config",
923 static bool config_term_shrinked
;
926 config_term_avail(int term_type
, struct parse_events_error
*err
)
928 if (term_type
< 0 || term_type
>= __PARSE_EVENTS__TERM_TYPE_NR
) {
929 err
->str
= strdup("Invalid term_type");
932 if (!config_term_shrinked
)
936 case PARSE_EVENTS__TERM_TYPE_CONFIG
:
937 case PARSE_EVENTS__TERM_TYPE_CONFIG1
:
938 case PARSE_EVENTS__TERM_TYPE_CONFIG2
:
939 case PARSE_EVENTS__TERM_TYPE_NAME
:
940 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD
:
946 /* term_type is validated so indexing is safe */
947 if (asprintf(&err
->str
, "'%s' is not usable in 'perf stat'",
948 config_term_names
[term_type
]) < 0)
954 void parse_events__shrink_config_terms(void)
956 config_term_shrinked
= true;
959 static int config_term_common(struct perf_event_attr
*attr
,
960 struct parse_events_term
*term
,
961 struct parse_events_error
*err
)
963 #define CHECK_TYPE_VAL(type) \
965 if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type)) \
969 switch (term
->type_term
) {
970 case PARSE_EVENTS__TERM_TYPE_CONFIG
:
972 attr
->config
= term
->val
.num
;
974 case PARSE_EVENTS__TERM_TYPE_CONFIG1
:
976 attr
->config1
= term
->val
.num
;
978 case PARSE_EVENTS__TERM_TYPE_CONFIG2
:
980 attr
->config2
= term
->val
.num
;
982 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD
:
985 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ
:
988 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE
:
990 if (strcmp(term
->val
.str
, "no") &&
991 parse_branch_str(term
->val
.str
, &attr
->branch_sample_type
)) {
992 err
->str
= strdup("invalid branch sample type");
993 err
->idx
= term
->err_val
;
997 case PARSE_EVENTS__TERM_TYPE_TIME
:
999 if (term
->val
.num
> 1) {
1000 err
->str
= strdup("expected 0 or 1");
1001 err
->idx
= term
->err_val
;
1005 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH
:
1006 CHECK_TYPE_VAL(STR
);
1008 case PARSE_EVENTS__TERM_TYPE_STACKSIZE
:
1009 CHECK_TYPE_VAL(NUM
);
1011 case PARSE_EVENTS__TERM_TYPE_INHERIT
:
1012 CHECK_TYPE_VAL(NUM
);
1014 case PARSE_EVENTS__TERM_TYPE_NOINHERIT
:
1015 CHECK_TYPE_VAL(NUM
);
1017 case PARSE_EVENTS__TERM_TYPE_OVERWRITE
:
1018 CHECK_TYPE_VAL(NUM
);
1020 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE
:
1021 CHECK_TYPE_VAL(NUM
);
1023 case PARSE_EVENTS__TERM_TYPE_NAME
:
1024 CHECK_TYPE_VAL(STR
);
1026 case PARSE_EVENTS__TERM_TYPE_MAX_STACK
:
1027 CHECK_TYPE_VAL(NUM
);
1030 err
->str
= strdup("unknown term");
1031 err
->idx
= term
->err_term
;
1032 err
->help
= parse_events_formats_error_string(NULL
);
1037 * Check term availability after basic checking so
1038 * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered.
1040 * If we check availability at the entry of this function,
1041 * user will see "'<sysfs term>' is not usable in 'perf stat'"
1042 * if an invalid config term is provided for legacy events
1043 * (for example, instructions/badterm/...), which is confusing.
1045 if (!config_term_avail(term
->type_term
, err
))
1048 #undef CHECK_TYPE_VAL
1051 static int config_term_pmu(struct perf_event_attr
*attr
,
1052 struct parse_events_term
*term
,
1053 struct parse_events_error
*err
)
1055 if (term
->type_term
== PARSE_EVENTS__TERM_TYPE_USER
||
1056 term
->type_term
== PARSE_EVENTS__TERM_TYPE_DRV_CFG
)
1058 * Always succeed for sysfs terms, as we dont know
1059 * at this point what type they need to have.
1063 return config_term_common(attr
, term
, err
);
1066 static int config_term_tracepoint(struct perf_event_attr
*attr
,
1067 struct parse_events_term
*term
,
1068 struct parse_events_error
*err
)
1070 switch (term
->type_term
) {
1071 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH
:
1072 case PARSE_EVENTS__TERM_TYPE_STACKSIZE
:
1073 case PARSE_EVENTS__TERM_TYPE_INHERIT
:
1074 case PARSE_EVENTS__TERM_TYPE_NOINHERIT
:
1075 case PARSE_EVENTS__TERM_TYPE_MAX_STACK
:
1076 case PARSE_EVENTS__TERM_TYPE_OVERWRITE
:
1077 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE
:
1078 return config_term_common(attr
, term
, err
);
1081 err
->idx
= term
->err_term
;
1082 err
->str
= strdup("unknown term");
1083 err
->help
= strdup("valid terms: call-graph,stack-size\n");
1091 static int config_attr(struct perf_event_attr
*attr
,
1092 struct list_head
*head
,
1093 struct parse_events_error
*err
,
1094 config_term_func_t config_term
)
1096 struct parse_events_term
*term
;
1098 list_for_each_entry(term
, head
, list
)
1099 if (config_term(attr
, term
, err
))
1105 static int get_config_terms(struct list_head
*head_config
,
1106 struct list_head
*head_terms __maybe_unused
)
1108 #define ADD_CONFIG_TERM(__type, __name, __val) \
1110 struct perf_evsel_config_term *__t; \
1112 __t = zalloc(sizeof(*__t)); \
1116 INIT_LIST_HEAD(&__t->list); \
1117 __t->type = PERF_EVSEL__CONFIG_TERM_ ## __type; \
1118 __t->val.__name = __val; \
1119 list_add_tail(&__t->list, head_terms); \
1122 struct parse_events_term
*term
;
1124 list_for_each_entry(term
, head_config
, list
) {
1125 switch (term
->type_term
) {
1126 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD
:
1127 ADD_CONFIG_TERM(PERIOD
, period
, term
->val
.num
);
1129 case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ
:
1130 ADD_CONFIG_TERM(FREQ
, freq
, term
->val
.num
);
1132 case PARSE_EVENTS__TERM_TYPE_TIME
:
1133 ADD_CONFIG_TERM(TIME
, time
, term
->val
.num
);
1135 case PARSE_EVENTS__TERM_TYPE_CALLGRAPH
:
1136 ADD_CONFIG_TERM(CALLGRAPH
, callgraph
, term
->val
.str
);
1138 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE
:
1139 ADD_CONFIG_TERM(BRANCH
, branch
, term
->val
.str
);
1141 case PARSE_EVENTS__TERM_TYPE_STACKSIZE
:
1142 ADD_CONFIG_TERM(STACK_USER
, stack_user
, term
->val
.num
);
1144 case PARSE_EVENTS__TERM_TYPE_INHERIT
:
1145 ADD_CONFIG_TERM(INHERIT
, inherit
, term
->val
.num
? 1 : 0);
1147 case PARSE_EVENTS__TERM_TYPE_NOINHERIT
:
1148 ADD_CONFIG_TERM(INHERIT
, inherit
, term
->val
.num
? 0 : 1);
1150 case PARSE_EVENTS__TERM_TYPE_MAX_STACK
:
1151 ADD_CONFIG_TERM(MAX_STACK
, max_stack
, term
->val
.num
);
1153 case PARSE_EVENTS__TERM_TYPE_OVERWRITE
:
1154 ADD_CONFIG_TERM(OVERWRITE
, overwrite
, term
->val
.num
? 1 : 0);
1156 case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE
:
1157 ADD_CONFIG_TERM(OVERWRITE
, overwrite
, term
->val
.num
? 0 : 1);
1159 case PARSE_EVENTS__TERM_TYPE_DRV_CFG
:
1160 ADD_CONFIG_TERM(DRV_CFG
, drv_cfg
, term
->val
.str
);
1166 #undef ADD_EVSEL_CONFIG
1170 int parse_events_add_tracepoint(struct list_head
*list
, int *idx
,
1171 const char *sys
, const char *event
,
1172 struct parse_events_error
*err
,
1173 struct list_head
*head_config
)
1176 struct perf_event_attr attr
;
1178 if (config_attr(&attr
, head_config
, err
,
1179 config_term_tracepoint
))
1183 if (strpbrk(sys
, "*?"))
1184 return add_tracepoint_multi_sys(list
, idx
, sys
, event
,
1187 return add_tracepoint_event(list
, idx
, sys
, event
,
1191 int parse_events_add_numeric(struct parse_events_state
*parse_state
,
1192 struct list_head
*list
,
1193 u32 type
, u64 config
,
1194 struct list_head
*head_config
)
1196 struct perf_event_attr attr
;
1197 LIST_HEAD(config_terms
);
1199 memset(&attr
, 0, sizeof(attr
));
1201 attr
.config
= config
;
1204 if (config_attr(&attr
, head_config
, parse_state
->error
,
1205 config_term_common
))
1208 if (get_config_terms(head_config
, &config_terms
))
1212 return add_event(list
, &parse_state
->idx
, &attr
,
1213 get_config_name(head_config
), &config_terms
);
1216 static int __parse_events_add_pmu(struct parse_events_state
*parse_state
,
1217 struct list_head
*list
, char *name
,
1218 struct list_head
*head_config
, bool auto_merge_stats
)
1220 struct perf_event_attr attr
;
1221 struct perf_pmu_info info
;
1222 struct perf_pmu
*pmu
;
1223 struct perf_evsel
*evsel
;
1224 struct parse_events_error
*err
= parse_state
->error
;
1225 LIST_HEAD(config_terms
);
1227 pmu
= perf_pmu__find(name
);
1229 if (asprintf(&err
->str
,
1230 "Cannot find PMU `%s'. Missing kernel support?",
1236 if (pmu
->default_config
) {
1237 memcpy(&attr
, pmu
->default_config
,
1238 sizeof(struct perf_event_attr
));
1240 memset(&attr
, 0, sizeof(attr
));
1244 attr
.type
= pmu
->type
;
1245 evsel
= __add_event(list
, &parse_state
->idx
, &attr
, NULL
, pmu
, NULL
, auto_merge_stats
);
1246 return evsel
? 0 : -ENOMEM
;
1249 if (perf_pmu__check_alias(pmu
, head_config
, &info
))
1253 * Configure hardcoded terms first, no need to check
1254 * return value when called with fail == 0 ;)
1256 if (config_attr(&attr
, head_config
, parse_state
->error
, config_term_pmu
))
1259 if (get_config_terms(head_config
, &config_terms
))
1262 if (perf_pmu__config(pmu
, &attr
, head_config
, parse_state
->error
))
1265 evsel
= __add_event(list
, &parse_state
->idx
, &attr
,
1266 get_config_name(head_config
), pmu
,
1267 &config_terms
, auto_merge_stats
);
1269 evsel
->unit
= info
.unit
;
1270 evsel
->scale
= info
.scale
;
1271 evsel
->per_pkg
= info
.per_pkg
;
1272 evsel
->snapshot
= info
.snapshot
;
1273 evsel
->metric_expr
= info
.metric_expr
;
1274 evsel
->metric_name
= info
.metric_name
;
1277 return evsel
? 0 : -ENOMEM
;
/*
 * Add an event for the PMU called @name to @list, applying the config
 * terms in @head_config.  Thin wrapper around __parse_events_add_pmu()
 * with stat auto-merging disabled.  Returns 0 on success, negative
 * error code otherwise.
 */
int parse_events_add_pmu(struct parse_events_state *parse_state,
			 struct list_head *list, char *name,
			 struct list_head *head_config)
{
	const bool auto_merge_stats = false;

	return __parse_events_add_pmu(parse_state, list, name,
				      head_config, auto_merge_stats);
}
1287 int parse_events_multi_pmu_add(struct parse_events_state
*parse_state
,
1288 char *str
, struct list_head
**listp
)
1290 struct list_head
*head
;
1291 struct parse_events_term
*term
;
1292 struct list_head
*list
;
1293 struct perf_pmu
*pmu
= NULL
;
1297 /* Add it for all PMUs that support the alias */
1298 list
= malloc(sizeof(struct list_head
));
1301 INIT_LIST_HEAD(list
);
1302 while ((pmu
= perf_pmu__scan(pmu
)) != NULL
) {
1303 struct perf_pmu_alias
*alias
;
1305 list_for_each_entry(alias
, &pmu
->aliases
, list
) {
1306 if (!strcasecmp(alias
->name
, str
)) {
1307 head
= malloc(sizeof(struct list_head
));
1310 INIT_LIST_HEAD(head
);
1311 if (parse_events_term__num(&term
, PARSE_EVENTS__TERM_TYPE_USER
,
1312 str
, 1, false, &str
, NULL
) < 0)
1314 list_add_tail(&term
->list
, head
);
1316 if (!__parse_events_add_pmu(parse_state
, list
,
1317 pmu
->name
, head
, true)) {
1318 pr_debug("%s -> %s/%s/\n", str
,
1319 pmu
->name
, alias
->str
);
1323 parse_events_terms__delete(head
);
/*
 * Apply modifier string @event_mod to every event in @list as a group
 * modifier.  Delegates to parse_events__modifier_event() with add=true.
 */
int parse_events__modifier_group(struct list_head *list,
				 char *event_mod)
{
	return parse_events__modifier_event(list, event_mod, /* add */ true);
}
1339 void parse_events__set_leader(char *name
, struct list_head
*list
)
1341 struct perf_evsel
*leader
;
1343 if (list_empty(list
)) {
1344 WARN_ONCE(true, "WARNING: failed to set leader: empty list");
1348 __perf_evlist__set_leader(list
);
1349 leader
= list_entry(list
->next
, struct perf_evsel
, node
);
1350 leader
->group_name
= name
? strdup(name
) : NULL
;
1353 /* list_event is assumed to point to malloc'ed memory */
/*
 * Called once per single-event definition: splice the freshly parsed
 * @list_event onto the accumulated @list_all, then release the
 * heap-allocated temporary list head so the parser can start the next
 * event definition with a fresh list.
 */
void parse_events_update_lists(struct list_head *list_event,
			       struct list_head *list_all)
{
	list_splice_tail(list_event, list_all);
	free(list_event);
}
1366 struct event_modifier
{
1381 static int get_event_modifier(struct event_modifier
*mod
, char *str
,
1382 struct perf_evsel
*evsel
)
1384 int eu
= evsel
? evsel
->attr
.exclude_user
: 0;
1385 int ek
= evsel
? evsel
->attr
.exclude_kernel
: 0;
1386 int eh
= evsel
? evsel
->attr
.exclude_hv
: 0;
1387 int eH
= evsel
? evsel
->attr
.exclude_host
: 0;
1388 int eG
= evsel
? evsel
->attr
.exclude_guest
: 0;
1389 int eI
= evsel
? evsel
->attr
.exclude_idle
: 0;
1390 int precise
= evsel
? evsel
->attr
.precise_ip
: 0;
1391 int precise_max
= 0;
1392 int sample_read
= 0;
1393 int pinned
= evsel
? evsel
->attr
.pinned
: 0;
1395 int exclude
= eu
| ek
| eh
;
1396 int exclude_GH
= evsel
? evsel
->exclude_GH
: 0;
1399 memset(mod
, 0, sizeof(*mod
));
1404 exclude
= eu
= ek
= eh
= 1;
1406 } else if (*str
== 'k') {
1408 exclude
= eu
= ek
= eh
= 1;
1410 } else if (*str
== 'h') {
1412 exclude
= eu
= ek
= eh
= 1;
1414 } else if (*str
== 'G') {
1416 exclude_GH
= eG
= eH
= 1;
1418 } else if (*str
== 'H') {
1420 exclude_GH
= eG
= eH
= 1;
1422 } else if (*str
== 'I') {
1424 } else if (*str
== 'p') {
1426 /* use of precise requires exclude_guest */
1429 } else if (*str
== 'P') {
1431 } else if (*str
== 'S') {
1433 } else if (*str
== 'D') {
1435 } else if (*str
== 'W') {
1446 * 0 - SAMPLE_IP can have arbitrary skid
1447 * 1 - SAMPLE_IP must have constant skid
1448 * 2 - SAMPLE_IP requested to have 0 skid
1449 * 3 - SAMPLE_IP must have 0 skid
1451 * See also PERF_RECORD_MISC_EXACT_IP
1462 mod
->precise
= precise
;
1463 mod
->precise_max
= precise_max
;
1464 mod
->exclude_GH
= exclude_GH
;
1465 mod
->sample_read
= sample_read
;
1466 mod
->pinned
= pinned
;
/*
 * Basic modifier sanity check to validate it contains only one
 * instance of any modifier (apart from 'p') present.
 * Returns 0 when valid, -1 when too long or a non-'p' char repeats.
 */
static int check_modifier(char *str)
{
	char *p = str;

	/* The sizeof includes 0 byte as well. */
	if (strlen(str) > (sizeof("ukhGHpppPSDIW") - 1))
		return -1;

	for (; *p; p++) {
		/* 'p' may legitimately repeat (precision level). */
		if (*p != 'p' && strchr(p + 1, *p))
			return -1;
	}

	return 0;
}
1493 int parse_events__modifier_event(struct list_head
*list
, char *str
, bool add
)
1495 struct perf_evsel
*evsel
;
1496 struct event_modifier mod
;
1501 if (check_modifier(str
))
1504 if (!add
&& get_event_modifier(&mod
, str
, NULL
))
1507 __evlist__for_each_entry(list
, evsel
) {
1508 if (add
&& get_event_modifier(&mod
, str
, evsel
))
1511 evsel
->attr
.exclude_user
= mod
.eu
;
1512 evsel
->attr
.exclude_kernel
= mod
.ek
;
1513 evsel
->attr
.exclude_hv
= mod
.eh
;
1514 evsel
->attr
.precise_ip
= mod
.precise
;
1515 evsel
->attr
.exclude_host
= mod
.eH
;
1516 evsel
->attr
.exclude_guest
= mod
.eG
;
1517 evsel
->attr
.exclude_idle
= mod
.eI
;
1518 evsel
->exclude_GH
= mod
.exclude_GH
;
1519 evsel
->sample_read
= mod
.sample_read
;
1520 evsel
->precise_max
= mod
.precise_max
;
1521 evsel
->weak_group
= mod
.weak
;
1523 if (perf_evsel__is_group_leader(evsel
))
1524 evsel
->attr
.pinned
= mod
.pinned
;
1530 int parse_events_name(struct list_head
*list
, char *name
)
1532 struct perf_evsel
*evsel
;
1534 __evlist__for_each_entry(list
, evsel
) {
1536 evsel
->name
= strdup(name
);
1543 comp_pmu(const void *p1
, const void *p2
)
1545 struct perf_pmu_event_symbol
*pmu1
= (struct perf_pmu_event_symbol
*) p1
;
1546 struct perf_pmu_event_symbol
*pmu2
= (struct perf_pmu_event_symbol
*) p2
;
1548 return strcasecmp(pmu1
->symbol
, pmu2
->symbol
);
1551 static void perf_pmu__parse_cleanup(void)
1553 if (perf_pmu_events_list_num
> 0) {
1554 struct perf_pmu_event_symbol
*p
;
1557 for (i
= 0; i
< perf_pmu_events_list_num
; i
++) {
1558 p
= perf_pmu_events_list
+ i
;
1561 zfree(&perf_pmu_events_list
);
1562 perf_pmu_events_list_num
= 0;
/*
 * Store an allocated name and its symbol type into the slot pointed to
 * by the local 'p' at the expansion site (perf_pmu__parse_init).
 * NOTE(review): macro body reconstructed from the mangled extraction;
 * it relies on a local 'p' and an 'err' label at the use site -- verify
 * against the original.
 */
#define SET_SYMBOL(str, stype)		\
do {					\
	p->symbol = str;		\
	if (!p->symbol)			\
		goto err;		\
	p->type = stype;		\
} while (0)
1575 * Read the pmu events list from sysfs
1576 * Save it into perf_pmu_events_list
1578 static void perf_pmu__parse_init(void)
1581 struct perf_pmu
*pmu
= NULL
;
1582 struct perf_pmu_alias
*alias
;
1586 while ((pmu
= perf_pmu__scan(pmu
)) != NULL
) {
1587 list_for_each_entry(alias
, &pmu
->aliases
, list
) {
1588 if (strchr(alias
->name
, '-'))
1595 perf_pmu_events_list_num
= -1;
1598 perf_pmu_events_list
= malloc(sizeof(struct perf_pmu_event_symbol
) * len
);
1599 if (!perf_pmu_events_list
)
1601 perf_pmu_events_list_num
= len
;
1605 while ((pmu
= perf_pmu__scan(pmu
)) != NULL
) {
1606 list_for_each_entry(alias
, &pmu
->aliases
, list
) {
1607 struct perf_pmu_event_symbol
*p
= perf_pmu_events_list
+ len
;
1608 char *tmp
= strchr(alias
->name
, '-');
1611 SET_SYMBOL(strndup(alias
->name
, tmp
- alias
->name
),
1612 PMU_EVENT_SYMBOL_PREFIX
);
1614 SET_SYMBOL(strdup(++tmp
), PMU_EVENT_SYMBOL_SUFFIX
);
1617 SET_SYMBOL(strdup(alias
->name
), PMU_EVENT_SYMBOL
);
1622 qsort(perf_pmu_events_list
, len
,
1623 sizeof(struct perf_pmu_event_symbol
), comp_pmu
);
1627 perf_pmu__parse_cleanup();
1630 enum perf_pmu_event_symbol_type
1631 perf_pmu__parse_check(const char *name
)
1633 struct perf_pmu_event_symbol p
, *r
;
1635 /* scan kernel pmu events from sysfs if needed */
1636 if (perf_pmu_events_list_num
== 0)
1637 perf_pmu__parse_init();
1639 * name "cpu" could be prefix of cpu-cycles or cpu// events.
1640 * cpu-cycles has been handled by hardcode.
1641 * So it must be cpu// events, not kernel pmu event.
1643 if ((perf_pmu_events_list_num
<= 0) || !strcmp(name
, "cpu"))
1644 return PMU_EVENT_SYMBOL_ERR
;
1646 p
.symbol
= strdup(name
);
1647 r
= bsearch(&p
, perf_pmu_events_list
,
1648 (size_t) perf_pmu_events_list_num
,
1649 sizeof(struct perf_pmu_event_symbol
), comp_pmu
);
1651 return r
? r
->type
: PMU_EVENT_SYMBOL_ERR
;
1654 static int parse_events__scanner(const char *str
, void *parse_state
, int start_token
)
1656 YY_BUFFER_STATE buffer
;
1660 ret
= parse_events_lex_init_extra(start_token
, &scanner
);
1664 buffer
= parse_events__scan_string(str
, scanner
);
1667 parse_events_debug
= 1;
1669 ret
= parse_events_parse(parse_state
, scanner
);
1671 parse_events__flush_buffer(buffer
, scanner
);
1672 parse_events__delete_buffer(buffer
, scanner
);
1673 parse_events_lex_destroy(scanner
);
1678 * parse event config string, return a list of event terms.
1680 int parse_events_terms(struct list_head
*terms
, const char *str
)
1682 struct parse_events_state parse_state
= {
1687 ret
= parse_events__scanner(str
, &parse_state
, PE_START_TERMS
);
1689 list_splice(parse_state
.terms
, terms
);
1690 zfree(&parse_state
.terms
);
1694 parse_events_terms__delete(parse_state
.terms
);
1698 int parse_events(struct perf_evlist
*evlist
, const char *str
,
1699 struct parse_events_error
*err
)
1701 struct parse_events_state parse_state
= {
1702 .list
= LIST_HEAD_INIT(parse_state
.list
),
1703 .idx
= evlist
->nr_entries
,
1709 ret
= parse_events__scanner(str
, &parse_state
, PE_START_EVENTS
);
1710 perf_pmu__parse_cleanup();
1712 struct perf_evsel
*last
;
1714 if (list_empty(&parse_state
.list
)) {
1715 WARN_ONCE(true, "WARNING: event parser found nothing");
1719 perf_evlist__splice_list_tail(evlist
, &parse_state
.list
);
1720 evlist
->nr_groups
+= parse_state
.nr_groups
;
1721 last
= perf_evlist__last(evlist
);
1722 last
->cmdline_group_boundary
= true;
1728 * There are 2 users - builtin-record and builtin-test objects.
1729 * Both call perf_evlist__delete in case of error, so we dont
#define MAX_WIDTH 1000
/* Terminal column count, clamped to MAX_WIDTH for the error buffer. */
static int get_term_width(void)
{
	struct winsize ws;

	get_term_dimensions(&ws);
	return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
}
1744 void parse_events_print_error(struct parse_events_error
*err
,
1747 const char *str
= "invalid or unsupported event: ";
1748 char _buf
[MAX_WIDTH
];
1749 char *buf
= (char *) event
;
1753 /* -2 for extra '' in the final fprintf */
1754 int width
= get_term_width() - 2;
1755 int len_event
= strlen(event
);
1756 int len_str
, max_len
, cut
= 0;
1759 * Maximum error index indent, we will cut
1760 * the event string if it's bigger.
1762 int max_err_idx
= 13;
1765 * Let's be specific with the message when
1766 * we have the precise error.
1768 str
= "event syntax error: ";
1769 len_str
= strlen(str
);
1770 max_len
= width
- len_str
;
1774 /* We're cutting from the beginning. */
1775 if (err
->idx
> max_err_idx
)
1776 cut
= err
->idx
- max_err_idx
;
1778 strncpy(buf
, event
+ cut
, max_len
);
1780 /* Mark cut parts with '..' on both sides. */
1782 buf
[0] = buf
[1] = '.';
1784 if ((len_event
- cut
) > max_len
) {
1785 buf
[max_len
- 1] = buf
[max_len
- 2] = '.';
1789 idx
= len_str
+ err
->idx
- cut
;
1792 fprintf(stderr
, "%s'%s'\n", str
, buf
);
1794 fprintf(stderr
, "%*s\\___ %s\n", idx
+ 1, "", err
->str
);
1796 fprintf(stderr
, "\n%s\n", err
->help
);
1804 int parse_events_option(const struct option
*opt
, const char *str
,
1805 int unset __maybe_unused
)
1807 struct perf_evlist
*evlist
= *(struct perf_evlist
**)opt
->value
;
1808 struct parse_events_error err
= { .idx
= 0, };
1809 int ret
= parse_events(evlist
, str
, &err
);
1812 parse_events_print_error(&err
, str
);
1813 fprintf(stderr
, "Run 'perf list' for a list of valid events\n");
1820 foreach_evsel_in_last_glob(struct perf_evlist
*evlist
,
1821 int (*func
)(struct perf_evsel
*evsel
,
1825 struct perf_evsel
*last
= NULL
;
1829 * Don't return when list_empty, give func a chance to report
1830 * error when it found last == NULL.
1832 * So no need to WARN here, let *func do this.
1834 if (evlist
->nr_entries
> 0)
1835 last
= perf_evlist__last(evlist
);
1838 err
= (*func
)(last
, arg
);
1844 if (last
->node
.prev
== &evlist
->entries
)
1846 last
= list_entry(last
->node
.prev
, struct perf_evsel
, node
);
1847 } while (!last
->cmdline_group_boundary
);
1852 static int set_filter(struct perf_evsel
*evsel
, const void *arg
)
1854 const char *str
= arg
;
1856 int nr_addr_filters
= 0;
1857 struct perf_pmu
*pmu
= NULL
;
1862 if (evsel
->attr
.type
== PERF_TYPE_TRACEPOINT
) {
1863 if (perf_evsel__append_tp_filter(evsel
, str
) < 0) {
1865 "not enough memory to hold filter string\n");
1872 while ((pmu
= perf_pmu__scan(pmu
)) != NULL
)
1873 if (pmu
->type
== evsel
->attr
.type
) {
1879 perf_pmu__scan_file(pmu
, "nr_addr_filters",
1880 "%d", &nr_addr_filters
);
1882 if (!nr_addr_filters
)
1885 if (perf_evsel__append_addr_filter(evsel
, str
) < 0) {
1887 "not enough memory to hold filter string\n");
1895 "--filter option should follow a -e tracepoint or HW tracer option\n");
1900 int parse_filter(const struct option
*opt
, const char *str
,
1901 int unset __maybe_unused
)
1903 struct perf_evlist
*evlist
= *(struct perf_evlist
**)opt
->value
;
1905 return foreach_evsel_in_last_glob(evlist
, set_filter
,
1909 static int add_exclude_perf_filter(struct perf_evsel
*evsel
,
1910 const void *arg __maybe_unused
)
1912 char new_filter
[64];
1914 if (evsel
== NULL
|| evsel
->attr
.type
!= PERF_TYPE_TRACEPOINT
) {
1916 "--exclude-perf option should follow a -e tracepoint option\n");
1920 snprintf(new_filter
, sizeof(new_filter
), "common_pid != %d", getpid());
1922 if (perf_evsel__append_tp_filter(evsel
, new_filter
) < 0) {
1924 "not enough memory to hold filter string\n");
1931 int exclude_perf(const struct option
*opt
,
1932 const char *arg __maybe_unused
,
1933 int unset __maybe_unused
)
1935 struct perf_evlist
*evlist
= *(struct perf_evlist
**)opt
->value
;
1937 return foreach_evsel_in_last_glob(evlist
, add_exclude_perf_filter
,
/*
 * Human-readable names indexed by PERF_TYPE_* (HARDWARE=0, SOFTWARE=1,
 * TRACEPOINT=2, HW_CACHE=3, RAW=4, BREAKPOINT=5).
 * NOTE(review): the first three entries are reconstructed (not visible
 * in the extraction) -- verify against the original.
 */
static const char * const event_type_descriptors[] = {
	"Hardware event",
	"Software event",
	"Tracepoint event",
	"Hardware cache event",
	"Raw hardware event descriptor",
	"Hardware breakpoint",
};
/* qsort() comparator for an array of C strings (char *). */
static int cmp_string(const void *a, const void *b)
{
	const char * const *as = a;
	const char * const *bs = b;

	return strcmp(*as, *bs);
}
1959 * Print the events from <debugfs_mount_point>/tracing/events
1962 void print_tracepoint_events(const char *subsys_glob
, const char *event_glob
,
1965 DIR *sys_dir
, *evt_dir
;
1966 struct dirent
*sys_dirent
, *evt_dirent
;
1967 char evt_path
[MAXPATHLEN
];
1968 char dir_path
[MAXPATHLEN
];
1969 char **evt_list
= NULL
;
1970 unsigned int evt_i
= 0, evt_num
= 0;
1971 bool evt_num_known
= false;
1974 sys_dir
= opendir(tracing_events_path
);
1978 if (evt_num_known
) {
1979 evt_list
= zalloc(sizeof(char *) * evt_num
);
1981 goto out_close_sys_dir
;
1984 for_each_subsystem(sys_dir
, sys_dirent
) {
1985 if (subsys_glob
!= NULL
&&
1986 !strglobmatch(sys_dirent
->d_name
, subsys_glob
))
1989 snprintf(dir_path
, MAXPATHLEN
, "%s/%s", tracing_events_path
,
1990 sys_dirent
->d_name
);
1991 evt_dir
= opendir(dir_path
);
1995 for_each_event(sys_dirent
, evt_dir
, evt_dirent
) {
1996 if (event_glob
!= NULL
&&
1997 !strglobmatch(evt_dirent
->d_name
, event_glob
))
2000 if (!evt_num_known
) {
2005 snprintf(evt_path
, MAXPATHLEN
, "%s:%s",
2006 sys_dirent
->d_name
, evt_dirent
->d_name
);
2008 evt_list
[evt_i
] = strdup(evt_path
);
2009 if (evt_list
[evt_i
] == NULL
)
2010 goto out_close_evt_dir
;
2017 if (!evt_num_known
) {
2018 evt_num_known
= true;
2021 qsort(evt_list
, evt_num
, sizeof(char *), cmp_string
);
2023 while (evt_i
< evt_num
) {
2025 printf("%s ", evt_list
[evt_i
++]);
2028 printf(" %-50s [%s]\n", evt_list
[evt_i
++],
2029 event_type_descriptors
[PERF_TYPE_TRACEPOINT
]);
2031 if (evt_num
&& pager_in_use())
2036 for (evt_i
= 0; evt_i
< evt_num
; evt_i
++)
2037 zfree(&evt_list
[evt_i
]);
2046 printf("FATAL: not enough memory to print %s\n",
2047 event_type_descriptors
[PERF_TYPE_TRACEPOINT
]);
2053 * Check whether event is in <debugfs_mount_point>/tracing/events
2056 int is_valid_tracepoint(const char *event_string
)
2058 DIR *sys_dir
, *evt_dir
;
2059 struct dirent
*sys_dirent
, *evt_dirent
;
2060 char evt_path
[MAXPATHLEN
];
2061 char dir_path
[MAXPATHLEN
];
2063 sys_dir
= opendir(tracing_events_path
);
2067 for_each_subsystem(sys_dir
, sys_dirent
) {
2069 snprintf(dir_path
, MAXPATHLEN
, "%s/%s", tracing_events_path
,
2070 sys_dirent
->d_name
);
2071 evt_dir
= opendir(dir_path
);
2075 for_each_event(sys_dirent
, evt_dir
, evt_dirent
) {
2076 snprintf(evt_path
, MAXPATHLEN
, "%s:%s",
2077 sys_dirent
->d_name
, evt_dirent
->d_name
);
2078 if (!strcmp(evt_path
, event_string
)) {
2090 static bool is_event_supported(u8 type
, unsigned config
)
2094 struct perf_evsel
*evsel
;
2095 struct perf_event_attr attr
= {
2100 struct thread_map
*tmap
= thread_map__new_by_tid(0);
2105 evsel
= perf_evsel__new(&attr
);
2107 open_return
= perf_evsel__open(evsel
, NULL
, tmap
);
2108 ret
= open_return
>= 0;
2110 if (open_return
== -EACCES
) {
2112 * This happens if the paranoid value
2113 * /proc/sys/kernel/perf_event_paranoid is set to 2
2114 * Re-run with exclude_kernel set; we don't do that
2115 * by default as some ARM machines do not support it.
2118 evsel
->attr
.exclude_kernel
= 1;
2119 ret
= perf_evsel__open(evsel
, NULL
, tmap
) >= 0;
2121 perf_evsel__delete(evsel
);
2127 void print_sdt_events(const char *subsys_glob
, const char *event_glob
,
2130 struct probe_cache
*pcache
;
2131 struct probe_cache_entry
*ent
;
2132 struct strlist
*bidlist
, *sdtlist
;
2133 struct strlist_config cfg
= {.dont_dupstr
= true};
2134 struct str_node
*nd
, *nd2
;
2135 char *buf
, *path
, *ptr
= NULL
;
2136 bool show_detail
= false;
2139 sdtlist
= strlist__new(NULL
, &cfg
);
2141 pr_debug("Failed to allocate new strlist for SDT\n");
2144 bidlist
= build_id_cache__list_all(true);
2146 pr_debug("Failed to get buildids: %d\n", errno
);
2149 strlist__for_each_entry(nd
, bidlist
) {
2150 pcache
= probe_cache__new(nd
->s
, NULL
);
2153 list_for_each_entry(ent
, &pcache
->entries
, node
) {
2157 !strglobmatch(ent
->pev
.group
, subsys_glob
))
2160 !strglobmatch(ent
->pev
.event
, event_glob
))
2162 ret
= asprintf(&buf
, "%s:%s@%s", ent
->pev
.group
,
2163 ent
->pev
.event
, nd
->s
);
2165 strlist__add(sdtlist
, buf
);
2167 probe_cache__delete(pcache
);
2169 strlist__delete(bidlist
);
2171 strlist__for_each_entry(nd
, sdtlist
) {
2172 buf
= strchr(nd
->s
, '@');
2176 printf("%s ", nd
->s
);
2179 nd2
= strlist__next(nd
);
2181 ptr
= strchr(nd2
->s
, '@');
2184 if (strcmp(nd
->s
, nd2
->s
) == 0)
2188 path
= build_id_cache__origname(buf
);
2189 ret
= asprintf(&buf
, "%s@%s(%.12s)", nd
->s
, path
, buf
);
2191 printf(" %-50s [%s]\n", buf
, "SDT event");
2195 printf(" %-50s [%s]\n", nd
->s
, "SDT event");
2197 if (strcmp(nd
->s
, nd2
->s
) != 0)
2198 show_detail
= false;
2203 strlist__delete(sdtlist
);
2206 int print_hwcache_events(const char *event_glob
, bool name_only
)
2208 unsigned int type
, op
, i
, evt_i
= 0, evt_num
= 0;
2210 char **evt_list
= NULL
;
2211 bool evt_num_known
= false;
2214 if (evt_num_known
) {
2215 evt_list
= zalloc(sizeof(char *) * evt_num
);
2220 for (type
= 0; type
< PERF_COUNT_HW_CACHE_MAX
; type
++) {
2221 for (op
= 0; op
< PERF_COUNT_HW_CACHE_OP_MAX
; op
++) {
2222 /* skip invalid cache type */
2223 if (!perf_evsel__is_cache_op_valid(type
, op
))
2226 for (i
= 0; i
< PERF_COUNT_HW_CACHE_RESULT_MAX
; i
++) {
2227 __perf_evsel__hw_cache_type_op_res_name(type
, op
, i
,
2228 name
, sizeof(name
));
2229 if (event_glob
!= NULL
&& !strglobmatch(name
, event_glob
))
2232 if (!is_event_supported(PERF_TYPE_HW_CACHE
,
2233 type
| (op
<< 8) | (i
<< 16)))
2236 if (!evt_num_known
) {
2241 evt_list
[evt_i
] = strdup(name
);
2242 if (evt_list
[evt_i
] == NULL
)
2249 if (!evt_num_known
) {
2250 evt_num_known
= true;
2253 qsort(evt_list
, evt_num
, sizeof(char *), cmp_string
);
2255 while (evt_i
< evt_num
) {
2257 printf("%s ", evt_list
[evt_i
++]);
2260 printf(" %-50s [%s]\n", evt_list
[evt_i
++],
2261 event_type_descriptors
[PERF_TYPE_HW_CACHE
]);
2263 if (evt_num
&& pager_in_use())
2268 for (evt_i
= 0; evt_i
< evt_num
; evt_i
++)
2269 zfree(&evt_list
[evt_i
]);
2274 printf("FATAL: not enough memory to print %s\n", event_type_descriptors
[PERF_TYPE_HW_CACHE
]);
2280 void print_symbol_events(const char *event_glob
, unsigned type
,
2281 struct event_symbol
*syms
, unsigned max
,
2284 unsigned int i
, evt_i
= 0, evt_num
= 0;
2285 char name
[MAX_NAME_LEN
];
2286 char **evt_list
= NULL
;
2287 bool evt_num_known
= false;
2290 if (evt_num_known
) {
2291 evt_list
= zalloc(sizeof(char *) * evt_num
);
2297 for (i
= 0; i
< max
; i
++, syms
++) {
2299 if (event_glob
!= NULL
&& syms
->symbol
!= NULL
&&
2300 !(strglobmatch(syms
->symbol
, event_glob
) ||
2301 (syms
->alias
&& strglobmatch(syms
->alias
, event_glob
))))
2304 if (!is_event_supported(type
, i
))
2307 if (!evt_num_known
) {
2312 if (!name_only
&& strlen(syms
->alias
))
2313 snprintf(name
, MAX_NAME_LEN
, "%s OR %s", syms
->symbol
, syms
->alias
);
2315 strncpy(name
, syms
->symbol
, MAX_NAME_LEN
);
2317 evt_list
[evt_i
] = strdup(name
);
2318 if (evt_list
[evt_i
] == NULL
)
2323 if (!evt_num_known
) {
2324 evt_num_known
= true;
2327 qsort(evt_list
, evt_num
, sizeof(char *), cmp_string
);
2329 while (evt_i
< evt_num
) {
2331 printf("%s ", evt_list
[evt_i
++]);
2334 printf(" %-50s [%s]\n", evt_list
[evt_i
++], event_type_descriptors
[type
]);
2336 if (evt_num
&& pager_in_use())
2341 for (evt_i
= 0; evt_i
< evt_num
; evt_i
++)
2342 zfree(&evt_list
[evt_i
]);
2347 printf("FATAL: not enough memory to print %s\n", event_type_descriptors
[type
]);
2353 * Print the help text for the event symbols:
2355 void print_events(const char *event_glob
, bool name_only
, bool quiet_flag
,
2356 bool long_desc
, bool details_flag
)
2358 print_symbol_events(event_glob
, PERF_TYPE_HARDWARE
,
2359 event_symbols_hw
, PERF_COUNT_HW_MAX
, name_only
);
2361 print_symbol_events(event_glob
, PERF_TYPE_SOFTWARE
,
2362 event_symbols_sw
, PERF_COUNT_SW_MAX
, name_only
);
2364 print_hwcache_events(event_glob
, name_only
);
2366 print_pmu_events(event_glob
, name_only
, quiet_flag
, long_desc
,
2369 if (event_glob
!= NULL
)
2373 printf(" %-50s [%s]\n",
2375 event_type_descriptors
[PERF_TYPE_RAW
]);
2376 printf(" %-50s [%s]\n",
2377 "cpu/t1=v1[,t2=v2,t3 ...]/modifier",
2378 event_type_descriptors
[PERF_TYPE_RAW
]);
2380 printf(" (see 'man perf-list' on how to encode it)\n\n");
2382 printf(" %-50s [%s]\n",
2383 "mem:<addr>[/len][:access]",
2384 event_type_descriptors
[PERF_TYPE_BREAKPOINT
]);
2389 print_tracepoint_events(NULL
, NULL
, name_only
);
2391 print_sdt_events(NULL
, NULL
, name_only
);
2393 metricgroup__print(true, true, NULL
, name_only
);
2396 int parse_events__is_hardcoded_term(struct parse_events_term
*term
)
2398 return term
->type_term
!= PARSE_EVENTS__TERM_TYPE_USER
;
2401 static int new_term(struct parse_events_term
**_term
,
2402 struct parse_events_term
*temp
,
2405 struct parse_events_term
*term
;
2407 term
= malloc(sizeof(*term
));
2412 INIT_LIST_HEAD(&term
->list
);
2414 switch (term
->type_val
) {
2415 case PARSE_EVENTS__TERM_TYPE_NUM
:
2416 term
->val
.num
= num
;
2418 case PARSE_EVENTS__TERM_TYPE_STR
:
2419 term
->val
.str
= str
;
2430 int parse_events_term__num(struct parse_events_term
**term
,
2431 int type_term
, char *config
, u64 num
,
2433 void *loc_term_
, void *loc_val_
)
2435 YYLTYPE
*loc_term
= loc_term_
;
2436 YYLTYPE
*loc_val
= loc_val_
;
2438 struct parse_events_term temp
= {
2439 .type_val
= PARSE_EVENTS__TERM_TYPE_NUM
,
2440 .type_term
= type_term
,
2442 .no_value
= no_value
,
2443 .err_term
= loc_term
? loc_term
->first_column
: 0,
2444 .err_val
= loc_val
? loc_val
->first_column
: 0,
2447 return new_term(term
, &temp
, NULL
, num
);
2450 int parse_events_term__str(struct parse_events_term
**term
,
2451 int type_term
, char *config
, char *str
,
2452 void *loc_term_
, void *loc_val_
)
2454 YYLTYPE
*loc_term
= loc_term_
;
2455 YYLTYPE
*loc_val
= loc_val_
;
2457 struct parse_events_term temp
= {
2458 .type_val
= PARSE_EVENTS__TERM_TYPE_STR
,
2459 .type_term
= type_term
,
2461 .err_term
= loc_term
? loc_term
->first_column
: 0,
2462 .err_val
= loc_val
? loc_val
->first_column
: 0,
2465 return new_term(term
, &temp
, str
, 0);
2468 int parse_events_term__sym_hw(struct parse_events_term
**term
,
2469 char *config
, unsigned idx
)
2471 struct event_symbol
*sym
;
2472 struct parse_events_term temp
= {
2473 .type_val
= PARSE_EVENTS__TERM_TYPE_STR
,
2474 .type_term
= PARSE_EVENTS__TERM_TYPE_USER
,
2475 .config
= config
?: (char *) "event",
2478 BUG_ON(idx
>= PERF_COUNT_HW_MAX
);
2479 sym
= &event_symbols_hw
[idx
];
2481 return new_term(term
, &temp
, (char *) sym
->symbol
, 0);
2484 int parse_events_term__clone(struct parse_events_term
**new,
2485 struct parse_events_term
*term
)
2487 struct parse_events_term temp
= {
2488 .type_val
= term
->type_val
,
2489 .type_term
= term
->type_term
,
2490 .config
= term
->config
,
2491 .err_term
= term
->err_term
,
2492 .err_val
= term
->err_val
,
2495 return new_term(new, &temp
, term
->val
.str
, term
->val
.num
);
2498 int parse_events_copy_term_list(struct list_head
*old
,
2499 struct list_head
**new)
2501 struct parse_events_term
*term
, *n
;
2509 *new = malloc(sizeof(struct list_head
));
2512 INIT_LIST_HEAD(*new);
2514 list_for_each_entry (term
, old
, list
) {
2515 ret
= parse_events_term__clone(&n
, term
);
2518 list_add_tail(&n
->list
, *new);
2523 void parse_events_terms__purge(struct list_head
*terms
)
2525 struct parse_events_term
*term
, *h
;
2527 list_for_each_entry_safe(term
, h
, terms
, list
) {
2528 if (term
->array
.nr_ranges
)
2529 zfree(&term
->array
.ranges
);
2530 list_del_init(&term
->list
);
/* Free a malloc'ed term-list head together with all its terms; NULL ok. */
void parse_events_terms__delete(struct list_head *terms)
{
	if (!terms)
		return;
	parse_events_terms__purge(terms);
	free(terms);
}
2543 void parse_events__clear_array(struct parse_events_array
*a
)
2548 void parse_events_evlist_error(struct parse_events_state
*parse_state
,
2549 int idx
, const char *str
)
2551 struct parse_events_error
*err
= parse_state
->error
;
2556 err
->str
= strdup(str
);
2557 WARN_ONCE(!err
->str
, "WARNING: failed to allocate error string");
2560 static void config_terms_list(char *buf
, size_t buf_sz
)
2566 for (i
= 0; i
< __PARSE_EVENTS__TERM_TYPE_NR
; i
++) {
2567 const char *name
= config_term_names
[i
];
2569 if (!config_term_avail(i
, NULL
))
2576 if (strlen(buf
) + strlen(name
) + 2 >= buf_sz
)
2588 * Return string contains valid config terms of an event.
2589 * @additional_terms: For terms such as PMU sysfs terms.
2591 char *parse_events_formats_error_string(char *additional_terms
)
2594 /* "no-overwrite" is the longest name */
2595 char static_terms
[__PARSE_EVENTS__TERM_TYPE_NR
*
2596 (sizeof("no-overwrite") - 1)];
2598 config_terms_list(static_terms
, sizeof(static_terms
));
2600 if (additional_terms
) {
2601 if (asprintf(&str
, "valid terms: %s,%s",
2602 additional_terms
, static_terms
) < 0)
2605 if (asprintf(&str
, "valid terms: %s", static_terms
) < 0)