// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */

#include <linux/bitops.h>
#include <api/fs/fs.h>
#include <api/fs/tracing_path.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <sys/ioctl.h>
#include <sys/resource.h>
#include <sys/types.h>
#include "callchain.h"
#include "thread_map.h"
#include "perf_regs.h"
#include "trace-event.h"
#include "util/parse-branch-options.h"
#include <linux/ctype.h>
struct perf_missing_features perf_missing_features;

static clockid_t clockid;

static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused)

void __weak test_attr__ready(void) { }

static void perf_evsel__no_extra_fini(struct perf_evsel *evsel __maybe_unused)

static struct {
    size_t size;
    int    (*init)(struct perf_evsel *evsel);
    void   (*fini)(struct perf_evsel *evsel);
} perf_evsel__object = {
    .size = sizeof(struct perf_evsel),
    .init = perf_evsel__no_extra_init,
    .fini = perf_evsel__no_extra_fini,

int perf_evsel__object_config(size_t object_size,
                              int (*init)(struct perf_evsel *evsel),
                              void (*fini)(struct perf_evsel *evsel))

    if (perf_evsel__object.size > object_size)

    perf_evsel__object.size = object_size;

    perf_evsel__object.init = init;

    perf_evsel__object.fini = fini;

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
int __perf_evsel__sample_size(u64 sample_type)

    u64 mask = sample_type & PERF_SAMPLE_MASK;

    for (i = 0; i < 64; i++) {
        if (mask & (1ULL << i))
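/*
 * For illustration, a rough equivalent of the loop above: every bit of
 * sample_type that falls inside PERF_SAMPLE_MASK contributes one u64 to
 * the fixed portion of a sample, so:
 *
 *     size = __builtin_popcountll(sample_type & PERF_SAMPLE_MASK) *
 *            sizeof(u64);
 *
 * e.g. PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME gives
 * 3 * sizeof(u64) = 24 bytes (TID packs pid and tid into a single u64).
 */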
/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
 * sample_event.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)

    if (sample_type & PERF_SAMPLE_IDENTIFIER)

    if (!(sample_type & PERF_SAMPLE_ID))

    if (sample_type & PERF_SAMPLE_IP)

    if (sample_type & PERF_SAMPLE_TID)

    if (sample_type & PERF_SAMPLE_TIME)

    if (sample_type & PERF_SAMPLE_ADDR)
/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 * sample_id_all is used there is an id sample appended to non-sample events.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)

    if (sample_type & PERF_SAMPLE_IDENTIFIER)

    if (!(sample_type & PERF_SAMPLE_ID))

    if (sample_type & PERF_SAMPLE_CPU)

    if (sample_type & PERF_SAMPLE_STREAM_ID)
void perf_evsel__calc_id_pos(struct perf_evsel *evsel)

    evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
    evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
                                  enum perf_event_sample_format bit)

    if (!(evsel->attr.sample_type & bit)) {
        evsel->attr.sample_type |= bit;
        evsel->sample_size += sizeof(u64);
        perf_evsel__calc_id_pos(evsel);

void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
                                    enum perf_event_sample_format bit)

    if (evsel->attr.sample_type & bit) {
        evsel->attr.sample_type &= ~bit;
        evsel->sample_size -= sizeof(u64);
        perf_evsel__calc_id_pos(evsel);
void perf_evsel__set_sample_id(struct perf_evsel *evsel,
                               bool can_sample_identifier)

    if (can_sample_identifier) {
        perf_evsel__reset_sample_bit(evsel, ID);
        perf_evsel__set_sample_bit(evsel, IDENTIFIER);
    } else {
        perf_evsel__set_sample_bit(evsel, ID);
    }

    evsel->attr.read_format |= PERF_FORMAT_ID;
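/*
 * Sketch of the resulting sample layout: with PERF_SAMPLE_IDENTIFIER the
 * id is the very first u64 of every sample, so it can be found without
 * knowing the rest of the sample_type; with plain PERF_SAMPLE_ID its
 * position depends on which earlier bits (IP, TID, TIME, ADDR) are set,
 * which is what __perf_evsel__calc_id_pos() computes, e.g.:
 *
 *     int id_pos = __perf_evsel__calc_id_pos(PERF_SAMPLE_IP |
 *                                            PERF_SAMPLE_TID |
 *                                            PERF_SAMPLE_ID);
 *     // id sits at index 2: [0]=ip, [1]=pid/tid, [2]=id
 */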
/*
 * perf_evsel__is_function_event - Return whether given evsel is a function
 * trace event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true if event is function trace event
 */
bool perf_evsel__is_function_event(struct perf_evsel *evsel)

#define FUNCTION_EVENT "ftrace:function"

    return evsel->name &&
           !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));

#undef FUNCTION_EVENT
void perf_evsel__init(struct perf_evsel *evsel,
                      struct perf_event_attr *attr, int idx)

    evsel->tracking = !idx;
    evsel->leader = evsel;
    evsel->max_events = ULONG_MAX;
    evsel->evlist = NULL;
    INIT_LIST_HEAD(&evsel->node);
    INIT_LIST_HEAD(&evsel->config_terms);
    perf_evsel__object.init(evsel);
    evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
    perf_evsel__calc_id_pos(evsel);
    evsel->cmdline_group_boundary = false;
    evsel->metric_expr = NULL;
    evsel->metric_name = NULL;
    evsel->metric_events = NULL;
    evsel->collect_stat = false;
    evsel->pmu_name = NULL;
struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)

    struct perf_evsel *evsel = zalloc(perf_evsel__object.size);

    perf_evsel__init(evsel, attr, idx);

    if (perf_evsel__is_bpf_output(evsel)) {
        evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
                                    PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
        evsel->attr.sample_period = 1;

    if (perf_evsel__is_clock(evsel)) {
        /*
         * The evsel->unit points to static alias->unit
         * so it's ok to use static string in here.
         */
        static const char *unit = "msec";
static bool perf_event_can_profile_kernel(void)

    return geteuid() == 0 || perf_event_paranoid() == -1;
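/*
 * For reference, a minimal stand-alone sketch of the same check; the
 * helper above uses perf's perf_event_paranoid() wrapper, which reads
 * the sysctl below:
 *
 *     int paranoid = 99;  // assume most restrictive if unreadable
 *     FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
 *
 *     if (f) {
 *         fscanf(f, "%d", &paranoid);
 *         fclose(f);
 *     }
 *     bool can_profile_kernel = geteuid() == 0 || paranoid == -1;
 */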
struct perf_evsel *perf_evsel__new_cycles(bool precise)

    struct perf_event_attr attr = {
        .type           = PERF_TYPE_HARDWARE,
        .config         = PERF_COUNT_HW_CPU_CYCLES,
        .exclude_kernel = !perf_event_can_profile_kernel(),

    struct perf_evsel *evsel;

    event_attr_init(&attr);

    /*
     * Now let the usual logic to set up the perf_event_attr defaults
     * to kick in when we return and before perf_evsel__open() is called.
     */
    evsel = perf_evsel__new(&attr);

    evsel->precise_max = true;

    /* use asprintf() because free(evsel) assumes name is allocated */
    if (asprintf(&evsel->name, "cycles%s%s%.*s",
                 (attr.precise_ip || attr.exclude_kernel) ? ":" : "",
                 attr.exclude_kernel ? "u" : "",
                 attr.precise_ip ? attr.precise_ip + 1 : 0, "ppp") < 0)

    perf_evsel__delete(evsel);
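/*
 * The "%.*s" pair above prints at most N characters of "ppp", where N is
 * the preceding int argument, so the synthesized names look like (for
 * illustration):
 *
 *     precise_ip == 0, exclude_kernel == 0  ->  "cycles"
 *     precise_ip == 0, exclude_kernel == 1  ->  "cycles:u"
 *     precise_ip == 2, exclude_kernel == 0  ->  "cycles:ppp"
 *
 * (a precision of precise_ip + 1 is clamped by the 3-char "ppp" source).
 */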
/*
 * Returns pointer with encoded error via <linux/err.h> interface.
 */
struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)

    struct perf_evsel *evsel = zalloc(perf_evsel__object.size);

    struct perf_event_attr attr = {
        .type        = PERF_TYPE_TRACEPOINT,
        .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
                        PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),

    if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)

    evsel->tp_format = trace_event__tp_format(sys, name);
    if (IS_ERR(evsel->tp_format)) {
        err = PTR_ERR(evsel->tp_format);

    event_attr_init(&attr);
    attr.config = evsel->tp_format->id;
    attr.sample_period = 1;
    perf_evsel__init(evsel, &attr, idx);
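/*
 * A typical call site, for illustration: callers check the <linux/err.h>
 * style return value rather than comparing against NULL, e.g.:
 *
 *     struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
 *
 *     if (IS_ERR(evsel))
 *         return PTR_ERR(evsel);  // e.g. -ENOMEM or a tracefs error
 */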
const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
    "stalled-cycles-frontend",
    "stalled-cycles-backend",

static const char *__perf_evsel__hw_name(u64 config)

    if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
        return perf_evsel__hw_names[config];

    return "unknown-hardware";
static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)

    int colon = 0, r = 0;
    struct perf_event_attr *attr = &evsel->attr;
    bool exclude_guest_default = false;

#define MOD_PRINT(context, mod) do {                            \
        if (!attr->exclude_##context) {                         \
            if (!colon) colon = ++r;                            \
            r += scnprintf(bf + r, size - r, "%c", mod);        \

    if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
        MOD_PRINT(kernel, 'k');
        MOD_PRINT(user, 'u');
        exclude_guest_default = true;

    if (attr->precise_ip) {
        r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
        exclude_guest_default = true;

    if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
        MOD_PRINT(host, 'H');
        MOD_PRINT(guest, 'G');
static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)

    int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
    return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);

const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {

static const char *__perf_evsel__sw_name(u64 config)

    if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
        return perf_evsel__sw_names[config];
    return "unknown-software";
static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)

    int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
    return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)

    r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

    if (type & HW_BREAKPOINT_R)
        r += scnprintf(bf + r, size - r, "r");

    if (type & HW_BREAKPOINT_W)
        r += scnprintf(bf + r, size - r, "w");

    if (type & HW_BREAKPOINT_X)
        r += scnprintf(bf + r, size - r, "x");

static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)

    struct perf_event_attr *attr = &evsel->attr;
    int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
    return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
                                [PERF_EVSEL__MAX_ALIASES] = {
    { "L1-dcache", "l1-d",     "l1d", "L1-data",        },
    { "L1-icache", "l1-i",     "l1i", "L1-instruction", },
    { "dTLB",      "d-tlb",    "Data-TLB",              },
    { "iTLB",      "i-tlb",    "Instruction-TLB",       },
    { "branch",    "branches", "bpu", "btb", "bpc",     },

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
                                   [PERF_EVSEL__MAX_ALIASES] = {
    { "load",     "loads",      "read",                             },
    { "store",    "stores",     "write",                            },
    { "prefetch", "prefetches", "speculative-read", "speculative-load", },

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
                                       [PERF_EVSEL__MAX_ALIASES] = {
    { "refs",   "Reference", "ops", "access", },
    { "misses", "miss",                       },

#define C(x)            PERF_COUNT_HW_CACHE_##x
#define CACHE_READ      (1 << C(OP_READ))
#define CACHE_WRITE     (1 << C(OP_WRITE))
#define CACHE_PREFETCH  (1 << C(OP_PREFETCH))
#define COP(x)          (1 << x)
/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
    [C(L1D)]  = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
    [C(L1I)]  = (CACHE_READ | CACHE_PREFETCH),
    [C(LL)]   = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
    [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
    [C(ITLB)] = (CACHE_READ),
    [C(BPU)]  = (CACHE_READ),
    [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)

    if (perf_evsel__hw_cache_stat[type] & COP(op))
        return true;    /* valid */

    return false;   /* invalid */
int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
                                            char *bf, size_t size)

    return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
                     perf_evsel__hw_cache_op[op][0],
                     perf_evsel__hw_cache_result[result][0]);

    return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
                     perf_evsel__hw_cache_op[op][1]);

static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)

    u8 op, result, type = (config >> 0) & 0xff;
    const char *err = "unknown-ext-hardware-cache-type";

    if (type >= PERF_COUNT_HW_CACHE_MAX)

    op = (config >> 8) & 0xff;
    err = "unknown-ext-hardware-cache-op";
    if (op >= PERF_COUNT_HW_CACHE_OP_MAX)

    result = (config >> 16) & 0xff;
    err = "unknown-ext-hardware-cache-result";
    if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)

    err = "invalid-cache";
    if (!perf_evsel__is_cache_op_valid(type, op))

    return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);

    return scnprintf(bf, size, "%s", err);
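/*
 * The hardware cache config encoding decoded above, as documented in
 * include/uapi/linux/perf_event.h:
 *
 *     config = (perf_hw_cache_id)           <<  0 |
 *              (perf_hw_cache_op_id)        <<  8 |
 *              (perf_hw_cache_op_result_id) << 16;
 *
 * e.g. L1D read misses:
 *
 *     u64 config = PERF_COUNT_HW_CACHE_L1D |
 *                  (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *                  (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *     // rendered by the helpers above as "L1-dcache-load-misses"
 */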
static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)

    int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
    return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);

static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)

    int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
    return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);

static int perf_evsel__tool_name(char *bf, size_t size)

    int ret = scnprintf(bf, size, "duration_time");
const char *perf_evsel__name(struct perf_evsel *evsel)

    switch (evsel->attr.type) {
        perf_evsel__raw_name(evsel, bf, sizeof(bf));

    case PERF_TYPE_HARDWARE:
        perf_evsel__hw_name(evsel, bf, sizeof(bf));

    case PERF_TYPE_HW_CACHE:
        perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));

    case PERF_TYPE_SOFTWARE:
        if (evsel->tool_event)
            perf_evsel__tool_name(bf, sizeof(bf));
            perf_evsel__sw_name(evsel, bf, sizeof(bf));

    case PERF_TYPE_TRACEPOINT:
        scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");

    case PERF_TYPE_BREAKPOINT:
        perf_evsel__bp_name(evsel, bf, sizeof(bf));

        scnprintf(bf, sizeof(bf), "unknown attr type: %d",

    evsel->name = strdup(bf);
const char *perf_evsel__group_name(struct perf_evsel *evsel)

    return evsel->group_name ?: "anon group";

/*
 * Returns the group details for the specified leader,
 * with the following rules.
 *
 *  For record -e '{cycles,instructions}'
 *    'anon group { cycles:u, instructions:u }'
 *
 *  For record -e 'cycles,instructions' and report --group
 *    'cycles:u, instructions:u'
 */
int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)

    struct perf_evsel *pos;
    const char *group_name = perf_evsel__group_name(evsel);

    if (!evsel->forced_leader)
        ret = scnprintf(buf, size, "%s { ", group_name);

    ret += scnprintf(buf + ret, size - ret, "%s",
                     perf_evsel__name(evsel));

    for_each_group_member(pos, evsel)
        ret += scnprintf(buf + ret, size - ret, ", %s",
                         perf_evsel__name(pos));

    if (!evsel->forced_leader)
        ret += scnprintf(buf + ret, size - ret, " }");
static void __perf_evsel__config_callchain(struct perf_evsel *evsel,
                                           struct record_opts *opts,
                                           struct callchain_param *param)

    bool function = perf_evsel__is_function_event(evsel);
    struct perf_event_attr *attr = &evsel->attr;

    perf_evsel__set_sample_bit(evsel, CALLCHAIN);

    attr->sample_max_stack = param->max_stack;

    if (opts->kernel_callchains)
        attr->exclude_callchain_user = 1;
    if (opts->user_callchains)
        attr->exclude_callchain_kernel = 1;
    if (param->record_mode == CALLCHAIN_LBR) {
        if (!opts->branch_stack) {
            if (attr->exclude_user) {
                pr_warning("LBR callstack option is only available "
                           "to get user callchain information. "
                           "Falling back to framepointers.\n");

                perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
                attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
                                           PERF_SAMPLE_BRANCH_CALL_STACK |
                                           PERF_SAMPLE_BRANCH_NO_CYCLES |
                                           PERF_SAMPLE_BRANCH_NO_FLAGS;

            pr_warning("Cannot use LBR callstack with branch stack. "
                       "Falling back to framepointers.\n");

    if (param->record_mode == CALLCHAIN_DWARF) {
        perf_evsel__set_sample_bit(evsel, REGS_USER);
        perf_evsel__set_sample_bit(evsel, STACK_USER);
        if (opts->sample_user_regs && DWARF_MINIMAL_REGS != PERF_REGS_MASK) {
            attr->sample_regs_user |= DWARF_MINIMAL_REGS;
            pr_warning("WARNING: The use of --call-graph=dwarf may require all the user registers, "
                       "specifying a subset with --user-regs may render DWARF unwinding unreliable, "
                       "so the minimal registers set (IP, SP) is explicitly forced.\n");

            attr->sample_regs_user |= PERF_REGS_MASK;

        attr->sample_stack_user = param->dump_size;
        attr->exclude_callchain_user = 1;

        pr_info("Cannot use DWARF unwind for function trace event,"
                " falling back to framepointers.\n");

    pr_info("Disabling user space callchains for function trace event.\n");
    attr->exclude_callchain_user = 1;
void perf_evsel__config_callchain(struct perf_evsel *evsel,
                                  struct record_opts *opts,
                                  struct callchain_param *param)

    return __perf_evsel__config_callchain(evsel, opts, param);
perf_evsel__reset_callgraph(struct perf_evsel *evsel,
                            struct callchain_param *param)

    struct perf_event_attr *attr = &evsel->attr;

    perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
    if (param->record_mode == CALLCHAIN_LBR) {
        perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
        attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
                                      PERF_SAMPLE_BRANCH_CALL_STACK);

    if (param->record_mode == CALLCHAIN_DWARF) {
        perf_evsel__reset_sample_bit(evsel, REGS_USER);
        perf_evsel__reset_sample_bit(evsel, STACK_USER);
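/*
 * Summary of what each record_mode toggles in perf_event_attr, as a
 * rough reference for the two functions above:
 *
 *     CALLCHAIN_FP    -> PERF_SAMPLE_CALLCHAIN only
 *     CALLCHAIN_DWARF -> + PERF_SAMPLE_REGS_USER / PERF_SAMPLE_STACK_USER
 *                          (sample_stack_user = dump_size)
 *     CALLCHAIN_LBR   -> + PERF_SAMPLE_BRANCH_STACK with
 *                          PERF_SAMPLE_BRANCH_CALL_STACK
 */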
static void apply_config_terms(struct perf_evsel *evsel,
                               struct record_opts *opts, bool track)

    struct perf_evsel_config_term *term;
    struct list_head *config_terms = &evsel->config_terms;
    struct perf_event_attr *attr = &evsel->attr;
    /* callgraph default */
    struct callchain_param param = {
        .record_mode = callchain_param.record_mode,

    const char *callgraph_buf = NULL;

    list_for_each_entry(term, config_terms, list) {
        switch (term->type) {
        case PERF_EVSEL__CONFIG_TERM_PERIOD:
            if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
                attr->sample_period = term->val.period;
                perf_evsel__reset_sample_bit(evsel, PERIOD);

        case PERF_EVSEL__CONFIG_TERM_FREQ:
            if (!(term->weak && opts->user_freq != UINT_MAX)) {
                attr->sample_freq = term->val.freq;
                perf_evsel__set_sample_bit(evsel, PERIOD);

        case PERF_EVSEL__CONFIG_TERM_TIME:
                perf_evsel__set_sample_bit(evsel, TIME);
                perf_evsel__reset_sample_bit(evsel, TIME);

        case PERF_EVSEL__CONFIG_TERM_CALLGRAPH:
            callgraph_buf = term->val.callgraph;

        case PERF_EVSEL__CONFIG_TERM_BRANCH:
            if (term->val.branch && strcmp(term->val.branch, "no")) {
                perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
                parse_branch_str(term->val.branch,
                                 &attr->branch_sample_type);
                perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);

        case PERF_EVSEL__CONFIG_TERM_STACK_USER:
            dump_size = term->val.stack_user;

        case PERF_EVSEL__CONFIG_TERM_MAX_STACK:
            max_stack = term->val.max_stack;

        case PERF_EVSEL__CONFIG_TERM_MAX_EVENTS:
            evsel->max_events = term->val.max_events;

        case PERF_EVSEL__CONFIG_TERM_INHERIT:
            /*
             * attr->inherit should have already been set by
             * perf_evsel__config. If user explicitly set
             * inherit using config terms, override global
             * opt->no_inherit setting.
             */
            attr->inherit = term->val.inherit ? 1 : 0;
        case PERF_EVSEL__CONFIG_TERM_OVERWRITE:
            attr->write_backward = term->val.overwrite ? 1 : 0;

        case PERF_EVSEL__CONFIG_TERM_DRV_CFG:
        case PERF_EVSEL__CONFIG_TERM_PERCORE:

    /* User explicitly set per-event callgraph, clear the old setting and reset. */
    if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
        bool sample_address = false;

        param.max_stack = max_stack;
        if (callgraph_buf == NULL)
            callgraph_buf = "fp";

        /* parse callgraph parameters */
        if (callgraph_buf != NULL) {
            if (!strcmp(callgraph_buf, "no")) {
                param.enabled = false;
                param.record_mode = CALLCHAIN_NONE;

                param.enabled = true;
                if (parse_callchain_record(callgraph_buf, &param)) {
                    pr_err("per-event callgraph setting for %s failed. "
                           "Apply callgraph global setting for it\n",

        if (param.record_mode == CALLCHAIN_DWARF)
            sample_address = true;

        dump_size = round_up(dump_size, sizeof(u64));
        param.dump_size = dump_size;

        /* If global callgraph set, clear it */
        if (callchain_param.enabled)
            perf_evsel__reset_callgraph(evsel, &callchain_param);

        /* set perf-event callgraph */
        if (sample_address) {
            perf_evsel__set_sample_bit(evsel, ADDR);
            perf_evsel__set_sample_bit(evsel, DATA_SRC);
            evsel->attr.mmap_data = track;

        perf_evsel__config_callchain(evsel, opts, &param);
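/*
 * These terms come from the event parser's /.../ syntax, e.g. (sketch):
 *
 *     perf record -e 'cycles/period=100000,call-graph=dwarf,stack-size=4096/'
 *
 * which reaches this function as CONFIG_TERM_PERIOD,
 * CONFIG_TERM_CALLGRAPH and CONFIG_TERM_STACK_USER entries, overriding
 * the global -c/--call-graph options for just that event.
 */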
static bool is_dummy_event(struct perf_evsel *evsel)

    return (evsel->attr.type == PERF_TYPE_SOFTWARE) &&
           (evsel->attr.config == PERF_COUNT_SW_DUMMY);
/*
 * The enable_on_exec/disabled value strategy:
 *
 *  1) For any type of traced program:
 *      - all independent events and group leaders are disabled
 *      - all group members are enabled
 *
 *      Group members are ruled by group leaders. They need to
 *      be enabled, because the group scheduling relies on that.
 *
 *  2) For traced programs executed by perf:
 *      - all independent events and group leaders have
 *        enable_on_exec set
 *      - we don't specifically enable or disable any event during
 *        the record command
 *
 *      Independent events and group leaders are initially disabled
 *      and get enabled by exec. Group members are ruled by group
 *      leaders as stated in 1).
 *
 *  3) For traced programs attached by perf (pid/tid):
 *      - we specifically enable or disable all events during
 *        the record command
 *
 *      When attaching events to an already running traced program we
 *      enable/disable events specifically, as there's no
 *      initial traced exec call.
 */
void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
                        struct callchain_param *callchain)

    struct perf_evsel *leader = evsel->leader;
    struct perf_event_attr *attr = &evsel->attr;
    int track = evsel->tracking;
    bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;

    attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
    attr->inherit       = !opts->no_inherit;
    attr->write_backward = opts->overwrite ? 1 : 0;

    perf_evsel__set_sample_bit(evsel, IP);
    perf_evsel__set_sample_bit(evsel, TID);

    if (evsel->sample_read) {
        perf_evsel__set_sample_bit(evsel, READ);

        /*
         * We need ID even in case of single event, because
         * PERF_SAMPLE_READ processes ID specific data.
         */
        perf_evsel__set_sample_id(evsel, false);

        /*
         * Apply group format only if we belong to a group
         * with more than one member.
         */
        if (leader->nr_members > 1) {
            attr->read_format |= PERF_FORMAT_GROUP;
    /*
     * We default some events to have a default interval. But keep
     * it a weak assumption overridable by the user.
     */
    if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
                                 opts->user_interval != ULLONG_MAX)) {
        perf_evsel__set_sample_bit(evsel, PERIOD);
            attr->sample_freq = opts->freq;
            attr->sample_period = opts->default_interval;

    /*
     * Disable sampling for all group members other
     * than leader in case leader 'leads' the sampling.
     */
    if ((leader != evsel) && leader->sample_read) {
        attr->sample_freq    = 0;
        attr->sample_period  = 0;
        attr->write_backward = 0;

        /*
         * We don't get samples for slave events, we make them
         * when delivering the group leader sample. Set the slave
         * event to follow the master sample_type to ease up
         * reporting.
         */
        attr->sample_type = leader->attr.sample_type;
    if (opts->no_samples)
        attr->sample_freq = 0;

    if (opts->inherit_stat) {
        evsel->attr.read_format |=
            PERF_FORMAT_TOTAL_TIME_ENABLED |
            PERF_FORMAT_TOTAL_TIME_RUNNING |
            PERF_FORMAT_ID;
        attr->inherit_stat = 1;

    if (opts->sample_address) {
        perf_evsel__set_sample_bit(evsel, ADDR);
        attr->mmap_data = track;

    /*
     * We don't allow user space callchains for function trace
     * events, due to issues with page faults while tracing the page
     * fault handler and its overall trickiness.
     */
    if (perf_evsel__is_function_event(evsel))
        evsel->attr.exclude_callchain_user = 1;

    if (callchain && callchain->enabled && !evsel->no_aux_samples)
        perf_evsel__config_callchain(evsel, opts, callchain);

    if (opts->sample_intr_regs) {
        attr->sample_regs_intr = opts->sample_intr_regs;
        perf_evsel__set_sample_bit(evsel, REGS_INTR);

    if (opts->sample_user_regs) {
        attr->sample_regs_user |= opts->sample_user_regs;
        perf_evsel__set_sample_bit(evsel, REGS_USER);

    if (target__has_cpu(&opts->target) || opts->sample_cpu)
        perf_evsel__set_sample_bit(evsel, CPU);
    /*
     * When the user explicitly disabled time, don't force it here.
     */
    if (opts->sample_time &&
        (!perf_missing_features.sample_id_all &&
        (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
         opts->sample_time_set)))
        perf_evsel__set_sample_bit(evsel, TIME);

    if (opts->raw_samples && !evsel->no_aux_samples) {
        perf_evsel__set_sample_bit(evsel, TIME);
        perf_evsel__set_sample_bit(evsel, RAW);
        perf_evsel__set_sample_bit(evsel, CPU);

    if (opts->sample_address)
        perf_evsel__set_sample_bit(evsel, DATA_SRC);

    if (opts->sample_phys_addr)
        perf_evsel__set_sample_bit(evsel, PHYS_ADDR);

    if (opts->no_buffering) {
        attr->watermark = 0;
        attr->wakeup_events = 1;

    if (opts->branch_stack && !evsel->no_aux_samples) {
        perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
        attr->branch_sample_type = opts->branch_stack;

    if (opts->sample_weight)
        perf_evsel__set_sample_bit(evsel, WEIGHT);
    attr->mmap2 = track && !perf_missing_features.mmap2;
    attr->ksymbol = track && !perf_missing_features.ksymbol;
    attr->bpf_event = track && !opts->no_bpf_event &&
        !perf_missing_features.bpf_event;

    if (opts->record_namespaces)
        attr->namespaces = track;

    if (opts->record_switch_events)
        attr->context_switch = track;

    if (opts->sample_transaction)
        perf_evsel__set_sample_bit(evsel, TRANSACTION);

    if (opts->running_time) {
        evsel->attr.read_format |=
            PERF_FORMAT_TOTAL_TIME_ENABLED |
            PERF_FORMAT_TOTAL_TIME_RUNNING;

    /*
     * XXX see the function comment above
     *
     * Disabling only independent events or group leaders,
     * keeping group members enabled.
     */
    if (perf_evsel__is_group_leader(evsel))

    /*
     * Setting enable_on_exec for independent events and
     * group leaders for programs executed by perf.
     */
    if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
        !opts->initial_delay)
        attr->enable_on_exec = 1;

    if (evsel->immediate) {
        attr->enable_on_exec = 0;

    clockid = opts->clockid;
    if (opts->use_clockid) {
        attr->use_clockid = 1;
        attr->clockid = opts->clockid;

    if (evsel->precise_max)
        attr->precise_ip = 3;

    if (opts->all_user) {
        attr->exclude_kernel = 1;
        attr->exclude_user   = 0;

    if (opts->all_kernel) {
        attr->exclude_kernel = 0;
        attr->exclude_user   = 1;

    if (evsel->own_cpus || evsel->unit)
        evsel->attr.read_format |= PERF_FORMAT_ID;

    /*
     * Apply event-specific term settings,
     * overriding any global configuration.
     */
    apply_config_terms(evsel, opts, track);

    evsel->ignore_missing_thread = opts->ignore_missing_thread;

    /* The --period option takes precedence. */
    if (opts->period_set) {
        perf_evsel__set_sample_bit(evsel, PERIOD);
        perf_evsel__reset_sample_bit(evsel, PERIOD);

    /*
     * For initial_delay, a dummy event is added implicitly.
     * The software dummy event errors out with -EOPNOTSUPP
     * if the BRANCH_STACK bit is set.
     */
    if (opts->initial_delay && is_dummy_event(evsel))
        perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
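/*
 * For orientation, a rough sketch of what this produces for a default
 * "perf record ./workload" cycles event (assuming no missing features):
 *
 *     attr.type           = PERF_TYPE_HARDWARE;
 *     attr.config         = PERF_COUNT_HW_CPU_CYCLES;
 *     attr.sample_type    = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 *                           PERF_SAMPLE_TIME | PERF_SAMPLE_PERIOD;
 *     attr.freq           = 1;
 *     attr.sample_freq    = 4000;   // the record default frequency
 *     attr.disabled       = 1;      // group leader, see strategy above
 *     attr.enable_on_exec = 1;      // target__none() && no initial delay
 *     attr.inherit        = 1;
 *     attr.sample_id_all  = 1;
 */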
static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)

    evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

        for (cpu = 0; cpu < ncpus; cpu++) {
            for (thread = 0; thread < nthreads; thread++) {
                FD(evsel, cpu, thread) = -1;

    return evsel->fd != NULL ? 0 : -ENOMEM;
static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
                                 int ioc, void *arg)

    for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
        for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
            int fd = FD(evsel, cpu, thread),
                err = ioctl(fd, ioc, arg);
int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)

    return perf_evsel__run_ioctl(evsel,
                                 PERF_EVENT_IOC_SET_FILTER,
                                 (void *)filter);

int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter)

    char *new_filter = strdup(filter);

    if (new_filter != NULL) {
        free(evsel->filter);
        evsel->filter = new_filter;

static int perf_evsel__append_filter(struct perf_evsel *evsel,
                                     const char *fmt, const char *filter)

    if (evsel->filter == NULL)
        return perf_evsel__set_filter(evsel, filter);

    if (asprintf(&new_filter, fmt, evsel->filter, filter) > 0) {
        free(evsel->filter);
        evsel->filter = new_filter;

int perf_evsel__append_tp_filter(struct perf_evsel *evsel, const char *filter)

    return perf_evsel__append_filter(evsel, "(%s) && (%s)", filter);

int perf_evsel__append_addr_filter(struct perf_evsel *evsel, const char *filter)

    return perf_evsel__append_filter(evsel, "%s,%s", filter);
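/*
 * Append semantics, for illustration: with an existing filter
 * "pid == 1" and a new tracepoint filter "prev_comm == \"bash\"",
 * perf_evsel__append_tp_filter() runs
 *
 *     asprintf(&new_filter, "(%s) && (%s)", "pid == 1",
 *              "prev_comm == \"bash\"");
 *
 * yielding "(pid == 1) && (prev_comm == \"bash\")", while address
 * filters are simply comma-separated via "%s,%s".
 */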
int perf_evsel__enable(struct perf_evsel *evsel)

    int err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, 0);

        evsel->disabled = false;

int perf_evsel__disable(struct perf_evsel *evsel)

    int err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, 0);

    /*
     * We mark it disabled here so that tools that disable an event can
     * ignore events after they disable it. I.e. the ring buffer may have
     * already a few more events queued up before the kernel got the stop
     * request.
     */
        evsel->disabled = true;
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)

    if (ncpus == 0 || nthreads == 0)

    if (evsel->system_wide)

    evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
    if (evsel->sample_id == NULL)

    evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
    if (evsel->id == NULL) {
        xyarray__delete(evsel->sample_id);
        evsel->sample_id = NULL;
static void perf_evsel__free_fd(struct perf_evsel *evsel)

    xyarray__delete(evsel->fd);

static void perf_evsel__free_id(struct perf_evsel *evsel)

    xyarray__delete(evsel->sample_id);
    evsel->sample_id = NULL;

static void perf_evsel__free_config_terms(struct perf_evsel *evsel)

    struct perf_evsel_config_term *term, *h;

    list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
        list_del_init(&term->list);
void perf_evsel__close_fd(struct perf_evsel *evsel)

    for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
        for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
            close(FD(evsel, cpu, thread));
            FD(evsel, cpu, thread) = -1;
void perf_evsel__exit(struct perf_evsel *evsel)

    assert(list_empty(&evsel->node));
    assert(evsel->evlist == NULL);
    perf_evsel__free_counts(evsel);
    perf_evsel__free_fd(evsel);
    perf_evsel__free_id(evsel);
    perf_evsel__free_config_terms(evsel);
    cgroup__put(evsel->cgrp);
    cpu_map__put(evsel->cpus);
    cpu_map__put(evsel->own_cpus);
    thread_map__put(evsel->threads);
    zfree(&evsel->group_name);
    zfree(&evsel->name);
    perf_evsel__object.fini(evsel);

void perf_evsel__delete(struct perf_evsel *evsel)

    perf_evsel__exit(evsel);
void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread,
                                struct perf_counts_values *count)

    struct perf_counts_values tmp;

    if (!evsel->prev_raw_counts)

        tmp = evsel->prev_raw_counts->aggr;
        evsel->prev_raw_counts->aggr = *count;

        tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread);
        *perf_counts(evsel->prev_raw_counts, cpu, thread) = *count;

    count->val = count->val - tmp.val;
    count->ena = count->ena - tmp.ena;
    count->run = count->run - tmp.run;
void perf_counts_values__scale(struct perf_counts_values *count,
                               bool scale, s8 *pscaled)

        if (count->run == 0) {
        } else if (count->run < count->ena) {
            count->val = (u64)((double) count->val * count->ena / count->run);
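/*
 * Worked example of the scaling above: if a multiplexed counter read
 * val = 1,000,000 with ena = 200ms and run = 50ms, the event was only
 * scheduled for a quarter of the enabled time, so the estimated
 * full-period count is:
 *
 *     val * ena / run = 1000000 * 200 / 50 = 4000000
 */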
static int perf_evsel__read_size(struct perf_evsel *evsel)

    u64 read_format = evsel->attr.read_format;
    int entry = sizeof(u64); /* value */

    if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
        size += sizeof(u64);

    if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
        size += sizeof(u64);

    if (read_format & PERF_FORMAT_ID)
        entry += sizeof(u64);

    if (read_format & PERF_FORMAT_GROUP) {
        nr = evsel->nr_members;
        size += sizeof(u64);
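/*
 * The kernel read() layout being sized here, per the perf_event_open(2)
 * man page, with PERF_FORMAT_GROUP:
 *
 *     struct read_format {
 *         u64 nr;            // number of group members
 *         u64 time_enabled;  // if PERF_FORMAT_TOTAL_TIME_ENABLED
 *         u64 time_running;  // if PERF_FORMAT_TOTAL_TIME_RUNNING
 *         struct {
 *             u64 value;
 *             u64 id;        // if PERF_FORMAT_ID
 *         } values[nr];
 *     };
 *
 * so size = sizeof(u64) * (1 + enabled + running) + nr * entry.
 */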
int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
                     struct perf_counts_values *count)

    size_t size = perf_evsel__read_size(evsel);

    memset(count, 0, sizeof(*count));

    if (FD(evsel, cpu, thread) < 0)

    if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)

perf_evsel__read_one(struct perf_evsel *evsel, int cpu, int thread)

    struct perf_counts_values *count = perf_counts(evsel->counts, cpu, thread);

    return perf_evsel__read(evsel, cpu, thread, count);
perf_evsel__set_count(struct perf_evsel *counter, int cpu, int thread,
                      u64 val, u64 ena, u64 run)

    struct perf_counts_values *count;

    count = perf_counts(counter->counts, cpu, thread);

    count->loaded = true;

perf_evsel__process_group_data(struct perf_evsel *leader,
                               int cpu, int thread, u64 *data)

    u64 read_format = leader->attr.read_format;
    struct sample_read_value *v;
    u64 nr, ena = 0, run = 0, i;

    if (nr != (u64) leader->nr_members)

    if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)

    if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)

    v = (struct sample_read_value *) data;

    perf_evsel__set_count(leader, cpu, thread,
                          v[0].value, ena, run);

    for (i = 1; i < nr; i++) {
        struct perf_evsel *counter;

        counter = perf_evlist__id2evsel(leader->evlist, v[i].id);

        perf_evsel__set_count(counter, cpu, thread,
                              v[i].value, ena, run);
perf_evsel__read_group(struct perf_evsel *leader, int cpu, int thread)

    struct perf_stat_evsel *ps = leader->stats;
    u64 read_format = leader->attr.read_format;
    int size = perf_evsel__read_size(leader);
    u64 *data = ps->group_data;

    if (!(read_format & PERF_FORMAT_ID))

    if (!perf_evsel__is_group_leader(leader))

        data = zalloc(size);

        ps->group_data = data;

    if (FD(leader, cpu, thread) < 0)

    if (readn(FD(leader, cpu, thread), data, size) <= 0)

    return perf_evsel__process_group_data(leader, cpu, thread, data);
int perf_evsel__read_counter(struct perf_evsel *evsel, int cpu, int thread)

    u64 read_format = evsel->attr.read_format;

    if (read_format & PERF_FORMAT_GROUP)
        return perf_evsel__read_group(evsel, cpu, thread);

    return perf_evsel__read_one(evsel, cpu, thread);
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
                              int cpu, int thread, bool scale)

    struct perf_counts_values count;
    size_t nv = scale ? 3 : 1;

    if (FD(evsel, cpu, thread) < 0)

    if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)

    if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) <= 0)

    perf_evsel__compute_deltas(evsel, cpu, thread, &count);
    perf_counts_values__scale(&count, scale, NULL);
    *perf_counts(evsel->counts, cpu, thread) = count;
static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)

    struct perf_evsel *leader = evsel->leader;

    if (perf_evsel__is_group_leader(evsel))

    /*
     * Leader must be already processed/open,
     * if not it's a bug.
     */
    BUG_ON(!leader->fd);

    fd = FD(leader, cpu, thread);
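/*
 * The fd returned here becomes the group_fd argument of
 * sys_perf_event_open(); a leader passes -1, members pass the leader's
 * fd for the same cpu/thread, e.g.:
 *
 *     int leader_fd = sys_perf_event_open(&leader_attr, pid, cpu, -1, 0);
 *     int member_fd = sys_perf_event_open(&member_attr, pid, cpu,
 *                                         leader_fd, 0);
 */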
static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)

    bool first_bit = true;

        if (value & bits[i].bit) {
            buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
    } while (bits[++i].name != NULL);
static void __p_sample_type(char *buf, size_t size, u64 value)

#define bit_name(n) { PERF_SAMPLE_##n, #n }
    struct bit_names bits[] = {
        bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
        bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
        bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
        bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
        bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC),
        bit_name(WEIGHT), bit_name(PHYS_ADDR),

    __p_bits(buf, size, value, bits);

static void __p_branch_sample_type(char *buf, size_t size, u64 value)

#define bit_name(n) { PERF_SAMPLE_BRANCH_##n, #n }
    struct bit_names bits[] = {
        bit_name(USER), bit_name(KERNEL), bit_name(HV), bit_name(ANY),
        bit_name(ANY_CALL), bit_name(ANY_RETURN), bit_name(IND_CALL),
        bit_name(ABORT_TX), bit_name(IN_TX), bit_name(NO_TX),
        bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP),
        bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES),

    __p_bits(buf, size, value, bits);

static void __p_read_format(char *buf, size_t size, u64 value)

#define bit_name(n) { PERF_FORMAT_##n, #n }
    struct bit_names bits[] = {
        bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
        bit_name(ID), bit_name(GROUP),

    __p_bits(buf, size, value, bits);

#define BUF_SIZE 1024

#define p_hex(val)                snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
#define p_unsigned(val)           snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
#define p_signed(val)             snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
#define p_sample_type(val)        __p_sample_type(buf, BUF_SIZE, val)
#define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val)
#define p_read_format(val)        __p_read_format(buf, BUF_SIZE, val)

#define PRINT_ATTRn(_n, _f, _p)                         \
        ret += attr__fprintf(fp, _n, buf, priv);        \

#define PRINT_ATTRf(_f, _p) PRINT_ATTRn(#_f, _f, _p)
int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
                             attr__fprintf_f attr__fprintf, void *priv)

    PRINT_ATTRf(type, p_unsigned);
    PRINT_ATTRf(size, p_unsigned);
    PRINT_ATTRf(config, p_hex);
    PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
    PRINT_ATTRf(sample_type, p_sample_type);
    PRINT_ATTRf(read_format, p_read_format);

    PRINT_ATTRf(disabled, p_unsigned);
    PRINT_ATTRf(inherit, p_unsigned);
    PRINT_ATTRf(pinned, p_unsigned);
    PRINT_ATTRf(exclusive, p_unsigned);
    PRINT_ATTRf(exclude_user, p_unsigned);
    PRINT_ATTRf(exclude_kernel, p_unsigned);
    PRINT_ATTRf(exclude_hv, p_unsigned);
    PRINT_ATTRf(exclude_idle, p_unsigned);
    PRINT_ATTRf(mmap, p_unsigned);
    PRINT_ATTRf(comm, p_unsigned);
    PRINT_ATTRf(freq, p_unsigned);
    PRINT_ATTRf(inherit_stat, p_unsigned);
    PRINT_ATTRf(enable_on_exec, p_unsigned);
    PRINT_ATTRf(task, p_unsigned);
    PRINT_ATTRf(watermark, p_unsigned);
    PRINT_ATTRf(precise_ip, p_unsigned);
    PRINT_ATTRf(mmap_data, p_unsigned);
    PRINT_ATTRf(sample_id_all, p_unsigned);
    PRINT_ATTRf(exclude_host, p_unsigned);
    PRINT_ATTRf(exclude_guest, p_unsigned);
    PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
    PRINT_ATTRf(exclude_callchain_user, p_unsigned);
    PRINT_ATTRf(mmap2, p_unsigned);
    PRINT_ATTRf(comm_exec, p_unsigned);
    PRINT_ATTRf(use_clockid, p_unsigned);
    PRINT_ATTRf(context_switch, p_unsigned);
    PRINT_ATTRf(write_backward, p_unsigned);
    PRINT_ATTRf(namespaces, p_unsigned);
    PRINT_ATTRf(ksymbol, p_unsigned);
    PRINT_ATTRf(bpf_event, p_unsigned);

    PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
    PRINT_ATTRf(bp_type, p_unsigned);
    PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
    PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
    PRINT_ATTRf(branch_sample_type, p_branch_sample_type);
    PRINT_ATTRf(sample_regs_user, p_hex);
    PRINT_ATTRf(sample_stack_user, p_unsigned);
    PRINT_ATTRf(clockid, p_signed);
    PRINT_ATTRf(sample_regs_intr, p_hex);
    PRINT_ATTRf(aux_watermark, p_unsigned);
    PRINT_ATTRf(sample_max_stack, p_unsigned);
static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
                                void *priv __maybe_unused)

    return fprintf(fp, "  %-32s %s\n", name, val);
static void perf_evsel__remove_fd(struct perf_evsel *pos,
                                  int nr_cpus, int nr_threads,
                                  int thread_idx)

    for (int cpu = 0; cpu < nr_cpus; cpu++)
        for (int thread = thread_idx; thread < nr_threads - 1; thread++)
            FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);

static int update_fds(struct perf_evsel *evsel,
                      int nr_cpus, int cpu_idx,
                      int nr_threads, int thread_idx)

    struct perf_evsel *pos;

    if (cpu_idx >= nr_cpus || thread_idx >= nr_threads)

    evlist__for_each_entry(evsel->evlist, pos) {
        nr_cpus = pos != evsel ? nr_cpus : cpu_idx;

        perf_evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);

        /*
         * Since fds for the next evsel have not been created,
         * there is no need to iterate the whole event list.
         */
static bool ignore_missing_thread(struct perf_evsel *evsel,
                                  int nr_cpus, int cpu,
                                  struct thread_map *threads,
                                  int thread, int err)

    pid_t ignore_pid = thread_map__pid(threads, thread);

    if (!evsel->ignore_missing_thread)

    /* The system wide setup does not work with threads. */
    if (evsel->system_wide)

    /* The -ESRCH is perf event syscall errno for pid's not found. */

    /* If there's only one thread, let it fail. */
    if (threads->nr == 1)

    /*
     * We should remove fd for missing_thread first
     * because thread_map__remove() will decrease threads->nr.
     */
    if (update_fds(evsel, nr_cpus, cpu, threads->nr, thread))

    if (thread_map__remove(threads, thread))

    pr_warning("WARNING: Ignored open failure for pid %d\n",
               ignore_pid);
static void display_attr(struct perf_event_attr *attr)

    fprintf(stderr, "%.60s\n", graph_dotted_line);
    fprintf(stderr, "perf_event_attr:\n");
    perf_event_attr__fprintf(stderr, attr, __open_attr__fprintf, NULL);
    fprintf(stderr, "%.60s\n", graph_dotted_line);
static int perf_event_open(struct perf_evsel *evsel,
                           pid_t pid, int cpu, int group_fd,
                           unsigned long flags)

    int precise_ip = evsel->attr.precise_ip;

        pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
                  pid, cpu, group_fd, flags);

        fd = sys_perf_event_open(&evsel->attr, pid, cpu, group_fd, flags);

        /* Do not try less precise if not requested. */
        if (!evsel->precise_max)

        /*
         * We tried all the precise_ip values, and it's
         * still failing, so leave it to standard fallback.
         */
        if (!evsel->attr.precise_ip) {
            evsel->attr.precise_ip = precise_ip;

        pr_debug2("\nsys_perf_event_open failed, error %d\n", -ENOTSUP);
        evsel->attr.precise_ip--;
        pr_debug2("decreasing precise_ip by one (%d)\n", evsel->attr.precise_ip);
        display_attr(&evsel->attr);
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                     struct thread_map *threads)

    int cpu, thread, nthreads;
    unsigned long flags = PERF_FLAG_FD_CLOEXEC;
    enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;

    if (perf_missing_features.write_backward && evsel->attr.write_backward)

        static struct cpu_map *empty_cpu_map;

        if (empty_cpu_map == NULL) {
            empty_cpu_map = cpu_map__dummy_new();
            if (empty_cpu_map == NULL)

        cpus = empty_cpu_map;

    if (threads == NULL) {
        static struct thread_map *empty_thread_map;

        if (empty_thread_map == NULL) {
            empty_thread_map = thread_map__new_by_tid(-1);
            if (empty_thread_map == NULL)

        threads = empty_thread_map;

    if (evsel->system_wide)

        nthreads = threads->nr;

    if (evsel->fd == NULL &&
        perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0)

        flags |= PERF_FLAG_PID_CGROUP;
        pid = evsel->cgrp->fd;
fallback_missing_features:
    if (perf_missing_features.clockid_wrong)
        evsel->attr.clockid = CLOCK_MONOTONIC; /* should always work */
    if (perf_missing_features.clockid) {
        evsel->attr.use_clockid = 0;
        evsel->attr.clockid = 0;

    if (perf_missing_features.cloexec)
        flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
    if (perf_missing_features.mmap2)
        evsel->attr.mmap2 = 0;
    if (perf_missing_features.exclude_guest)
        evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
    if (perf_missing_features.lbr_flags)
        evsel->attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
                                            PERF_SAMPLE_BRANCH_NO_CYCLES);
    if (perf_missing_features.group_read && evsel->attr.inherit)
        evsel->attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID);
    if (perf_missing_features.ksymbol)
        evsel->attr.ksymbol = 0;
    if (perf_missing_features.bpf_event)
        evsel->attr.bpf_event = 0;

    if (perf_missing_features.sample_id_all)
        evsel->attr.sample_id_all = 0;

    display_attr(&evsel->attr);
    for (cpu = 0; cpu < cpus->nr; cpu++) {

        for (thread = 0; thread < nthreads; thread++) {

            if (!evsel->cgrp && !evsel->system_wide)
                pid = thread_map__pid(threads, thread);

            group_fd = get_group_fd(evsel, cpu, thread);

            fd = perf_event_open(evsel, pid, cpus->map[cpu],
                                 group_fd, flags);

            FD(evsel, cpu, thread) = fd;

                if (ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) {
                    /*
                     * We just removed 1 thread, so take a step
                     * back on thread index and lower the upper
                     * nthreads limit.
                     */

                    /* ... and pretend like nothing has happened. */

                pr_debug2("\nsys_perf_event_open failed, error %d\n",
                          err);

            pr_debug2(" = %d\n", fd);

            if (evsel->bpf_fd >= 0) {
                int bpf_fd = evsel->bpf_fd;

                            PERF_EVENT_IOC_SET_BPF,

                if (err && errno != EEXIST) {
                    pr_err("failed to attach bpf fd %d: %s\n",
                           bpf_fd, strerror(errno));

            set_rlimit = NO_CHANGE;
    /*
     * If we succeeded but had to kill clockid, fail and
     * have perf_evsel__open_strerror() print us a nice
     * error.
     */
    if (perf_missing_features.clockid ||
        perf_missing_features.clockid_wrong) {

    /*
     * perf stat needs between 5 and 22 fds per CPU. When we run out
     * of them try to increase the limits.
     */
    if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
        int old_errno = errno;

        if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
            if (set_rlimit == NO_CHANGE)
                l.rlim_cur = l.rlim_max;

                l.rlim_cur = l.rlim_max + 1000;
                l.rlim_max = l.rlim_cur;

            if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
    if (err != -EINVAL || cpu > 0 || thread > 0)

    /*
     * Must probe features in the order they were added to the
     * perf_event_attr interface.
     */
    if (!perf_missing_features.bpf_event && evsel->attr.bpf_event) {
        perf_missing_features.bpf_event = true;
        pr_debug2("switching off bpf_event\n");
        goto fallback_missing_features;
    } else if (!perf_missing_features.ksymbol && evsel->attr.ksymbol) {
        perf_missing_features.ksymbol = true;
        pr_debug2("switching off ksymbol\n");
        goto fallback_missing_features;
    } else if (!perf_missing_features.write_backward && evsel->attr.write_backward) {
        perf_missing_features.write_backward = true;
        pr_debug2("switching off write_backward\n");
    } else if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
        perf_missing_features.clockid_wrong = true;
        pr_debug2("switching off clockid\n");
        goto fallback_missing_features;
    } else if (!perf_missing_features.clockid && evsel->attr.use_clockid) {
        perf_missing_features.clockid = true;
        pr_debug2("switching off use_clockid\n");
        goto fallback_missing_features;
    } else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
        perf_missing_features.cloexec = true;
        pr_debug2("switching off cloexec flag\n");
        goto fallback_missing_features;
    } else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
        perf_missing_features.mmap2 = true;
        pr_debug2("switching off mmap2\n");
        goto fallback_missing_features;
    } else if (!perf_missing_features.exclude_guest &&
               (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
        perf_missing_features.exclude_guest = true;
        pr_debug2("switching off exclude_guest, exclude_host\n");
        goto fallback_missing_features;
    } else if (!perf_missing_features.sample_id_all) {
        perf_missing_features.sample_id_all = true;
        pr_debug2("switching off sample_id_all\n");
        goto retry_sample_id;
    } else if (!perf_missing_features.lbr_flags &&
               (evsel->attr.branch_sample_type &
                (PERF_SAMPLE_BRANCH_NO_CYCLES |
                 PERF_SAMPLE_BRANCH_NO_FLAGS))) {
        perf_missing_features.lbr_flags = true;
        pr_debug2("switching off branch sample type no (cycles/flags)\n");
        goto fallback_missing_features;
    } else if (!perf_missing_features.group_read &&
               evsel->attr.inherit &&
               (evsel->attr.read_format & PERF_FORMAT_GROUP) &&
               perf_evsel__is_group_leader(evsel)) {
        perf_missing_features.group_read = true;
        pr_debug2("switching off group read\n");
        goto fallback_missing_features;

        threads->err_thread = thread;

        while (--thread >= 0) {
            close(FD(evsel, cpu, thread));
            FD(evsel, cpu, thread) = -1;
    } while (--cpu >= 0);
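/*
 * The probe-and-retry pattern above degrades gracefully on old kernels:
 * when open fails with EINVAL on the first cpu/thread, one attr feature
 * bit is recorded in perf_missing_features and cleared, and the open is
 * retried from fallback_missing_features, newest feature first. A debug
 * run makes the effect visible, e.g. (illustrative output):
 *
 *     $ perf --debug verbose=2 record true 2>&1 | grep 'switching off'
 *     switching off bpf_event
 *     switching off ksymbol
 */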
void perf_evsel__close(struct perf_evsel *evsel)

    if (evsel->fd == NULL)

    perf_evsel__close_fd(evsel);
    perf_evsel__free_fd(evsel);
    perf_evsel__free_id(evsel);

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
                             struct cpu_map *cpus)

    return perf_evsel__open(evsel, cpus, NULL);

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
                                struct thread_map *threads)

    return perf_evsel__open(evsel, NULL, threads);
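/*
 * Putting the pieces together, a plausible counting sequence (error
 * handling elided; the tests under tools/perf/tests do this for real):
 *
 *     struct thread_map *threads = thread_map__new_by_tid(getpid());
 *     struct perf_evsel *evsel = perf_evsel__newtp("syscalls",
 *                                                  "sys_enter_openat");
 *
 *     perf_evsel__open_per_thread(evsel, threads);
 *     // ... workload ...
 *     perf_evsel__read_on_cpu(evsel, 0, 0);
 *     perf_evsel__close(evsel);
 *     perf_evsel__delete(evsel);
 */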
static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
                                       const union perf_event *event,
                                       struct perf_sample *sample)

    u64 type = evsel->attr.sample_type;
    const u64 *array = event->sample.array;
    bool swapped = evsel->needs_swap;

    array += ((event->header.size -
               sizeof(event->header)) / sizeof(u64)) - 1;

    if (type & PERF_SAMPLE_IDENTIFIER) {
        sample->id = *array;

    if (type & PERF_SAMPLE_CPU) {

            /* undo swap of u64, then swap on individual u32s */
            u.val64 = bswap_64(u.val64);
            u.val32[0] = bswap_32(u.val32[0]);

        sample->cpu = u.val32[0];

    if (type & PERF_SAMPLE_STREAM_ID) {
        sample->stream_id = *array;

    if (type & PERF_SAMPLE_ID) {
        sample->id = *array;

    if (type & PERF_SAMPLE_TIME) {
        sample->time = *array;

    if (type & PERF_SAMPLE_TID) {

            /* undo swap of u64, then swap on individual u32s */
            u.val64 = bswap_64(u.val64);
            u.val32[0] = bswap_32(u.val32[0]);
            u.val32[1] = bswap_32(u.val32[1]);

        sample->pid = u.val32[0];
        sample->tid = u.val32[1];
static inline bool overflow(const void *endp, u16 max_size, const void *offset,
                            u64 size)

    return size > max_size || offset + size > endp;

#define OVERFLOW_CHECK(offset, size, max_size)                  \
        if (overflow(endp, (max_size), (offset), (size)))       \

#define OVERFLOW_CHECK_u64(offset) \
    OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
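/*
 * Usage pattern of these guards in the parser below, for illustration:
 * before consuming N bytes at 'array', prove they still lie inside the
 * event record (endp), otherwise bail out with -EFAULT:
 *
 *     OVERFLOW_CHECK_u64(array);   // about to read one u64
 *     data->time = *array;
 *     array++;
 */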
perf_event__check_size(union perf_event *event, unsigned int sample_size)

    /*
     * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
     * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
     * check the format does not go past the end of the event.
     */
    if (sample_size + sizeof(event->header) > event->header.size)
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
                             struct perf_sample *data)

    u64 type = evsel->attr.sample_type;
    bool swapped = evsel->needs_swap;

    u16 max_size = event->header.size;
    const void *endp = (void *)event + max_size;

    /*
     * used for cross-endian analysis. See git commit 65014ab3
     * for why this goofiness is needed.
     */

    memset(data, 0, sizeof(*data));
    data->cpu = data->pid = data->tid = -1;
    data->stream_id = data->id = data->time = -1ULL;
    data->period = evsel->attr.sample_period;
    data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
    data->misc    = event->header.misc;

    data->data_src = PERF_MEM_DATA_SRC_NONE;

    if (event->header.type != PERF_RECORD_SAMPLE) {
        if (!evsel->attr.sample_id_all)
        return perf_evsel__parse_id_sample(evsel, event, data);

    array = event->sample.array;

    if (perf_event__check_size(event, evsel->sample_size))
    if (type & PERF_SAMPLE_IDENTIFIER) {

    if (type & PERF_SAMPLE_IP) {

    if (type & PERF_SAMPLE_TID) {

            /* undo swap of u64, then swap on individual u32s */
            u.val64 = bswap_64(u.val64);
            u.val32[0] = bswap_32(u.val32[0]);
            u.val32[1] = bswap_32(u.val32[1]);

        data->pid = u.val32[0];
        data->tid = u.val32[1];

    if (type & PERF_SAMPLE_TIME) {
        data->time = *array;

    if (type & PERF_SAMPLE_ADDR) {
        data->addr = *array;

    if (type & PERF_SAMPLE_ID) {

    if (type & PERF_SAMPLE_STREAM_ID) {
        data->stream_id = *array;

    if (type & PERF_SAMPLE_CPU) {

            /* undo swap of u64, then swap on individual u32s */
            u.val64 = bswap_64(u.val64);
            u.val32[0] = bswap_32(u.val32[0]);

        data->cpu = u.val32[0];

    if (type & PERF_SAMPLE_PERIOD) {
        data->period = *array;
    if (type & PERF_SAMPLE_READ) {
        u64 read_format = evsel->attr.read_format;

        OVERFLOW_CHECK_u64(array);
        if (read_format & PERF_FORMAT_GROUP)
            data->read.group.nr = *array;
            data->read.one.value = *array;

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
            OVERFLOW_CHECK_u64(array);
            data->read.time_enabled = *array;

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
            OVERFLOW_CHECK_u64(array);
            data->read.time_running = *array;

        /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
        if (read_format & PERF_FORMAT_GROUP) {
            const u64 max_group_nr = UINT64_MAX /
                                     sizeof(struct sample_read_value);

            if (data->read.group.nr > max_group_nr)
            sz = data->read.group.nr *
                 sizeof(struct sample_read_value);
            OVERFLOW_CHECK(array, sz, max_size);
            data->read.group.values =
                    (struct sample_read_value *)array;
            array = (void *)array + sz;

            OVERFLOW_CHECK_u64(array);
            data->read.one.id = *array;
    if (evsel__has_callchain(evsel)) {
        const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);

        OVERFLOW_CHECK_u64(array);
        data->callchain = (struct ip_callchain *)array++;
        if (data->callchain->nr > max_callchain_nr)
        sz = data->callchain->nr * sizeof(u64);
        OVERFLOW_CHECK(array, sz, max_size);
        array = (void *)array + sz;
    if (type & PERF_SAMPLE_RAW) {
        OVERFLOW_CHECK_u64(array);

        /*
         * Undo swap of u64, then swap on individual u32s,
         * get the size of the raw area and undo all of the
         * swap. The pevent interface handles endianity by
         * itself.
         */
            u.val64 = bswap_64(u.val64);
            u.val32[0] = bswap_32(u.val32[0]);
            u.val32[1] = bswap_32(u.val32[1]);

        data->raw_size = u.val32[0];

            /*
             * The raw data is aligned on 64bits including the
             * u32 size, so it's safe to use mem_bswap_64.
             */
            mem_bswap_64((void *) array, data->raw_size);

        array = (void *)array + sizeof(u32);

        OVERFLOW_CHECK(array, data->raw_size, max_size);
        data->raw_data = (void *)array;
        array = (void *)array + data->raw_size;
2370 if (type
& PERF_SAMPLE_BRANCH_STACK
) {
2371 const u64 max_branch_nr
= UINT64_MAX
/
2372 sizeof(struct branch_entry
);
2374 OVERFLOW_CHECK_u64(array
);
2375 data
->branch_stack
= (struct branch_stack
*)array
++;
2377 if (data
->branch_stack
->nr
> max_branch_nr
)
2379 sz
= data
->branch_stack
->nr
* sizeof(struct branch_entry
);
2380 OVERFLOW_CHECK(array
, sz
, max_size
);
2381 array
= (void *)array
+ sz
;
2384 if (type
& PERF_SAMPLE_REGS_USER
) {
2385 OVERFLOW_CHECK_u64(array
);
2386 data
->user_regs
.abi
= *array
;
2389 if (data
->user_regs
.abi
) {
2390 u64 mask
= evsel
->attr
.sample_regs_user
;
2392 sz
= hweight64(mask
) * sizeof(u64
);
2393 OVERFLOW_CHECK(array
, sz
, max_size
);
2394 data
->user_regs
.mask
= mask
;
2395 data
->user_regs
.regs
= (u64
*)array
;
2396 array
= (void *)array
+ sz
;
2400 if (type
& PERF_SAMPLE_STACK_USER
) {
2401 OVERFLOW_CHECK_u64(array
);
2404 data
->user_stack
.offset
= ((char *)(array
- 1)
2408 data
->user_stack
.size
= 0;
2410 OVERFLOW_CHECK(array
, sz
, max_size
);
2411 data
->user_stack
.data
= (char *)array
;
2412 array
= (void *)array
+ sz
;
2413 OVERFLOW_CHECK_u64(array
);
2414 data
->user_stack
.size
= *array
++;
2415 if (WARN_ONCE(data
->user_stack
.size
> sz
,
2416 "user stack dump failure\n"))
2421 if (type
& PERF_SAMPLE_WEIGHT
) {
2422 OVERFLOW_CHECK_u64(array
);
2423 data
->weight
= *array
;
2427 if (type
& PERF_SAMPLE_DATA_SRC
) {
2428 OVERFLOW_CHECK_u64(array
);
2429 data
->data_src
= *array
;
2433 if (type
& PERF_SAMPLE_TRANSACTION
) {
2434 OVERFLOW_CHECK_u64(array
);
2435 data
->transaction
= *array
;
2439 data
->intr_regs
.abi
= PERF_SAMPLE_REGS_ABI_NONE
;
2440 if (type
& PERF_SAMPLE_REGS_INTR
) {
2441 OVERFLOW_CHECK_u64(array
);
2442 data
->intr_regs
.abi
= *array
;
2445 if (data
->intr_regs
.abi
!= PERF_SAMPLE_REGS_ABI_NONE
) {
2446 u64 mask
= evsel
->attr
.sample_regs_intr
;
2448 sz
= hweight64(mask
) * sizeof(u64
);
2449 OVERFLOW_CHECK(array
, sz
, max_size
);
2450 data
->intr_regs
.mask
= mask
;
2451 data
->intr_regs
.regs
= (u64
*)array
;
2452 array
= (void *)array
+ sz
;
2456 data
->phys_addr
= 0;
2457 if (type
& PERF_SAMPLE_PHYS_ADDR
) {
2458 data
->phys_addr
= *array
;
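
/*
 * perf_evsel__parse_sample_timestamp - fast path to fetch only the
 * timestamp of an event. For non-sample events the id sample appended by
 * sample_id_all is parsed; for PERF_RECORD_SAMPLE only the fixed-size
 * fields in front of PERF_SAMPLE_TIME are skipped over, which is cheaper
 * than a full perf_evsel__parse_sample().
 */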
int perf_evsel__parse_sample_timestamp(struct perf_evsel *evsel,
				       union perf_event *event,
				       u64 *timestamp)
{
	u64 type = evsel->attr.sample_type;
	const u64 *array;

	if (!(type & PERF_SAMPLE_TIME))
		return -1;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		struct perf_sample data = {
			.time = -1ULL,
		};

		if (!evsel->attr.sample_id_all)
			return -1;
		if (perf_evsel__parse_id_sample(evsel, event, &data))
			return -1;

		*timestamp = data.time;
		return 0;
	}

	array = event->sample.array;

	if (perf_event__check_size(event, evsel->sample_size))
		return -EFAULT;

	if (type & PERF_SAMPLE_IDENTIFIER)
		array++;

	if (type & PERF_SAMPLE_IP)
		array++;

	if (type & PERF_SAMPLE_TID)
		array++;

	if (type & PERF_SAMPLE_TIME)
		*timestamp = *array;

	return 0;
}
size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
				     u64 read_format)
{
	size_t sz, result = sizeof(struct sample_event);

	if (type & PERF_SAMPLE_IDENTIFIER)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_IP)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TIME)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_STREAM_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CPU)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_PERIOD)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_READ) {
		result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			result += sizeof(u64);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		result += sizeof(u32);
		result += sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			result += sizeof(u64);
			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		result += sizeof(u64);
		if (sz) {
			result += sz;
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_DATA_SRC)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TRANSACTION)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			result += sizeof(u64);
			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR)
		result += sizeof(u64);

	return result;
}
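
/*
 * perf_event__synthesize_sample - write a struct perf_sample back out in
 * the kernel's PERF_RECORD_SAMPLE layout, the inverse of
 * perf_evsel__parse_sample(). The caller is expected to have sized the
 * destination event using perf_event__sample_event_size().
 */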
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  u64 read_format,
				  const struct perf_sample *sample)
{
	u64 *array;
	size_t sz;
	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		*array = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		u.val32[1] = 0;
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			*array = sample->read.group.nr;
		else
			*array = sample->read.one.value;
		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			*array = sample->read.time_enabled;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			*array = sample->read.time_running;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			memcpy(array, sample->read.group.values, sz);
			array = (void *)array + sz;
		} else {
			*array = sample->read.one.id;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		memcpy(array, sample->callchain, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		u.val32[0] = sample->raw_size;
		*array = u.val64;
		array = (void *)array + sizeof(u32);

		memcpy(array, sample->raw_data, sample->raw_size);
		array = (void *)array + sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		memcpy(array, sample->branch_stack, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			*array++ = sample->user_regs.abi;
			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
			memcpy(array, sample->user_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		*array++ = sz;
		if (sz) {
			memcpy(array, sample->user_stack.data, sz);
			array = (void *)array + sz;
			*array++ = sz;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		*array = sample->weight;
		array++;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		*array = sample->data_src;
		array++;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		*array = sample->transaction;
		array++;
	}

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			*array++ = sample->intr_regs.abi;
			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
			memcpy(array, sample->intr_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR) {
		*array = sample->phys_addr;
		array++;
	}

	return 0;
}
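
/*
 * Tracepoint field helpers: resolve a named field via the evsel's
 * libtraceevent format description and fetch its raw pointer or integer
 * value out of a sample's raw data.
 */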
struct tep_format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
	return tep_find_field(evsel->tp_format, name);
}
void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name)
{
	struct tep_format_field *field = perf_evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;

	if (field->flags & TEP_FIELD_IS_DYNAMIC) {
		/* dynamic fields store "offset | (len << 16)" in the payload */
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
	}

	return sample->raw_data + offset;
}
u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample,
			 bool needs_swap)
{
	u64 value;
	void *ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		memcpy(&value, ptr, sizeof(u64));
		break;
	default:
		return 0;
	}

	if (!needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
	default:
		return 0;
	}
}
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name)
{
	struct tep_format_field *field = perf_evsel__field(evsel, name);

	return field ? format_field__intval(field, sample, evsel->needs_swap) : 0;
}
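
/*
 * perf_evsel__fallback - degrade gracefully when opening the event failed:
 * swap the hardware cycles event for the software cpu-clock event, or
 * append the 'u' modifier to exclude kernel samples when
 * perf_event_paranoid denies them. Returns true if the caller should retry
 * the open with the modified attr, with @msg explaining the fallback.
 */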
bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
			  char *msg, size_t msgsize)
{
	int paranoid;

	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
	    evsel->attr.type   == PERF_TYPE_HARDWARE &&
	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
		/*
		 * If it's cycles then fall back to hrtimer based
		 * cpu-clock-tick sw counter, which is always available even if
		 * no PMU support.
		 *
		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
		 * b0a873e).
		 */
		scnprintf(msg, msgsize, "%s",
"The cycles event is not supported, trying to fall back to cpu-clock-ticks");

		evsel->attr.type   = PERF_TYPE_SOFTWARE;
		evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;

		zfree(&evsel->name);
		return true;
	} else if (err == EACCES && !evsel->attr.exclude_kernel &&
		   (paranoid = perf_event_paranoid()) > 1) {
		const char *name = perf_evsel__name(evsel);
		char *new_name;
		const char *sep = ":";

		/* Is the separator already in the name? */
		if (strchr(name, '/') ||
		    strchr(name, ':'))
			sep = "";

		if (asprintf(&new_name, "%s%su", name, sep) < 0)
			return false;

		free(evsel->name);
		evsel->name = new_name;
		scnprintf(msg, msgsize,
"kernel.perf_event_paranoid=%d, trying to fall back to excluding kernel samples", paranoid);
		evsel->attr.exclude_kernel = 1;

		return true;
	}

	return false;
}
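
/*
 * find_process - walk the directories under procfs and compare each
 * entry's comm file against @name; used below to detect a conflicting
 * profiler such as oprofiled.
 */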
static bool find_process(const char *name)
{
	size_t len = strlen(name);
	DIR *dir;
	struct dirent *d;
	int ret = -1;

	dir = opendir(procfs__mountpoint());
	if (!dir)
		return false;

	/* Walk through the directory. */
	while (ret && (d = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		char *data;
		size_t size;

		if ((d->d_type != DT_DIR) ||
		     !strcmp(".", d->d_name) ||
		     !strcmp("..", d->d_name))
			continue;

		scnprintf(path, sizeof(path), "%s/%s/comm",
			  procfs__mountpoint(), d->d_name);

		if (filename__read_str(path, &data, &size))
			continue;

		ret = strncmp(name, data, len);
		free(data);
	}

	closedir(dir);
	return ret ? false : true;
}
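
/*
 * perf_evsel__open_strerror - turn the errno from sys_perf_event_open()
 * into a user-facing diagnostic with hints, falling back to a generic
 * strerror-based message when no specific advice applies.
 */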
int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
			      int err, char *msg, size_t size)
{
	char sbuf[STRERR_BUFSIZE];
	int printed = 0;

	switch (err) {
	case EPERM:
	case EACCES:
		if (err == EPERM)
			printed = scnprintf(msg, size,
				"No permission to enable %s event.\n\n",
				perf_evsel__name(evsel));

		return scnprintf(msg + printed, size - printed,
		 "You may not have permission to collect %sstats.\n\n"
		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid,\n"
		 "which controls use of the performance events system by\n"
		 "unprivileged users (without CAP_SYS_ADMIN).\n\n"
		 "The current value is %d:\n\n"
		 "  -1: Allow use of (almost) all events by all users\n"
		 "      Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK\n"
		 ">= 0: Disallow ftrace function tracepoint by users without CAP_SYS_ADMIN\n"
		 "      Disallow raw tracepoint access by users without CAP_SYS_ADMIN\n"
		 ">= 1: Disallow CPU event access by users without CAP_SYS_ADMIN\n"
		 ">= 2: Disallow kernel profiling by users without CAP_SYS_ADMIN\n\n"
		 "To make this setting permanent, edit /etc/sysctl.conf too, e.g.:\n\n"
		 "	kernel.perf_event_paranoid = -1\n",
				 target->system_wide ? "system-wide " : "",
				 perf_event_paranoid());
	case ENOENT:
		return scnprintf(msg, size, "The %s event is not supported.",
				 perf_evsel__name(evsel));
	case EMFILE:
		return scnprintf(msg, size, "%s",
			 "Too many events are opened.\n"
			 "Probably the maximum number of open file descriptors has been reached.\n"
			 "Hint: Try again after reducing the number of events.\n"
			 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
	case ENOMEM:
		if (evsel__has_callchain(evsel) &&
		    access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0)
			return scnprintf(msg, size,
					 "Not enough memory to setup event with callchain.\n"
					 "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n"
					 "Hint: Current value: %d", sysctl__max_stack());
		break;
	case ENODEV:
		if (target->cpu_list)
			return scnprintf(msg, size, "%s",
	 "No such device - did you specify an out-of-range profile CPU?");
		break;
	case EOPNOTSUPP:
		if (evsel->attr.sample_period != 0)
			return scnprintf(msg, size,
	"%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'",
					 perf_evsel__name(evsel));
		if (evsel->attr.precise_ip)
			return scnprintf(msg, size, "%s",
	"\'precise\' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
		if (evsel->attr.type == PERF_TYPE_HARDWARE)
			return scnprintf(msg, size, "%s",
	"No hardware sampling interrupt available.\n");
#endif
		break;
	case EBUSY:
		if (find_process("oprofiled"))
			return scnprintf(msg, size,
	"The PMU counters are busy/taken by another profiler.\n"
	"We found oprofile daemon running, please stop it and try again.");
		break;
	case EINVAL:
		if (evsel->attr.write_backward && perf_missing_features.write_backward)
			return scnprintf(msg, size, "Reading from overwrite event is not supported by this kernel.");
		if (perf_missing_features.clockid)
			return scnprintf(msg, size, "clockid feature not supported.");
		if (perf_missing_features.clockid_wrong)
			return scnprintf(msg, size, "wrong clockid (%d).", clockid);
		break;
	default:
		break;
	}

	return scnprintf(msg, size,
	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
	"/bin/dmesg | grep -i perf may provide additional information.\n",
			 err, str_error_r(err, sbuf, sizeof(sbuf)),
			 perf_evsel__name(evsel));
}
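
/* Return the perf_env of the evlist this evsel is on, or NULL. */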
struct perf_env *perf_evsel__env(struct perf_evsel *evsel)
{
	if (evsel && evsel->evlist)
		return evsel->evlist->env;
	return NULL;
}
static int store_evsel_ids(struct perf_evsel *evsel, struct perf_evlist *evlist)
{
	int cpu, thread;

	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
		for (thread = 0; thread < xyarray__max_y(evsel->fd);
		     thread++) {
			int fd = FD(evsel, cpu, thread);

			if (perf_evlist__id_add_fd(evlist, evsel,
						   cpu, thread, fd) < 0)
				return -1;
		}
	}

	return 0;
}
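
/*
 * perf_evsel__store_ids - allocate the evsel's id arrays according to its
 * cpu and thread maps, then fill them via store_evsel_ids().
 */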
int perf_evsel__store_ids(struct perf_evsel *evsel, struct perf_evlist *evlist)
{
	struct cpu_map *cpus = evsel->cpus;
	struct thread_map *threads = evsel->threads;

	if (perf_evsel__alloc_id(evsel, cpus->nr, threads->nr))
		return -ENOMEM;

	return store_evsel_ids(evsel, evlist);
}