/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <linux/bitops.h>
#include <api/fs/fs.h>
#include <api/fs/tracing_path.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <sys/ioctl.h>
#include <sys/resource.h>
#include <sys/types.h>
#include "callchain.h"
#include "thread_map.h"
#include "perf_regs.h"
#include "trace-event.h"
#include "util/parse-branch-options.h"
#include "sane_ctype.h"
static struct {
	bool sample_id_all;
	bool exclude_guest;
	bool mmap2;
	bool cloexec;
	bool clockid;
	bool clockid_wrong;
	bool lbr_flags;
	bool write_backward;
	bool group_read;
} perf_missing_features;

static clockid_t clockid;
static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused)
{
	return 0;
}

void __weak test_attr__ready(void) { }

static void perf_evsel__no_extra_fini(struct perf_evsel *evsel __maybe_unused)
{
}

static struct {
	size_t	size;
	int	(*init)(struct perf_evsel *evsel);
	void	(*fini)(struct perf_evsel *evsel);
} perf_evsel__object = {
	.size = sizeof(struct perf_evsel),
	.init = perf_evsel__no_extra_init,
	.fini = perf_evsel__no_extra_fini,
};
int perf_evsel__object_config(size_t object_size,
			      int (*init)(struct perf_evsel *evsel),
			      void (*fini)(struct perf_evsel *evsel))
{
	if (object_size == 0)
		goto set_methods;

	if (perf_evsel__object.size > object_size)
		return -EINVAL;

	perf_evsel__object.size = object_size;

set_methods:
	if (init != NULL)
		perf_evsel__object.init = init;

	if (fini != NULL)
		perf_evsel__object.fini = fini;

	return 0;
}
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}
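/*
 * Illustration (not in the original source): a sample_type of
 * PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME has three bits set
 * within PERF_SAMPLE_MASK, so the fixed part of each sample is
 * 3 * sizeof(u64) == 24 bytes.
 */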
/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
 * sample_event.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
	int idx = 0;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;

	if (sample_type & PERF_SAMPLE_ADDR)
		idx += 1;

	return idx;
}
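/*
 * Worked example (illustration only): for sample_type ==
 * PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID,
 * the id is preceded by the ip, tid and time words, so id_pos == 3.  With
 * PERF_SAMPLE_IDENTIFIER the id is always the first word (id_pos == 0),
 * which is what keeps mixed-sample_type evlists parseable.
 */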
/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 * sample_id_all is used there is an id sample appended to non-sample events.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
	int idx = 1;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 1;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_CPU)
		idx += 1;

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		idx += 1;

	return idx;
}
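/*
 * Worked example (illustration only): with sample_type containing
 * PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID, the id sits
 * three u64s from the end of the trailer (cpu and stream_id follow it), so
 * is_pos == 3; with only PERF_SAMPLE_ID set it is the last word, is_pos == 1.
 */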
void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
{
	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
}
void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit)
{
	if (!(evsel->attr.sample_type & bit)) {
		evsel->attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}
void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit)
{
	if (evsel->attr.sample_type & bit) {
		evsel->attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}
void perf_evsel__set_sample_id(struct perf_evsel *evsel,
			       bool can_sample_identifier)
{
	if (can_sample_identifier) {
		perf_evsel__reset_sample_bit(evsel, ID);
		perf_evsel__set_sample_bit(evsel, IDENTIFIER);
	} else {
		perf_evsel__set_sample_bit(evsel, ID);
	}
	evsel->attr.read_format |= PERF_FORMAT_ID;
}
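/*
 * Note (added for clarity): forcing PERF_FORMAT_ID makes the kernel report
 * the event id in read() data as well as in samples, so counts can always
 * be matched back to the evsel that produced them.
 */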
/**
 * perf_evsel__is_function_event - Return whether given evsel is a function
 * trace event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true if event is function trace event
 */
bool perf_evsel__is_function_event(struct perf_evsel *evsel)
{
#define FUNCTION_EVENT "ftrace:function"

	return evsel->name &&
	       !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));

#undef FUNCTION_EVENT
}
void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx	   = idx;
	evsel->tracking	   = !idx;
	evsel->attr	   = *attr;
	evsel->leader	   = evsel;
	evsel->unit	   = "";
	evsel->scale	   = 1.0;
	evsel->evlist	   = NULL;
	evsel->bpf_fd	   = -1;
	INIT_LIST_HEAD(&evsel->node);
	INIT_LIST_HEAD(&evsel->config_terms);
	perf_evsel__object.init(evsel);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
	perf_evsel__calc_id_pos(evsel);
	evsel->cmdline_group_boundary = false;
	evsel->metric_expr   = NULL;
	evsel->metric_name   = NULL;
	evsel->metric_events = NULL;
	evsel->collect_stat  = false;
}
struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	if (perf_evsel__is_bpf_output(evsel)) {
		evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					    PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		evsel->attr.sample_period = 1;
	}

	return evsel;
}
static bool perf_event_can_profile_kernel(void)
{
	return geteuid() == 0 || perf_event_paranoid() == -1;
}
struct perf_evsel *perf_evsel__new_cycles(bool precise)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.exclude_kernel	= !perf_event_can_profile_kernel(),
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	if (!precise)
		goto new_event;
	/*
	 * Unnamed union member, not supported as struct member named
	 * initializer in older compilers such as gcc 4.4.7
	 *
	 * Just for probing the precise_ip:
	 */
	attr.sample_period = 1;

	perf_event_attr__set_max_precise_ip(&attr);
	/*
	 * Now let the usual logic to set up the perf_event_attr defaults
	 * to kick in when we return and before perf_evsel__open() is called.
	 */
	attr.sample_period = 0;
new_event:
	evsel = perf_evsel__new(&attr);
	if (evsel == NULL)
		goto out;

	/* use asprintf() because free(evsel) assumes name is allocated */
	if (asprintf(&evsel->name, "cycles%s%s%.*s",
		     (attr.precise_ip || attr.exclude_kernel) ? ":" : "",
		     attr.exclude_kernel ? "u" : "",
		     attr.precise_ip ? attr.precise_ip + 1 : 0, "ppp") < 0)
		goto error_free;
out:
	return evsel;

error_free:
	perf_evsel__delete(evsel);
	evsel = NULL;
	goto out;
}
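/*
 * Resulting names (illustration): with precise_ip == 2 and kernel profiling
 * allowed the format string above yields "cycles:ppp"; with exclude_kernel
 * set and no precision it yields "cycles:u".
 */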
/*
 * Returns pointer with encoded error via <linux/err.h> interface.
 */
struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
{
	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
	int err = -ENOMEM;

	if (evsel == NULL) {
		goto out_err;
	} else {
		struct perf_event_attr attr = {
			.type	     = PERF_TYPE_TRACEPOINT,
			.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = trace_event__tp_format(sys, name);
		if (IS_ERR(evsel->tp_format)) {
			err = PTR_ERR(evsel->tp_format);
			goto out_free;
		}

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		perf_evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	zfree(&evsel->name);
	free(evsel);
out_err:
	return ERR_PTR(err);
}
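/*
 * Typical usage (sketch, assuming the perf_evsel__newtp() wrapper from
 * evsel.h that passes idx == 0):
 *
 *	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
 *
 *	if (IS_ERR(evsel))
 *		return PTR_ERR(evsel);
 *
 * attr.config ends up holding the tracepoint id parsed from tracefs.
 */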
const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}
static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}
static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
	"dummy",
};

static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}

static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}

static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
 { "LLC",	"L2",							},
 { "dTLB",	"d-tlb",	"Data-TLB",				},
 { "iTLB",	"i-tlb",	"Instruction-TLB",			},
 { "branch",	"branches",	"bpu",		"btb",		"bpc",	},
 { "node",								},
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",	"loads",	"read",					},
 { "store",	"stores",	"write",				},
 { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access",		},
 { "misses",	"miss",							},
};
#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)]	= (CACHE_READ),
 [C(BPU)]	= (CACHE_READ),
 [C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}
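/*
 * Example (illustration): stores to the instruction cache make no sense, so
 * perf_evsel__is_cache_op_valid(PERF_COUNT_HW_CACHE_L1I,
 * PERF_COUNT_HW_CACHE_OP_WRITE) returns false, while L1I reads and
 * prefetches are accepted per the table above.
 */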
int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}

static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type >= PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}
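/*
 * Worked example (illustration): config == 0x10000 decodes as type 0 (L1D),
 * op 0 (read) and result 1 (miss), producing "L1-dcache-load-misses".  A
 * result of 0 (access) selects the two-part form instead, e.g.
 * "L1-dcache-loads".
 */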
static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}
const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->attr.type);
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}

const char *perf_evsel__group_name(struct perf_evsel *evsel)
{
	return evsel->group_name ?: "anon group";
}
int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
{
	int ret;
	struct perf_evsel *pos;
	const char *group_name = perf_evsel__group_name(evsel);

	ret = scnprintf(buf, size, "%s", group_name);

	ret += scnprintf(buf + ret, size - ret, " { %s",
			 perf_evsel__name(evsel));

	for_each_group_member(pos, evsel)
		ret += scnprintf(buf + ret, size - ret, ", %s",
				 perf_evsel__name(pos));

	ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}
void perf_evsel__config_callchain(struct perf_evsel *evsel,
				  struct record_opts *opts,
				  struct callchain_param *param)
{
	bool function = perf_evsel__is_function_event(evsel);
	struct perf_event_attr *attr = &evsel->attr;

	perf_evsel__set_sample_bit(evsel, CALLCHAIN);

	attr->sample_max_stack = param->max_stack;

	if (param->record_mode == CALLCHAIN_LBR) {
		if (!opts->branch_stack) {
			if (attr->exclude_user) {
				pr_warning("LBR callstack option is only available "
					   "to get user callchain information. "
					   "Falling back to framepointers.\n");
			} else {
				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
				attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
							PERF_SAMPLE_BRANCH_CALL_STACK |
							PERF_SAMPLE_BRANCH_NO_CYCLES |
							PERF_SAMPLE_BRANCH_NO_FLAGS;
			}
		} else
			pr_warning("Cannot use LBR callstack with branch stack. "
				   "Falling back to framepointers.\n");
	}

	if (param->record_mode == CALLCHAIN_DWARF) {
		if (!function) {
			perf_evsel__set_sample_bit(evsel, REGS_USER);
			perf_evsel__set_sample_bit(evsel, STACK_USER);
			attr->sample_regs_user |= PERF_REGS_MASK;
			attr->sample_stack_user = param->dump_size;
			attr->exclude_callchain_user = 1;
		} else {
			pr_info("Cannot use DWARF unwind for function trace event,"
				" falling back to framepointers.\n");
		}
	}

	if (function) {
		pr_info("Disabling user space callchains for function trace event.\n");
		attr->exclude_callchain_user = 1;
	}
}
static void
perf_evsel__reset_callgraph(struct perf_evsel *evsel,
			    struct callchain_param *param)
{
	struct perf_event_attr *attr = &evsel->attr;

	perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
	if (param->record_mode == CALLCHAIN_LBR) {
		perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
					      PERF_SAMPLE_BRANCH_CALL_STACK);
	}
	if (param->record_mode == CALLCHAIN_DWARF) {
		perf_evsel__reset_sample_bit(evsel, REGS_USER);
		perf_evsel__reset_sample_bit(evsel, STACK_USER);
	}
}
static void apply_config_terms(struct perf_evsel *evsel,
			       struct record_opts *opts)
{
	struct perf_evsel_config_term *term;
	struct list_head *config_terms = &evsel->config_terms;
	struct perf_event_attr *attr = &evsel->attr;
	/* callgraph default */
	struct callchain_param param = {
		.record_mode = callchain_param.record_mode,
	};
	u32 dump_size = 0;
	int max_stack = 0;
	const char *callgraph_buf = NULL;

	list_for_each_entry(term, config_terms, list) {
		switch (term->type) {
		case PERF_EVSEL__CONFIG_TERM_PERIOD:
			if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
				attr->sample_period = term->val.period;
				attr->freq = 0;
			}
			break;
		case PERF_EVSEL__CONFIG_TERM_FREQ:
			if (!(term->weak && opts->user_freq != UINT_MAX)) {
				attr->sample_freq = term->val.freq;
				attr->freq = 1;
			}
			break;
		case PERF_EVSEL__CONFIG_TERM_TIME:
			if (term->val.time)
				perf_evsel__set_sample_bit(evsel, TIME);
			else
				perf_evsel__reset_sample_bit(evsel, TIME);
			break;
		case PERF_EVSEL__CONFIG_TERM_CALLGRAPH:
			callgraph_buf = term->val.callgraph;
			break;
		case PERF_EVSEL__CONFIG_TERM_BRANCH:
			if (term->val.branch && strcmp(term->val.branch, "no")) {
				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
				parse_branch_str(term->val.branch,
						 &attr->branch_sample_type);
			} else
				perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
			break;
		case PERF_EVSEL__CONFIG_TERM_STACK_USER:
			dump_size = term->val.stack_user;
			break;
		case PERF_EVSEL__CONFIG_TERM_MAX_STACK:
			max_stack = term->val.max_stack;
			break;
		case PERF_EVSEL__CONFIG_TERM_INHERIT:
			/*
			 * attr->inherit should have already been set by
			 * perf_evsel__config. If user explicitly set
			 * inherit using config terms, override global
			 * opt->no_inherit setting.
			 */
			attr->inherit = term->val.inherit ? 1 : 0;
			break;
		case PERF_EVSEL__CONFIG_TERM_OVERWRITE:
			attr->write_backward = term->val.overwrite ? 1 : 0;
			break;
		default:
			break;
		}
	}

	/* User explicitly set per-event callgraph, clear the old setting and reset. */
	if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
		if (max_stack) {
			param.max_stack = max_stack;
			if (callgraph_buf == NULL)
				callgraph_buf = "fp";
		}

		/* parse callgraph parameters */
		if (callgraph_buf != NULL) {
			if (!strcmp(callgraph_buf, "no")) {
				param.enabled = false;
				param.record_mode = CALLCHAIN_NONE;
			} else {
				param.enabled = true;
				if (parse_callchain_record(callgraph_buf, &param)) {
					pr_err("per-event callgraph setting for %s failed. "
					       "Apply callgraph global setting for it\n",
					       evsel->name);
					return;
				}
			}
		}
		if (dump_size > 0) {
			dump_size = round_up(dump_size, sizeof(u64));
			param.dump_size = dump_size;
		}

		/* If global callgraph set, clear it */
		if (callchain_param.enabled)
			perf_evsel__reset_callgraph(evsel, &callchain_param);

		/* set perf-event callgraph */
		if (param.enabled)
			perf_evsel__config_callchain(evsel, opts, &param);
	}
}
/*
 * The enable_on_exec/disabled value strategy:
 *
 *  1) For any type of traced program:
 *    - all independent events and group leaders are disabled
 *    - all group members are enabled
 *
 *     Group members are ruled by group leaders. They need to
 *     be enabled, because the group scheduling relies on that.
 *
 *  2) For traced programs executed by perf:
 *    - all independent events and group leaders have
 *      enable_on_exec set
 *    - we don't specifically enable or disable any event during
 *      the record command
 *
 *     Independent events and group leaders are initially disabled
 *     and get enabled by exec. Group members are ruled by group
 *     leaders as stated in 1).
 *
 *  3) For traced programs attached by perf (pid/tid):
 *    - we specifically enable or disable all events during
 *      the record command
 *
 *     When attaching events to an already running traced program we
 *     enable/disable events specifically, as there's no
 *     initial traced exec call.
 */
void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
			struct callchain_param *callchain)
{
	struct perf_evsel *leader = evsel->leader;
	struct perf_event_attr *attr = &evsel->attr;
	int track = evsel->tracking;
	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;
	attr->write_backward = opts->overwrite ? 1 : 0;

	perf_evsel__set_sample_bit(evsel, IP);
	perf_evsel__set_sample_bit(evsel, TID);

	if (evsel->sample_read) {
		perf_evsel__set_sample_bit(evsel, READ);

		/*
		 * We need ID even in case of single event, because
		 * PERF_SAMPLE_READ processes ID specific data.
		 */
		perf_evsel__set_sample_id(evsel, false);

		/*
		 * Apply group format only if we belong to group
		 * with more than one member.
		 */
		if (leader->nr_members > 1) {
			attr->read_format |= PERF_FORMAT_GROUP;
			attr->inherit = 0;
		}
	}

	/*
	 * We default some events to have a default interval. But keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			perf_evsel__set_sample_bit(evsel, PERIOD);
			attr->freq		= 1;
			attr->sample_freq	= opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	/*
	 * Disable sampling for all group members other
	 * than leader in case leader 'leads' the sampling.
	 */
	if ((leader != evsel) && leader->sample_read) {
		attr->sample_freq   = 0;
		attr->sample_period = 0;
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat) {
		evsel->attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING |
			PERF_FORMAT_ID;
		attr->inherit_stat = 1;
	}

	if (opts->sample_address) {
		perf_evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	/*
	 * We don't allow user space callchains for function trace
	 * event, due to issues with page faults while tracing page
	 * fault handler and its overall tricky nature.
	 */
	if (perf_evsel__is_function_event(evsel))
		evsel->attr.exclude_callchain_user = 1;

	if (callchain && callchain->enabled && !evsel->no_aux_samples)
		perf_evsel__config_callchain(evsel, opts, callchain);

	if (opts->sample_intr_regs) {
		attr->sample_regs_intr = opts->sample_intr_regs;
		perf_evsel__set_sample_bit(evsel, REGS_INTR);
	}

	if (opts->sample_user_regs) {
		attr->sample_regs_user |= opts->sample_user_regs;
		perf_evsel__set_sample_bit(evsel, REGS_USER);
	}

	if (target__has_cpu(&opts->target) || opts->sample_cpu)
		perf_evsel__set_sample_bit(evsel, CPU);

	if (opts->period)
		perf_evsel__set_sample_bit(evsel, PERIOD);

	/*
	 * When the user explicitly disabled time don't force it here.
	 */
	if (opts->sample_time &&
	    (!perf_missing_features.sample_id_all &&
	    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
	     opts->sample_time_set)))
		perf_evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, TIME);
		perf_evsel__set_sample_bit(evsel, RAW);
		perf_evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->sample_address)
		perf_evsel__set_sample_bit(evsel, DATA_SRC);

	if (opts->sample_phys_addr)
		perf_evsel__set_sample_bit(evsel, PHYS_ADDR);

	if (opts->no_buffering) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	if (opts->sample_weight)
		perf_evsel__set_sample_bit(evsel, WEIGHT);

	attr->task  = track;
	attr->mmap  = track;
	attr->mmap2 = track && !perf_missing_features.mmap2;
	attr->comm  = track;

	if (opts->record_namespaces)
		attr->namespaces = track;

	if (opts->record_switch_events)
		attr->context_switch = track;

	if (opts->sample_transaction)
		perf_evsel__set_sample_bit(evsel, TRANSACTION);

	if (opts->running_time) {
		evsel->attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING;
	}

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (perf_evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders for traced programs executed by perf.
	 */
	if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
		!opts->initial_delay)
		attr->enable_on_exec = 1;

	if (evsel->immediate) {
		attr->disabled = 0;
		attr->enable_on_exec = 0;
	}

	clockid = opts->clockid;
	if (opts->use_clockid) {
		attr->use_clockid = 1;
		attr->clockid = opts->clockid;
	}

	if (evsel->precise_max)
		perf_event_attr__set_max_precise_ip(attr);

	if (opts->all_user) {
		attr->exclude_kernel = 1;
		attr->exclude_user   = 0;
	}

	if (opts->all_kernel) {
		attr->exclude_kernel = 0;
		attr->exclude_user   = 1;
	}

	/*
	 * Apply event specific term settings,
	 * it overloads any global configuration.
	 */
	apply_config_terms(evsel, opts);

	evsel->ignore_missing_thread = opts->ignore_missing_thread;
}
static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->system_wide)
		nthreads = 1;

	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		int cpu, thread;
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}
static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
				 int ioc, void *arg)
{
	int cpu, thread;

	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
		for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
			int fd = FD(evsel, cpu, thread),
			    err = ioctl(fd, ioc, arg);

			if (err)
				return err;
		}
	}

	return 0;
}
int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
{
	return perf_evsel__run_ioctl(evsel,
				     PERF_EVENT_IOC_SET_FILTER,
				     (void *)filter);
}

int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter)
{
	char *new_filter = strdup(filter);

	if (new_filter != NULL) {
		free(evsel->filter);
		evsel->filter = new_filter;
		return 0;
	}

	return -1;
}

static int perf_evsel__append_filter(struct perf_evsel *evsel,
				     const char *fmt, const char *filter)
{
	char *new_filter;

	if (evsel->filter == NULL)
		return perf_evsel__set_filter(evsel, filter);

	if (asprintf(&new_filter, fmt, evsel->filter, filter) > 0) {
		free(evsel->filter);
		evsel->filter = new_filter;
		return 0;
	}

	return -1;
}

int perf_evsel__append_tp_filter(struct perf_evsel *evsel, const char *filter)
{
	return perf_evsel__append_filter(evsel, "(%s) && (%s)", filter);
}

int perf_evsel__append_addr_filter(struct perf_evsel *evsel, const char *filter)
{
	return perf_evsel__append_filter(evsel, "%s,%s", filter);
}
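/*
 * Example (illustration): after perf_evsel__set_filter(evsel, "pid == 1"),
 * calling perf_evsel__append_tp_filter(evsel, "prev_state == 0") leaves
 * evsel->filter as "(pid == 1) && (prev_state == 0)", while the address
 * filter variant simply joins the two strings with a comma.
 */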
int perf_evsel__enable(struct perf_evsel *evsel)
{
	return perf_evsel__run_ioctl(evsel,
				     PERF_EVENT_IOC_ENABLE,
				     0);
}

int perf_evsel__disable(struct perf_evsel *evsel)
{
	return perf_evsel__run_ioctl(evsel,
				     PERF_EVENT_IOC_DISABLE,
				     0);
}
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (ncpus == 0 || nthreads == 0)
		return 0;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}
static void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

static void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
}

static void perf_evsel__free_config_terms(struct perf_evsel *evsel)
{
	struct perf_evsel_config_term *term, *h;

	list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
		list_del(&term->list);
		free(term);
	}
}
void perf_evsel__close_fd(struct perf_evsel *evsel)
{
	int cpu, thread;

	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
		for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}
void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	assert(evsel->evlist == NULL);
	perf_evsel__free_fd(evsel);
	perf_evsel__free_id(evsel);
	perf_evsel__free_config_terms(evsel);
	close_cgroup(evsel->cgrp);
	cpu_map__put(evsel->cpus);
	cpu_map__put(evsel->own_cpus);
	thread_map__put(evsel->threads);
	zfree(&evsel->group_name);
	zfree(&evsel->name);
	perf_evsel__object.fini(evsel);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	free(evsel);
}
void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread,
				struct perf_counts_values *count)
{
	struct perf_counts_values tmp;

	if (!evsel->prev_raw_counts)
		return;

	if (cpu == -1) {
		tmp = evsel->prev_raw_counts->aggr;
		evsel->prev_raw_counts->aggr = *count;
	} else {
		tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread);
		*perf_counts(evsel->prev_raw_counts, cpu, thread) = *count;
	}

	count->val = count->val - tmp.val;
	count->ena = count->ena - tmp.ena;
	count->run = count->run - tmp.run;
}
void perf_counts_values__scale(struct perf_counts_values *count,
			       bool scale, s8 *pscaled)
{
	s8 scaled = 0;

	if (scale) {
		if (count->run == 0) {
			scaled = -1;
			count->val = 0;
		} else if (count->run < count->ena) {
			scaled = 1;
			count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
		}
	} else
		count->ena = count->run = 0;

	if (pscaled)
		*pscaled = scaled;
}
static int perf_evsel__read_size(struct perf_evsel *evsel)
{
	u64 read_format = evsel->attr.read_format;
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (read_format & PERF_FORMAT_GROUP) {
		nr = evsel->nr_members;
		size += sizeof(u64);
	}

	size += entry * nr;
	return size;
}
int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
		     struct perf_counts_values *count)
{
	size_t size = perf_evsel__read_size(evsel);

	memset(count, 0, sizeof(*count));

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
		return -errno;

	return 0;
}

static int
perf_evsel__read_one(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_counts_values *count = perf_counts(evsel->counts, cpu, thread);

	return perf_evsel__read(evsel, cpu, thread, count);
}

static void
perf_evsel__set_count(struct perf_evsel *counter, int cpu, int thread,
		      u64 val, u64 ena, u64 run)
{
	struct perf_counts_values *count;

	count = perf_counts(counter->counts, cpu, thread);

	count->val    = val;
	count->ena    = ena;
	count->run    = run;
	count->loaded = true;
}

static int
perf_evsel__process_group_data(struct perf_evsel *leader,
			       int cpu, int thread, u64 *data)
{
	u64 read_format = leader->attr.read_format;
	struct sample_read_value *v;
	u64 nr, ena = 0, run = 0, i;

	nr = *data++;

	if (nr != (u64) leader->nr_members)
		return -EINVAL;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		ena = *data++;

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		run = *data++;

	v = (struct sample_read_value *) data;

	perf_evsel__set_count(leader, cpu, thread,
			      v[0].value, ena, run);

	for (i = 1; i < nr; i++) {
		struct perf_evsel *counter;

		counter = perf_evlist__id2evsel(leader->evlist, v[i].id);
		if (!counter)
			return -EINVAL;

		perf_evsel__set_count(counter, cpu, thread,
				      v[i].value, ena, run);
	}

	return 0;
}

static int
perf_evsel__read_group(struct perf_evsel *leader, int cpu, int thread)
{
	struct perf_stat_evsel *ps = leader->stats;
	u64 read_format = leader->attr.read_format;
	int size = perf_evsel__read_size(leader);
	u64 *data = ps->group_data;

	if (!(read_format & PERF_FORMAT_ID))
		return -EINVAL;

	if (!perf_evsel__is_group_leader(leader))
		return -EINVAL;

	if (!data) {
		data = zalloc(size);
		if (!data)
			return -ENOMEM;

		ps->group_data = data;
	}

	if (FD(leader, cpu, thread) < 0)
		return -EINVAL;

	if (readn(FD(leader, cpu, thread), data, size) <= 0)
		return -errno;

	return perf_evsel__process_group_data(leader, cpu, thread, data);
}

int perf_evsel__read_counter(struct perf_evsel *evsel, int cpu, int thread)
{
	u64 read_format = evsel->attr.read_format;

	if (read_format & PERF_FORMAT_GROUP)
		return perf_evsel__read_group(evsel, cpu, thread);

	return perf_evsel__read_one(evsel, cpu, thread);
}

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) <= 0)
		return -errno;

	perf_evsel__compute_deltas(evsel, cpu, thread, &count);
	perf_counts_values__scale(&count, scale, NULL);
	*perf_counts(evsel->counts, cpu, thread) = count;
	return 0;
}
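/*
 * Note (added for clarity): nv == 3 relies on the attr's read_format having
 * both PERF_FORMAT_TOTAL_TIME_ENABLED and PERF_FORMAT_TOTAL_TIME_RUNNING
 * set, so the kernel returns { value, time_enabled, time_running } and the
 * deltas can be scaled for multiplexed counters.
 */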
static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_evsel *leader = evsel->leader;
	int fd;

	if (perf_evsel__is_group_leader(evsel))
		return -1;

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	BUG_ON(!leader->fd);

	fd = FD(leader, cpu, thread);
	BUG_ON(fd == -1);

	return fd;
}
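/*
 * The returned fd is passed as the group_fd argument of
 * sys_perf_event_open(): -1 for a leader tells the kernel to start a new
 * group, anything else attaches the new event to the leader's group.
 */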
struct bit_names {
	int bit;
	const char *name;
};

static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)
{
	bool first_bit = true;
	int i = 0;

	do {
		if (value & bits[i].bit) {
			buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
			first_bit = false;
		}
	} while (bits[++i].name != NULL);
}
static void __p_sample_type(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_##n, #n }
	struct bit_names bits[] = {
		bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
		bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
		bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
		bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
		bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC),
		bit_name(WEIGHT), bit_name(PHYS_ADDR),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}
static void __p_branch_sample_type(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_BRANCH_##n, #n }
	struct bit_names bits[] = {
		bit_name(USER), bit_name(KERNEL), bit_name(HV), bit_name(ANY),
		bit_name(ANY_CALL), bit_name(ANY_RETURN), bit_name(IND_CALL),
		bit_name(ABORT_TX), bit_name(IN_TX), bit_name(NO_TX),
		bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP),
		bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}
static void __p_read_format(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
	struct bit_names bits[] = {
		bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
		bit_name(ID), bit_name(GROUP),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}
#define BUF_SIZE		1024

#define p_hex(val)		snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
#define p_unsigned(val)		snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
#define p_signed(val)		snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
#define p_sample_type(val)	__p_sample_type(buf, BUF_SIZE, val)
#define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val)
#define p_read_format(val)	__p_read_format(buf, BUF_SIZE, val)

#define PRINT_ATTRn(_n, _f, _p)				\
do {							\
	if (attr->_f) {					\
		_p(attr->_f);				\
		ret += attr__fprintf(fp, _n, buf, priv);\
	}						\
} while (0)

#define PRINT_ATTRf(_f, _p)	PRINT_ATTRn(#_f, _f, _p)
int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
			     attr__fprintf_f attr__fprintf, void *priv)
{
	char buf[BUF_SIZE];
	int ret = 0;

	PRINT_ATTRf(type, p_unsigned);
	PRINT_ATTRf(size, p_unsigned);
	PRINT_ATTRf(config, p_hex);
	PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
	PRINT_ATTRf(sample_type, p_sample_type);
	PRINT_ATTRf(read_format, p_read_format);

	PRINT_ATTRf(disabled, p_unsigned);
	PRINT_ATTRf(inherit, p_unsigned);
	PRINT_ATTRf(pinned, p_unsigned);
	PRINT_ATTRf(exclusive, p_unsigned);
	PRINT_ATTRf(exclude_user, p_unsigned);
	PRINT_ATTRf(exclude_kernel, p_unsigned);
	PRINT_ATTRf(exclude_hv, p_unsigned);
	PRINT_ATTRf(exclude_idle, p_unsigned);
	PRINT_ATTRf(mmap, p_unsigned);
	PRINT_ATTRf(comm, p_unsigned);
	PRINT_ATTRf(freq, p_unsigned);
	PRINT_ATTRf(inherit_stat, p_unsigned);
	PRINT_ATTRf(enable_on_exec, p_unsigned);
	PRINT_ATTRf(task, p_unsigned);
	PRINT_ATTRf(watermark, p_unsigned);
	PRINT_ATTRf(precise_ip, p_unsigned);
	PRINT_ATTRf(mmap_data, p_unsigned);
	PRINT_ATTRf(sample_id_all, p_unsigned);
	PRINT_ATTRf(exclude_host, p_unsigned);
	PRINT_ATTRf(exclude_guest, p_unsigned);
	PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
	PRINT_ATTRf(exclude_callchain_user, p_unsigned);
	PRINT_ATTRf(mmap2, p_unsigned);
	PRINT_ATTRf(comm_exec, p_unsigned);
	PRINT_ATTRf(use_clockid, p_unsigned);
	PRINT_ATTRf(context_switch, p_unsigned);
	PRINT_ATTRf(write_backward, p_unsigned);

	PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
	PRINT_ATTRf(bp_type, p_unsigned);
	PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
	PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
	PRINT_ATTRf(branch_sample_type, p_branch_sample_type);
	PRINT_ATTRf(sample_regs_user, p_hex);
	PRINT_ATTRf(sample_stack_user, p_unsigned);
	PRINT_ATTRf(clockid, p_signed);
	PRINT_ATTRf(sample_regs_intr, p_hex);
	PRINT_ATTRf(aux_watermark, p_unsigned);
	PRINT_ATTRf(sample_max_stack, p_unsigned);

	return ret;
}
static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __maybe_unused)
{
	return fprintf(fp, "  %-32s %s\n", name, val);
}
static void perf_evsel__remove_fd(struct perf_evsel *pos,
				  int nr_cpus, int nr_threads,
				  int thread_idx)
{
	for (int cpu = 0; cpu < nr_cpus; cpu++)
		for (int thread = thread_idx; thread < nr_threads - 1; thread++)
			FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
}
static int update_fds(struct perf_evsel *evsel,
		      int nr_cpus, int cpu_idx,
		      int nr_threads, int thread_idx)
{
	struct perf_evsel *pos;

	if (cpu_idx >= nr_cpus || thread_idx >= nr_threads)
		return -EINVAL;

	evlist__for_each_entry(evsel->evlist, pos) {
		nr_cpus = pos != evsel ? nr_cpus : cpu_idx;

		perf_evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);

		/*
		 * Since fds for the next evsel have not been created,
		 * there is no need to iterate the whole event list.
		 */
		if (pos == evsel)
			break;
	}
	return 0;
}
static bool ignore_missing_thread(struct perf_evsel *evsel,
				  int nr_cpus, int cpu,
				  struct thread_map *threads,
				  int thread, int err)
{
	pid_t ignore_pid = thread_map__pid(threads, thread);

	if (!evsel->ignore_missing_thread)
		return false;

	/* The system wide setup does not work with threads. */
	if (evsel->system_wide)
		return false;

	/* The -ESRCH is perf event syscall errno for pid's not found. */
	if (err != -ESRCH)
		return false;

	/* If there's only one thread, let it fail. */
	if (threads->nr == 1)
		return false;

	/*
	 * We should remove fd for missing_thread first
	 * because thread_map__remove() will decrease threads->nr.
	 */
	if (update_fds(evsel, nr_cpus, cpu, threads->nr, thread))
		return false;

	if (thread_map__remove(threads, thread))
		return false;

	pr_warning("WARNING: Ignored open failure for pid %d\n",
		   ignore_pid);
	return true;
}
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads)
{
	int cpu, thread, nthreads;
	unsigned long flags = PERF_FLAG_FD_CLOEXEC;
	int pid = -1, err;
	enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;

	if (perf_missing_features.write_backward && evsel->attr.write_backward)
		return -EINVAL;

	if (cpus == NULL) {
		static struct cpu_map *empty_cpu_map;

		if (empty_cpu_map == NULL) {
			empty_cpu_map = cpu_map__dummy_new();
			if (empty_cpu_map == NULL)
				return -ENOMEM;
		}

		cpus = empty_cpu_map;
	}

	if (threads == NULL) {
		static struct thread_map *empty_thread_map;

		if (empty_thread_map == NULL) {
			empty_thread_map = thread_map__new_by_tid(-1);
			if (empty_thread_map == NULL)
				return -ENOMEM;
		}

		threads = empty_thread_map;
	}

	if (evsel->system_wide)
		nthreads = 1;
	else
		nthreads = threads->nr;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags |= PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

fallback_missing_features:
	if (perf_missing_features.clockid_wrong)
		evsel->attr.clockid = CLOCK_MONOTONIC; /* should always work */
	if (perf_missing_features.clockid) {
		evsel->attr.use_clockid = 0;
		evsel->attr.clockid = 0;
	}
	if (perf_missing_features.cloexec)
		flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
	if (perf_missing_features.mmap2)
		evsel->attr.mmap2 = 0;
	if (perf_missing_features.exclude_guest)
		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
	if (perf_missing_features.lbr_flags)
		evsel->attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
						    PERF_SAMPLE_BRANCH_NO_CYCLES);
	if (perf_missing_features.group_read && evsel->attr.inherit)
		evsel->attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID);
retry_sample_id:
	if (perf_missing_features.sample_id_all)
		evsel->attr.sample_id_all = 0;

	if (verbose >= 2) {
		fprintf(stderr, "%.60s\n", graph_dotted_line);
		fprintf(stderr, "perf_event_attr:\n");
		perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL);
		fprintf(stderr, "%.60s\n", graph_dotted_line);
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {

		for (thread = 0; thread < nthreads; thread++) {
			int fd, group_fd;

			if (!evsel->cgrp && !evsel->system_wide)
				pid = thread_map__pid(threads, thread);

			group_fd = get_group_fd(evsel, cpu, thread);
retry_open:
			pr_debug2("sys_perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx",
				  pid, cpus->map[cpu], group_fd, flags);

			test_attr__ready();

			fd = sys_perf_event_open(&evsel->attr, pid, cpus->map[cpu],
						 group_fd, flags);

			FD(evsel, cpu, thread) = fd;

			if (fd < 0) {
				err = -errno;

				if (ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) {
					/*
					 * We just removed 1 thread, so take a step
					 * back on thread index and lower the upper
					 * nthreads limit.
					 */
					nthreads--;
					thread--;

					/* ... and pretend like nothing has happened. */
					err = 0;
					continue;
				}

				pr_debug2("\nsys_perf_event_open failed, error %d\n",
					  err);
				goto try_fallback;
			}

			pr_debug2(" = %d\n", fd);

			if (evsel->bpf_fd >= 0) {
				int evt_fd = fd;
				int bpf_fd = evsel->bpf_fd;

				err = ioctl(evt_fd,
					    PERF_EVENT_IOC_SET_BPF,
					    bpf_fd);
				if (err && errno != EEXIST) {
					pr_err("failed to attach bpf fd %d: %s\n",
					       bpf_fd, strerror(errno));
					err = -EINVAL;
					goto out_close;
				}
			}

			set_rlimit = NO_CHANGE;

			/*
			 * If we succeeded but had to kill clockid, fail and
			 * have perf_evsel__open_strerror() print us a nice
			 * error.
			 */
			if (perf_missing_features.clockid ||
			    perf_missing_features.clockid_wrong) {
				err = -EINVAL;
				goto out_close;
			}
		}
	}

	return 0;

try_fallback:
	/*
	 * perf stat needs between 5 and 22 fds per CPU. When we run out
	 * of them try to increase the limits.
	 */
	if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
		struct rlimit l;
		int old_errno = errno;

		if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
			if (set_rlimit == NO_CHANGE)
				l.rlim_cur = l.rlim_max;
			else {
				l.rlim_cur = l.rlim_max + 1000;
				l.rlim_max = l.rlim_cur;
			}
			if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
				set_rlimit++;
				errno = old_errno;
				goto retry_open;
			}
		}
		errno = old_errno;
	}

	if (err != -EINVAL || cpu > 0 || thread > 0)
		goto out_close;

	/*
	 * Must probe features in the order they were added to the
	 * perf_event_attr interface.
	 */
	if (!perf_missing_features.write_backward && evsel->attr.write_backward) {
		perf_missing_features.write_backward = true;
		pr_debug2("switching off write_backward\n");
		goto out_close;
	} else if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
		perf_missing_features.clockid_wrong = true;
		pr_debug2("switching off clockid\n");
		goto fallback_missing_features;
	} else if (!perf_missing_features.clockid && evsel->attr.use_clockid) {
		perf_missing_features.clockid = true;
		pr_debug2("switching off use_clockid\n");
		goto fallback_missing_features;
	} else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
		perf_missing_features.cloexec = true;
		pr_debug2("switching off cloexec flag\n");
		goto fallback_missing_features;
	} else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
		perf_missing_features.mmap2 = true;
		pr_debug2("switching off mmap2\n");
		goto fallback_missing_features;
	} else if (!perf_missing_features.exclude_guest &&
		   (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
		perf_missing_features.exclude_guest = true;
		pr_debug2("switching off exclude_guest, exclude_host\n");
		goto fallback_missing_features;
	} else if (!perf_missing_features.sample_id_all) {
		perf_missing_features.sample_id_all = true;
		pr_debug2("switching off sample_id_all\n");
		goto retry_sample_id;
	} else if (!perf_missing_features.lbr_flags &&
			(evsel->attr.branch_sample_type &
			 (PERF_SAMPLE_BRANCH_NO_CYCLES |
			  PERF_SAMPLE_BRANCH_NO_FLAGS))) {
		perf_missing_features.lbr_flags = true;
		pr_debug2("switching off branch sample type no (cycles/flags)\n");
		goto fallback_missing_features;
	} else if (!perf_missing_features.group_read &&
		    evsel->attr.inherit &&
		   (evsel->attr.read_format & PERF_FORMAT_GROUP)) {
		perf_missing_features.group_read = true;
		pr_debug2("switching off group read\n");
		goto fallback_missing_features;
	}
out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = nthreads;
	} while (--cpu >= 0);
	return err;
}
void perf_evsel__close(struct perf_evsel *evsel)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel);
	perf_evsel__free_fd(evsel);
}
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus)
{
	return perf_evsel__open(evsel, cpus, NULL);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads)
{
	return perf_evsel__open(evsel, NULL, threads);
}
static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	const u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
		array--;
	}

	return 0;
}
static inline bool overflow(const void *endp, u16 max_size, const void *offset,
			    u64 size)
{
	return size > max_size || offset + size > endp;
}

#define OVERFLOW_CHECK(offset, size, max_size)				\
	do {								\
		if (overflow(endp, (max_size), (offset), (size)))	\
			return -EFAULT;					\
	} while (0)

#define OVERFLOW_CHECK_u64(offset) \
	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
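/*
 * Usage note (added for clarity): every variable-length region below is read
 * as "check, then advance", e.g. (illustration):
 *
 *	OVERFLOW_CHECK_u64(array);	// at least one u64 left?
 *	data->time = *array;
 *	array++;
 *
 * so a truncated or corrupt event fails with -EFAULT instead of reading
 * past event->header.size.
 */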
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *data)
{
	u64 type = evsel->attr.sample_type;
	bool swapped = evsel->needs_swap;
	const u64 *array;
	u16 max_size = event->header.size;
	const void *endp = (void *)event + max_size;
	u64 sz;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = evsel->attr.sample_period;
	data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->attr.sample_id_all)
			return 0;
		return perf_evsel__parse_id_sample(evsel, event, data);
	}

	array = event->sample.array;

	/*
	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
	 * up to PERF_SAMPLE_PERIOD.  After that overflow() must be used to
	 * check the format does not go past the end of the event.
	 */
	if (evsel->sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		data->ip = *array;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		u64 read_format = evsel->attr.read_format;

		OVERFLOW_CHECK_u64(array);
		if (read_format & PERF_FORMAT_GROUP)
			data->read.group.nr = *array;
		else
			data->read.one.value = *array;

		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_enabled = *array;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_running = *array;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			const u64 max_group_nr = UINT64_MAX /
					sizeof(struct sample_read_value);

			if (data->read.group.nr > max_group_nr)
				return -EFAULT;
			sz = data->read.group.nr *
			     sizeof(struct sample_read_value);
			OVERFLOW_CHECK(array, sz, max_size);
			data->read.group.values =
					(struct sample_read_value *)array;
			array = (void *)array + sz;
		} else {
			OVERFLOW_CHECK_u64(array);
			data->read.one.id = *array;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);

		OVERFLOW_CHECK_u64(array);
		data->callchain = (struct ip_callchain *)array++;
		if (data->callchain->nr > max_callchain_nr)
			return -EFAULT;
		sz = data->callchain->nr * sizeof(u64);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		OVERFLOW_CHECK_u64(array);
		u.val64 = *array;
		if (swapped) {
			/*
			 * Undo swap of u64, then swap on individual u32s,
			 * get the size of the raw area and undo all of the
			 * swap. The pevent interface handles endianness by
			 * itself.
			 */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}
		data->raw_size = u.val32[0];

		/*
		 * The raw data is aligned on 64bits including the
		 * u32 size, so it's safe to use mem_bswap_64.
		 */
		if (swapped)
			mem_bswap_64((void *) array, data->raw_size);

		array = (void *)array + sizeof(u32);

		OVERFLOW_CHECK(array, data->raw_size, max_size);
		data->raw_data = (void *)array;
		array = (void *)array + data->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		const u64 max_branch_nr = UINT64_MAX /
					  sizeof(struct branch_entry);

		OVERFLOW_CHECK_u64(array);
		data->branch_stack = (struct branch_stack *)array++;

		if (data->branch_stack->nr > max_branch_nr)
			return -EFAULT;
		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		OVERFLOW_CHECK_u64(array);
		data->user_regs.abi = *array;
		array++;

		if (data->user_regs.abi) {
			u64 mask = evsel->attr.sample_regs_user;

			sz = hweight_long(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_regs.mask = mask;
			data->user_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		OVERFLOW_CHECK_u64(array);
		sz = *array++;

		data->user_stack.offset = ((char *)(array - 1)
					  - (char *) event);

		if (!sz) {
			data->user_stack.size = 0;
		} else {
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_stack.data = (char *)array;
			array = (void *)array + sz;
			OVERFLOW_CHECK_u64(array);
			data->user_stack.size = *array++;
			if (WARN_ONCE(data->user_stack.size > sz,
				      "user stack dump failure\n"))
				return -EFAULT;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		OVERFLOW_CHECK_u64(array);
		data->weight = *array;
		array++;
	}

	data->data_src = PERF_MEM_DATA_SRC_NONE;
	if (type & PERF_SAMPLE_DATA_SRC) {
		OVERFLOW_CHECK_u64(array);
		data->data_src = *array;
		array++;
	}

	data->transaction = 0;
	if (type & PERF_SAMPLE_TRANSACTION) {
		OVERFLOW_CHECK_u64(array);
		data->transaction = *array;
		array++;
	}

	data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
	if (type & PERF_SAMPLE_REGS_INTR) {
		OVERFLOW_CHECK_u64(array);
		data->intr_regs.abi = *array;
		array++;

		if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
			u64 mask = evsel->attr.sample_regs_intr;

			sz = hweight_long(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->intr_regs.mask = mask;
			data->intr_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	data->phys_addr = 0;
	if (type & PERF_SAMPLE_PHYS_ADDR) {
		data->phys_addr = *array;
		array++;
	}

	return 0;
}
size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
				     u64 read_format)
{
	size_t sz, result = sizeof(struct sample_event);

	if (type & PERF_SAMPLE_IDENTIFIER)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_IP)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TIME)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_STREAM_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CPU)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_PERIOD)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_READ) {
		result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			result += sizeof(u64);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		result += sizeof(u32);
		result += sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		result += sizeof(u64);
		if (sz) {
			result += sz;
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_DATA_SRC)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TRANSACTION)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR)
		result += sizeof(u64);

	return result;
}
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  u64 read_format,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;
	size_t sz;
	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		*array = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		u.val32[1] = 0;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			*array = sample->read.group.nr;
		else
			*array = sample->read.one.value;
		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			*array = sample->read.time_enabled;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			*array = sample->read.time_running;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			memcpy(array, sample->read.group.values, sz);
			array = (void *)array + sz;
		} else {
			*array = sample->read.one.id;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		memcpy(array, sample->callchain, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		u.val32[0] = sample->raw_size;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array = (void *)array + sizeof(u32);

		memcpy(array, sample->raw_data, sample->raw_size);
		array = (void *)array + sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		memcpy(array, sample->branch_stack, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			*array++ = sample->user_regs.abi;
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			memcpy(array, sample->user_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		*array++ = sz;
		if (sz) {
			memcpy(array, sample->user_stack.data, sz);
			array = (void *)array + sz;
			*array++ = sz;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		*array = sample->weight;
		array++;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		*array = sample->data_src;
		array++;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		*array = sample->transaction;
		array++;
	}

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			*array++ = sample->intr_regs.abi;
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			memcpy(array, sample->intr_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR) {
		*array = sample->phys_addr;
		array++;
	}

	return 0;
}
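/*
 * Note on the bswap blocks above: for cross-endian data each embedded u32
 * is byte-swapped and then the containing u64 is byte-swapped as well,
 * which is exactly the inverse of the swapping done by
 * perf_evsel__parse_sample(), so a reader of the opposite endianness
 * recovers the two 32-bit halves (e.g. pid/tid) in the right order.
 */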
struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
	return pevent_find_field(evsel->tp_format, name);
}
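/*
 * Example (hypothetical caller, sketch only): look up a tracepoint field
 * descriptor by name and inspect its layout:
 *
 *	struct format_field *field = perf_evsel__field(evsel, "prev_comm");
 *
 *	if (field != NULL)
 *		pr_debug("offset=%d size=%d\n", field->offset, field->size);
 */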
void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;

	if (field->flags & FIELD_IS_DYNAMIC) {
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
	}

	return sample->raw_data + offset;
}
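/*
 * For FIELD_IS_DYNAMIC fields the value stored at field->offset is a
 * 32-bit descriptor whose low 16 bits hold the real payload offset
 * (hence the 0xffff mask above), while the high 16 bits carry the
 * payload length.
 */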
u64 format_field__intval(struct format_field *field, struct perf_sample *sample,
			 bool needs_swap)
{
	u64 value;
	void *ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		memcpy(&value, ptr, sizeof(u64));
		break;
	default:
		return 0;
	}

	if (!needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
	default:
		return 0;
	}

	return 0;
}
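/*
 * The 8-byte case above goes through memcpy() rather than a direct u64
 * load, presumably because tracepoint raw data does not guarantee 8-byte
 * alignment for its fields.
 */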
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);

	if (!field)
		return 0;

	return format_field__intval(field, sample, evsel->needs_swap);
}
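/*
 * Sketch of a hypothetical tracepoint handler built on the helpers above
 * (field names as in sched:sched_switch):
 *
 *	static void handle_sched_switch(struct perf_evsel *evsel,
 *					struct perf_sample *sample)
 *	{
 *		const char *prev_comm = perf_evsel__rawptr(evsel, sample,
 *							   "prev_comm");
 *		u64 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
 *
 *		pr_debug("%s (%" PRIu64 ") switched out\n", prev_comm, prev_pid);
 *	}
 */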
bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
			  char *msg, size_t msgsize)
{
	int paranoid;

	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
	    evsel->attr.type == PERF_TYPE_HARDWARE &&
	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
		/*
		 * If it's cycles then fall back to hrtimer based
		 * cpu-clock-tick sw counter, which is always available even if
		 * no PMU support.
		 *
		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
		 * b0a873e).
		 */
		scnprintf(msg, msgsize, "%s",
"The cycles event is not supported, trying to fall back to cpu-clock-ticks");

		evsel->attr.type   = PERF_TYPE_SOFTWARE;
		evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;

		zfree(&evsel->name);
		return true;
	} else if (err == EACCES && !evsel->attr.exclude_kernel &&
		   (paranoid = perf_event_paranoid()) > 1) {
		const char *name = perf_evsel__name(evsel);
		char *new_name;

		if (asprintf(&new_name, "%s%su", name, strchr(name, ':') ? "" : ":") < 0)
			return false;

		if (evsel->name)
			free(evsel->name);
		evsel->name = new_name;
		scnprintf(msg, msgsize,
"kernel.perf_event_paranoid=%d, trying to fall back to excluding kernel samples", paranoid);
		evsel->attr.exclude_kernel = 1;

		return true;
	}

	return false;
}
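/*
 * Callers such as 'perf top' retry the failed open after a successful
 * fallback (sketch only):
 *
 *	while (perf_evsel__open(evsel, cpus, threads) < 0) {
 *		if (!perf_evsel__fallback(evsel, errno, msg, sizeof(msg)))
 *			break;
 *		pr_warning("%s\n", msg);
 *	}
 */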
static bool find_process(const char *name)
{
	size_t len = strlen(name);
	DIR *dir;
	struct dirent *d;
	int ret = -1;

	dir = opendir(procfs__mountpoint());
	if (!dir)
		return false;

	/* Walk through the directory. */
	while (ret && (d = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		char *data;
		size_t size;

		if ((d->d_type != DT_DIR) ||
		     !strcmp(".", d->d_name) ||
		     !strcmp("..", d->d_name))
			continue;

		scnprintf(path, sizeof(path), "%s/%s/comm",
			  procfs__mountpoint(), d->d_name);

		if (filename__read_str(path, &data, &size))
			continue;

		ret = strncmp(name, data, len);
		free(data);
	}

	closedir(dir);
	return ret ? false : true;
}
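/*
 * find_process() returns true when some /proc/<pid>/comm starts with
 * @name; perf_evsel__open_strerror() below uses it to blame a running
 * oprofile daemon for EBUSY from sys_perf_event_open().
 */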
int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
			      int err, char *msg, size_t size)
{
	char sbuf[STRERR_BUFSIZE];
	int printed = 0;

	switch (err) {
	case EPERM:
	case EACCES:
		if (err == EPERM)
			printed = scnprintf(msg, size,
					    "No permission to enable %s event.\n\n",
					    perf_evsel__name(evsel));

		return scnprintf(msg + printed, size - printed,
		 "You may not have permission to collect %sstats.\n\n"
		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid,\n"
		 "which controls use of the performance events system by\n"
		 "unprivileged users (without CAP_SYS_ADMIN).\n\n"
		 "The current value is %d:\n\n"
		 "  -1: Allow use of (almost) all events by all users\n"
		 "      Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK\n"
		 ">= 0: Disallow ftrace function tracepoint by users without CAP_SYS_ADMIN\n"
		 "      Disallow raw tracepoint access by users without CAP_SYS_ADMIN\n"
		 ">= 1: Disallow CPU event access by users without CAP_SYS_ADMIN\n"
		 ">= 2: Disallow kernel profiling by users without CAP_SYS_ADMIN\n\n"
		 "To make this setting permanent, edit /etc/sysctl.conf too, e.g.:\n\n"
		 "	kernel.perf_event_paranoid = -1\n",
				 target->system_wide ? "system-wide " : "",
				 perf_event_paranoid());
	case ENOENT:
		return scnprintf(msg, size, "The %s event is not supported.",
				 perf_evsel__name(evsel));
	case EMFILE:
		return scnprintf(msg, size, "%s",
			 "Too many events are opened.\n"
			 "Probably the maximum number of open file descriptors has been reached.\n"
			 "Hint: Try again after reducing the number of events.\n"
			 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
	case ENOMEM:
		if ((evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0 &&
		    access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0)
			return scnprintf(msg, size,
					 "Not enough memory to setup event with callchain.\n"
					 "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n"
					 "Hint: Current value: %d", sysctl_perf_event_max_stack);
		break;
	case ENODEV:
		if (target->cpu_list)
			return scnprintf(msg, size, "%s",
			 "No such device - did you specify an out-of-range profile CPU?");
		break;
	case EOPNOTSUPP:
		if (evsel->attr.sample_period != 0)
			return scnprintf(msg, size, "%s",
			 "PMU Hardware doesn't support sampling/overflow-interrupts.");
		if (evsel->attr.precise_ip)
			return scnprintf(msg, size, "%s",
			 "\'precise\' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
		if (evsel->attr.type == PERF_TYPE_HARDWARE)
			return scnprintf(msg, size, "%s",
			 "No hardware sampling interrupt available.\n"
			 "No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
#endif
		break;
	case EBUSY:
		if (find_process("oprofiled"))
			return scnprintf(msg, size,
			 "The PMU counters are busy/taken by another profiler.\n"
			 "We found oprofile daemon running, please stop it and try again.");
		break;
	case EINVAL:
		if (evsel->attr.write_backward && perf_missing_features.write_backward)
			return scnprintf(msg, size, "Reading from overwrite event is not supported by this kernel.");
		if (perf_missing_features.clockid)
			return scnprintf(msg, size, "clockid feature not supported.");
		if (perf_missing_features.clockid_wrong)
			return scnprintf(msg, size, "wrong clockid (%d).", clockid);
		break;
	default:
		break;
	}

	return scnprintf(msg, size,
	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
	"/bin/dmesg may provide additional information.\n"
	"No CONFIG_PERF_EVENTS=y kernel support configured?",
			 err, str_error_r(err, sbuf, sizeof(sbuf)),
			 perf_evsel__name(evsel));
}
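/*
 * Typical use (sketch, assuming a hypothetical caller with an 'opts'
 * struct holding the target description): turn the errno from a failed
 * open into a user-facing message:
 *
 *	if (perf_evsel__open(evsel, cpus, threads) < 0) {
 *		char errbuf[BUFSIZ];
 *
 *		perf_evsel__open_strerror(evsel, &opts->target, errno,
 *					  errbuf, sizeof(errbuf));
 *		ui__error("%s\n", errbuf);
 *	}
 */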
char *perf_evsel__env_arch(struct perf_evsel *evsel)
{
	if (evsel && evsel->evlist && evsel->evlist->env)
		return evsel->evlist->env->arch;
	return NULL;
}

char *perf_evsel__env_cpuid(struct perf_evsel *evsel)
{
	if (evsel && evsel->evlist && evsel->evlist->env)
		return evsel->evlist->env->cpuid;
	return NULL;
}
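/*
 * Both getters above return NULL when the evsel is not attached to an
 * evlist that carries a perf_env (e.g. before a perf.data header has been
 * read), so callers should be prepared for a NULL result.
 */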