/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <linux/bitops.h>
#include <api/fs/tracing_path.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/err.h>
#include <sys/ioctl.h>
#include <sys/resource.h>
#include "callchain.h"
#include "thread_map.h"
#include "perf_regs.h"
#include "trace-event.h"
#include "util/parse-branch-options.h"
#include "sane_ctype.h"

static struct {
	bool sample_id_all;
	bool exclude_guest;
	bool mmap2;
	bool cloexec;
	bool clockid;
	bool clockid_wrong;
	bool lbr_flags;
	bool write_backward;
} perf_missing_features;

static clockid_t clockid;

static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused)
{
	return 0;
}

static void perf_evsel__no_extra_fini(struct perf_evsel *evsel __maybe_unused)
{
}

static struct {
	size_t	size;
	int	(*init)(struct perf_evsel *evsel);
	void	(*fini)(struct perf_evsel *evsel);
} perf_evsel__object = {
	.size = sizeof(struct perf_evsel),
	.init = perf_evsel__no_extra_init,
	.fini = perf_evsel__no_extra_fini,
};

int perf_evsel__object_config(size_t object_size,
			      int (*init)(struct perf_evsel *evsel),
			      void (*fini)(struct perf_evsel *evsel))
{
	if (object_size == 0)
		goto set_methods;

	if (perf_evsel__object.size > object_size)
		return -EINVAL;

	perf_evsel__object.size = object_size;

set_methods:
	if (init != NULL)
		perf_evsel__object.init = init;

	if (fini != NULL)
		perf_evsel__object.fini = fini;

	return 0;
}

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}
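
/*
 * Example (illustrative, not part of the original file): for
 * sample_type == (PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME),
 * three bits of PERF_SAMPLE_MASK are set, so the fixed part of each
 * sample is 3 * sizeof(u64) == 24 bytes.
 */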

/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
 * sample_event.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
	int idx = 0;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;

	if (sample_type & PERF_SAMPLE_ADDR)
		idx += 1;

	return idx;
}

/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 * sample_id_all is used there is an id sample appended to non-sample events.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
	int idx = 1;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 1;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_CPU)
		idx += 1;

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		idx += 1;

	return idx;
}

void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
{
	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
}
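
/*
 * Worked example (illustrative): for sample_type == (PERF_SAMPLE_IP |
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID), the id is the
 * fourth u64 of a sample, so id_pos == 3.  In a non-sample event the id
 * sample is appended at the end of the record; with neither
 * PERF_SAMPLE_CPU nor PERF_SAMPLE_STREAM_ID set, is_pos == 1, i.e. the
 * id is the first u64 counting back from the end.
 */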

void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit)
{
	if (!(evsel->attr.sample_type & bit)) {
		evsel->attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit)
{
	if (evsel->attr.sample_type & bit) {
		evsel->attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void perf_evsel__set_sample_id(struct perf_evsel *evsel,
			       bool can_sample_identifier)
{
	if (can_sample_identifier) {
		perf_evsel__reset_sample_bit(evsel, ID);
		perf_evsel__set_sample_bit(evsel, IDENTIFIER);
	} else {
		perf_evsel__set_sample_bit(evsel, ID);
	}
	evsel->attr.read_format |= PERF_FORMAT_ID;
}

/**
 * perf_evsel__is_function_event - Return whether given evsel is a function
 * trace event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true if event is function trace event
 */
bool perf_evsel__is_function_event(struct perf_evsel *evsel)
{
#define FUNCTION_EVENT "ftrace:function"

	return evsel->name &&
	       !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));

#undef FUNCTION_EVENT
}
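
/*
 * Note (illustrative, not in the original file): sizeof(FUNCTION_EVENT)
 * includes the terminating NUL, so the strncmp() above only matches the
 * exact name "ftrace:function", not events that merely share the prefix.
 */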

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx	= idx;
	evsel->tracking	= !idx;
	evsel->attr	= *attr;
	evsel->leader	= evsel;
	evsel->evlist	= NULL;
	INIT_LIST_HEAD(&evsel->node);
	INIT_LIST_HEAD(&evsel->config_terms);
	perf_evsel__object.init(evsel);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
	perf_evsel__calc_id_pos(evsel);
	evsel->cmdline_group_boundary = false;
	evsel->metric_expr   = NULL;
	evsel->metric_name   = NULL;
	evsel->metric_events = NULL;
	evsel->collect_stat  = false;
}

struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	if (perf_evsel__is_bpf_output(evsel)) {
		evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					    PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD);
		evsel->attr.sample_period = 1;
	}

	return evsel;
}

struct perf_evsel *perf_evsel__new_cycles(void)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);
	/*
	 * Unnamed union member, not supported as struct member named
	 * initializer in older compilers such as gcc 4.4.7
	 *
	 * Just for probing the precise_ip:
	 */
	attr.sample_period = 1;

	perf_event_attr__set_max_precise_ip(&attr);
	/*
	 * Now let the usual logic to set up the perf_event_attr defaults
	 * to kick in when we return and before perf_evsel__open() is called.
	 */
	attr.sample_period = 0;

	evsel = perf_evsel__new(&attr);
	if (evsel == NULL)
		goto out;

	/* use asprintf() because free(evsel) assumes name is allocated */
	if (asprintf(&evsel->name, "cycles%.*s",
		     attr.precise_ip ? attr.precise_ip + 1 : 0, ":ppp") < 0)
		goto error_free;
out:
	return evsel;
error_free:
	perf_evsel__delete(evsel);
	evsel = NULL;
	goto out;
}
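
/*
 * Usage sketch (illustrative, not part of the original file):
 *
 *	struct perf_evsel *evsel = perf_evsel__new_cycles();
 *
 *	if (evsel == NULL)
 *		return -ENOMEM;
 *
 * yields a cycles event named e.g. "cycles:ppp", with as many 'p's as
 * the running kernel accepted while probing precise_ip above.
 */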

/*
 * Returns pointer with encoded error via <linux/err.h> interface.
 */
struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
{
	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
	int err = -ENOMEM;

	if (evsel == NULL) {
		goto out_err;
	} else {
		struct perf_event_attr attr = {
			.type	     = PERF_TYPE_TRACEPOINT,
			.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = trace_event__tp_format(sys, name);
		if (IS_ERR(evsel->tp_format)) {
			err = PTR_ERR(evsel->tp_format);
			goto out_free;
		}

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		perf_evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	zfree(&evsel->name);
	free(evsel);
out_err:
	return ERR_PTR(err);
}
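
/*
 * Usage sketch (illustrative): because errors are encoded in the
 * returned pointer, callers must use IS_ERR()/PTR_ERR() rather than a
 * NULL check:
 *
 *	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
 *
 *	if (IS_ERR(evsel))
 *		return PTR_ERR(evsel);
 */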

const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}

static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while (0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}

static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
	"dummy",
};

static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}

static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}

static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
 { "LLC",	"L2",							},
 { "dTLB",	"d-tlb",	"Data-TLB",				},
 { "iTLB",	"i-tlb",	"Instruction-TLB",			},
 { "branch",	"branches",	"bpu",		"btb",	"bpc",		},
 { "node",								},
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",	"loads",	"read",					},
 { "store",	"stores",	"write",				},
 { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access",		},
 { "misses",	"miss",							},
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)]	= (CACHE_READ),
 [C(BPU)]	= (CACHE_READ),
 [C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}

int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}

static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type >= PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}
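
/*
 * Example (illustrative, not part of the original file): the extended
 * cache config packs type, op and result into a single u64:
 *
 *	config = type | (op << 8) | (result << 16)
 *
 * e.g. PERF_COUNT_HW_CACHE_L1D | (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 * (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) == 0x10000, which decodes to
 * "L1-dcache-load-misses".
 */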

static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->attr.type);
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}

const char *perf_evsel__group_name(struct perf_evsel *evsel)
{
	return evsel->group_name ?: "anon group";
}

int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
{
	int ret;
	struct perf_evsel *pos;
	const char *group_name = perf_evsel__group_name(evsel);

	ret = scnprintf(buf, size, "%s", group_name);

	ret += scnprintf(buf + ret, size - ret, " { %s",
			 perf_evsel__name(evsel));

	for_each_group_member(pos, evsel)
		ret += scnprintf(buf + ret, size - ret, ", %s",
				 perf_evsel__name(pos));

	ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}

void perf_evsel__config_callchain(struct perf_evsel *evsel,
				  struct record_opts *opts,
				  struct callchain_param *param)
{
	bool function = perf_evsel__is_function_event(evsel);
	struct perf_event_attr *attr = &evsel->attr;

	perf_evsel__set_sample_bit(evsel, CALLCHAIN);

	attr->sample_max_stack = param->max_stack;

	if (param->record_mode == CALLCHAIN_LBR) {
		if (!opts->branch_stack) {
			if (attr->exclude_user) {
				pr_warning("LBR callstack option is only available "
					   "to get user callchain information. "
					   "Falling back to framepointers.\n");
			} else {
				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
				attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
							PERF_SAMPLE_BRANCH_CALL_STACK |
							PERF_SAMPLE_BRANCH_NO_CYCLES |
							PERF_SAMPLE_BRANCH_NO_FLAGS;
			}
		} else
			pr_warning("Cannot use LBR callstack with branch stack. "
				   "Falling back to framepointers.\n");
	}

	if (param->record_mode == CALLCHAIN_DWARF) {
		if (!function) {
			perf_evsel__set_sample_bit(evsel, REGS_USER);
			perf_evsel__set_sample_bit(evsel, STACK_USER);
			attr->sample_regs_user = PERF_REGS_MASK;
			attr->sample_stack_user = param->dump_size;
			attr->exclude_callchain_user = 1;
		} else {
			pr_info("Cannot use DWARF unwind for function trace event,"
				" falling back to framepointers.\n");
		}
	}

	if (function) {
		pr_info("Disabling user space callchains for function trace event.\n");
		attr->exclude_callchain_user = 1;
	}
}

static void
perf_evsel__reset_callgraph(struct perf_evsel *evsel,
			    struct callchain_param *param)
{
	struct perf_event_attr *attr = &evsel->attr;

	perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
	if (param->record_mode == CALLCHAIN_LBR) {
		perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
					      PERF_SAMPLE_BRANCH_CALL_STACK);
	}
	if (param->record_mode == CALLCHAIN_DWARF) {
		perf_evsel__reset_sample_bit(evsel, REGS_USER);
		perf_evsel__reset_sample_bit(evsel, STACK_USER);
	}
}

static void apply_config_terms(struct perf_evsel *evsel,
			       struct record_opts *opts)
{
	struct perf_evsel_config_term *term;
	struct list_head *config_terms = &evsel->config_terms;
	struct perf_event_attr *attr = &evsel->attr;
	struct callchain_param param;
	u32 dump_size = 0;
	int max_stack = 0;
	const char *callgraph_buf = NULL;

	/* callgraph default */
	param.record_mode = callchain_param.record_mode;

	list_for_each_entry(term, config_terms, list) {
		switch (term->type) {
		case PERF_EVSEL__CONFIG_TERM_PERIOD:
			attr->sample_period = term->val.period;
			attr->freq = 0;
			break;
		case PERF_EVSEL__CONFIG_TERM_FREQ:
			attr->sample_freq = term->val.freq;
			attr->freq = 1;
			break;
		case PERF_EVSEL__CONFIG_TERM_TIME:
			if (term->val.time)
				perf_evsel__set_sample_bit(evsel, TIME);
			else
				perf_evsel__reset_sample_bit(evsel, TIME);
			break;
		case PERF_EVSEL__CONFIG_TERM_CALLGRAPH:
			callgraph_buf = term->val.callgraph;
			break;
		case PERF_EVSEL__CONFIG_TERM_BRANCH:
			if (term->val.branch && strcmp(term->val.branch, "no")) {
				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
				parse_branch_str(term->val.branch,
						 &attr->branch_sample_type);
			} else
				perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
			break;
		case PERF_EVSEL__CONFIG_TERM_STACK_USER:
			dump_size = term->val.stack_user;
			break;
		case PERF_EVSEL__CONFIG_TERM_MAX_STACK:
			max_stack = term->val.max_stack;
			break;
		case PERF_EVSEL__CONFIG_TERM_INHERIT:
			/*
			 * attr->inherit should have already been set by
			 * perf_evsel__config. If user explicitly set
			 * inherit using config terms, override global
			 * opt->no_inherit setting.
			 */
			attr->inherit = term->val.inherit ? 1 : 0;
			break;
		case PERF_EVSEL__CONFIG_TERM_OVERWRITE:
			attr->write_backward = term->val.overwrite ? 1 : 0;
			break;
		default:
			break;
		}
	}

	/* User explicitly set per-event callgraph, clear the old setting and reset. */
	if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
		if (max_stack) {
			param.max_stack = max_stack;
			if (callgraph_buf == NULL)
				callgraph_buf = "fp";
		}

		/* parse callgraph parameters */
		if (callgraph_buf != NULL) {
			if (!strcmp(callgraph_buf, "no")) {
				param.enabled = false;
				param.record_mode = CALLCHAIN_NONE;
			} else {
				param.enabled = true;
				if (parse_callchain_record(callgraph_buf, &param)) {
					pr_err("per-event callgraph setting for %s failed. "
					       "Apply callgraph global setting for it\n",
					       evsel->name);
					return;
				}
			}
		}
		if (dump_size > 0) {
			dump_size = round_up(dump_size, sizeof(u64));
			param.dump_size = dump_size;
		}

		/* If global callgraph set, clear it */
		if (callchain_param.enabled)
			perf_evsel__reset_callgraph(evsel, &callchain_param);

		/* set perf-event callgraph */
		if (param.enabled)
			perf_evsel__config_callchain(evsel, opts, &param);
	}
}
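
/*
 * Illustrative example (not part of the original file): these terms
 * originate from per-event modifiers on the command line, e.g.
 *
 *	perf record -e 'cycles/period=100000,call-graph=dwarf,stack-size=8192/'
 *
 * which arrives here as PERIOD, CALLGRAPH and STACK_USER terms and
 * overrides the global record options for this one event.
 */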

/*
 * The enable_on_exec/disabled value strategy:
 *
 *  1) For any type of traced program:
 *    - all independent events and group leaders are disabled
 *    - all group members are enabled
 *
 *     Group members are ruled by group leaders. They need to
 *     be enabled, because the group scheduling relies on that.
 *
 *  2) For traced programs executed by perf:
 *    - all independent events and group leaders have
 *      enable_on_exec set
 *    - we don't specifically enable or disable any event during
 *      the record command
 *
 *     Independent events and group leaders are initially disabled
 *     and get enabled by exec. Group members are ruled by group
 *     leaders as stated in 1).
 *
 *  3) For traced programs attached by perf (pid/tid):
 *    - we specifically enable or disable all events during
 *      the record command
 *
 *     When attaching events to an already running traced process we
 *     enable/disable events specifically, as there's no
 *     initial traced exec call.
 */
void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
			struct callchain_param *callchain)
{
	struct perf_evsel *leader = evsel->leader;
	struct perf_event_attr *attr = &evsel->attr;
	int track = evsel->tracking;
	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;
	attr->write_backward = opts->overwrite ? 1 : 0;

	perf_evsel__set_sample_bit(evsel, IP);
	perf_evsel__set_sample_bit(evsel, TID);

	if (evsel->sample_read) {
		perf_evsel__set_sample_bit(evsel, READ);

		/*
		 * We need ID even in case of single event, because
		 * PERF_SAMPLE_READ processes ID specific data.
		 */
		perf_evsel__set_sample_id(evsel, false);

		/*
		 * Apply group format only if we belong to a group
		 * with more than one member.
		 */
		if (leader->nr_members > 1) {
			attr->read_format |= PERF_FORMAT_GROUP;
			attr->inherit = 0;
		}
	}

	/*
	 * We default some events to have a default interval. But keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			perf_evsel__set_sample_bit(evsel, PERIOD);
			attr->freq	  = 1;
			attr->sample_freq = opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	/*
	 * Disable sampling for all group members other
	 * than leader in case leader 'leads' the sampling.
	 */
	if ((leader != evsel) && leader->sample_read) {
		attr->sample_freq   = 0;
		attr->sample_period = 0;
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		perf_evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	/*
	 * We don't allow user space callchains for function trace
	 * events, due to issues with page faults while tracing the page
	 * fault handler and its overall tricky nature.
	 */
	if (perf_evsel__is_function_event(evsel))
		evsel->attr.exclude_callchain_user = 1;

	if (callchain && callchain->enabled && !evsel->no_aux_samples)
		perf_evsel__config_callchain(evsel, opts, callchain);

	if (opts->sample_intr_regs) {
		attr->sample_regs_intr = opts->sample_intr_regs;
		perf_evsel__set_sample_bit(evsel, REGS_INTR);
	}

	if (target__has_cpu(&opts->target) || opts->sample_cpu)
		perf_evsel__set_sample_bit(evsel, CPU);

	if (opts->period)
		perf_evsel__set_sample_bit(evsel, PERIOD);

	/*
	 * When the user explicitly disabled time don't force it here.
	 */
	if (opts->sample_time &&
	    (!perf_missing_features.sample_id_all &&
	    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
	     opts->sample_time_set)))
		perf_evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, TIME);
		perf_evsel__set_sample_bit(evsel, RAW);
		perf_evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->sample_address)
		perf_evsel__set_sample_bit(evsel, DATA_SRC);

	if (opts->no_buffering) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	if (opts->sample_weight)
		perf_evsel__set_sample_bit(evsel, WEIGHT);

	attr->task  = track;
	attr->mmap  = track;
	attr->mmap2 = track && !perf_missing_features.mmap2;
	attr->comm  = track;

	if (opts->record_namespaces)
		attr->namespaces = track;

	if (opts->record_switch_events)
		attr->context_switch = track;

	if (opts->sample_transaction)
		perf_evsel__set_sample_bit(evsel, TRANSACTION);

	if (opts->running_time) {
		evsel->attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING;
	}

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (perf_evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders for traced programs executed by perf.
	 */
	if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
	    !opts->initial_delay)
		attr->enable_on_exec = 1;

	if (evsel->immediate) {
		attr->disabled = 0;
		attr->enable_on_exec = 0;
	}

	clockid = opts->clockid;
	if (opts->use_clockid) {
		attr->use_clockid = 1;
		attr->clockid = opts->clockid;
	}

	if (evsel->precise_max)
		perf_event_attr__set_max_precise_ip(attr);

	if (opts->all_user) {
		attr->exclude_kernel = 1;
		attr->exclude_user   = 0;
	}

	if (opts->all_kernel) {
		attr->exclude_kernel = 0;
		attr->exclude_user   = 1;
	}

	/*
	 * Apply event-specific term settings; these override
	 * any global configuration.
	 */
	apply_config_terms(evsel, opts);

	evsel->ignore_missing_thread = opts->ignore_missing_thread;
}

static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->system_wide)
		nthreads = 1;

	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		int cpu, thread;
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads,
				 int ioc, void *arg)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			int fd = FD(evsel, cpu, thread),
			    err = ioctl(fd, ioc, arg);

			if (err)
				return err;
		}
	}

	return 0;
}

int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
			     const char *filter)
{
	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_SET_FILTER,
				     (void *)filter);
}

int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter)
{
	char *new_filter = strdup(filter);

	if (new_filter != NULL) {
		free(evsel->filter);
		evsel->filter = new_filter;
		return 0;
	}

	return -1;
}

static int perf_evsel__append_filter(struct perf_evsel *evsel,
				     const char *fmt, const char *filter)
{
	char *new_filter;

	if (evsel->filter == NULL)
		return perf_evsel__set_filter(evsel, filter);

	if (asprintf(&new_filter, fmt, evsel->filter, filter) > 0) {
		free(evsel->filter);
		evsel->filter = new_filter;
		return 0;
	}

	return -1;
}

int perf_evsel__append_tp_filter(struct perf_evsel *evsel, const char *filter)
{
	return perf_evsel__append_filter(evsel, "(%s) && (%s)", filter);
}

int perf_evsel__append_addr_filter(struct perf_evsel *evsel, const char *filter)
{
	return perf_evsel__append_filter(evsel, "%s,%s", filter);
}
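
/*
 * Example (illustrative, not part of the original file): appending the
 * tracepoint filter "prev_prio < 100" to an existing filter
 * "prev_comm == \"perf\"" produces
 * "(prev_comm == \"perf\") && (prev_prio < 100)", per the
 * "(%s) && (%s)" format above; the address filter variant simply joins
 * the two strings with a comma.
 */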

int perf_evsel__enable(struct perf_evsel *evsel)
{
	int nthreads = thread_map__nr(evsel->threads);
	int ncpus = cpu_map__nr(evsel->cpus);

	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_ENABLE,
				     0);
}

int perf_evsel__disable(struct perf_evsel *evsel)
{
	int nthreads = thread_map__nr(evsel->threads);
	int ncpus = cpu_map__nr(evsel->cpus);

	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_DISABLE,
				     0);
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (ncpus == 0 || nthreads == 0)
		return 0;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

static void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
}

static void perf_evsel__free_config_terms(struct perf_evsel *evsel)
{
	struct perf_evsel_config_term *term, *h;

	list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
		list_del(&term->list);
		free(term);
	}
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	assert(evsel->evlist == NULL);
	perf_evsel__free_fd(evsel);
	perf_evsel__free_id(evsel);
	perf_evsel__free_config_terms(evsel);
	close_cgroup(evsel->cgrp);
	cpu_map__put(evsel->cpus);
	cpu_map__put(evsel->own_cpus);
	thread_map__put(evsel->threads);
	zfree(&evsel->group_name);
	zfree(&evsel->name);
	perf_evsel__object.fini(evsel);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	free(evsel);
}

void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread,
				struct perf_counts_values *count)
{
	struct perf_counts_values tmp;

	if (!evsel->prev_raw_counts)
		return;

	if (cpu == -1) {
		tmp = evsel->prev_raw_counts->aggr;
		evsel->prev_raw_counts->aggr = *count;
	} else {
		tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread);
		*perf_counts(evsel->prev_raw_counts, cpu, thread) = *count;
	}

	count->val = count->val - tmp.val;
	count->ena = count->ena - tmp.ena;
	count->run = count->run - tmp.run;
}

void perf_counts_values__scale(struct perf_counts_values *count,
			       bool scale, s8 *pscaled)
{
	s8 scaled = 0;

	if (scale) {
		if (count->run == 0) {
			scaled = -1;
			count->val = 0;
		} else if (count->run < count->ena) {
			scaled = 1;
			count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
		}
	} else
		count->ena = count->run = 0;

	if (pscaled)
		*pscaled = scaled;
}
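
/*
 * Worked example (illustrative, not part of the original file): with
 * val = 1000, ena = 200 and run = 100 the counter was scheduled in for
 * only half of its enabled time, so the scaled estimate is
 * 1000 * 200 / 100 + 0.5 -> 2000 and *pscaled is set to 1; run == 0
 * yields val = 0 and *pscaled == -1.
 */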

int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
		     struct perf_counts_values *count)
{
	memset(count, 0, sizeof(*count));

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (readn(FD(evsel, cpu, thread), count, sizeof(*count)) <= 0)
		return -errno;

	return 0;
}

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) <= 0)
		return -errno;

	perf_evsel__compute_deltas(evsel, cpu, thread, &count);
	perf_counts_values__scale(&count, scale, NULL);
	*perf_counts(evsel->counts, cpu, thread) = count;
	return 0;
}

static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_evsel *leader = evsel->leader;
	int fd;

	if (perf_evsel__is_group_leader(evsel))
		return -1;

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	BUG_ON(!leader->fd);

	fd = FD(leader, cpu, thread);
	BUG_ON(fd == -1);

	return fd;
}

struct bit_names {
	int bit;
	const char *name;
};

static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)
{
	bool first_bit = true;
	int i = 0;

	do {
		if (value & bits[i].bit) {
			buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
			first_bit = false;
		}
	} while (bits[++i].name != NULL);
}

static void __p_sample_type(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_##n, #n }
	struct bit_names bits[] = {
		bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
		bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
		bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
		bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
		bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}

static void __p_branch_sample_type(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_BRANCH_##n, #n }
	struct bit_names bits[] = {
		bit_name(USER), bit_name(KERNEL), bit_name(HV), bit_name(ANY),
		bit_name(ANY_CALL), bit_name(ANY_RETURN), bit_name(IND_CALL),
		bit_name(ABORT_TX), bit_name(IN_TX), bit_name(NO_TX),
		bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP),
		bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}

static void __p_read_format(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
	struct bit_names bits[] = {
		bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
		bit_name(ID), bit_name(GROUP),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}

#define BUF_SIZE		1024

#define p_hex(val)		snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
#define p_unsigned(val)		snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
#define p_signed(val)		snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
#define p_sample_type(val)	__p_sample_type(buf, BUF_SIZE, val)
#define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val)
#define p_read_format(val)	__p_read_format(buf, BUF_SIZE, val)

#define PRINT_ATTRn(_n, _f, _p)				\
do {							\
	if (attr->_f) {					\
		_p(attr->_f);				\
		ret += attr__fprintf(fp, _n, buf, priv);\
	}						\
} while (0)

#define PRINT_ATTRf(_f, _p)	PRINT_ATTRn(#_f, _f, _p)

int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
			     attr__fprintf_f attr__fprintf, void *priv)
{
	char buf[BUF_SIZE];
	int ret = 0;

	PRINT_ATTRf(type, p_unsigned);
	PRINT_ATTRf(size, p_unsigned);
	PRINT_ATTRf(config, p_hex);
	PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
	PRINT_ATTRf(sample_type, p_sample_type);
	PRINT_ATTRf(read_format, p_read_format);

	PRINT_ATTRf(disabled, p_unsigned);
	PRINT_ATTRf(inherit, p_unsigned);
	PRINT_ATTRf(pinned, p_unsigned);
	PRINT_ATTRf(exclusive, p_unsigned);
	PRINT_ATTRf(exclude_user, p_unsigned);
	PRINT_ATTRf(exclude_kernel, p_unsigned);
	PRINT_ATTRf(exclude_hv, p_unsigned);
	PRINT_ATTRf(exclude_idle, p_unsigned);
	PRINT_ATTRf(mmap, p_unsigned);
	PRINT_ATTRf(comm, p_unsigned);
	PRINT_ATTRf(freq, p_unsigned);
	PRINT_ATTRf(inherit_stat, p_unsigned);
	PRINT_ATTRf(enable_on_exec, p_unsigned);
	PRINT_ATTRf(task, p_unsigned);
	PRINT_ATTRf(watermark, p_unsigned);
	PRINT_ATTRf(precise_ip, p_unsigned);
	PRINT_ATTRf(mmap_data, p_unsigned);
	PRINT_ATTRf(sample_id_all, p_unsigned);
	PRINT_ATTRf(exclude_host, p_unsigned);
	PRINT_ATTRf(exclude_guest, p_unsigned);
	PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
	PRINT_ATTRf(exclude_callchain_user, p_unsigned);
	PRINT_ATTRf(mmap2, p_unsigned);
	PRINT_ATTRf(comm_exec, p_unsigned);
	PRINT_ATTRf(use_clockid, p_unsigned);
	PRINT_ATTRf(context_switch, p_unsigned);
	PRINT_ATTRf(write_backward, p_unsigned);

	PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
	PRINT_ATTRf(bp_type, p_unsigned);
	PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
	PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
	PRINT_ATTRf(branch_sample_type, p_branch_sample_type);
	PRINT_ATTRf(sample_regs_user, p_hex);
	PRINT_ATTRf(sample_stack_user, p_unsigned);
	PRINT_ATTRf(clockid, p_signed);
	PRINT_ATTRf(sample_regs_intr, p_hex);
	PRINT_ATTRf(aux_watermark, p_unsigned);
	PRINT_ATTRf(sample_max_stack, p_unsigned);

	return ret;
}

static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __attribute__((unused)))
{
	return fprintf(fp, "  %-32s %s\n", name, val);
}

static bool ignore_missing_thread(struct perf_evsel *evsel,
				  struct thread_map *threads,
				  int thread, int err)
{
	if (!evsel->ignore_missing_thread)
		return false;

	/* The system wide setup does not work with threads. */
	if (evsel->system_wide)
		return false;

	/* The -ESRCH is perf event syscall errno for pid's not found. */
	if (err != -ESRCH)
		return false;

	/* If there's only one thread, let it fail. */
	if (threads->nr == 1)
		return false;

	if (thread_map__remove(threads, thread))
		return false;

	pr_warning("WARNING: Ignored open failure for pid %d\n",
		   thread_map__pid(threads, thread));
	return true;
}

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads)
{
	int cpu, thread, nthreads;
	unsigned long flags = PERF_FLAG_FD_CLOEXEC;
	int pid = -1, err;
	enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;

	if (perf_missing_features.write_backward && evsel->attr.write_backward)
		return -EINVAL;

	if (cpus == NULL) {
		static struct cpu_map *empty_cpu_map;

		if (empty_cpu_map == NULL) {
			empty_cpu_map = cpu_map__dummy_new();
			if (empty_cpu_map == NULL)
				return -ENOMEM;
		}

		cpus = empty_cpu_map;
	}

	if (threads == NULL) {
		static struct thread_map *empty_thread_map;

		if (empty_thread_map == NULL) {
			empty_thread_map = thread_map__new_by_tid(-1);
			if (empty_thread_map == NULL)
				return -ENOMEM;
		}

		threads = empty_thread_map;
	}

	if (evsel->system_wide)
		nthreads = 1;
	else
		nthreads = threads->nr;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags |= PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

fallback_missing_features:
	if (perf_missing_features.clockid_wrong)
		evsel->attr.clockid = CLOCK_MONOTONIC; /* should always work */
	if (perf_missing_features.clockid) {
		evsel->attr.use_clockid = 0;
		evsel->attr.clockid = 0;
	}
	if (perf_missing_features.cloexec)
		flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
	if (perf_missing_features.mmap2)
		evsel->attr.mmap2 = 0;
	if (perf_missing_features.exclude_guest)
		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
	if (perf_missing_features.lbr_flags)
		evsel->attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
						    PERF_SAMPLE_BRANCH_NO_CYCLES);
retry_sample_id:
	if (perf_missing_features.sample_id_all)
		evsel->attr.sample_id_all = 0;

	if (verbose >= 2) {
		fprintf(stderr, "%.60s\n", graph_dotted_line);
		fprintf(stderr, "perf_event_attr:\n");
		perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL);
		fprintf(stderr, "%.60s\n", graph_dotted_line);
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {

		for (thread = 0; thread < nthreads; thread++) {
			int fd, group_fd;

			if (!evsel->cgrp && !evsel->system_wide)
				pid = thread_map__pid(threads, thread);

			group_fd = get_group_fd(evsel, cpu, thread);
retry_open:
			pr_debug2("sys_perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx",
				  pid, cpus->map[cpu], group_fd, flags);

			fd = sys_perf_event_open(&evsel->attr, pid, cpus->map[cpu],
						 group_fd, flags);

			FD(evsel, cpu, thread) = fd;

			if (fd < 0) {
				err = -errno;

				if (ignore_missing_thread(evsel, threads, thread, err)) {
					/*
					 * We just removed 1 thread, so take a step
					 * back on thread index and lower the upper
					 * nthreads limit.
					 */
					nthreads--;
					thread--;

					/* ... and pretend like nothing has happened. */
					err = 0;
					continue;
				}

				pr_debug2("\nsys_perf_event_open failed, error %d\n",
					  err);
				goto try_fallback;
			}

			pr_debug2(" = %d\n", fd);

			if (evsel->bpf_fd >= 0) {
				int evt_fd = fd;
				int bpf_fd = evsel->bpf_fd;

				err = ioctl(evt_fd,
					    PERF_EVENT_IOC_SET_BPF,
					    bpf_fd);
				if (err && errno != EEXIST) {
					pr_err("failed to attach bpf fd %d: %s\n",
					       bpf_fd, strerror(errno));
					err = -EINVAL;
					goto out_close;
				}
			}

			set_rlimit = NO_CHANGE;

			/*
			 * If we succeeded but had to kill clockid, fail and
			 * have perf_evsel__open_strerror() print us a nice
			 * error.
			 */
			if (perf_missing_features.clockid ||
			    perf_missing_features.clockid_wrong) {
				err = -EINVAL;
				goto out_close;
			}
		}
	}

	return 0;

try_fallback:
	/*
	 * perf stat needs between 5 and 22 fds per CPU. When we run out
	 * of them try to increase the limits.
	 */
	if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
		struct rlimit l;
		int old_errno = errno;

		if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
			if (set_rlimit == NO_CHANGE)
				l.rlim_cur = l.rlim_max;
			else {
				l.rlim_cur = l.rlim_max + 1000;
				l.rlim_max = l.rlim_cur;
			}
			if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
				set_rlimit++;
				errno = old_errno;
				goto retry_open;
			}
		}
		errno = old_errno;
	}

	if (err != -EINVAL || cpu > 0 || thread > 0)
		goto out_close;

	/*
	 * Must probe features in the order they were added to the
	 * perf_event_attr interface.
	 */
	if (!perf_missing_features.write_backward && evsel->attr.write_backward) {
		perf_missing_features.write_backward = true;
		goto out_close;
	} else if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
		perf_missing_features.clockid_wrong = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.clockid && evsel->attr.use_clockid) {
		perf_missing_features.clockid = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
		perf_missing_features.cloexec = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
		perf_missing_features.mmap2 = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.exclude_guest &&
		   (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
		perf_missing_features.exclude_guest = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.sample_id_all) {
		perf_missing_features.sample_id_all = true;
		goto retry_sample_id;
	} else if (!perf_missing_features.lbr_flags &&
		   (evsel->attr.branch_sample_type &
		    (PERF_SAMPLE_BRANCH_NO_CYCLES |
		     PERF_SAMPLE_BRANCH_NO_FLAGS))) {
		perf_missing_features.lbr_flags = true;
		goto fallback_missing_features;
	}
out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = nthreads;
	} while (--cpu >= 0);
	return err;
}
*evsel
, int ncpus
, int nthreads
)
1700 if (evsel
->fd
== NULL
)
1703 perf_evsel__close_fd(evsel
, ncpus
, nthreads
);
1704 perf_evsel__free_fd(evsel
);
1707 int perf_evsel__open_per_cpu(struct perf_evsel
*evsel
,
1708 struct cpu_map
*cpus
)
1710 return perf_evsel__open(evsel
, cpus
, NULL
);
1713 int perf_evsel__open_per_thread(struct perf_evsel
*evsel
,
1714 struct thread_map
*threads
)
1716 return perf_evsel__open(evsel
, NULL
, threads
);

static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	const u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
	}

	return 0;
}
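
/*
 * Illustrative layout (not part of the original file): with
 * sample_type == (PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID)
 * and sample_id_all set, a non-sample record ends with
 * ... | TID | TIME | ID, and the code above walks those trailing u64s
 * backwards from the end of the record.
 */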

static inline bool overflow(const void *endp, u16 max_size, const void *offset,
			    u64 size)
{
	return size > max_size || offset + size > endp;
}

#define OVERFLOW_CHECK(offset, size, max_size)				\
	do {								\
		if (overflow(endp, (max_size), (offset), (size)))	\
			return -EFAULT;					\
	} while (0)

#define OVERFLOW_CHECK_u64(offset) \
	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))

int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *data)
{
	u64 type = evsel->attr.sample_type;
	bool swapped = evsel->needs_swap;
	const u64 *array;
	u16 max_size = event->header.size;
	const void *endp = (void *)event + max_size;
	u64 sz;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = evsel->attr.sample_period;
	data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->attr.sample_id_all)
			return 0;
		return perf_evsel__parse_id_sample(evsel, event, data);
	}

	array = event->sample.array;

	/*
	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
	 * up to PERF_SAMPLE_PERIOD.  After that overflow() must be used to
	 * check the format does not go past the end of the event.
	 */
	if (evsel->sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		data->ip = *array;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		u64 read_format = evsel->attr.read_format;

		OVERFLOW_CHECK_u64(array);
		if (read_format & PERF_FORMAT_GROUP)
			data->read.group.nr = *array;
		else
			data->read.one.value = *array;

		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_enabled = *array;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_running = *array;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			const u64 max_group_nr = UINT64_MAX /
					sizeof(struct sample_read_value);

			if (data->read.group.nr > max_group_nr)
				return -EFAULT;
			sz = data->read.group.nr *
			     sizeof(struct sample_read_value);
			OVERFLOW_CHECK(array, sz, max_size);
			data->read.group.values =
					(struct sample_read_value *)array;
			array = (void *)array + sz;
		} else {
			OVERFLOW_CHECK_u64(array);
			data->read.one.id = *array;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);

		OVERFLOW_CHECK_u64(array);
		data->callchain = (struct ip_callchain *)array++;
		if (data->callchain->nr > max_callchain_nr)
			return -EFAULT;
		sz = data->callchain->nr * sizeof(u64);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		OVERFLOW_CHECK_u64(array);
		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}
		data->raw_size = u.val32[0];
		array = (void *)array + sizeof(u32);

		OVERFLOW_CHECK(array, data->raw_size, max_size);
		data->raw_data = (void *)array;
		array = (void *)array + data->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		const u64 max_branch_nr = UINT64_MAX /
					  sizeof(struct branch_entry);

		OVERFLOW_CHECK_u64(array);
		data->branch_stack = (struct branch_stack *)array++;

		if (data->branch_stack->nr > max_branch_nr)
			return -EFAULT;
		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		OVERFLOW_CHECK_u64(array);
		data->user_regs.abi = *array;
		array++;

		if (data->user_regs.abi) {
			u64 mask = evsel->attr.sample_regs_user;

			sz = hweight_long(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_regs.mask = mask;
			data->user_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		OVERFLOW_CHECK_u64(array);
		sz = *array++;

		data->user_stack.offset = ((char *)(array - 1)
					  - (char *) event);

		if (!sz) {
			data->user_stack.size = 0;
		} else {
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_stack.data = (char *)array;
			array = (void *)array + sz;
			OVERFLOW_CHECK_u64(array);
			data->user_stack.size = *array++;
			if (WARN_ONCE(data->user_stack.size > sz,
				      "user stack dump failure\n"))
				return -EFAULT;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		OVERFLOW_CHECK_u64(array);
		data->weight = *array;
		array++;
	}

	data->data_src = PERF_MEM_DATA_SRC_NONE;
	if (type & PERF_SAMPLE_DATA_SRC) {
		OVERFLOW_CHECK_u64(array);
		data->data_src = *array;
		array++;
	}

	data->transaction = 0;
	if (type & PERF_SAMPLE_TRANSACTION) {
		OVERFLOW_CHECK_u64(array);
		data->transaction = *array;
		array++;
	}

	data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
	if (type & PERF_SAMPLE_REGS_INTR) {
		OVERFLOW_CHECK_u64(array);
		data->intr_regs.abi = *array;
		array++;

		if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
			u64 mask = evsel->attr.sample_regs_intr;

			sz = hweight_long(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->intr_regs.mask = mask;
			data->intr_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	return 0;
}

size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
				     u64 read_format)
{
	size_t sz, result = sizeof(struct sample_event);

	if (type & PERF_SAMPLE_IDENTIFIER)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_IP)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TIME)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_STREAM_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CPU)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_PERIOD)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_READ) {
		result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			result += sizeof(u64);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		result += sizeof(u32);
		result += sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		result += sizeof(u64);
		if (sz) {
			result += sz;
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_DATA_SRC)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TRANSACTION)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	return result;
}

int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  u64 read_format,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;
	size_t sz;
	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		*array = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			*array = sample->read.group.nr;
		else
			*array = sample->read.one.value;
		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			*array = sample->read.time_enabled;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			*array = sample->read.time_running;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			memcpy(array, sample->read.group.values, sz);
			array = (void *)array + sz;
		} else {
			*array = sample->read.one.id;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		memcpy(array, sample->callchain, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		u.val32[0] = sample->raw_size;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array = (void *)array + sizeof(u32);

		memcpy(array, sample->raw_data, sample->raw_size);
		array = (void *)array + sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		memcpy(array, sample->branch_stack, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			*array++ = sample->user_regs.abi;
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			memcpy(array, sample->user_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		*array++ = sz;
		if (sz) {
			memcpy(array, sample->user_stack.data, sz);
			array = (void *)array + sz;
			*array++ = sz;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		*array = sample->weight;
		array++;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		*array = sample->data_src;
		array++;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		*array = sample->transaction;
		array++;
	}

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			*array++ = sample->intr_regs.abi;
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			memcpy(array, sample->intr_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	return 0;
}
*perf_evsel__field(struct perf_evsel
*evsel
, const char *name
)
2357 return pevent_find_field(evsel
->tp_format
, name
);

void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;

	if (field->flags & FIELD_IS_DYNAMIC) {
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
	}

	return sample->raw_data + offset;
}
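
/*
 * Note (illustrative, not part of the original file): for dynamic
 * (__data_loc) tracepoint fields the 32-bit word at field->offset packs
 * the payload length in the high 16 bits and the real data offset in
 * the low 16 bits, hence the "offset &= 0xffff" masking above.
 */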

u64 format_field__intval(struct format_field *field, struct perf_sample *sample,
			 bool needs_swap)
{
	u64 value;
	void *ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		memcpy(&value, ptr, sizeof(u64));
		break;
	default:
		return 0;
	}

	if (!needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
	default:
		return 0;
	}

	return 0;
}

u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);

	return field ? format_field__intval(field, sample, evsel->needs_swap) : 0;
}

bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
			  char *msg, size_t msgsize)
{
	int paranoid;

	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
	    evsel->attr.type   == PERF_TYPE_HARDWARE &&
	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
		/*
		 * If it's cycles then fall back to hrtimer based
		 * cpu-clock-tick sw counter, which is always available even if
		 * no PMU support.
		 *
		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
		 * b0a873e).
		 */
		scnprintf(msg, msgsize, "%s",
"The cycles event is not supported, trying to fall back to cpu-clock-ticks");

		evsel->attr.type   = PERF_TYPE_SOFTWARE;
		evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;

		zfree(&evsel->name);
		return true;
	} else if (err == EACCES && !evsel->attr.exclude_kernel &&
		   (paranoid = perf_event_paranoid()) > 1) {
		const char *name = perf_evsel__name(evsel);
		char *new_name;

		if (asprintf(&new_name, "%s%su", name, strchr(name, ':') ? "" : ":") < 0)
			return false;

		free(evsel->name);
		evsel->name = new_name;
		scnprintf(msg, msgsize,
"kernel.perf_event_paranoid=%d, trying to fall back to excluding kernel samples", paranoid);
		evsel->attr.exclude_kernel = 1;

		return true;
	}

	return false;
}

int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
			      int err, char *msg, size_t size)
{
	char sbuf[STRERR_BUFSIZE];
	int printed = 0;

	switch (err) {
	case EPERM:
	case EACCES:
		printed = scnprintf(msg, size,
				    "No permission to enable %s event.\n\n",
				    perf_evsel__name(evsel));

		return scnprintf(msg + printed, size - printed,
		 "You may not have permission to collect %sstats.\n\n"
		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid,\n"
		 "which controls use of the performance events system by\n"
		 "unprivileged users (without CAP_SYS_ADMIN).\n\n"
		 "The current value is %d:\n\n"
		 "  -1: Allow use of (almost) all events by all users\n"
		 ">= 0: Disallow raw tracepoint access by users without CAP_IOC_LOCK\n"
		 ">= 1: Disallow CPU event access by users without CAP_SYS_ADMIN\n"
		 ">= 2: Disallow kernel profiling by users without CAP_SYS_ADMIN\n\n"
		 "To make this setting permanent, edit /etc/sysctl.conf too, e.g.:\n\n"
		 "	kernel.perf_event_paranoid = -1\n" ,
				 target->system_wide ? "system-wide " : "",
				 perf_event_paranoid());
	case ENOENT:
		return scnprintf(msg, size, "The %s event is not supported.",
				 perf_evsel__name(evsel));
	case EMFILE:
		return scnprintf(msg, size, "%s",
			 "Too many events are opened.\n"
			 "Probably the maximum number of open file descriptors has been reached.\n"
			 "Hint: Try again after reducing the number of events.\n"
			 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
	case ENOMEM:
		if ((evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0 &&
		    access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0)
			return scnprintf(msg, size,
					 "Not enough memory to setup event with callchain.\n"
					 "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n"
					 "Hint: Current value: %d", sysctl_perf_event_max_stack);
		break;
	case ENODEV:
		if (target->cpu_list)
			return scnprintf(msg, size, "%s",
	 "No such device - did you specify an out-of-range profile CPU?");
		break;
	case EOPNOTSUPP:
		if (evsel->attr.sample_period != 0)
			return scnprintf(msg, size, "%s",
	"PMU Hardware doesn't support sampling/overflow-interrupts.");
		if (evsel->attr.precise_ip)
			return scnprintf(msg, size, "%s",
	"\'precise\' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
		if (evsel->attr.type == PERF_TYPE_HARDWARE)
			return scnprintf(msg, size, "%s",
	"No hardware sampling interrupt available.\n"
	"No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
#endif
		break;
	case EBUSY:
		if (find_process("oprofiled"))
			return scnprintf(msg, size,
	"The PMU counters are busy/taken by another profiler.\n"
	"We found oprofile daemon running, please stop it and try again.");
		break;
	case EINVAL:
		if (evsel->attr.write_backward && perf_missing_features.write_backward)
			return scnprintf(msg, size, "Reading from overwrite event is not supported by this kernel.");
		if (perf_missing_features.clockid)
			return scnprintf(msg, size, "clockid feature not supported.");
		if (perf_missing_features.clockid_wrong)
			return scnprintf(msg, size, "wrong clockid (%d).", clockid);
		break;
	default:
		break;
	}

	return scnprintf(msg, size,
	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
	"/bin/dmesg may provide additional information.\n"
	"No CONFIG_PERF_EVENTS=y kernel support configured?",
			 err, str_error_r(err, sbuf, sizeof(sbuf)),
			 perf_evsel__name(evsel));
}

char *perf_evsel__env_arch(struct perf_evsel *evsel)
{
	if (evsel && evsel->evlist && evsel->evlist->env)
		return evsel->evlist->env->arch;
	return NULL;
}