/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <linux/bitops.h>
#include <api/fs/debugfs.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <sys/resource.h>
#include "callchain.h"
#include "thread_map.h"
#include "perf_regs.h"
#include "trace-event.h"

static struct {
	bool sample_id_all;
	bool exclude_guest;
	bool mmap2;
	bool cloexec;
} perf_missing_features;

static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused)
{
	return 0;
}

static void perf_evsel__no_extra_fini(struct perf_evsel *evsel __maybe_unused)
{
}

static struct {
	size_t	size;
	int	(*init)(struct perf_evsel *evsel);
	void	(*fini)(struct perf_evsel *evsel);
} perf_evsel__object = {
	.size = sizeof(struct perf_evsel),
	.init = perf_evsel__no_extra_init,
	.fini = perf_evsel__no_extra_fini,
};

int perf_evsel__object_config(size_t object_size,
			      int (*init)(struct perf_evsel *evsel),
			      void (*fini)(struct perf_evsel *evsel))
{
	if (object_size == 0)
		goto set_methods;

	if (perf_evsel__object.size > object_size)
		return -EINVAL;

	perf_evsel__object.size = object_size;

set_methods:
	if (init != NULL)
		perf_evsel__object.init = init;

	if (fini != NULL)
		perf_evsel__object.fini = fini;

	return 0;
}

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}

/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
 * sample_event.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
	int idx = 0;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;

	if (sample_type & PERF_SAMPLE_ADDR)
		idx += 1;

	return idx;
}
/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 * sample_id_all is used there is an id sample appended to non-sample events.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
	int idx = 1;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 1;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_CPU)
		idx += 1;

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		idx += 1;

	return idx;
}
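/*
 * Worked example (added for illustration): with PERF_SAMPLE_ID |
 * PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID set, the id sits behind the CPU
 * and STREAM_ID slots when counting back from the end of the record, so
 * is_pos is 3.
 */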
void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
{
	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
}

void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit)
{
	if (!(evsel->attr.sample_type & bit)) {
		evsel->attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit)
{
	if (evsel->attr.sample_type & bit) {
		evsel->attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}
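/*
 * Note (added): callers normally use the perf_evsel__set_sample_bit() and
 * perf_evsel__reset_sample_bit() wrappers, assumed here to be the evsel.h
 * macros that paste the PERF_SAMPLE_ prefix, so e.g.
 * perf_evsel__set_sample_bit(evsel, TID) passes PERF_SAMPLE_TID.
 */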
void perf_evsel__set_sample_id(struct perf_evsel *evsel,
			       bool can_sample_identifier)
{
	if (can_sample_identifier) {
		perf_evsel__reset_sample_bit(evsel, ID);
		perf_evsel__set_sample_bit(evsel, IDENTIFIER);
	} else {
		perf_evsel__set_sample_bit(evsel, ID);
	}
	evsel->attr.read_format |= PERF_FORMAT_ID;
}

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx	   = idx;
	evsel->tracking	   = !idx;
	evsel->attr	   = *attr;
	evsel->leader	   = evsel;
	INIT_LIST_HEAD(&evsel->node);
	perf_evsel__object.init(evsel);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
	perf_evsel__calc_id_pos(evsel);
}

struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}

struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
{
	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);

	if (evsel != NULL) {
		struct perf_event_attr attr = {
			.type	     = PERF_TYPE_TRACEPOINT,
			.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = trace_event__tp_format(sys, name);
		if (evsel->tp_format == NULL)
			goto out_free;

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		perf_evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	zfree(&evsel->name);
	free(evsel);
	return NULL;
}

const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}

static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod); } } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}
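/*
 * Example output (added for illustration): an event with exclude_kernel
 * and exclude_hv set but exclude_user clear produces the ":u" suffix, and
 * attr->precise_ip == 2 appends "pp", so the name printers below yield
 * e.g. "cycles:upp".
 */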
static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
	"dummy",
};

static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}

static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}

static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
 { "LLC",	"L2",							},
 { "dTLB",	"d-tlb",	"Data-TLB",				},
 { "iTLB",	"i-tlb",	"Instruction-TLB",			},
 { "branch",	"branches",	"bpu",		"btb",	"bpc",		},
 { "node",								},
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",	"loads",	"read",					},
 { "store",	"stores",	"write",				},
 { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access",		},
 { "misses",	"miss",							},
};
#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)]	= (CACHE_READ),
 [C(BPU)]	= (CACHE_READ),
 [C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};
bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}

int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}
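/*
 * Example strings (added for illustration): type L1D, op read, result
 * miss yields "L1-dcache-load-misses"; with result 0 (refs) the second
 * branch is taken and the plural op alias is used: "L1-dcache-loads".
 */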
static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type >= PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}
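/*
 * Layout reminder (added): the extended cache config packs
 * type | (op << 8) | (result << 16), so e.g.
 * PERF_COUNT_HW_CACHE_L1D | (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 * (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) names "L1-dcache-load-misses".
 */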
static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->attr.type);
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}

const char *perf_evsel__group_name(struct perf_evsel *evsel)
{
	return evsel->group_name ?: "anon group";
}

int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
{
	int ret;
	struct perf_evsel *pos;
	const char *group_name = perf_evsel__group_name(evsel);

	ret = scnprintf(buf, size, "%s", group_name);

	ret += scnprintf(buf + ret, size - ret, " { %s",
			 perf_evsel__name(evsel));

	for_each_group_member(pos, evsel)
		ret += scnprintf(buf + ret, size - ret, ", %s",
				 perf_evsel__name(pos));

	ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}

static void
perf_evsel__config_callgraph(struct perf_evsel *evsel,
			     struct record_opts *opts)
{
	bool function = perf_evsel__is_function_event(evsel);
	struct perf_event_attr *attr = &evsel->attr;

	perf_evsel__set_sample_bit(evsel, CALLCHAIN);

	if (callchain_param.record_mode == CALLCHAIN_LBR) {
		if (!opts->branch_stack) {
			if (attr->exclude_user) {
				pr_warning("LBR callstack option is only available "
					   "to get user callchain information. "
					   "Falling back to framepointers.\n");
			} else {
				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
				attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
							   PERF_SAMPLE_BRANCH_CALL_STACK;
			}
		} else
			pr_warning("Cannot use LBR callstack with branch stack. "
				   "Falling back to framepointers.\n");
	}

	if (callchain_param.record_mode == CALLCHAIN_DWARF) {
		if (!function) {
			perf_evsel__set_sample_bit(evsel, REGS_USER);
			perf_evsel__set_sample_bit(evsel, STACK_USER);
			attr->sample_regs_user = PERF_REGS_MASK;
			attr->sample_stack_user = callchain_param.dump_size;
			attr->exclude_callchain_user = 1;
		} else {
			pr_info("Cannot use DWARF unwind for function trace event,"
				" falling back to framepointers.\n");
		}
	}

	if (function) {
		pr_info("Disabling user space callchains for function trace event.\n");
		attr->exclude_callchain_user = 1;
	}
}
/*
 * The enable_on_exec/disabled value strategy:
 *
 *  1) For any type of traced program:
 *    - all independent events and group leaders are disabled
 *    - all group members are enabled
 *
 *     Group members are ruled by group leaders. They need to
 *     be enabled, because the group scheduling relies on that.
 *
 *  2) For traced programs executed by perf:
 *    - all independent events and group leaders have
 *      enable_on_exec set
 *    - we don't specifically enable or disable any event during
 *      the record command
 *
 *     Independent events and group leaders are initially disabled
 *     and get enabled by exec. Group members are ruled by group
 *     leaders as stated in 1).
 *
 *  3) For traced programs attached by perf (pid/tid):
 *    - we specifically enable or disable all events during
 *      the record command
 *
 *     When attaching events to an already running traced process we
 *     enable/disable events specifically, as there's no
 *     initial traced exec call.
 */
void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
{
	struct perf_evsel *leader = evsel->leader;
	struct perf_event_attr *attr = &evsel->attr;
	int track = evsel->tracking;
	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;

	perf_evsel__set_sample_bit(evsel, IP);
	perf_evsel__set_sample_bit(evsel, TID);

	if (evsel->sample_read) {
		perf_evsel__set_sample_bit(evsel, READ);

		/*
		 * We need ID even in case of single event, because
		 * PERF_SAMPLE_READ process ID specific data.
		 */
		perf_evsel__set_sample_id(evsel, false);

		/*
		 * Apply group format only if we belong to group
		 * with more than one members.
		 */
		if (leader->nr_members > 1) {
			attr->read_format |= PERF_FORMAT_GROUP;
			attr->inherit = 0;
		}
	}

	/*
	 * We default some events to have a default interval. But keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			perf_evsel__set_sample_bit(evsel, PERIOD);
			attr->freq	  = 1;
			attr->sample_freq = opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	/*
	 * Disable sampling for all group members other
	 * than leader in case leader 'leads' the sampling.
	 */
	if ((leader != evsel) && leader->sample_read) {
		attr->sample_freq   = 0;
		attr->sample_period = 0;
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		perf_evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	/*
	 * We don't allow user space callchains for function trace
	 * event, due to issues with page faults while tracing page
	 * fault handler and its overall trickiness nature.
	 */
	if (perf_evsel__is_function_event(evsel))
		evsel->attr.exclude_callchain_user = 1;

	if (callchain_param.enabled && !evsel->no_aux_samples)
		perf_evsel__config_callgraph(evsel, opts);

	if (opts->sample_intr_regs) {
		attr->sample_regs_intr = PERF_REGS_MASK;
		perf_evsel__set_sample_bit(evsel, REGS_INTR);
	}

	if (target__has_cpu(&opts->target))
		perf_evsel__set_sample_bit(evsel, CPU);

	if (opts->period)
		perf_evsel__set_sample_bit(evsel, PERIOD);

	/*
	 * When the user explicitly disabled time don't force it here.
	 */
	if (opts->sample_time &&
	    (!perf_missing_features.sample_id_all &&
	    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu)))
		perf_evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, TIME);
		perf_evsel__set_sample_bit(evsel, RAW);
		perf_evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->sample_address)
		perf_evsel__set_sample_bit(evsel, DATA_SRC);

	if (opts->no_buffering) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}

	if (opts->branch_stack && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	if (opts->sample_weight)
		perf_evsel__set_sample_bit(evsel, WEIGHT);

	attr->mmap  = track;
	attr->mmap2 = track && !perf_missing_features.mmap2;
	attr->comm  = track;

	if (opts->sample_transaction)
		perf_evsel__set_sample_bit(evsel, TRANSACTION);

	if (opts->running_time) {
		evsel->attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING;
	}

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (perf_evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders for traced executed by perf.
	 */
	if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
	    !opts->initial_delay)
		attr->enable_on_exec = 1;

	if (evsel->immediate) {
		attr->disabled = 0;
		attr->enable_on_exec = 0;
	}
}
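/*
 * Illustration (added, not part of the original file): for a freq-based
 * user session such as `perf record -F 4000 ./prog`, an independent evsel
 * (its own group leader) ends up with attr->freq = 1,
 * attr->sample_freq = 4000, attr->disabled = 1 and
 * attr->enable_on_exec = 1, matching case 2) of the strategy comment.
 */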
static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads,
				 int ioc, void *arg)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			int fd = FD(evsel, cpu, thread),
			    err = ioctl(fd, ioc, arg);

			if (err)
				return err;
		}
	}

	return 0;
}

int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
			   const char *filter)
{
	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_SET_FILTER,
				     (void *)filter);
}

int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_ENABLE,
				     0);
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (ncpus == 0 || nthreads == 0)
		return 0;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus)
{
	memset(evsel->counts, 0, (sizeof(*evsel->counts) +
				  (ncpus * sizeof(struct perf_counts_values))));
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}

static void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

static void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evsel__free_counts(struct perf_evsel *evsel)
{
	zfree(&evsel->counts);
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	perf_evsel__free_fd(evsel);
	perf_evsel__free_id(evsel);
	close_cgroup(evsel->cgrp);
	zfree(&evsel->group_name);
	zfree(&evsel->name);
	perf_evsel__object.fini(evsel);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	free(evsel);
}

void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu,
				struct perf_counts_values *count)
{
	struct perf_counts_values tmp;

	if (!evsel->prev_raw_counts)
		return;

	if (cpu == -1) {
		tmp = evsel->prev_raw_counts->aggr;
		evsel->prev_raw_counts->aggr = *count;
	} else {
		tmp = evsel->prev_raw_counts->cpu[cpu];
		evsel->prev_raw_counts->cpu[cpu] = *count;
	}

	count->val = count->val - tmp.val;
	count->ena = count->ena - tmp.ena;
	count->run = count->run - tmp.run;
}

void perf_counts_values__scale(struct perf_counts_values *count,
			       bool scale, s8 *pscaled)
{
	s8 scaled = 0;

	if (scale) {
		if (count->run == 0) {
			scaled = -1;
			count->val = 0;
		} else if (count->run < count->ena) {
			scaled = 1;
			count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
		}
	} else
		count->ena = count->run = 0;

	if (pscaled)
		*pscaled = scaled;
}
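/*
 * Worked example (added): with val = 1000, ena = 200 and run = 100 the
 * event was scheduled in for half of its enabled time, so the count is
 * extrapolated to 1000 * 200 / 100 = 2000 and *pscaled is set to 1.
 */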
int perf_evsel__read_cb(struct perf_evsel *evsel, int cpu, int thread,
			perf_evsel__read_cb_t cb)
{
	struct perf_counts_values count;

	memset(&count, 0, sizeof(count));

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (readn(FD(evsel, cpu, thread), &count, sizeof(count)) < 0)
		return -errno;

	return cb(evsel, cpu, thread, &count);
}

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	perf_evsel__compute_deltas(evsel, cpu, &count);
	perf_counts_values__scale(&count, scale, NULL);
	evsel->counts->cpu[cpu] = count;
	return 0;
}

static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_evsel *leader = evsel->leader;
	int fd;

	if (perf_evsel__is_group_leader(evsel))
		return -1;

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	BUG_ON(!leader->fd);

	fd = FD(leader, cpu, thread);
	BUG_ON(fd == -1);

	return fd;
}

#define __PRINT_ATTR(fmt, cast, field)  \
	fprintf(fp, "  %-19s "fmt"\n", #field, cast attr->field)

#define PRINT_ATTR_U32(field)  __PRINT_ATTR("%u" , , field)
#define PRINT_ATTR_X32(field)  __PRINT_ATTR("%#x", , field)
#define PRINT_ATTR_U64(field)  __PRINT_ATTR("%" PRIu64, (uint64_t), field)
#define PRINT_ATTR_X64(field)  __PRINT_ATTR("%#"PRIx64, (uint64_t), field)

#define PRINT_ATTR2N(name1, field1, name2, field2)	\
	fprintf(fp, "  %-19s %u    %-19s %u\n",		\
		name1, attr->field1, name2, attr->field2)

#define PRINT_ATTR2(field1, field2) \
	PRINT_ATTR2N(#field1, field1, #field2, field2)

static size_t perf_event_attr__fprintf(struct perf_event_attr *attr, FILE *fp)
{
	size_t ret = 0;

	ret += fprintf(fp, "%.60s\n", graph_dotted_line);
	ret += fprintf(fp, "perf_event_attr:\n");

	ret += PRINT_ATTR_U32(type);
	ret += PRINT_ATTR_U32(size);
	ret += PRINT_ATTR_X64(config);
	ret += PRINT_ATTR_U64(sample_period);
	ret += PRINT_ATTR_U64(sample_freq);
	ret += PRINT_ATTR_X64(sample_type);
	ret += PRINT_ATTR_X64(read_format);

	ret += PRINT_ATTR2(disabled, inherit);
	ret += PRINT_ATTR2(pinned, exclusive);
	ret += PRINT_ATTR2(exclude_user, exclude_kernel);
	ret += PRINT_ATTR2(exclude_hv, exclude_idle);
	ret += PRINT_ATTR2(mmap, comm);
	ret += PRINT_ATTR2(mmap2, comm_exec);
	ret += PRINT_ATTR2(freq, inherit_stat);
	ret += PRINT_ATTR2(enable_on_exec, task);
	ret += PRINT_ATTR2(watermark, precise_ip);
	ret += PRINT_ATTR2(mmap_data, sample_id_all);
	ret += PRINT_ATTR2(exclude_host, exclude_guest);
	ret += PRINT_ATTR2N("excl.callchain_kern", exclude_callchain_kernel,
			    "excl.callchain_user", exclude_callchain_user);

	ret += PRINT_ATTR_U32(wakeup_events);
	ret += PRINT_ATTR_U32(wakeup_watermark);
	ret += PRINT_ATTR_X32(bp_type);
	ret += PRINT_ATTR_X64(bp_addr);
	ret += PRINT_ATTR_X64(config1);
	ret += PRINT_ATTR_U64(bp_len);
	ret += PRINT_ATTR_X64(config2);
	ret += PRINT_ATTR_X64(branch_sample_type);
	ret += PRINT_ATTR_X64(sample_regs_user);
	ret += PRINT_ATTR_U32(sample_stack_user);
	ret += PRINT_ATTR_X64(sample_regs_intr);

	ret += fprintf(fp, "%.60s\n", graph_dotted_line);

	return ret;
}
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads)
{
	int cpu, thread, nthreads;
	unsigned long flags = PERF_FLAG_FD_CLOEXEC;
	int pid = -1, err;
	enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;

	if (evsel->system_wide)
		nthreads = 1;
	else
		nthreads = threads->nr;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags |= PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

fallback_missing_features:
	if (perf_missing_features.cloexec)
		flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
	if (perf_missing_features.mmap2)
		evsel->attr.mmap2 = 0;
	if (perf_missing_features.exclude_guest)
		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
retry_sample_id:
	if (perf_missing_features.sample_id_all)
		evsel->attr.sample_id_all = 0;

	if (verbose >= 2)
		perf_event_attr__fprintf(&evsel->attr, stderr);

	for (cpu = 0; cpu < cpus->nr; cpu++) {

		for (thread = 0; thread < nthreads; thread++) {
			int group_fd;

			if (!evsel->cgrp && !evsel->system_wide)
				pid = threads->map[thread];

			group_fd = get_group_fd(evsel, cpu, thread);
retry_open:
			pr_debug2("sys_perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx\n",
				  pid, cpus->map[cpu], group_fd, flags);

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				pr_debug2("sys_perf_event_open failed, error %d\n",
					  err);
				goto try_fallback;
			}
			set_rlimit = NO_CHANGE;
		}
	}

	return 0;

try_fallback:
	/*
	 * perf stat needs between 5 and 22 fds per CPU. When we run out
	 * of them try to increase the limits.
	 */
	if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
		struct rlimit l;
		int old_errno = errno;

		if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
			if (set_rlimit == NO_CHANGE)
				l.rlim_cur = l.rlim_max;
			else {
				l.rlim_cur = l.rlim_max + 1000;
				l.rlim_max = l.rlim_cur;
			}
			if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
				set_rlimit++;
				errno = old_errno;
				goto retry_open;
			}
		}
		errno = old_errno;
	}

	if (err != -EINVAL || cpu > 0 || thread > 0)
		goto out_close;

	if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
		perf_missing_features.cloexec = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
		perf_missing_features.mmap2 = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.exclude_guest &&
		   (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
		perf_missing_features.exclude_guest = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.sample_id_all) {
		perf_missing_features.sample_id_all = true;
		goto retry_sample_id;
	}

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = nthreads;
	} while (--cpu >= 0);
	return err;
}

void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
}

static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}
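/*
 * Usage sketch (added, illustrative only): a minimal counting setup,
 * assuming the perf_evsel__new() helper from evsel.h and cpu/thread maps
 * built elsewhere:
 *
 *	struct perf_evsel *evsel = perf_evsel__new(&attr);
 *
 *	if (evsel == NULL || perf_evsel__open(evsel, cpus, threads) < 0)
 *		return -1;
 *	...
 *	__perf_evsel__read_on_cpu(evsel, 0, 0, true);
 *	perf_evsel__close(evsel, cpus->nr, threads->nr);
 *	perf_evsel__delete(evsel);
 */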
static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	const u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
		array--;
	}

	return 0;
}
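/*
 * Note (added): this parser walks the id sample *backwards* from the end
 * of the record, which is why the branches run in the reverse order of
 * __perf_evsel__calc_is_pos() above: the last u64 is inspected first and
 * array-- steps toward the front of the event.
 */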
static inline bool overflow(const void *endp, u16 max_size, const void *offset,
			    u64 size)
{
	return size > max_size || offset + size > endp;
}

#define OVERFLOW_CHECK(offset, size, max_size)				\
	do {								\
		if (overflow(endp, (max_size), (offset), (size)))	\
			return -EFAULT;					\
	} while (0)

#define OVERFLOW_CHECK_u64(offset) \
	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
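/*
 * Note (added): OVERFLOW_CHECK_u64() passes sizeof(u64) as both size and
 * max_size, so the "size > max_size" half of overflow() is trivially
 * false and only the "offset + size > endp" bounds test can fire for
 * single-u64 reads.
 */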
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *data)
{
	u64 type = evsel->attr.sample_type;
	bool swapped = evsel->needs_swap;
	const u64 *array;
	u16 max_size = event->header.size;
	const void *endp = (void *)event + max_size;
	u64 sz;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = evsel->attr.sample_period;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->attr.sample_id_all)
			return 0;
		return perf_evsel__parse_id_sample(evsel, event, data);
	}

	array = event->sample.array;

	/*
	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
	 * up to PERF_SAMPLE_PERIOD.  After that overflow() must be used to
	 * check the format does not go past the end of the event.
	 */
	if (evsel->sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		data->ip = *array;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		u64 read_format = evsel->attr.read_format;

		OVERFLOW_CHECK_u64(array);
		if (read_format & PERF_FORMAT_GROUP)
			data->read.group.nr = *array;
		else
			data->read.one.value = *array;

		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_enabled = *array;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_running = *array;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			const u64 max_group_nr = UINT64_MAX /
					sizeof(struct sample_read_value);

			if (data->read.group.nr > max_group_nr)
				return -EFAULT;
			sz = data->read.group.nr *
			     sizeof(struct sample_read_value);
			OVERFLOW_CHECK(array, sz, max_size);
			data->read.group.values =
					(struct sample_read_value *)array;
			array = (void *)array + sz;
		} else {
			OVERFLOW_CHECK_u64(array);
			data->read.one.id = *array;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);

		OVERFLOW_CHECK_u64(array);
		data->callchain = (struct ip_callchain *)array++;
		if (data->callchain->nr > max_callchain_nr)
			return -EFAULT;
		sz = data->callchain->nr * sizeof(u64);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		OVERFLOW_CHECK_u64(array);
		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}
		data->raw_size = u.val32[0];
		array = (void *)array + sizeof(u32);

		OVERFLOW_CHECK(array, data->raw_size, max_size);
		data->raw_data = (void *)array;
		array = (void *)array + data->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		const u64 max_branch_nr = UINT64_MAX /
					  sizeof(struct branch_entry);

		OVERFLOW_CHECK_u64(array);
		data->branch_stack = (struct branch_stack *)array++;

		if (data->branch_stack->nr > max_branch_nr)
			return -EFAULT;
		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		OVERFLOW_CHECK_u64(array);
		data->user_regs.abi = *array;
		array++;

		if (data->user_regs.abi) {
			u64 mask = evsel->attr.sample_regs_user;

			sz = hweight_long(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_regs.mask = mask;
			data->user_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		OVERFLOW_CHECK_u64(array);
		sz = *array++;

		data->user_stack.offset = ((char *)(array - 1)
					  - (char *) event);

		if (!sz) {
			data->user_stack.size = 0;
		} else {
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_stack.data = (char *)array;
			array = (void *)array + sz;
			OVERFLOW_CHECK_u64(array);
			data->user_stack.size = *array++;
			if (WARN_ONCE(data->user_stack.size > sz,
				      "user stack dump failure\n"))
				return -EFAULT;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		OVERFLOW_CHECK_u64(array);
		data->weight = *array;
		array++;
	}

	data->data_src = PERF_MEM_DATA_SRC_NONE;
	if (type & PERF_SAMPLE_DATA_SRC) {
		OVERFLOW_CHECK_u64(array);
		data->data_src = *array;
		array++;
	}

	data->transaction = 0;
	if (type & PERF_SAMPLE_TRANSACTION) {
		OVERFLOW_CHECK_u64(array);
		data->transaction = *array;
		array++;
	}

	data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
	if (type & PERF_SAMPLE_REGS_INTR) {
		OVERFLOW_CHECK_u64(array);
		data->intr_regs.abi = *array;
		array++;

		if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
			u64 mask = evsel->attr.sample_regs_intr;

			sz = hweight_long(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->intr_regs.mask = mask;
			data->intr_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	return 0;
}
size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
				     u64 read_format)
{
	size_t sz, result = sizeof(struct sample_event);

	if (type & PERF_SAMPLE_IDENTIFIER)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_IP)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TIME)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_STREAM_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CPU)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_PERIOD)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_READ) {
		result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			result += sizeof(u64);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		result += sizeof(u32);
		result += sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		result += sizeof(u64);
		if (sz) {
			result += sz;
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_DATA_SRC)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TRANSACTION)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	return result;
}
*event
, u64 type
,
1685 const struct perf_sample
*sample
,
1691 * used for cross-endian analysis. See git commit 65014ab3
1692 * for why this goofiness is needed.
1696 array
= event
->sample
.array
;
1698 if (type
& PERF_SAMPLE_IDENTIFIER
) {
1699 *array
= sample
->id
;
1703 if (type
& PERF_SAMPLE_IP
) {
1704 *array
= sample
->ip
;
1708 if (type
& PERF_SAMPLE_TID
) {
1709 u
.val32
[0] = sample
->pid
;
1710 u
.val32
[1] = sample
->tid
;
1713 * Inverse of what is done in perf_evsel__parse_sample
1715 u
.val32
[0] = bswap_32(u
.val32
[0]);
1716 u
.val32
[1] = bswap_32(u
.val32
[1]);
1717 u
.val64
= bswap_64(u
.val64
);
1724 if (type
& PERF_SAMPLE_TIME
) {
1725 *array
= sample
->time
;
1729 if (type
& PERF_SAMPLE_ADDR
) {
1730 *array
= sample
->addr
;
1734 if (type
& PERF_SAMPLE_ID
) {
1735 *array
= sample
->id
;
1739 if (type
& PERF_SAMPLE_STREAM_ID
) {
1740 *array
= sample
->stream_id
;
1744 if (type
& PERF_SAMPLE_CPU
) {
1745 u
.val32
[0] = sample
->cpu
;
1748 * Inverse of what is done in perf_evsel__parse_sample
1750 u
.val32
[0] = bswap_32(u
.val32
[0]);
1751 u
.val64
= bswap_64(u
.val64
);
1757 if (type
& PERF_SAMPLE_PERIOD
) {
1758 *array
= sample
->period
;
1762 if (type
& PERF_SAMPLE_READ
) {
1763 if (read_format
& PERF_FORMAT_GROUP
)
1764 *array
= sample
->read
.group
.nr
;
1766 *array
= sample
->read
.one
.value
;
1769 if (read_format
& PERF_FORMAT_TOTAL_TIME_ENABLED
) {
1770 *array
= sample
->read
.time_enabled
;
1774 if (read_format
& PERF_FORMAT_TOTAL_TIME_RUNNING
) {
1775 *array
= sample
->read
.time_running
;
1779 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1780 if (read_format
& PERF_FORMAT_GROUP
) {
1781 sz
= sample
->read
.group
.nr
*
1782 sizeof(struct sample_read_value
);
1783 memcpy(array
, sample
->read
.group
.values
, sz
);
1784 array
= (void *)array
+ sz
;
1786 *array
= sample
->read
.one
.id
;
1791 if (type
& PERF_SAMPLE_CALLCHAIN
) {
1792 sz
= (sample
->callchain
->nr
+ 1) * sizeof(u64
);
1793 memcpy(array
, sample
->callchain
, sz
);
1794 array
= (void *)array
+ sz
;
1797 if (type
& PERF_SAMPLE_RAW
) {
1798 u
.val32
[0] = sample
->raw_size
;
1799 if (WARN_ONCE(swapped
,
1800 "Endianness of raw data not corrected!\n")) {
1802 * Inverse of what is done in perf_evsel__parse_sample
1804 u
.val32
[0] = bswap_32(u
.val32
[0]);
1805 u
.val32
[1] = bswap_32(u
.val32
[1]);
1806 u
.val64
= bswap_64(u
.val64
);
1809 array
= (void *)array
+ sizeof(u32
);
1811 memcpy(array
, sample
->raw_data
, sample
->raw_size
);
1812 array
= (void *)array
+ sample
->raw_size
;
1815 if (type
& PERF_SAMPLE_BRANCH_STACK
) {
1816 sz
= sample
->branch_stack
->nr
* sizeof(struct branch_entry
);
1818 memcpy(array
, sample
->branch_stack
, sz
);
1819 array
= (void *)array
+ sz
;
1822 if (type
& PERF_SAMPLE_REGS_USER
) {
1823 if (sample
->user_regs
.abi
) {
1824 *array
++ = sample
->user_regs
.abi
;
1825 sz
= hweight_long(sample
->user_regs
.mask
) * sizeof(u64
);
1826 memcpy(array
, sample
->user_regs
.regs
, sz
);
1827 array
= (void *)array
+ sz
;
1833 if (type
& PERF_SAMPLE_STACK_USER
) {
1834 sz
= sample
->user_stack
.size
;
1837 memcpy(array
, sample
->user_stack
.data
, sz
);
1838 array
= (void *)array
+ sz
;
1843 if (type
& PERF_SAMPLE_WEIGHT
) {
1844 *array
= sample
->weight
;
1848 if (type
& PERF_SAMPLE_DATA_SRC
) {
1849 *array
= sample
->data_src
;
1853 if (type
& PERF_SAMPLE_TRANSACTION
) {
1854 *array
= sample
->transaction
;
1858 if (type
& PERF_SAMPLE_REGS_INTR
) {
1859 if (sample
->intr_regs
.abi
) {
1860 *array
++ = sample
->intr_regs
.abi
;
1861 sz
= hweight_long(sample
->intr_regs
.mask
) * sizeof(u64
);
1862 memcpy(array
, sample
->intr_regs
.regs
, sz
);
1863 array
= (void *)array
+ sz
;
struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
	return pevent_find_field(evsel->tp_format, name);
}

void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;

	if (field->flags & FIELD_IS_DYNAMIC) {
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
	}

	return sample->raw_data + offset;
}

u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	void *ptr;
	u64 value;

	if (!field)
		return 0;

	ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		value = *(u64 *)ptr;
		break;
	default:
		return 0;
	}

	if (!evsel->needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
	default:
		return 0;
	}

	return 0;
}
static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
{
	va_list args;
	int ret = 0;

	if (!*first) {
		ret += fprintf(fp, ",");
	} else {
		ret += fprintf(fp, ":");
		*first = false;
	}

	va_start(args, fmt);
	ret += vfprintf(fp, fmt, args);
	va_end(args);
	return ret;
}

static int __if_fprintf(FILE *fp, bool *first, const char *field, u64 value)
{
	if (value == 0)
		return 0;

	return comma_fprintf(fp, first, " %s: %" PRIu64, field, value);
}

#define if_print(field) printed += __if_fprintf(fp, &first, #field, evsel->attr.field)
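/*
 * Expansion example (added): inside perf_evsel__fprintf() below,
 * if_print(precise_ip) becomes
 * printed += __if_fprintf(fp, &first, "precise_ip", evsel->attr.precise_ip);
 * i.e. the field is printed only when its value is non-zero.
 */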
struct bit_names {
	int bit;
	const char *name;
};

static int bits__fprintf(FILE *fp, const char *field, u64 value,
			 struct bit_names *bits, bool *first)
{
	int i = 0, printed = comma_fprintf(fp, first, " %s: ", field);
	bool first_bit = true;

	do {
		if (value & bits[i].bit) {
			printed += fprintf(fp, "%s%s", first_bit ? "" : "|", bits[i].name);
			first_bit = false;
		}
	} while (bits[++i].name != NULL);

	return printed;
}

static int sample_type__fprintf(FILE *fp, bool *first, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_##n, #n }
	struct bit_names bits[] = {
		bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
		bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
		bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
		bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
		bit_name(IDENTIFIER), bit_name(REGS_INTR),
		{ .name = NULL, }
	};
#undef bit_name
	return bits__fprintf(fp, "sample_type", value, bits, first);
}

static int read_format__fprintf(FILE *fp, bool *first, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
	struct bit_names bits[] = {
		bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
		bit_name(ID), bit_name(GROUP),
		{ .name = NULL, }
	};
#undef bit_name
	return bits__fprintf(fp, "read_format", value, bits, first);
}

int perf_evsel__fprintf(struct perf_evsel *evsel,
			struct perf_attr_details *details, FILE *fp)
{
	bool first = true;
	int printed = 0;

	if (details->event_group) {
		struct perf_evsel *pos;

		if (!perf_evsel__is_group_leader(evsel))
			return 0;

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "%s{", evsel->group_name ?: "");

		printed += fprintf(fp, "%s", perf_evsel__name(evsel));
		for_each_group_member(pos, evsel)
			printed += fprintf(fp, ",%s", perf_evsel__name(pos));

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "}");
		goto out;
	}

	printed += fprintf(fp, "%s", perf_evsel__name(evsel));

	if (details->verbose || details->freq) {
		printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
					 (u64)evsel->attr.sample_freq);
	}

	if (details->verbose) {
		if_print(type);
		if_print(config);
		if_print(config1);
		if_print(config2);
		if_print(size);
		printed += sample_type__fprintf(fp, &first, evsel->attr.sample_type);
		if (evsel->attr.read_format)
			printed += read_format__fprintf(fp, &first, evsel->attr.read_format);
		if_print(disabled);
		if_print(inherit);
		if_print(pinned);
		if_print(exclusive);
		if_print(exclude_user);
		if_print(exclude_kernel);
		if_print(exclude_hv);
		if_print(exclude_idle);
		if_print(mmap);
		if_print(mmap2);
		if_print(comm);
		if_print(comm_exec);
		if_print(freq);
		if_print(inherit_stat);
		if_print(enable_on_exec);
		if_print(task);
		if_print(watermark);
		if_print(precise_ip);
		if_print(mmap_data);
		if_print(sample_id_all);
		if_print(exclude_host);
		if_print(exclude_guest);
		if_print(__reserved_1);
		if_print(wakeup_events);
		if_print(bp_type);
		if_print(branch_sample_type);
	}
out:
	fputc('\n', fp);
	return ++printed;
}
bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
			  char *msg, size_t msgsize)
{
	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
	    evsel->attr.type   == PERF_TYPE_HARDWARE &&
	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
		/*
		 * If it's cycles then fall back to hrtimer based
		 * cpu-clock-tick sw counter, which is always available even if
		 * no PMU support.
		 *
		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit ...)
		 */
		scnprintf(msg, msgsize, "%s",
"The cycles event is not supported, trying to fall back to cpu-clock-ticks");

		evsel->attr.type   = PERF_TYPE_SOFTWARE;
		evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;

		zfree(&evsel->name);
		return true;
	}

	return false;
}
int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
			      int err, char *msg, size_t size)
{
	char sbuf[STRERR_BUFSIZE];

	switch (err) {
	case EPERM:
	case EACCES:
		return scnprintf(msg, size,
		 "You may not have permission to collect %sstats.\n"
		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
		 " -1 - Not paranoid at all\n"
		 "  0 - Disallow raw tracepoint access for unpriv\n"
		 "  1 - Disallow cpu events for unpriv\n"
		 "  2 - Disallow kernel profiling for unpriv",
				 target->system_wide ? "system-wide " : "");
	case ENOENT:
		return scnprintf(msg, size, "The %s event is not supported.",
				 perf_evsel__name(evsel));
	case EMFILE:
		return scnprintf(msg, size, "%s",
			 "Too many events are opened.\n"
			 "Try again after reducing the number of events.");
	case ENODEV:
		if (target->cpu_list)
			return scnprintf(msg, size, "%s",
	 "No such device - did you specify an out-of-range profile CPU?\n");
		break;
	case EOPNOTSUPP:
		if (evsel->attr.precise_ip)
			return scnprintf(msg, size, "%s",
	"\'precise\' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
		if (evsel->attr.type == PERF_TYPE_HARDWARE)
			return scnprintf(msg, size, "%s",
	"No hardware sampling interrupt available.\n"
	"No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
#endif
		break;
	case EBUSY:
		if (find_process("oprofiled"))
			return scnprintf(msg, size,
	"The PMU counters are busy/taken by another profiler.\n"
	"We found oprofile daemon running, please stop it and try again.");
		break;
	default:
		break;
	}

	return scnprintf(msg, size,
	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
	"/bin/dmesg may provide additional information.\n"
	"No CONFIG_PERF_EVENTS=y kernel support configured?\n",
			 err, strerror_r(err, sbuf, sizeof(sbuf)),
			 perf_evsel__name(evsel));
}