/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <linux/bitops.h>
#include <api/fs/debugfs.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <sys/resource.h>
#include "callchain.h"
#include "thread_map.h"
#include "perf_regs.h"
#include "trace-event.h"

static struct {
	bool sample_id_all;
	bool exclude_guest;
	bool mmap2;
	bool cloexec;
	bool clockid;
	bool clockid_wrong;
} perf_missing_features;

static clockid_t clockid;

static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused)
{
	return 0;
}

static void perf_evsel__no_extra_fini(struct perf_evsel *evsel __maybe_unused)
{
}

static struct {
	size_t	size;
	int	(*init)(struct perf_evsel *evsel);
	void	(*fini)(struct perf_evsel *evsel);
} perf_evsel__object = {
	.size = sizeof(struct perf_evsel),
	.init = perf_evsel__no_extra_init,
	.fini = perf_evsel__no_extra_fini,
};

int perf_evsel__object_config(size_t object_size,
			      int (*init)(struct perf_evsel *evsel),
			      void (*fini)(struct perf_evsel *evsel))
{
	if (object_size == 0)
		goto set_methods;

	if (perf_evsel__object.size > object_size)
		return -EINVAL;

	perf_evsel__object.size = object_size;

set_methods:
	if (init != NULL)
		perf_evsel__object.init = init;

	if (fini != NULL)
		perf_evsel__object.fini = fini;

	return 0;
}

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}

/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
 * sample_event.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
	int idx = 0;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;

	if (sample_type & PERF_SAMPLE_ADDR)
		idx += 1;

	return idx;
}

/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 * sample_id_all is used there is an id sample appended to non-sample events.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
	int idx = 1;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 1;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_CPU)
		idx += 1;

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		idx += 1;

	return idx;
}

void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
{
	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
}

void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit)
{
	if (!(evsel->attr.sample_type & bit)) {
		evsel->attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit)
{
	if (evsel->attr.sample_type & bit) {
		evsel->attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void perf_evsel__set_sample_id(struct perf_evsel *evsel,
			       bool can_sample_identifier)
{
	if (can_sample_identifier) {
		perf_evsel__reset_sample_bit(evsel, ID);
		perf_evsel__set_sample_bit(evsel, IDENTIFIER);
	} else {
		perf_evsel__set_sample_bit(evsel, ID);
	}
	evsel->attr.read_format |= PERF_FORMAT_ID;
}

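/*
 * Illustrative sketch, not part of the original file: how the helpers
 * above keep sample_size and id_pos consistent with attr.sample_type.
 * The evsel passed in is hypothetical; each bit added grows the
 * per-sample record by one u64 and recomputes the id positions.
 */
static void __maybe_unused example_sample_bits(struct perf_evsel *evsel)
{
	/* grow the sample by two u64 slots */
	perf_evsel__set_sample_bit(evsel, TIME);
	perf_evsel__set_sample_bit(evsel, CPU);

	/* prefer PERF_SAMPLE_IDENTIFIER when the kernel supports it */
	perf_evsel__set_sample_id(evsel, true);
}
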
void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx	   = idx;
	evsel->tracking	   = !idx;
	evsel->attr	   = *attr;
	evsel->leader	   = evsel;
	evsel->unit	   = "";
	evsel->scale	   = 1.0;
	INIT_LIST_HEAD(&evsel->node);
	perf_evsel__object.init(evsel);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
	perf_evsel__calc_id_pos(evsel);
}

struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}

struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
{
	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);

	if (evsel != NULL) {
		struct perf_event_attr attr = {
			.type	     = PERF_TYPE_TRACEPOINT,
			.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = trace_event__tp_format(sys, name);
		if (evsel->tp_format == NULL)
			goto out_free;

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		perf_evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	zfree(&evsel->name);
	free(evsel);
	return NULL;
}

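/*
 * Illustrative sketch, not part of the original file: creating a
 * tracepoint evsel. "sched:sched_switch" is just an example; any
 * tracepoint exported under the tracing debugfs works the same way.
 */
static struct perf_evsel * __maybe_unused example_newtp(void)
{
	/* idx 0: first (tracking) event in an evlist */
	return perf_evsel__newtp_idx("sched", "sched_switch", 0);
}
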
const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}

static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod); } } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}

static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
	"dummy",
};

static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}

static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}

static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
 { "LLC",	"L2",							},
 { "dTLB",	"d-tlb",	"Data-TLB",				},
 { "iTLB",	"i-tlb",	"Instruction-TLB",			},
 { "branch",	"branches",	"bpu",		"btb",	"bpc",		},
 { "node",								},
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",	"loads",	"read",					},
 { "store",	"stores",	"write",				},
 { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access",		},
 { "misses",	"miss",						},
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]  = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]  = (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]   = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)] = (CACHE_READ),
 [C(BPU)]  = (CACHE_READ),
 [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}

int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}

static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type > PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op > PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result > PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}

static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->attr.type);
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}

const char *perf_evsel__group_name(struct perf_evsel *evsel)
{
	return evsel->group_name ?: "anon group";
}

int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
{
	int ret;
	struct perf_evsel *pos;
	const char *group_name = perf_evsel__group_name(evsel);

	ret = scnprintf(buf, size, "%s", group_name);

	ret += scnprintf(buf + ret, size - ret, " { %s",
			 perf_evsel__name(evsel));

	for_each_group_member(pos, evsel)
		ret += scnprintf(buf + ret, size - ret, ", %s",
				 perf_evsel__name(pos));

	ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}

static void
perf_evsel__config_callgraph(struct perf_evsel *evsel,
			     struct record_opts *opts)
{
	bool function = perf_evsel__is_function_event(evsel);
	struct perf_event_attr *attr = &evsel->attr;

	perf_evsel__set_sample_bit(evsel, CALLCHAIN);

	if (callchain_param.record_mode == CALLCHAIN_LBR) {
		if (!opts->branch_stack) {
			if (attr->exclude_user) {
				pr_warning("LBR callstack option is only available "
					   "to get user callchain information. "
					   "Falling back to framepointers.\n");
			} else {
				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
				attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
							PERF_SAMPLE_BRANCH_CALL_STACK;
			}
		} else
			pr_warning("Cannot use LBR callstack with branch stack. "
				   "Falling back to framepointers.\n");
	}

	if (callchain_param.record_mode == CALLCHAIN_DWARF) {
		if (!function) {
			perf_evsel__set_sample_bit(evsel, REGS_USER);
			perf_evsel__set_sample_bit(evsel, STACK_USER);
			attr->sample_regs_user = PERF_REGS_MASK;
			attr->sample_stack_user = callchain_param.dump_size;
			attr->exclude_callchain_user = 1;
		} else {
			pr_info("Cannot use DWARF unwind for function trace event,"
				" falling back to framepointers.\n");
		}
	}

	if (function) {
		pr_info("Disabling user space callchains for function trace event.\n");
		attr->exclude_callchain_user = 1;
	}
}

/*
 * The enable_on_exec/disabled value strategy:
 *
 *  1) For any type of traced program:
 *      - all independent events and group leaders are disabled
 *      - all group members are enabled
 *
 *     Group members are ruled by group leaders. They need to
 *     be enabled, because the group scheduling relies on that.
 *
 *  2) For traced programs executed by perf:
 *      - all independent events and group leaders have
 *        enable_on_exec set
 *      - we don't specifically enable or disable any event during
 *        the record command
 *
 *     Independent events and group leaders are initially disabled
 *     and get enabled by exec. Group members are ruled by group
 *     leaders as stated in 1).
 *
 *  3) For traced programs attached by perf (pid/tid):
 *      - we specifically enable or disable all events during
 *        the record command
 *
 *     When attaching events to an already running traced program we
 *     enable/disable events specifically, as there's no
 *     initial traced exec call.
 */
void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
{
	struct perf_evsel *leader = evsel->leader;
	struct perf_event_attr *attr = &evsel->attr;
	int track = evsel->tracking;
	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;

	perf_evsel__set_sample_bit(evsel, IP);
	perf_evsel__set_sample_bit(evsel, TID);

	if (evsel->sample_read) {
		perf_evsel__set_sample_bit(evsel, READ);

		/*
		 * We need ID even in the case of a single event, because
		 * PERF_SAMPLE_READ processes ID-specific data.
		 */
		perf_evsel__set_sample_id(evsel, false);

		/*
		 * Apply the group format only if we belong to a group
		 * with more than one member.
		 */
		if (leader->nr_members > 1) {
			attr->read_format |= PERF_FORMAT_GROUP;
			attr->inherit = 0;
		}
	}

	/*
	 * We give some events a default interval, but keep
	 * it a weak assumption, overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			perf_evsel__set_sample_bit(evsel, PERIOD);
			attr->freq	  = 1;
			attr->sample_freq = opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	/*
	 * Disable sampling for all group members other
	 * than the leader in case the leader 'leads' the sampling.
	 */
	if ((leader != evsel) && leader->sample_read) {
		attr->sample_freq   = 0;
		attr->sample_period = 0;
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		perf_evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	/*
	 * We don't allow user space callchains for the function trace
	 * event, due to issues with page faults while tracing the page
	 * fault handler, and its overall tricky nature.
	 */
	if (perf_evsel__is_function_event(evsel))
		evsel->attr.exclude_callchain_user = 1;

	if (callchain_param.enabled && !evsel->no_aux_samples)
		perf_evsel__config_callgraph(evsel, opts);

	if (opts->sample_intr_regs) {
		attr->sample_regs_intr = PERF_REGS_MASK;
		perf_evsel__set_sample_bit(evsel, REGS_INTR);
	}

	if (target__has_cpu(&opts->target))
		perf_evsel__set_sample_bit(evsel, CPU);

	if (opts->period)
		perf_evsel__set_sample_bit(evsel, PERIOD);

	/*
	 * When the user explicitly disabled time, don't force it here.
	 */
	if (opts->sample_time &&
	    (!perf_missing_features.sample_id_all &&
	    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu)))
		perf_evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, TIME);
		perf_evsel__set_sample_bit(evsel, RAW);
		perf_evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->sample_address)
		perf_evsel__set_sample_bit(evsel, DATA_SRC);

	if (opts->no_buffering) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}

	if (opts->branch_stack && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	if (opts->sample_weight)
		perf_evsel__set_sample_bit(evsel, WEIGHT);

	attr->task  = track;
	attr->mmap  = track;
	attr->mmap2 = track && !perf_missing_features.mmap2;
	attr->comm  = track;

	if (opts->sample_transaction)
		perf_evsel__set_sample_bit(evsel, TRANSACTION);

	if (opts->running_time) {
		evsel->attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING;
	}

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (perf_evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders when the traced program is executed by perf.
	 */
	if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
		!opts->initial_delay)
		attr->enable_on_exec = 1;

	if (evsel->immediate) {
		attr->disabled = 0;
		attr->enable_on_exec = 0;
	}

	clockid = opts->clockid;
	if (opts->use_clockid) {
		attr->use_clockid = 1;
		attr->clockid = opts->clockid;
	}
}

static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads,
				 int ioc, void *arg)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			int fd = FD(evsel, cpu, thread),
			    err = ioctl(fd, ioc, arg);

			if (err)
				return err;
		}
	}

	return 0;
}

int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
			   const char *filter)
{
	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_SET_FILTER,
				     (void *)filter);
}

int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_ENABLE,
				     0);
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (ncpus == 0 || nthreads == 0)
		return 0;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus)
{
	memset(evsel->counts, 0, (sizeof(*evsel->counts) +
				  (ncpus * sizeof(struct perf_counts_values))));
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}

static void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

static void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evsel__free_counts(struct perf_evsel *evsel)
{
	zfree(&evsel->counts);
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	perf_evsel__free_fd(evsel);
	perf_evsel__free_id(evsel);
	close_cgroup(evsel->cgrp);
	zfree(&evsel->group_name);
	zfree(&evsel->name);
	perf_evsel__object.fini(evsel);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	free(evsel);
}

void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu,
				struct perf_counts_values *count)
{
	struct perf_counts_values tmp;

	if (!evsel->prev_raw_counts)
		return;

	if (cpu == -1) {
		tmp = evsel->prev_raw_counts->aggr;
		evsel->prev_raw_counts->aggr = *count;
	} else {
		tmp = evsel->prev_raw_counts->cpu[cpu];
		evsel->prev_raw_counts->cpu[cpu] = *count;
	}

	count->val = count->val - tmp.val;
	count->ena = count->ena - tmp.ena;
	count->run = count->run - tmp.run;
}

void perf_counts_values__scale(struct perf_counts_values *count,
			       bool scale, s8 *pscaled)
{
	s8 scaled = 0;

	if (scale) {
		if (count->run == 0) {
			scaled = -1;
			count->val = 0;
		} else if (count->run < count->ena) {
			scaled = 1;
			count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
		}
	} else
		count->ena = count->run = 0;

	if (pscaled)
		*pscaled = scaled;
}

int perf_evsel__read_cb(struct perf_evsel *evsel, int cpu, int thread,
			perf_evsel__read_cb_t cb)
{
	struct perf_counts_values count;

	memset(&count, 0, sizeof(count));

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (readn(FD(evsel, cpu, thread), &count, sizeof(count)) < 0)
		return -errno;

	return cb(evsel, cpu, thread, &count);
}

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	perf_evsel__compute_deltas(evsel, cpu, &count);
	perf_counts_values__scale(&count, scale, NULL);
	evsel->counts->cpu[cpu] = count;
	return 0;
}

static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_evsel *leader = evsel->leader;
	int fd;

	if (perf_evsel__is_group_leader(evsel))
		return -1;

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	BUG_ON(!leader->fd);

	fd = FD(leader, cpu, thread);
	BUG_ON(fd == -1);

	return fd;
}

struct bit_names {
	int bit;
	const char *name;
};

static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)
{
	bool first_bit = true;
	int i = 0;

	do {
		if (value & bits[i].bit) {
			buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
			first_bit = false;
		}
	} while (bits[++i].name != NULL);
}

static void __p_sample_type(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_##n, #n }
	struct bit_names bits[] = {
		bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
		bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
		bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
		bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
		bit_name(IDENTIFIER), bit_name(REGS_INTR),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}

static void __p_read_format(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
	struct bit_names bits[] = {
		bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
		bit_name(ID), bit_name(GROUP),
		{ .name = NULL, }
	};
#undef bit_name
	__p_bits(buf, size, value, bits);
}

#define BUF_SIZE		1024

#define p_hex(val)		snprintf(buf, BUF_SIZE, "%"PRIx64, (uint64_t)(val))
#define p_unsigned(val)		snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
#define p_signed(val)		snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
#define p_sample_type(val)	__p_sample_type(buf, BUF_SIZE, val)
#define p_read_format(val)	__p_read_format(buf, BUF_SIZE, val)

#define PRINT_ATTRn(_n, _f, _p)				\
do {							\
	if (attr->_f) {					\
		_p(attr->_f);				\
		ret += attr__fprintf(fp, _n, buf, priv);\
	}						\
} while (0)

#define PRINT_ATTRf(_f, _p)	PRINT_ATTRn(#_f, _f, _p)

int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
			     attr__fprintf_f attr__fprintf, void *priv)
{
	char buf[BUF_SIZE];
	int ret = 0;

	PRINT_ATTRf(type, p_unsigned);
	PRINT_ATTRf(size, p_unsigned);
	PRINT_ATTRf(config, p_hex);
	PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
	PRINT_ATTRf(sample_type, p_sample_type);
	PRINT_ATTRf(read_format, p_read_format);

	PRINT_ATTRf(disabled, p_unsigned);
	PRINT_ATTRf(inherit, p_unsigned);
	PRINT_ATTRf(pinned, p_unsigned);
	PRINT_ATTRf(exclusive, p_unsigned);
	PRINT_ATTRf(exclude_user, p_unsigned);
	PRINT_ATTRf(exclude_kernel, p_unsigned);
	PRINT_ATTRf(exclude_hv, p_unsigned);
	PRINT_ATTRf(exclude_idle, p_unsigned);
	PRINT_ATTRf(mmap, p_unsigned);
	PRINT_ATTRf(comm, p_unsigned);
	PRINT_ATTRf(freq, p_unsigned);
	PRINT_ATTRf(inherit_stat, p_unsigned);
	PRINT_ATTRf(enable_on_exec, p_unsigned);
	PRINT_ATTRf(task, p_unsigned);
	PRINT_ATTRf(watermark, p_unsigned);
	PRINT_ATTRf(precise_ip, p_unsigned);
	PRINT_ATTRf(mmap_data, p_unsigned);
	PRINT_ATTRf(sample_id_all, p_unsigned);
	PRINT_ATTRf(exclude_host, p_unsigned);
	PRINT_ATTRf(exclude_guest, p_unsigned);
	PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
	PRINT_ATTRf(exclude_callchain_user, p_unsigned);
	PRINT_ATTRf(mmap2, p_unsigned);
	PRINT_ATTRf(comm_exec, p_unsigned);
	PRINT_ATTRf(use_clockid, p_unsigned);

	PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
	PRINT_ATTRf(bp_type, p_unsigned);
	PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
	PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
	PRINT_ATTRf(sample_regs_user, p_hex);
	PRINT_ATTRf(sample_stack_user, p_unsigned);
	PRINT_ATTRf(clockid, p_signed);
	PRINT_ATTRf(sample_regs_intr, p_hex);

	return ret;
}

static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __attribute__((unused)))
{
	return fprintf(fp, " %-32s %s\n", name, val);
}

static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads)
{
	int cpu, thread, nthreads;
	unsigned long flags = PERF_FLAG_FD_CLOEXEC;
	int pid = -1, err;
	enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;

	if (evsel->system_wide)
		nthreads = 1;
	else
		nthreads = threads->nr;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags |= PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

fallback_missing_features:
	if (perf_missing_features.clockid_wrong)
		evsel->attr.clockid = CLOCK_MONOTONIC; /* should always work */
	if (perf_missing_features.clockid) {
		evsel->attr.use_clockid = 0;
		evsel->attr.clockid = 0;
	}
	if (perf_missing_features.cloexec)
		flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
	if (perf_missing_features.mmap2)
		evsel->attr.mmap2 = 0;
	if (perf_missing_features.exclude_guest)
		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
retry_sample_id:
	if (perf_missing_features.sample_id_all)
		evsel->attr.sample_id_all = 0;

	if (verbose >= 2) {
		fprintf(stderr, "%.60s\n", graph_dotted_line);
		fprintf(stderr, "perf_event_attr:\n");
		perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL);
		fprintf(stderr, "%.60s\n", graph_dotted_line);
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {

		for (thread = 0; thread < nthreads; thread++) {
			int group_fd;

			if (!evsel->cgrp && !evsel->system_wide)
				pid = threads->map[thread];

			group_fd = get_group_fd(evsel, cpu, thread);
retry_open:
			pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx\n",
				  pid, cpus->map[cpu], group_fd, flags);

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				pr_debug2("sys_perf_event_open failed, error %d\n",
					  err);
				goto try_fallback;
			}
			set_rlimit = NO_CHANGE;

			/*
			 * If we succeeded but had to kill clockid, fail and
			 * have perf_evsel__open_strerror() print us a nice
			 * error.
			 */
			if (perf_missing_features.clockid ||
			    perf_missing_features.clockid_wrong) {
				err = -EINVAL;
				goto out_close;
			}
		}
	}

	return 0;

try_fallback:
	/*
	 * perf stat needs between 5 and 22 fds per CPU. When we run out
	 * of them try to increase the limits.
	 */
	if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
		struct rlimit l;
		int old_errno = errno;

		if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
			if (set_rlimit == NO_CHANGE)
				l.rlim_cur = l.rlim_max;
			else {
				l.rlim_cur = l.rlim_max + 1000;
				l.rlim_max = l.rlim_cur;
			}
			if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
				set_rlimit++;
				errno = old_errno;
				goto retry_open;
			}
		}
		errno = old_errno;
	}

	if (err != -EINVAL || cpu > 0 || thread > 0)
		goto out_close;

	/*
	 * Must probe features in the order they were added to the
	 * perf_event_attr interface.
	 */
	if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
		perf_missing_features.clockid_wrong = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.clockid && evsel->attr.use_clockid) {
		perf_missing_features.clockid = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
		perf_missing_features.cloexec = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
		perf_missing_features.mmap2 = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.exclude_guest &&
		   (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
		perf_missing_features.exclude_guest = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.sample_id_all) {
		perf_missing_features.sample_id_all = true;
		goto retry_sample_id;
	}

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = nthreads;
	} while (--cpu >= 0);
	return err;
}

*evsel
, int ncpus
, int nthreads
)
1286 if (evsel
->fd
== NULL
)
1289 perf_evsel__close_fd(evsel
, ncpus
, nthreads
);
1290 perf_evsel__free_fd(evsel
);
static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}

static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	const u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
		array--;
	}

	return 0;
}

static inline bool overflow(const void *endp, u16 max_size, const void *offset,
			    u64 size)
{
	return size > max_size || offset + size > endp;
}

#define OVERFLOW_CHECK(offset, size, max_size)				\
	do {								\
		if (overflow(endp, (max_size), (offset), (size)))	\
			return -EFAULT;					\
	} while (0)

#define OVERFLOW_CHECK_u64(offset) \
	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))

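/*
 * Illustrative sketch, not part of the original file: the macros above
 * expect an `endp` pointing one past the event and return -EFAULT from
 * the enclosing function, which is how perf_evsel__parse_sample() below
 * guards every read as it walks the sample array.
 */
static int __maybe_unused example_bounds_check(const union perf_event *event)
{
	const void *endp = (void *)event + event->header.size;
	const u64 *array = event->sample.array;

	OVERFLOW_CHECK_u64(array);	/* bail out before reading *array */
	return 0;
}
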
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *data)
{
	u64 type = evsel->attr.sample_type;
	bool swapped = evsel->needs_swap;
	const u64 *array;
	u16 max_size = event->header.size;
	const void *endp = (void *)event + max_size;
	u64 sz;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = evsel->attr.sample_period;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->attr.sample_id_all)
			return 0;
		return perf_evsel__parse_id_sample(evsel, event, data);
	}

	array = event->sample.array;

	/*
	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
	 * up to PERF_SAMPLE_PERIOD.  After that overflow() must be used to
	 * check the format does not go past the end of the event.
	 */
	if (evsel->sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		data->ip = *array;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		u64 read_format = evsel->attr.read_format;

		OVERFLOW_CHECK_u64(array);
		if (read_format & PERF_FORMAT_GROUP)
			data->read.group.nr = *array;
		else
			data->read.one.value = *array;

		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_enabled = *array;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_running = *array;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			const u64 max_group_nr = UINT64_MAX /
					sizeof(struct sample_read_value);

			if (data->read.group.nr > max_group_nr)
				return -EFAULT;
			sz = data->read.group.nr *
			     sizeof(struct sample_read_value);
			OVERFLOW_CHECK(array, sz, max_size);
			data->read.group.values =
					(struct sample_read_value *)array;
			array = (void *)array + sz;
		} else {
			OVERFLOW_CHECK_u64(array);
			data->read.one.id = *array;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);

		OVERFLOW_CHECK_u64(array);
		data->callchain = (struct ip_callchain *)array++;
		if (data->callchain->nr > max_callchain_nr)
			return -EFAULT;
		sz = data->callchain->nr * sizeof(u64);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		OVERFLOW_CHECK_u64(array);
		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}
		data->raw_size = u.val32[0];
		array = (void *)array + sizeof(u32);

		OVERFLOW_CHECK(array, data->raw_size, max_size);
		data->raw_data = (void *)array;
		array = (void *)array + data->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		const u64 max_branch_nr = UINT64_MAX /
					  sizeof(struct branch_entry);

		OVERFLOW_CHECK_u64(array);
		data->branch_stack = (struct branch_stack *)array++;

		if (data->branch_stack->nr > max_branch_nr)
			return -EFAULT;
		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		OVERFLOW_CHECK_u64(array);
		data->user_regs.abi = *array;
		array++;

		if (data->user_regs.abi) {
			u64 mask = evsel->attr.sample_regs_user;

			sz = hweight_long(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_regs.mask = mask;
			data->user_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		OVERFLOW_CHECK_u64(array);
		sz = *array++;

		data->user_stack.offset = ((char *)(array - 1)
					  - (char *) event);

		if (!sz) {
			data->user_stack.size = 0;
		} else {
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_stack.data = (char *)array;
			array = (void *)array + sz;
			OVERFLOW_CHECK_u64(array);
			data->user_stack.size = *array++;
			if (WARN_ONCE(data->user_stack.size > sz,
				      "user stack dump failure\n"))
				return -EFAULT;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		OVERFLOW_CHECK_u64(array);
		data->weight = *array;
		array++;
	}

	data->data_src = PERF_MEM_DATA_SRC_NONE;
	if (type & PERF_SAMPLE_DATA_SRC) {
		OVERFLOW_CHECK_u64(array);
		data->data_src = *array;
		array++;
	}

	data->transaction = 0;
	if (type & PERF_SAMPLE_TRANSACTION) {
		OVERFLOW_CHECK_u64(array);
		data->transaction = *array;
		array++;
	}

	data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
	if (type & PERF_SAMPLE_REGS_INTR) {
		OVERFLOW_CHECK_u64(array);
		data->intr_regs.abi = *array;
		array++;

		if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
			u64 mask = evsel->attr.sample_regs_intr;

			sz = hweight_long(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->intr_regs.mask = mask;
			data->intr_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	return 0;
}

size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
				     u64 read_format)
{
	size_t sz, result = sizeof(struct sample_event);

	if (type & PERF_SAMPLE_IDENTIFIER)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_IP)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TIME)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_STREAM_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CPU)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_PERIOD)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_READ) {
		result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			result += sizeof(u64);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		result += sizeof(u32);
		result += sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		result += sizeof(u64);
		if (sz) {
			result += sz;
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_DATA_SRC)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TRANSACTION)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	return result;
}

*event
, u64 type
,
1785 const struct perf_sample
*sample
,
1791 * used for cross-endian analysis. See git commit 65014ab3
1792 * for why this goofiness is needed.
1796 array
= event
->sample
.array
;
1798 if (type
& PERF_SAMPLE_IDENTIFIER
) {
1799 *array
= sample
->id
;
1803 if (type
& PERF_SAMPLE_IP
) {
1804 *array
= sample
->ip
;
1808 if (type
& PERF_SAMPLE_TID
) {
1809 u
.val32
[0] = sample
->pid
;
1810 u
.val32
[1] = sample
->tid
;
1813 * Inverse of what is done in perf_evsel__parse_sample
1815 u
.val32
[0] = bswap_32(u
.val32
[0]);
1816 u
.val32
[1] = bswap_32(u
.val32
[1]);
1817 u
.val64
= bswap_64(u
.val64
);
1824 if (type
& PERF_SAMPLE_TIME
) {
1825 *array
= sample
->time
;
1829 if (type
& PERF_SAMPLE_ADDR
) {
1830 *array
= sample
->addr
;
1834 if (type
& PERF_SAMPLE_ID
) {
1835 *array
= sample
->id
;
1839 if (type
& PERF_SAMPLE_STREAM_ID
) {
1840 *array
= sample
->stream_id
;
1844 if (type
& PERF_SAMPLE_CPU
) {
1845 u
.val32
[0] = sample
->cpu
;
1848 * Inverse of what is done in perf_evsel__parse_sample
1850 u
.val32
[0] = bswap_32(u
.val32
[0]);
1851 u
.val64
= bswap_64(u
.val64
);
1857 if (type
& PERF_SAMPLE_PERIOD
) {
1858 *array
= sample
->period
;
1862 if (type
& PERF_SAMPLE_READ
) {
1863 if (read_format
& PERF_FORMAT_GROUP
)
1864 *array
= sample
->read
.group
.nr
;
1866 *array
= sample
->read
.one
.value
;
1869 if (read_format
& PERF_FORMAT_TOTAL_TIME_ENABLED
) {
1870 *array
= sample
->read
.time_enabled
;
1874 if (read_format
& PERF_FORMAT_TOTAL_TIME_RUNNING
) {
1875 *array
= sample
->read
.time_running
;
1879 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1880 if (read_format
& PERF_FORMAT_GROUP
) {
1881 sz
= sample
->read
.group
.nr
*
1882 sizeof(struct sample_read_value
);
1883 memcpy(array
, sample
->read
.group
.values
, sz
);
1884 array
= (void *)array
+ sz
;
1886 *array
= sample
->read
.one
.id
;
1891 if (type
& PERF_SAMPLE_CALLCHAIN
) {
1892 sz
= (sample
->callchain
->nr
+ 1) * sizeof(u64
);
1893 memcpy(array
, sample
->callchain
, sz
);
1894 array
= (void *)array
+ sz
;
1897 if (type
& PERF_SAMPLE_RAW
) {
1898 u
.val32
[0] = sample
->raw_size
;
1899 if (WARN_ONCE(swapped
,
1900 "Endianness of raw data not corrected!\n")) {
1902 * Inverse of what is done in perf_evsel__parse_sample
1904 u
.val32
[0] = bswap_32(u
.val32
[0]);
1905 u
.val32
[1] = bswap_32(u
.val32
[1]);
1906 u
.val64
= bswap_64(u
.val64
);
1909 array
= (void *)array
+ sizeof(u32
);
1911 memcpy(array
, sample
->raw_data
, sample
->raw_size
);
1912 array
= (void *)array
+ sample
->raw_size
;
1915 if (type
& PERF_SAMPLE_BRANCH_STACK
) {
1916 sz
= sample
->branch_stack
->nr
* sizeof(struct branch_entry
);
1918 memcpy(array
, sample
->branch_stack
, sz
);
1919 array
= (void *)array
+ sz
;
1922 if (type
& PERF_SAMPLE_REGS_USER
) {
1923 if (sample
->user_regs
.abi
) {
1924 *array
++ = sample
->user_regs
.abi
;
1925 sz
= hweight_long(sample
->user_regs
.mask
) * sizeof(u64
);
1926 memcpy(array
, sample
->user_regs
.regs
, sz
);
1927 array
= (void *)array
+ sz
;
1933 if (type
& PERF_SAMPLE_STACK_USER
) {
1934 sz
= sample
->user_stack
.size
;
1937 memcpy(array
, sample
->user_stack
.data
, sz
);
1938 array
= (void *)array
+ sz
;
1943 if (type
& PERF_SAMPLE_WEIGHT
) {
1944 *array
= sample
->weight
;
1948 if (type
& PERF_SAMPLE_DATA_SRC
) {
1949 *array
= sample
->data_src
;
1953 if (type
& PERF_SAMPLE_TRANSACTION
) {
1954 *array
= sample
->transaction
;
1958 if (type
& PERF_SAMPLE_REGS_INTR
) {
1959 if (sample
->intr_regs
.abi
) {
1960 *array
++ = sample
->intr_regs
.abi
;
1961 sz
= hweight_long(sample
->intr_regs
.mask
) * sizeof(u64
);
1962 memcpy(array
, sample
->intr_regs
.regs
, sz
);
1963 array
= (void *)array
+ sz
;
struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
	return pevent_find_field(evsel->tp_format, name);
}

void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;

	if (field->flags & FIELD_IS_DYNAMIC) {
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
	}

	return sample->raw_data + offset;
}

u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	void *ptr;
	u64 value;

	if (!field)
		return 0;

	ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		memcpy(&value, ptr, sizeof(u64));
		break;
	default:
		return 0;
	}

	if (!evsel->needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
	default:
		return 0;
	}

	return 0;
}

static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
{
	va_list args;
	int ret = 0;

	if (!*first) {
		ret += fprintf(fp, ",");
	} else {
		ret += fprintf(fp, ":");
		*first = false;
	}

	va_start(args, fmt);
	ret += vfprintf(fp, fmt, args);
	va_end(args);
	return ret;
}

static int __print_attr__fprintf(FILE *fp, const char *name, const char *val, void *priv)
{
	return comma_fprintf(fp, (bool *)priv, " %s: %s", name, val);
}

int perf_evsel__fprintf(struct perf_evsel *evsel,
			struct perf_attr_details *details, FILE *fp)
{
	bool first = true;
	int printed = 0;

	if (details->event_group) {
		struct perf_evsel *pos;

		if (!perf_evsel__is_group_leader(evsel))
			return 0;

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "%s{", evsel->group_name ?: "");

		printed += fprintf(fp, "%s", perf_evsel__name(evsel));
		for_each_group_member(pos, evsel)
			printed += fprintf(fp, ",%s", perf_evsel__name(pos));

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "}");
		goto out;
	}

	printed += fprintf(fp, "%s", perf_evsel__name(evsel));

	if (details->verbose) {
		printed += perf_event_attr__fprintf(fp, &evsel->attr,
						    __print_attr__fprintf, &first);
	} else if (details->freq) {
		printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
					 (u64)evsel->attr.sample_freq);
	}
out:
	fputc('\n', fp);
	return ++printed;
}

*evsel
, int err
,
2103 char *msg
, size_t msgsize
)
2105 if ((err
== ENOENT
|| err
== ENXIO
|| err
== ENODEV
) &&
2106 evsel
->attr
.type
== PERF_TYPE_HARDWARE
&&
2107 evsel
->attr
.config
== PERF_COUNT_HW_CPU_CYCLES
) {
2109 * If it's cycles then fall back to hrtimer based
2110 * cpu-clock-tick sw counter, which is always available even if
2113 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
2116 scnprintf(msg
, msgsize
, "%s",
2117 "The cycles event is not supported, trying to fall back to cpu-clock-ticks");
2119 evsel
->attr
.type
= PERF_TYPE_SOFTWARE
;
2120 evsel
->attr
.config
= PERF_COUNT_SW_CPU_CLOCK
;
2122 zfree(&evsel
->name
);
int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
			      int err, char *msg, size_t size)
{
	char sbuf[STRERR_BUFSIZE];

	switch (err) {
	case EPERM:
	case EACCES:
		return scnprintf(msg, size,
		 "You may not have permission to collect %sstats.\n"
		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
		 " -1 - Not paranoid at all\n"
		 "  0 - Disallow raw tracepoint access for unpriv\n"
		 "  1 - Disallow cpu events for unpriv\n"
		 "  2 - Disallow kernel profiling for unpriv",
				 target->system_wide ? "system-wide " : "");
	case ENOENT:
		return scnprintf(msg, size, "The %s event is not supported.",
				 perf_evsel__name(evsel));
	case EMFILE:
		return scnprintf(msg, size, "%s",
			 "Too many events are opened.\n"
			 "Try again after reducing the number of events.");
	case ENODEV:
		if (target->cpu_list)
			return scnprintf(msg, size, "%s",
	 "No such device - did you specify an out-of-range profile CPU?\n");
		break;
	case EOPNOTSUPP:
		if (evsel->attr.precise_ip)
			return scnprintf(msg, size, "%s",
	"\'precise\' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
		if (evsel->attr.type == PERF_TYPE_HARDWARE)
			return scnprintf(msg, size, "%s",
	"No hardware sampling interrupt available.\n"
	"No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
#endif
		break;
	case EBUSY:
		if (find_process("oprofiled"))
			return scnprintf(msg, size,
	"The PMU counters are busy/taken by another profiler.\n"
	"We found oprofile daemon running, please stop it and try again.");
		break;
	case EINVAL:
		if (perf_missing_features.clockid)
			return scnprintf(msg, size, "clockid feature not supported.");
		if (perf_missing_features.clockid_wrong)
			return scnprintf(msg, size, "wrong clockid (%d).", clockid);
		break;
	default:
		break;
	}

	return scnprintf(msg, size,
	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
	"/bin/dmesg may provide additional information.\n"
	"No CONFIG_PERF_EVENTS=y kernel support configured?\n",
			 err, strerror_r(err, sbuf, sizeof(sbuf)),
			 perf_evsel__name(evsel));
}