/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <linux/bitops.h>
#include <lk/debugfs.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <sys/resource.h>
#include "thread_map.h"
#include "perf_regs.h"
static struct {
        bool sample_id_all;
        bool exclude_guest;
} perf_missing_features;
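/*
 * An evsel keeps one file descriptor per (cpu, thread) pair in an xyarray;
 * FD() is the accessor for that table. A value of -1 marks a slot that is
 * not (or no longer) open.
 */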
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
static int __perf_evsel__sample_size(u64 sample_type)
{
        u64 mask = sample_type & PERF_SAMPLE_MASK;
        int size = 0;
        int i;

        for (i = 0; i < 64; i++) {
                if (mask & (1ULL << i))
                        size++;
        }

        size *= sizeof(u64);

        return size;
}
void hists__init(struct hists *hists)
{
        memset(hists, 0, sizeof(*hists));
        hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
        hists->entries_in = &hists->entries_in_array[0];
        hists->entries_collapsed = RB_ROOT;
        hists->entries = RB_ROOT;
        pthread_mutex_init(&hists->lock, NULL);
}
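/*
 * The set/reset pair below keeps evsel->sample_size in sync with
 * attr.sample_type: each sample format bit added or removed changes the
 * fixed part of the sample by one u64.
 */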
void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
                                  enum perf_event_sample_format bit)
{
        if (!(evsel->attr.sample_type & bit)) {
                evsel->attr.sample_type |= bit;
                evsel->sample_size += sizeof(u64);
        }
}
void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
                                    enum perf_event_sample_format bit)
{
        if (evsel->attr.sample_type & bit) {
                evsel->attr.sample_type &= ~bit;
                evsel->sample_size -= sizeof(u64);
        }
}
void perf_evsel__set_sample_id(struct perf_evsel *evsel)
{
        perf_evsel__set_sample_bit(evsel, ID);
        evsel->attr.read_format |= PERF_FORMAT_ID;
}
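/*
 * perf_evsel__init() assumes a zeroed evsel; every evsel starts out as its
 * own group leader until it is moved into a group.
 */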
void perf_evsel__init(struct perf_evsel *evsel,
                      struct perf_event_attr *attr, int idx)
{
        evsel->idx         = idx;
        evsel->attr        = *attr;
        evsel->leader      = evsel;
        INIT_LIST_HEAD(&evsel->node);
        hists__init(&evsel->hists);
        evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
}
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
        struct perf_evsel *evsel = zalloc(sizeof(*evsel));

        if (evsel != NULL)
                perf_evsel__init(evsel, attr, idx);

        return evsel;
}
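/*
 * Reads the tracepoint's "format" description from
 * <tracing_events_path>/<sys>/<name>/format, growing the buffer BUFSIZ
 * bytes at a time, and hands the text to libtraceevent for parsing.
 */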
struct event_format *event_format__new(const char *sys, const char *name)
{
        int fd, n;
        char *filename;
        void *bf = NULL, *nbf;
        size_t size = 0, alloc_size = 0;
        struct event_format *format = NULL;

        if (asprintf(&filename, "%s/%s/%s/format", tracing_events_path, sys, name) < 0)
                goto out;

        fd = open(filename, O_RDONLY);
        if (fd < 0)
                goto out_free_filename;

        do {
                if (size == alloc_size) {
                        alloc_size += BUFSIZ;
                        nbf = realloc(bf, alloc_size);
                        if (nbf == NULL)
                                goto out_free_bf;
                        bf = nbf;
                }

                n = read(fd, bf + size, alloc_size - size);
                if (n < 0)
                        goto out_free_bf;
                size += n;
        } while (n > 0);

        pevent_parse_format(&format, bf, size, sys);

out_free_bf:
        free(bf);
        close(fd);
out_free_filename:
        free(filename);
out:
        return format;
}
struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx)
{
        struct perf_evsel *evsel = zalloc(sizeof(*evsel));

        if (evsel != NULL) {
                struct perf_event_attr attr = {
                        .type        = PERF_TYPE_TRACEPOINT,
                        .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
                                        PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
                };

                if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
                        goto out_free;

                evsel->tp_format = event_format__new(sys, name);
                if (evsel->tp_format == NULL)
                        goto out_free;

                event_attr_init(&attr);
                attr.config = evsel->tp_format->id;
                attr.sample_period = 1;
                perf_evsel__init(evsel, &attr, idx);
        }

        return evsel;

out_free:
        free(evsel->name);
        free(evsel);
        return NULL;
}
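/*
 * Typical usage (sketch): perf_evsel__newtp("sched", "sched_switch", 0)
 * builds a PERF_TYPE_TRACEPOINT evsel whose config is the tracepoint id
 * taken from the parsed format file.
 */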
const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
        "cycles",
        "instructions",
        "cache-references",
        "cache-misses",
        "branches",
        "branch-misses",
        "bus-cycles",
        "stalled-cycles-frontend",
        "stalled-cycles-backend",
        "ref-cycles",
};
static const char *__perf_evsel__hw_name(u64 config)
{
        if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
                return perf_evsel__hw_names[config];

        return "unknown-hardware";
}
static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
        int colon = 0, r = 0;
        struct perf_event_attr *attr = &evsel->attr;
        bool exclude_guest_default = false;

#define MOD_PRINT(context, mod) do {                                    \
                if (!attr->exclude_##context) {                         \
                        if (!colon) colon = ++r;                        \
                        r += scnprintf(bf + r, size - r, "%c", mod);    \
                } } while (0)

        if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
                MOD_PRINT(kernel, 'k');
                MOD_PRINT(user, 'u');
                MOD_PRINT(hv, 'h');
                exclude_guest_default = true;
        }

        if (attr->precise_ip) {
                if (!colon)
                        colon = ++r;
                r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
                exclude_guest_default = true;
        }

        if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
                MOD_PRINT(host, 'H');
                MOD_PRINT(guest, 'G');
        }
#undef MOD_PRINT
        if (colon)
                bf[colon - 1] = ':';
        return r;
}
static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
        int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
        return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
        "cpu-clock",
        "task-clock",
        "page-faults",
        "context-switches",
        "cpu-migrations",
        "minor-faults",
        "major-faults",
        "alignment-faults",
        "emulation-faults",
};
static const char *__perf_evsel__sw_name(u64 config)
{
        if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
                return perf_evsel__sw_names[config];
        return "unknown-software";
}
static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
        int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
        return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
        int r;

        r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

        if (type & HW_BREAKPOINT_R)
                r += scnprintf(bf + r, size - r, "r");

        if (type & HW_BREAKPOINT_W)
                r += scnprintf(bf + r, size - r, "w");

        if (type & HW_BREAKPOINT_X)
                r += scnprintf(bf + r, size - r, "x");

        return r;
}
static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
        struct perf_event_attr *attr = &evsel->attr;
        int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
        return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
                                [PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache", "l1-d",     "l1d",             "L1-data",        },
 { "L1-icache", "l1-i",     "l1i",             "L1-instruction", },
 { "LLC",       "L2",                                            },
 { "dTLB",      "d-tlb",    "Data-TLB",                          },
 { "iTLB",      "i-tlb",    "Instruction-TLB",                   },
 { "branch",    "branches", "bpu",             "btb",    "bpc",  },
 { "node",                                                       },
};
const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
                                   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",     "loads",      "read",                                 },
 { "store",    "stores",     "write",                                },
 { "prefetch", "prefetches", "speculative-read", "speculative-load", },
};
const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
                                       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",   "Reference", "ops", "access", },
 { "misses", "miss",                       },
};
#define C(x)            PERF_COUNT_HW_CACHE_##x
#define CACHE_READ      (1 << C(OP_READ))
#define CACHE_WRITE     (1 << C(OP_WRITE))
#define CACHE_PREFETCH  (1 << C(OP_PREFETCH))
#define COP(x)          (1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]  = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]  = (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]   = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)] = (CACHE_READ),
 [C(BPU)]  = (CACHE_READ),
 [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};
bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
        if (perf_evsel__hw_cache_stat[type] & COP(op))
                return true;    /* valid */
        else
                return false;   /* invalid */
}
int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
                                            char *bf, size_t size)
{
        if (result) {
                return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
                                 perf_evsel__hw_cache_op[op][0],
                                 perf_evsel__hw_cache_result[result][0]);
        }

        return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
                         perf_evsel__hw_cache_op[op][1]);
}
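/*
 * For PERF_TYPE_HW_CACHE the config word is decoded as three bytes:
 * cache type in bits 0-7, operation in bits 8-15 and result in bits 16-23.
 */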
static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
        u8 op, result, type = (config >>  0) & 0xff;
        const char *err = "unknown-ext-hardware-cache-type";

        if (type > PERF_COUNT_HW_CACHE_MAX)
                goto out_err;

        op = (config >>  8) & 0xff;
        err = "unknown-ext-hardware-cache-op";
        if (op > PERF_COUNT_HW_CACHE_OP_MAX)
                goto out_err;

        result = (config >> 16) & 0xff;
        err = "unknown-ext-hardware-cache-result";
        if (result > PERF_COUNT_HW_CACHE_RESULT_MAX)
                goto out_err;

        err = "invalid-cache";
        if (!perf_evsel__is_cache_op_valid(type, op))
                goto out_err;

        return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
        return scnprintf(bf, size, "%s", err);
}
static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
        int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
        return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}
static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
        int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
        return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}
const char *perf_evsel__name(struct perf_evsel *evsel)
{
        char bf[128];

        if (evsel->name)
                return evsel->name;

        switch (evsel->attr.type) {
        case PERF_TYPE_RAW:
                perf_evsel__raw_name(evsel, bf, sizeof(bf));
                break;

        case PERF_TYPE_HARDWARE:
                perf_evsel__hw_name(evsel, bf, sizeof(bf));
                break;

        case PERF_TYPE_HW_CACHE:
                perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
                break;

        case PERF_TYPE_SOFTWARE:
                perf_evsel__sw_name(evsel, bf, sizeof(bf));
                break;

        case PERF_TYPE_TRACEPOINT:
                scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
                break;

        case PERF_TYPE_BREAKPOINT:
                perf_evsel__bp_name(evsel, bf, sizeof(bf));
                break;

        default:
                scnprintf(bf, sizeof(bf), "unknown attr type: %d",
                          evsel->attr.type);
                break;
        }

        evsel->name = strdup(bf);

        return evsel->name ?: "unknown";
}
const char *perf_evsel__group_name(struct perf_evsel *evsel)
{
        return evsel->group_name ?: "anon group";
}
int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
{
        int ret;
        struct perf_evsel *pos;
        const char *group_name = perf_evsel__group_name(evsel);

        ret = scnprintf(buf, size, "%s", group_name);

        ret += scnprintf(buf + ret, size - ret, " { %s",
                         perf_evsel__name(evsel));

        for_each_group_member(pos, evsel)
                ret += scnprintf(buf + ret, size - ret, ", %s",
                                 perf_evsel__name(pos));

        ret += scnprintf(buf + ret, size - ret, " }");

        return ret;
}
/*
 * The enable_on_exec/disabled value strategy:
 *
 *  1) For any type of traced program:
 *      - all independent events and group leaders are disabled
 *      - all group members are enabled
 *
 *     Group members are ruled by group leaders. They need to
 *     be enabled, because the group scheduling relies on that.
 *
 *  2) For traced programs executed by perf:
 *      - all independent events and group leaders have
 *        enable_on_exec set
 *      - we don't specifically enable or disable any event during
 *        the record command
 *
 *     Independent events and group leaders are initially disabled
 *     and get enabled by exec. Group members are ruled by group
 *     leaders as stated in 1).
 *
 *  3) For traced programs attached by perf (pid/tid):
 *      - we specifically enable or disable all events during
 *        the record command
 *
 *     When attaching events to already running traced programs we
 *     enable/disable events specifically, as there's no
 *     initial traced exec call.
 */
void perf_evsel__config(struct perf_evsel *evsel,
                        struct perf_record_opts *opts)
{
        struct perf_evsel *leader = evsel->leader;
        struct perf_event_attr *attr = &evsel->attr;
        int track = !evsel->idx; /* only the first counter needs these */

        attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
        attr->inherit       = !opts->no_inherit;

        perf_evsel__set_sample_bit(evsel, IP);
        perf_evsel__set_sample_bit(evsel, TID);

        if (evsel->sample_read) {
                perf_evsel__set_sample_bit(evsel, READ);

                /*
                 * We need ID even in case of a single event, because
                 * PERF_SAMPLE_READ processes ID-specific data.
                 */
                perf_evsel__set_sample_id(evsel);

                /*
                 * Apply group format only if we belong to a group
                 * with more than one member.
                 */
                if (leader->nr_members > 1) {
                        attr->read_format |= PERF_FORMAT_GROUP;
                        attr->inherit = 0;
                }
        }

        /*
         * We default some events to a default interval of 1. But keep
         * it a weak assumption overridable by the user.
         */
        if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
                                     opts->user_interval != ULLONG_MAX)) {
                if (opts->freq) {
                        perf_evsel__set_sample_bit(evsel, PERIOD);
                        attr->freq        = 1;
                        attr->sample_freq = opts->freq;
                } else {
                        attr->sample_period = opts->default_interval;
                }
        }

        /*
         * Disable sampling for all group members other
         * than leader in case leader 'leads' the sampling.
         */
        if ((leader != evsel) && leader->sample_read) {
                attr->sample_freq   = 0;
                attr->sample_period = 0;
        }

        if (opts->no_samples)
                attr->sample_freq = 0;

        if (opts->inherit_stat)
                attr->inherit_stat = 1;

        if (opts->sample_address) {
                perf_evsel__set_sample_bit(evsel, ADDR);
                attr->mmap_data = track;
        }

        if (opts->call_graph) {
                perf_evsel__set_sample_bit(evsel, CALLCHAIN);

                if (opts->call_graph == CALLCHAIN_DWARF) {
                        perf_evsel__set_sample_bit(evsel, REGS_USER);
                        perf_evsel__set_sample_bit(evsel, STACK_USER);
                        attr->sample_regs_user = PERF_REGS_MASK;
                        attr->sample_stack_user = opts->stack_dump_size;
                        attr->exclude_callchain_user = 1;
                }
        }

        if (perf_target__has_cpu(&opts->target))
                perf_evsel__set_sample_bit(evsel, CPU);

        if (opts->period)
                perf_evsel__set_sample_bit(evsel, PERIOD);

        if (!perf_missing_features.sample_id_all &&
            (opts->sample_time || !opts->no_inherit ||
             perf_target__has_cpu(&opts->target)))
                perf_evsel__set_sample_bit(evsel, TIME);

        if (opts->raw_samples) {
                perf_evsel__set_sample_bit(evsel, TIME);
                perf_evsel__set_sample_bit(evsel, RAW);
                perf_evsel__set_sample_bit(evsel, CPU);
        }

        if (opts->sample_address)
                attr->sample_type |= PERF_SAMPLE_DATA_SRC;

        if (opts->no_delay) {
                attr->watermark = 0;
                attr->wakeup_events = 1;
        }

        if (opts->branch_stack) {
                perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
                attr->branch_sample_type = opts->branch_stack;
        }

        if (opts->sample_weight)
                attr->sample_type |= PERF_SAMPLE_WEIGHT;

        attr->mmap = track;
        attr->comm = track;

        /*
         * XXX see the function comment above
         *
         * Disabling only independent events or group leaders,
         * keeping group members enabled.
         */
        if (perf_evsel__is_group_leader(evsel))
                attr->disabled = 1;

        /*
         * Setting enable_on_exec for independent events and
         * group leaders for traced programs executed by perf.
         */
        if (perf_target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
                attr->enable_on_exec = 1;
}
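/*
 * The fd table is an ncpus x nthreads matrix; every slot starts at -1 so
 * perf_evsel__close_fd() and FD() can tell unopened slots apart from live
 * descriptors.
 */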
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        int cpu, thread;

        evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

        if (evsel->fd) {
                for (cpu = 0; cpu < ncpus; cpu++) {
                        for (thread = 0; thread < nthreads; thread++) {
                                FD(evsel, cpu, thread) = -1;
                        }
                }
        }

        return evsel->fd != NULL ? 0 : -ENOMEM;
}
static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads,
                                 int ioc, void *arg)
{
        int cpu, thread;

        for (cpu = 0; cpu < ncpus; cpu++) {
                for (thread = 0; thread < nthreads; thread++) {
                        int fd = FD(evsel, cpu, thread),
                            err = ioctl(fd, ioc, arg);

                        if (err)
                                return err;
                }
        }

        return 0;
}
int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
                           const char *filter)
{
        return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
                                     PERF_EVENT_IOC_SET_FILTER,
                                     (void *)filter);
}
int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
                                     PERF_EVENT_IOC_ENABLE,
                                     0);
}
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
        if (evsel->sample_id == NULL)
                return -ENOMEM;

        evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
        if (evsel->id == NULL) {
                xyarray__delete(evsel->sample_id);
                evsel->sample_id = NULL;
                return -ENOMEM;
        }

        return 0;
}
void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus)
{
        memset(evsel->counts, 0, (sizeof(*evsel->counts) +
                                  (ncpus * sizeof(struct perf_counts_values))));
}
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
        evsel->counts = zalloc((sizeof(*evsel->counts) +
                                (ncpus * sizeof(struct perf_counts_values))));
        return evsel->counts != NULL ? 0 : -ENOMEM;
}
void perf_evsel__free_fd(struct perf_evsel *evsel)
{
        xyarray__delete(evsel->fd);
        evsel->fd = NULL;
}
void perf_evsel__free_id(struct perf_evsel *evsel)
{
        xyarray__delete(evsel->sample_id);
        evsel->sample_id = NULL;
        free(evsel->id);
        evsel->id = NULL;
}
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        int cpu, thread;

        for (cpu = 0; cpu < ncpus; cpu++)
                for (thread = 0; thread < nthreads; ++thread) {
                        close(FD(evsel, cpu, thread));
                        FD(evsel, cpu, thread) = -1;
                }
}
void perf_evsel__free_counts(struct perf_evsel *evsel)
{
        free(evsel->counts);
}
void perf_evsel__exit(struct perf_evsel *evsel)
{
        assert(list_empty(&evsel->node));
        perf_evsel__free_fd(evsel);
        perf_evsel__free_id(evsel);
}
void perf_evsel__delete(struct perf_evsel *evsel)
{
        perf_evsel__exit(evsel);
        close_cgroup(evsel->cgrp);
        free(evsel->group_name);
        if (evsel->tp_format)
                pevent_free_format(evsel->tp_format);
        free(evsel->name);
        free(evsel);
}
static inline void compute_deltas(struct perf_evsel *evsel,
                                  int cpu,
                                  struct perf_counts_values *count)
{
        struct perf_counts_values tmp;

        if (!evsel->prev_raw_counts)
                return;

        if (cpu == -1) {
                tmp = evsel->prev_raw_counts->aggr;
                evsel->prev_raw_counts->aggr = *count;
        } else {
                tmp = evsel->prev_raw_counts->cpu[cpu];
                evsel->prev_raw_counts->cpu[cpu] = *count;
        }

        count->val = count->val - tmp.val;
        count->ena = count->ena - tmp.ena;
        count->run = count->run - tmp.run;
}
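/*
 * With scale != 0 three u64s (value, time enabled, time running) are read;
 * if the event was multiplexed (run < ena) the value is scaled up by
 * ena/run, rounded to the nearest integer.
 */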
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
                              int cpu, int thread, bool scale)
{
        struct perf_counts_values count;
        size_t nv = scale ? 3 : 1;

        if (FD(evsel, cpu, thread) < 0)
                return -EINVAL;

        if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
                return -ENOMEM;

        if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
                return -errno;

        compute_deltas(evsel, cpu, &count);

        if (scale) {
                if (count.run == 0)
                        count.val = 0;
                else if (count.run < count.ena)
                        count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
        } else
                count.ena = count.run = 0;

        evsel->counts->cpu[cpu] = count;
        return 0;
}
int __perf_evsel__read(struct perf_evsel *evsel,
                       int ncpus, int nthreads, bool scale)
{
        size_t nv = scale ? 3 : 1;
        int cpu, thread;
        struct perf_counts_values *aggr = &evsel->counts->aggr, count;

        aggr->val = aggr->ena = aggr->run = 0;

        for (cpu = 0; cpu < ncpus; cpu++) {
                for (thread = 0; thread < nthreads; thread++) {
                        if (FD(evsel, cpu, thread) < 0)
                                continue;

                        if (readn(FD(evsel, cpu, thread),
                                  &count, nv * sizeof(u64)) < 0)
                                return -errno;

                        aggr->val += count.val;
                        if (scale) {
                                aggr->ena += count.ena;
                                aggr->run += count.run;
                        }
                }
        }

        compute_deltas(evsel, -1, aggr);

        evsel->counts->scaled = 0;
        if (scale) {
                if (aggr->run == 0) {
                        evsel->counts->scaled = -1;
                        aggr->val = 0;
                        return 0;
                }

                if (aggr->run < aggr->ena) {
                        evsel->counts->scaled = 1;
                        aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
                }
        } else
                aggr->ena = aggr->run = 0;

        return 0;
}
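/*
 * sys_perf_event_open() groups events by file descriptor: the leader is
 * opened first with group_fd == -1 and every member passes the leader's fd
 * for the same (cpu, thread) slot.
 */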
static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
        struct perf_evsel *leader = evsel->leader;
        int fd;

        if (perf_evsel__is_group_leader(evsel))
                return -1;

        /*
         * Leader must be already processed/open,
         * if not it's a bug.
         */
        BUG_ON(!leader->fd);

        fd = FD(leader, cpu, thread);
        BUG_ON(fd == -1);

        return fd;
}
#define __PRINT_ATTR(fmt, cast, field)  \
        fprintf(fp, "  %-19s "fmt"\n", #field, cast attr->field)

#define PRINT_ATTR_U32(field)  __PRINT_ATTR("%u" , , field)
#define PRINT_ATTR_X32(field)  __PRINT_ATTR("%#x", , field)
#define PRINT_ATTR_U64(field)  __PRINT_ATTR("%" PRIu64, (uint64_t), field)
#define PRINT_ATTR_X64(field)  __PRINT_ATTR("%#"PRIx64, (uint64_t), field)

#define PRINT_ATTR2N(name1, field1, name2, field2)      \
        fprintf(fp, "  %-19s %u    %-19s %u\n",         \
                name1, attr->field1, name2, attr->field2)

#define PRINT_ATTR2(field1, field2) \
        PRINT_ATTR2N(#field1, field1, #field2, field2)
static size_t perf_event_attr__fprintf(struct perf_event_attr *attr, FILE *fp)
{
        size_t ret = 0;

        ret += fprintf(fp, "%.60s\n", graph_dotted_line);
        ret += fprintf(fp, "perf_event_attr:\n");

        ret += PRINT_ATTR_U32(type);
        ret += PRINT_ATTR_U32(size);
        ret += PRINT_ATTR_X64(config);
        ret += PRINT_ATTR_U64(sample_period);
        ret += PRINT_ATTR_U64(sample_freq);
        ret += PRINT_ATTR_X64(sample_type);
        ret += PRINT_ATTR_X64(read_format);

        ret += PRINT_ATTR2(disabled, inherit);
        ret += PRINT_ATTR2(pinned, exclusive);
        ret += PRINT_ATTR2(exclude_user, exclude_kernel);
        ret += PRINT_ATTR2(exclude_hv, exclude_idle);
        ret += PRINT_ATTR2(mmap, comm);
        ret += PRINT_ATTR2(freq, inherit_stat);
        ret += PRINT_ATTR2(enable_on_exec, task);
        ret += PRINT_ATTR2(watermark, precise_ip);
        ret += PRINT_ATTR2(mmap_data, sample_id_all);
        ret += PRINT_ATTR2(exclude_host, exclude_guest);
        ret += PRINT_ATTR2N("excl.callchain_kern", exclude_callchain_kernel,
                            "excl.callchain_user", exclude_callchain_user);

        ret += PRINT_ATTR_U32(wakeup_events);
        ret += PRINT_ATTR_U32(wakeup_watermark);
        ret += PRINT_ATTR_X32(bp_type);
        ret += PRINT_ATTR_X64(bp_addr);
        ret += PRINT_ATTR_X64(config1);
        ret += PRINT_ATTR_U64(bp_len);
        ret += PRINT_ATTR_X64(config2);
        ret += PRINT_ATTR_X64(branch_sample_type);
        ret += PRINT_ATTR_X64(sample_regs_user);
        ret += PRINT_ATTR_U32(sample_stack_user);

        ret += fprintf(fp, "%.60s\n", graph_dotted_line);

        return ret;
}
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                              struct thread_map *threads)
{
        int cpu, thread;
        unsigned long flags = 0;
        int pid = -1, err;
        enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;

        if (evsel->fd == NULL &&
            perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
                return -ENOMEM;

        if (evsel->cgrp) {
                flags = PERF_FLAG_PID_CGROUP;
                pid = evsel->cgrp->fd;
        }

fallback_missing_features:
        if (perf_missing_features.exclude_guest)
                evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
retry_sample_id:
        if (perf_missing_features.sample_id_all)
                evsel->attr.sample_id_all = 0;

        if (verbose >= 2)
                perf_event_attr__fprintf(&evsel->attr, stderr);

        for (cpu = 0; cpu < cpus->nr; cpu++) {

                for (thread = 0; thread < threads->nr; thread++) {
                        int group_fd;

                        if (!evsel->cgrp)
                                pid = threads->map[thread];

                        group_fd = get_group_fd(evsel, cpu, thread);
retry_open:
                        pr_debug2("perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx\n",
                                  pid, cpus->map[cpu], group_fd, flags);

                        FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
                                                                     pid,
                                                                     cpus->map[cpu],
                                                                     group_fd, flags);
                        if (FD(evsel, cpu, thread) < 0) {
                                err = -errno;
                                goto try_fallback;
                        }
                        set_rlimit = NO_CHANGE;
                }
        }

        return 0;

try_fallback:
        /*
         * perf stat needs between 5 and 22 fds per CPU. When we run out
         * of them try to increase the limits.
         */
        if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
                struct rlimit l;
                int old_errno = errno;

                if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
                        if (set_rlimit == NO_CHANGE)
                                l.rlim_cur = l.rlim_max;
                        else {
                                l.rlim_cur = l.rlim_max + 1000;
                                l.rlim_max = l.rlim_cur;
                        }
                        if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
                                set_rlimit++;
                                errno = old_errno;
                                goto retry_open;
                        }
                }
                errno = old_errno;
        }

        if (err != -EINVAL || cpu > 0 || thread > 0)
                goto out_close;

        if (!perf_missing_features.exclude_guest &&
            (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
                perf_missing_features.exclude_guest = true;
                goto fallback_missing_features;
        } else if (!perf_missing_features.sample_id_all) {
                perf_missing_features.sample_id_all = true;
                goto retry_sample_id;
        }

out_close:
        do {
                while (--thread >= 0) {
                        close(FD(evsel, cpu, thread));
                        FD(evsel, cpu, thread) = -1;
                }
                thread = threads->nr;
        } while (--cpu >= 0);
        return err;
}
void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        if (evsel->fd == NULL)
                return;

        perf_evsel__close_fd(evsel, ncpus, nthreads);
        perf_evsel__free_fd(evsel);
}
static struct {
        struct cpu_map map;
        int cpus[1];
} empty_cpu_map = {
        .map.nr = 1,
        .cpus   = { -1, },
};

static struct {
        struct thread_map map;
        int threads[1];
} empty_thread_map = {
        .map.nr  = 1,
        .threads = { -1, },
};
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                     struct thread_map *threads)
{
        if (cpus == NULL) {
                /* Work around old compiler warnings about strict aliasing */
                cpus = &empty_cpu_map.map;
        }

        if (threads == NULL)
                threads = &empty_thread_map.map;

        return __perf_evsel__open(evsel, cpus, threads);
}
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
                             struct cpu_map *cpus)
{
        return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
}
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
                                struct thread_map *threads)
{
        return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}
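/*
 * Non-sample events carry the sample_id_all block as a trailer, so it is
 * parsed backwards from the last u64 of the event.
 */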
static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
                                       const union perf_event *event,
                                       struct perf_sample *sample)
{
        u64 type = evsel->attr.sample_type;
        const u64 *array = event->sample.array;
        bool swapped = evsel->needs_swap;
        union u64_swap u;

        array += ((event->header.size -
                   sizeof(event->header)) / sizeof(u64)) - 1;

        if (type & PERF_SAMPLE_CPU) {
                u.val64 = *array;
                if (swapped) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                }

                sample->cpu = u.val32[0];
                array--;
        }

        if (type & PERF_SAMPLE_STREAM_ID) {
                sample->stream_id = *array;
                array--;
        }

        if (type & PERF_SAMPLE_ID) {
                sample->id = *array;
                array--;
        }

        if (type & PERF_SAMPLE_TIME) {
                sample->time = *array;
                array--;
        }

        if (type & PERF_SAMPLE_TID) {
                u.val64 = *array;
                if (swapped) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                        u.val32[1] = bswap_32(u.val32[1]);
                }

                sample->pid = u.val32[0];
                sample->tid = u.val32[1];
        }

        return 0;
}
static inline bool overflow(const void *endp, u16 max_size, const void *offset,
                            u64 size)
{
        return size > max_size || offset + size > endp;
}

#define OVERFLOW_CHECK(offset, size, max_size)                          \
        do {                                                            \
                if (overflow(endp, (max_size), (offset), (size)))      \
                        return -EFAULT;                                 \
        } while (0)

#define OVERFLOW_CHECK_u64(offset) \
        OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
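/*
 * PERF_RECORD_SAMPLE fields appear in the fixed order of the PERF_SAMPLE_*
 * bits; the fixed u64 part is validated up front via evsel->sample_size and
 * each variable-length field is bounds-checked with OVERFLOW_CHECK().
 */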
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
                             struct perf_sample *data)
{
        u64 type = evsel->attr.sample_type;
        bool swapped = evsel->needs_swap;
        const u64 *array;
        u16 max_size = event->header.size;
        const void *endp = (void *)event + max_size;
        u64 sz;

        /*
         * used for cross-endian analysis. See git commit 65014ab3
         * for why this goofiness is needed.
         */
        union u64_swap u;

        memset(data, 0, sizeof(*data));
        data->cpu = data->pid = data->tid = -1;
        data->stream_id = data->id = data->time = -1ULL;
        data->period = 1;
        data->weight = 0;

        if (event->header.type != PERF_RECORD_SAMPLE) {
                if (!evsel->attr.sample_id_all)
                        return 0;
                return perf_evsel__parse_id_sample(evsel, event, data);
        }

        array = event->sample.array;

        /*
         * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
         * up to PERF_SAMPLE_PERIOD.  After that overflow() must be used to
         * check the format does not go past the end of the event.
         */
        if (evsel->sample_size + sizeof(event->header) > event->header.size)
                return -EFAULT;

        if (type & PERF_SAMPLE_IP) {
                data->ip = *array;
                array++;
        }

        if (type & PERF_SAMPLE_TID) {
                u.val64 = *array;
                if (swapped) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                        u.val32[1] = bswap_32(u.val32[1]);
                }

                data->pid = u.val32[0];
                data->tid = u.val32[1];
                array++;
        }

        if (type & PERF_SAMPLE_TIME) {
                data->time = *array;
                array++;
        }

        data->addr = 0;
        if (type & PERF_SAMPLE_ADDR) {
                data->addr = *array;
                array++;
        }

        if (type & PERF_SAMPLE_ID) {
                data->id = *array;
                array++;
        }

        if (type & PERF_SAMPLE_STREAM_ID) {
                data->stream_id = *array;
                array++;
        }

        if (type & PERF_SAMPLE_CPU) {
                u.val64 = *array;
                if (swapped) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                }

                data->cpu = u.val32[0];
                array++;
        }

        if (type & PERF_SAMPLE_PERIOD) {
                data->period = *array;
                array++;
        }

        if (type & PERF_SAMPLE_READ) {
                u64 read_format = evsel->attr.read_format;

                OVERFLOW_CHECK_u64(array);
                if (read_format & PERF_FORMAT_GROUP)
                        data->read.group.nr = *array;
                else
                        data->read.one.value = *array;

                array++;

                if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
                        OVERFLOW_CHECK_u64(array);
                        data->read.time_enabled = *array;
                        array++;
                }

                if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
                        OVERFLOW_CHECK_u64(array);
                        data->read.time_running = *array;
                        array++;
                }

                /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
                if (read_format & PERF_FORMAT_GROUP) {
                        const u64 max_group_nr = UINT64_MAX /
                                        sizeof(struct sample_read_value);

                        if (data->read.group.nr > max_group_nr)
                                return -EFAULT;
                        sz = data->read.group.nr *
                             sizeof(struct sample_read_value);
                        OVERFLOW_CHECK(array, sz, max_size);
                        data->read.group.values =
                                        (struct sample_read_value *)array;
                        array = (void *)array + sz;
                } else {
                        OVERFLOW_CHECK_u64(array);
                        data->read.one.id = *array;
                        array++;
                }
        }

        if (type & PERF_SAMPLE_CALLCHAIN) {
                const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);

                OVERFLOW_CHECK_u64(array);
                data->callchain = (struct ip_callchain *)array++;
                if (data->callchain->nr > max_callchain_nr)
                        return -EFAULT;
                sz = data->callchain->nr * sizeof(u64);
                OVERFLOW_CHECK(array, sz, max_size);
                array = (void *)array + sz;
        }

        if (type & PERF_SAMPLE_RAW) {
                OVERFLOW_CHECK_u64(array);
                u.val64 = *array;
                if (WARN_ONCE(swapped,
                              "Endianness of raw data not corrected!\n")) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                        u.val32[1] = bswap_32(u.val32[1]);
                }
                data->raw_size = u.val32[0];
                array = (void *)array + sizeof(u32);

                OVERFLOW_CHECK(array, data->raw_size, max_size);
                data->raw_data = (void *)array;
                array = (void *)array + data->raw_size;
        }

        if (type & PERF_SAMPLE_BRANCH_STACK) {
                const u64 max_branch_nr = UINT64_MAX /
                                          sizeof(struct branch_entry);

                OVERFLOW_CHECK_u64(array);
                data->branch_stack = (struct branch_stack *)array++;

                if (data->branch_stack->nr > max_branch_nr)
                        return -EFAULT;
                sz = data->branch_stack->nr * sizeof(struct branch_entry);
                OVERFLOW_CHECK(array, sz, max_size);
                array = (void *)array + sz;
        }

        if (type & PERF_SAMPLE_REGS_USER) {
                /* First u64 tells us if we have any regs in sample. */
                OVERFLOW_CHECK_u64(array);
                data->user_regs.abi = *array;
                array++;

                if (data->user_regs.abi) {
                        u64 regs_user = evsel->attr.sample_regs_user;

                        sz = hweight_long(regs_user) * sizeof(u64);
                        OVERFLOW_CHECK(array, sz, max_size);
                        data->user_regs.regs = (u64 *)array;
                        array = (void *)array + sz;
                }
        }

        if (type & PERF_SAMPLE_STACK_USER) {
                OVERFLOW_CHECK_u64(array);
                sz = *array++;

                data->user_stack.offset = ((char *)(array - 1)
                                          - (char *) event);

                if (!sz) {
                        data->user_stack.size = 0;
                } else {
                        OVERFLOW_CHECK(array, sz, max_size);
                        data->user_stack.data = (char *)array;
                        array = (void *)array + sz;
                        OVERFLOW_CHECK_u64(array);
                        data->user_stack.size = *array++;
                }
        }

        if (type & PERF_SAMPLE_WEIGHT) {
                OVERFLOW_CHECK_u64(array);
                data->weight = *array;
                array++;
        }

        data->data_src = PERF_MEM_DATA_SRC_NONE;
        if (type & PERF_SAMPLE_DATA_SRC) {
                OVERFLOW_CHECK_u64(array);
                data->data_src = *array;
                array++;
        }

        return 0;
}
*event
, u64 type
,
1386 const struct perf_sample
*sample
,
1392 * used for cross-endian analysis. See git commit 65014ab3
1393 * for why this goofiness is needed.
1397 array
= event
->sample
.array
;
1399 if (type
& PERF_SAMPLE_IP
) {
1400 *array
= sample
->ip
;
1404 if (type
& PERF_SAMPLE_TID
) {
1405 u
.val32
[0] = sample
->pid
;
1406 u
.val32
[1] = sample
->tid
;
1409 * Inverse of what is done in perf_evsel__parse_sample
1411 u
.val32
[0] = bswap_32(u
.val32
[0]);
1412 u
.val32
[1] = bswap_32(u
.val32
[1]);
1413 u
.val64
= bswap_64(u
.val64
);
1420 if (type
& PERF_SAMPLE_TIME
) {
1421 *array
= sample
->time
;
1425 if (type
& PERF_SAMPLE_ADDR
) {
1426 *array
= sample
->addr
;
1430 if (type
& PERF_SAMPLE_ID
) {
1431 *array
= sample
->id
;
1435 if (type
& PERF_SAMPLE_STREAM_ID
) {
1436 *array
= sample
->stream_id
;
1440 if (type
& PERF_SAMPLE_CPU
) {
1441 u
.val32
[0] = sample
->cpu
;
1444 * Inverse of what is done in perf_evsel__parse_sample
1446 u
.val32
[0] = bswap_32(u
.val32
[0]);
1447 u
.val64
= bswap_64(u
.val64
);
1453 if (type
& PERF_SAMPLE_PERIOD
) {
1454 *array
= sample
->period
;
struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
        return pevent_find_field(evsel->tp_format, name);
}
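/*
 * For dynamic tracepoint fields the slot at field->offset holds a
 * (length << 16 | offset) descriptor, so only the low 16 bits are the
 * real payload offset.
 */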
void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
                         const char *name)
{
        struct format_field *field = perf_evsel__field(evsel, name);
        int offset;

        if (!field)
                return NULL;

        offset = field->offset;

        if (field->flags & FIELD_IS_DYNAMIC) {
                offset = *(int *)(sample->raw_data + field->offset);
                offset &= 0xffff;
        }

        return sample->raw_data + offset;
}
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
                       const char *name)
{
        struct format_field *field = perf_evsel__field(evsel, name);
        void *ptr;
        u64 value;

        if (!field)
                return 0;

        ptr = sample->raw_data + field->offset;

        switch (field->size) {
        case 1:
                return *(u8 *)ptr;
        case 2:
                value = *(u16 *)ptr;
                break;
        case 4:
                value = *(u32 *)ptr;
                break;
        case 8:
                value = *(u64 *)ptr;
                break;
        default:
                return 0;
        }

        if (!evsel->needs_swap)
                return value;

        switch (field->size) {
        case 2:
                return bswap_16(value);
        case 4:
                return bswap_32(value);
        case 8:
                return bswap_64(value);
        default:
                return 0;
        }

        return 0;
}
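/*
 * Helpers for perf_evsel__fprintf(): attr fields are printed
 * comma-separated, with a ':' emitted before the first one.
 */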
static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
{
        va_list args;
        int ret = 0;

        if (!*first) {
                ret += fprintf(fp, ",");
        } else {
                ret += fprintf(fp, ":");
                *first = false;
        }

        va_start(args, fmt);
        ret += vfprintf(fp, fmt, args);
        va_end(args);
        return ret;
}
static int __if_fprintf(FILE *fp, bool *first, const char *field, u64 value)
{
        if (value == 0)
                return 0;

        return comma_fprintf(fp, first, " %s: %" PRIu64, field, value);
}

#define if_print(field) printed += __if_fprintf(fp, &first, #field, evsel->attr.field)
struct bit_names {
        int bit;
        const char *name;
};

static int bits__fprintf(FILE *fp, const char *field, u64 value,
                         struct bit_names *bits, bool *first)
{
        int i = 0, printed = comma_fprintf(fp, first, " %s: ", field);
        bool first_bit = true;

        do {
                if (value & bits[i].bit) {
                        printed += fprintf(fp, "%s%s", first_bit ? "" : "|", bits[i].name);
                        first_bit = false;
                }
        } while (bits[++i].name != NULL);

        return printed;
}
static int sample_type__fprintf(FILE *fp, bool *first, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_##n, #n }
        struct bit_names bits[] = {
                bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
                bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
                bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
                bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
                { .name = NULL, }
        };
#undef bit_name
        return bits__fprintf(fp, "sample_type", value, bits, first);
}
static int read_format__fprintf(FILE *fp, bool *first, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
        struct bit_names bits[] = {
                bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
                bit_name(ID), bit_name(GROUP),
                { .name = NULL, }
        };
#undef bit_name
        return bits__fprintf(fp, "read_format", value, bits, first);
}
int perf_evsel__fprintf(struct perf_evsel *evsel,
                        struct perf_attr_details *details, FILE *fp)
{
        bool first = true;
        int printed = 0;

        if (details->event_group) {
                struct perf_evsel *pos;

                if (!perf_evsel__is_group_leader(evsel))
                        return 0;

                if (evsel->nr_members > 1)
                        printed += fprintf(fp, "%s{", evsel->group_name ?: "");

                printed += fprintf(fp, "%s", perf_evsel__name(evsel));
                for_each_group_member(pos, evsel)
                        printed += fprintf(fp, ",%s", perf_evsel__name(pos));

                if (evsel->nr_members > 1)
                        printed += fprintf(fp, "}");
                goto out;
        }

        printed += fprintf(fp, "%s", perf_evsel__name(evsel));

        if (details->verbose || details->freq) {
                printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
                                         (u64)evsel->attr.sample_freq);
        }

        if (details->verbose) {
                if_print(type);
                if_print(config);
                if_print(config1);
                if_print(config2);
                if_print(size);
                printed += sample_type__fprintf(fp, &first, evsel->attr.sample_type);
                if (evsel->attr.read_format)
                        printed += read_format__fprintf(fp, &first, evsel->attr.read_format);
                if_print(disabled);
                if_print(inherit);
                if_print(pinned);
                if_print(exclusive);
                if_print(exclude_user);
                if_print(exclude_kernel);
                if_print(exclude_hv);
                if_print(exclude_idle);
                if_print(mmap);
                if_print(comm);
                if_print(freq);
                if_print(inherit_stat);
                if_print(enable_on_exec);
                if_print(task);
                if_print(watermark);
                if_print(precise_ip);
                if_print(mmap_data);
                if_print(sample_id_all);
                if_print(exclude_host);
                if_print(exclude_guest);
                if_print(__reserved_1);
                if_print(wakeup_events);
                if_print(bp_type);
                if_print(branch_sample_type);
        }
out:
        fputc('\n', fp);
        return ++printed;
}
bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
                          char *msg, size_t msgsize)
{
        if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
            evsel->attr.type == PERF_TYPE_HARDWARE &&
            evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
                /*
                 * If it's cycles then fall back to hrtimer based
                 * cpu-clock-tick sw counter, which is always available even if
                 * no PMU support.
                 *
                 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
                 * b0a873e).
                 */
                scnprintf(msg, msgsize, "%s",
"The cycles event is not supported, trying to fall back to cpu-clock-ticks");

                evsel->attr.type   = PERF_TYPE_SOFTWARE;
                evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;

                free(evsel->name);
                evsel->name = NULL;
                return true;
        }

        return false;
}
int perf_evsel__open_strerror(struct perf_evsel *evsel,
                              struct perf_target *target,
                              int err, char *msg, size_t size)
{
        switch (err) {
        case EPERM:
        case EACCES:
                return scnprintf(msg, size,
                 "You may not have permission to collect %sstats.\n"
                 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
                 " -1 - Not paranoid at all\n"
                 "  0 - Disallow raw tracepoint access for unpriv\n"
                 "  1 - Disallow cpu events for unpriv\n"
                 "  2 - Disallow kernel profiling for unpriv",
                                 target->system_wide ? "system-wide " : "");
        case ENOENT:
                return scnprintf(msg, size, "The %s event is not supported.",
                                 perf_evsel__name(evsel));
        case EMFILE:
                return scnprintf(msg, size, "%s",
                         "Too many events are opened.\n"
                         "Try again after reducing the number of events.");
        case ENODEV:
                if (target->cpu_list)
                        return scnprintf(msg, size, "%s",
         "No such device - did you specify an out-of-range profile CPU?\n");
                break;
        case EOPNOTSUPP:
                if (evsel->attr.precise_ip)
                        return scnprintf(msg, size, "%s",
        "'precise' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
                if (evsel->attr.type == PERF_TYPE_HARDWARE)
                        return scnprintf(msg, size, "%s",
        "No hardware sampling interrupt available.\n"
        "No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
#endif
                break;
        default:
                break;
        }

        return scnprintf(msg, size,
        "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
        "/bin/dmesg may provide additional information.\n"
        "No CONFIG_PERF_EVENTS=y kernel support configured?\n",
                         err, strerror(err), perf_evsel__name(evsel));
}