// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>

#include <asm/cpu_entry_area.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>

#include "../perf_event.h"
/* Waste a full page so it can be mapped into the cpu_entry_area */
DEFINE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);
/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

#define PEBS_FIXUP_SIZE		PAGE_SIZE
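/*
 * Note on the constants above: a BTS record is three u64 fields
 * (branch-from, branch-to, flags), hence the 24 bytes. PEBS_FIXUP_SIZE
 * bounds how many instruction bytes intel_pmu_pebs_fixup_ip() may copy
 * and decode when rewinding to the precise IP; one page is enough by
 * construction because the basic-block sanity check there rejects
 * anything larger.
 */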
/*
 * pebs_record_32 for p4 and core not supported
 *
 * struct pebs_record_32 {
 *	...
 * };
 */
union intel_x86_pebs_dse {
	u64 val;
	struct {
		unsigned int ld_dse:4;
		unsigned int ld_stlb_miss:1;
		unsigned int ld_locked:1;
		unsigned int ld_reserved:26;
	};
	struct {
		unsigned int st_l1d_hit:1;
		unsigned int st_reserved1:3;
		unsigned int st_stlb_miss:1;
		unsigned int st_locked:1;
		unsigned int st_reserved2:26;
	};
};
/*
 * Map PEBS Load Latency Data Source encodings to generic
 * memory data source information.
 */
#define P(a, b) PERF_MEM_S(a, b)
#define OP_LH (P(OP, LOAD) | P(LVL, HIT))
#define LEVEL(x) P(LVLNUM, x)
#define REM P(REMOTE, REMOTE)
#define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))
/* Version for Sandy Bridge and later */
static u64 pebs_data_source[] = {
	P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA),       /* 0x00: ukn L3 */
	OP_LH | P(LVL, L1)  | LEVEL(L1)  | P(SNOOP, NONE),            /* 0x01: L1 local */
	OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE),            /* 0x02: LFB hit */
	OP_LH | P(LVL, L2)  | LEVEL(L2)  | P(SNOOP, NONE),            /* 0x03: L2 hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3)  | P(SNOOP, NONE),            /* 0x04: L3 hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3)  | P(SNOOP, MISS),            /* 0x05: L3 hit, snoop miss */
	OP_LH | P(LVL, L3)  | LEVEL(L3)  | P(SNOOP, HIT),             /* 0x06: L3 hit, snoop hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3)  | P(SNOOP, HITM),            /* 0x07: L3 hit, snoop hitm */
	OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HIT),   /* 0x08: L3 miss snoop hit */
	OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HITM),  /* 0x09: L3 miss snoop hitm */
	OP_LH | P(LVL, LOC_RAM)  | LEVEL(RAM) | P(SNOOP, HIT),        /* 0x0a: L3 miss, shared */
	OP_LH | P(LVL, REM_RAM1) | REM | LEVEL(L3) | P(SNOOP, HIT),   /* 0x0b: L3 miss, shared */
	OP_LH | P(LVL, LOC_RAM)  | LEVEL(RAM) | SNOOP_NONE_MISS,      /* 0x0c: L3 miss, excl */
	OP_LH | P(LVL, REM_RAM1) | LEVEL(RAM) | REM | SNOOP_NONE_MISS, /* 0x0d: L3 miss, excl */
	OP_LH | P(LVL, IO)  | LEVEL(NA) | P(SNOOP, NONE),             /* 0x0e: I/O */
	OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE),             /* 0x0f: uncached */
};
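/*
 * Worked example of how this table is consumed (see load_latency_data()
 * below): a load-latency PEBS record whose data-source field has
 * bits 0-3 equal to 0x05 resolves to
 *
 *	OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, MISS)
 *
 * i.e. a retired load that hit L3 with a snoop miss; bit 4 (STLB miss)
 * and bit 5 (locked) are then folded in on top of the table entry.
 * The 0x05-0x07 and 0x08-0x0d entries are re-patched for Nehalem and
 * Skylake respectively by the __init helpers that follow.
 */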
/* Patch up minor differences in the bits */
void __init intel_pmu_pebs_data_source_nhm(void)
{
	pebs_data_source[0x05] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
	pebs_data_source[0x06] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
	pebs_data_source[0x07] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
}
void __init intel_pmu_pebs_data_source_skl(bool pmem)
{
	u64 pmem_or_l4 = pmem ? LEVEL(PMEM) : LEVEL(L4);

	pebs_data_source[0x08] = OP_LH | pmem_or_l4 | P(SNOOP, HIT);
	pebs_data_source[0x09] = OP_LH | pmem_or_l4 | REM | P(SNOOP, HIT);
	pebs_data_source[0x0b] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE);
	pebs_data_source[0x0c] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOPX, FWD);
	pebs_data_source[0x0d] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOP, HITM);
}
static u64 precise_store_data(u64 status)
{
	union intel_x86_pebs_dse dse;
	u64 val = P(OP, STORE) | P(SNOOP, NA) | P(LVL, L1) | P(TLB, L2);

	dse.val = status;

	/*
	 * bit 4: TLB access
	 * 1 = stored missed 2nd level TLB
	 *
	 * so it either hit the walker or the OS
	 * otherwise hit 2nd level TLB
	 */
	if (dse.st_stlb_miss)
		val |= P(TLB, MISS);
	else
		val |= P(TLB, HIT);

	/*
	 * bit 0: hit L1 data cache
	 * if not set, then all we know is that
	 * it missed L1D
	 */
	if (dse.st_l1d_hit)
		val |= P(LVL, HIT);
	else
		val |= P(LVL, MISS);

	/*
	 * bit 5: Locked prefix
	 */
	if (dse.st_locked)
		val |= P(LOCK, LOCKED);

	return val;
}
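/*
 * Example decode, assuming the bit layout of intel_x86_pebs_dse above:
 * a store status of 0x01 (st_l1d_hit = 1, st_stlb_miss = 0,
 * st_locked = 0) yields P(OP, STORE) | P(LVL, L1) | P(LVL, HIT) |
 * P(TLB, L2) | P(TLB, HIT) | P(SNOOP, NA), i.e. a plain store that hit
 * both the L1D and the second-level TLB.
 */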
static u64 precise_datala_hsw(struct perf_event *event, u64 status)
{
	union perf_mem_data_src dse;

	dse.val = PERF_MEM_NA;

	if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW)
		dse.mem_op = PERF_MEM_OP_STORE;
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_LD_HSW)
		dse.mem_op = PERF_MEM_OP_LOAD;

	/*
	 * L1 info only valid for following events:
	 *
	 * MEM_UOPS_RETIRED.STLB_MISS_STORES
	 * MEM_UOPS_RETIRED.LOCK_STORES
	 * MEM_UOPS_RETIRED.SPLIT_STORES
	 * MEM_UOPS_RETIRED.ALL_STORES
	 */
	if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) {
		if (status & 1)
			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
		else
			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS;
	}
	return dse.val;
}
static u64 load_latency_data(u64 status)
{
	union intel_x86_pebs_dse dse;
	u64 val;

	dse.val = status;

	/*
	 * use the mapping table for bit 0-3
	 */
	val = pebs_data_source[dse.ld_dse];

	/*
	 * Nehalem models do not support TLB, Lock infos
	 */
	if (x86_pmu.pebs_no_tlb) {
		val |= P(TLB, NA) | P(LOCK, NA);
		return val;
	}

	/*
	 * bit 4: TLB access
	 * 0 = did not miss 2nd level TLB
	 * 1 = missed 2nd level TLB
	 */
	if (dse.ld_stlb_miss)
		val |= P(TLB, MISS) | P(TLB, L2);
	else
		val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2);

	/*
	 * bit 5: locked prefix
	 */
	if (dse.ld_locked)
		val |= P(LOCK, LOCKED);

	return val;
}
struct pebs_record_core {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
};

struct pebs_record_nhm {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
};

/*
 * Same as pebs_record_nhm, with two additional fields.
 */
struct pebs_record_hsw {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
	u64 real_ip, tsx_tuning;
};

union hsw_tsx_tuning {
	struct {
		u32 cycles_last_block     : 32,
		    hle_abort		  : 1,
		    rtm_abort		  : 1,
		    instruction_abort     : 1,
		    non_instruction_abort : 1,
		    retry		  : 1,
		    data_conflict	  : 1,
		    capacity_writes	  : 1,
		    capacity_reads	  : 1;
	};
	u64	    value;
};

#define PEBS_HSW_TSX_FLAGS	0xff00000000ULL

/* Same as HSW, plus TSC */

struct pebs_record_skl {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
	u64 real_ip, tsx_tuning;
	u64 tsc;
};
void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}

void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}
static DEFINE_PER_CPU(void *, insn_buffer);
static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long)cea;
	phys_addr_t pa;
	size_t msz = 0;

	pa = virt_to_phys(addr);

	for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
		cea_set_pte(cea, pa, prot);

	/*
	 * This is a cross-CPU update of the cpu_entry_area, we must shoot down
	 * all TLB entries for it.
	 */
	flush_tlb_kernel_range(start, start + size);
}
static void ds_clear_cea(void *cea, size_t size)
{
	unsigned long start = (unsigned long)cea;
	size_t msz = 0;

	for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);

	flush_tlb_kernel_range(start, start + size);
}
static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
{
	unsigned int order = get_order(size);
	int node = cpu_to_node(cpu);
	struct page *page;

	page = __alloc_pages_node(node, flags | __GFP_ZERO, order);
	return page ? page_address(page) : NULL;
}
static void dsfree_pages(const void *buffer, size_t size)
{
	if (buffer)
		free_pages((unsigned long)buffer, get_order(size));
}
static int alloc_pebs_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	struct debug_store *ds = hwev->ds;
	size_t bsiz = x86_pmu.pebs_buffer_size;
	int max, node = cpu_to_node(cpu);
	void *buffer, *ibuffer, *cea;

	if (!x86_pmu.pebs)
		return 0;

	buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu);
	if (unlikely(!buffer))
		return -ENOMEM;

	/*
	 * HSW+ already provides us the eventing ip; no need to allocate this
	 * buffer then.
	 */
	if (x86_pmu.intel_cap.pebs_format < 2) {
		ibuffer = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node);
		if (!ibuffer) {
			dsfree_pages(buffer, bsiz);
			return -ENOMEM;
		}
		per_cpu(insn_buffer, cpu) = ibuffer;
	}
	hwev->ds_pebs_vaddr = buffer;
	/* Update the cpu entry area mapping */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
	ds->pebs_buffer_base = (unsigned long) cea;
	ds_update_cea(cea, buffer, bsiz, PAGE_KERNEL);
	ds->pebs_index = ds->pebs_buffer_base;
	max = x86_pmu.pebs_record_size * (bsiz / x86_pmu.pebs_record_size);
	ds->pebs_absolute_maximum = ds->pebs_buffer_base + max;
	return 0;
}
static void release_pebs_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	void *cea;

	if (!x86_pmu.pebs)
		return;

	kfree(per_cpu(insn_buffer, cpu));
	per_cpu(insn_buffer, cpu) = NULL;

	/* Clear the fixmap */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
	ds_clear_cea(cea, x86_pmu.pebs_buffer_size);
	dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size);
	hwev->ds_pebs_vaddr = NULL;
}
static int alloc_bts_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	struct debug_store *ds = hwev->ds;
	void *buffer, *cea;
	int max;

	if (!x86_pmu.bts)
		return 0;

	buffer = dsalloc_pages(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, cpu);
	if (unlikely(!buffer)) {
		WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
		return -ENOMEM;
	}
	hwev->ds_bts_vaddr = buffer;
	/* Update the fixmap */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
	ds->bts_buffer_base = (unsigned long) cea;
	ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL);
	ds->bts_index = ds->bts_buffer_base;
	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
	ds->bts_absolute_maximum = ds->bts_buffer_base +
					max * BTS_RECORD_SIZE;
	ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
					(max / 16) * BTS_RECORD_SIZE;
	return 0;
}
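/*
 * Worked example, assuming BTS_BUFFER_SIZE is 64KiB (PAGE_SIZE << 4, as
 * defined in ../perf_event.h): max = 65536 / 24 = 2730 records, so the
 * interrupt threshold sits 2730 / 16 = 170 records (~4KiB) below the
 * absolute maximum. That headroom lets the hardware keep logging
 * branches while the BTS interrupt is being delivered and drained.
 */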
static void release_bts_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	void *cea;

	if (!x86_pmu.bts)
		return;

	/* Clear the fixmap */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
	ds_clear_cea(cea, BTS_BUFFER_SIZE);
	dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE);
	hwev->ds_bts_vaddr = NULL;
}
static int alloc_ds_buffer(int cpu)
{
	struct debug_store *ds = &get_cpu_entry_area(cpu)->cpu_debug_store;

	memset(ds, 0, sizeof(*ds));
	per_cpu(cpu_hw_events, cpu).ds = ds;
	return 0;
}
static void release_ds_buffer(int cpu)
{
	per_cpu(cpu_hw_events, cpu).ds = NULL;
}
void release_ds_buffers(void)
{
	int cpu;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	for_each_possible_cpu(cpu)
		release_ds_buffer(cpu);

	for_each_possible_cpu(cpu) {
		/*
		 * Again, ignore errors from offline CPUs, they will no longer
		 * observe cpu_hw_events.ds and not program the DS_AREA when
		 * they come up.
		 */
		fini_debug_store_on_cpu(cpu);
	}

	for_each_possible_cpu(cpu) {
		release_pebs_buffer(cpu);
		release_bts_buffer(cpu);
	}
}
void reserve_ds_buffers(void)
{
	int bts_err = 0, pebs_err = 0;
	int cpu;

	x86_pmu.bts_active = 0;
	x86_pmu.pebs_active = 0;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	if (!x86_pmu.bts)
		bts_err = 1;

	if (!x86_pmu.pebs)
		pebs_err = 1;

	for_each_possible_cpu(cpu) {
		if (alloc_ds_buffer(cpu)) {
			bts_err = 1;
			pebs_err = 1;
		}

		if (!bts_err && alloc_bts_buffer(cpu))
			bts_err = 1;

		if (!pebs_err && alloc_pebs_buffer(cpu))
			pebs_err = 1;

		if (bts_err && pebs_err)
			break;
	}

	if (bts_err) {
		for_each_possible_cpu(cpu)
			release_bts_buffer(cpu);
	}

	if (pebs_err) {
		for_each_possible_cpu(cpu)
			release_pebs_buffer(cpu);
	}

	if (bts_err && pebs_err) {
		for_each_possible_cpu(cpu)
			release_ds_buffer(cpu);
	} else {
		if (x86_pmu.bts && !bts_err)
			x86_pmu.bts_active = 1;

		if (x86_pmu.pebs && !pebs_err)
			x86_pmu.pebs_active = 1;

		for_each_possible_cpu(cpu) {
			/*
			 * Ignores wrmsr_on_cpu() errors for offline CPUs; they
			 * will get this call through intel_pmu_cpu_starting().
			 */
			init_debug_store_on_cpu(cpu);
		}
	}
}
struct event_constraint bts_constraint =
	EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0);
void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= DEBUGCTLMSR_TR;
	debugctlmsr |= DEBUGCTLMSR_BTS;
	if (config & ARCH_PERFMON_EVENTSEL_INT)
		debugctlmsr |= DEBUGCTLMSR_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}
void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
		  DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}
int intel_pmu_drain_bts_buffer(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *base, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	unsigned long skip = 0;
	struct pt_regs regs;

	if (!event)
		return 0;

	if (!x86_pmu.bts_active)
		return 0;

	base = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top  = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= base)
		return 0;

	memset(&regs, 0, sizeof(regs));

	ds->bts_index = ds->bts_buffer_base;

	perf_sample_data_init(&data, 0, event->hw.last_period);

	/*
	 * BTS leaks kernel addresses in branches across the cpl boundary,
	 * such as traps or system calls, so unless the user is asking for
	 * kernel tracing (and right now it's not possible), we'd need to
	 * filter them out. But first we need to count how many of those we
	 * have in the current batch. This is an extra O(n) pass, however,
	 * it's much faster than the other one especially considering that
	 * n <= 2560 (BTS_BUFFER_SIZE / BTS_RECORD_SIZE * 15/16; see the
	 * alloc_bts_buffer()).
	 */
	for (at = base; at < top; at++) {
		/*
		 * Note that right now *this* BTS code only works if
		 * attr::exclude_kernel is set, but let's keep this extra
		 * check here in case that changes.
		 */
		if (event->attr.exclude_kernel &&
		    (kernel_ip(at->from) || kernel_ip(at->to)))
			skip++;
	}

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, event, header.size *
			      (top - base - skip)))
		return 1;

	for (at = base; at < top; at++) {
		/* Filter out any records that contain kernel addresses. */
		if (event->attr.exclude_kernel &&
		    (kernel_ip(at->from) || kernel_ip(at->to)))
			continue;

		data.ip		= at->from;
		data.addr	= at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;

	return 1;
}
static inline void intel_pmu_drain_pebs_buffer(void)
{
	struct pt_regs regs;

	x86_pmu.drain_pebs(&regs);
}
struct event_constraint intel_core2_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_atom_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_slm_pebs_event_constraints[] = {
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1),
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_glm_pebs_event_constraints[] = {
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_nehalem_pebs_event_constraints[] = {
	INTEL_PLD_CONSTRAINT(0x100b, 0xf),          /* MEM_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf),    /* INST_RETIRED.ANY */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),          /* UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_westmere_pebs_event_constraints[] = {
	INTEL_PLD_CONSTRAINT(0x100b, 0xf),          /* MEM_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf),    /* INSTR_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),          /* UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
	EVENT_CONSTRAINT_END
};
struct event_constraint intel_snb_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),          /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
	INTEL_PST_CONSTRAINT(0x02cd, 0x8),          /* MEM_TRANS_RETIRED.PRECISE_STORES */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),        /* MEM_UOP_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),        /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),        /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),        /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_ivb_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),          /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
	INTEL_PST_CONSTRAINT(0x02cd, 0x8),          /* MEM_TRANS_RETIRED.PRECISE_STORES */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),        /* MEM_UOP_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),        /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),        /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),        /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_hsw_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),          /* MEM_TRANS_RETIRED.* */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf),  /* UOPS_RETIRED.ALL */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd2, 0xf),    /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd3, 0xf),    /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};
struct event_constraint intel_bdw_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),          /* MEM_TRANS_RETIRED.* */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf),    /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf),    /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};


struct event_constraint intel_skl_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2),  /* INST_RETIRED.PREC_DIST */
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	/* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
	INTEL_PLD_CONSTRAINT(0x1cd, 0xf),           /* MEM_TRANS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x22d0, 0xf), /* MEM_INST_RETIRED.LOCK_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf),    /* MEM_LOAD_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf),    /* MEM_LOAD_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_icl_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x100000000ULL),  /* INST_RETIRED.PREC_DIST */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x400000000ULL), /* SLOTS */

	INTEL_PLD_CONSTRAINT(0x1cd, 0xff),                     /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf),   /* MEM_INST_RETIRED.LOAD */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x2d0, 0xf),   /* MEM_INST_RETIRED.STORE */

	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), /* MEM_LOAD_*_RETIRED.* */

	INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf),               /* MEM_INST_RETIRED.* */

	/*
	 * Everything else is handled by PMU_FL_PEBS_ALL, because we
	 * need the full constraints from the main table.
	 */

	EVENT_CONSTRAINT_END
};
struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
	struct event_constraint *c;

	if (!event->attr.precise_ip)
		return NULL;

	if (x86_pmu.pebs_constraints) {
		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
			if (constraint_match(c, event->hw.config)) {
				event->hw.flags |= c->flags;
				return c;
			}
		}
	}

	/*
	 * Extended PEBS support
	 * Makes the PEBS code search the normal constraints.
	 */
	if (x86_pmu.flags & PMU_FL_PEBS_ALL)
		return NULL;

	return &emptyconstraint;
}
/*
 * We need the sched_task callback even for per-cpu events when we use
 * the large interrupt threshold, such that we can provide PID and TID
 * to PEBS samples.
 */
static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
{
	return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);
}

void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!sched_in && pebs_needs_sched_cb(cpuc))
		intel_pmu_drain_pebs_buffer();
}
static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
{
	struct debug_store *ds = cpuc->ds;
	u64 threshold;
	int reserved;

	if (x86_pmu.flags & PMU_FL_PEBS_ALL)
		reserved = x86_pmu.max_pebs_events + x86_pmu.num_counters_fixed;
	else
		reserved = x86_pmu.max_pebs_events;

	if (cpuc->n_pebs == cpuc->n_large_pebs) {
		threshold = ds->pebs_absolute_maximum -
			reserved * cpuc->pebs_record_size;
	} else {
		threshold = ds->pebs_buffer_base + cpuc->pebs_record_size;
	}

	ds->pebs_interrupt_threshold = threshold;
}
static void adaptive_pebs_record_size_update(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	u64 pebs_data_cfg = cpuc->pebs_data_cfg;
	int sz = sizeof(struct pebs_basic);

	if (pebs_data_cfg & PEBS_DATACFG_MEMINFO)
		sz += sizeof(struct pebs_meminfo);
	if (pebs_data_cfg & PEBS_DATACFG_GP)
		sz += sizeof(struct pebs_gprs);
	if (pebs_data_cfg & PEBS_DATACFG_XMMS)
		sz += sizeof(struct pebs_xmm);
	if (pebs_data_cfg & PEBS_DATACFG_LBRS)
		sz += x86_pmu.lbr_nr * sizeof(struct pebs_lbr_entry);

	cpuc->pebs_record_size = sz;
}
#define PERF_PEBS_MEMINFO_TYPE	(PERF_SAMPLE_ADDR | PERF_SAMPLE_DATA_SRC |   \
				PERF_SAMPLE_PHYS_ADDR | PERF_SAMPLE_WEIGHT | \
				PERF_SAMPLE_TRANSACTION)
static u64 pebs_update_adaptive_cfg(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	u64 sample_type = attr->sample_type;
	u64 pebs_data_cfg = 0;
	bool gprs, tsx_weight;

	if (!(sample_type & ~(PERF_SAMPLE_IP|PERF_SAMPLE_TIME)) &&
	    attr->precise_ip > 1)
		return pebs_data_cfg;

	if (sample_type & PERF_PEBS_MEMINFO_TYPE)
		pebs_data_cfg |= PEBS_DATACFG_MEMINFO;

	/*
	 * We need GPRs when:
	 * + user requested them
	 * + precise_ip < 2 for the non event IP
	 * + For RTM TSX weight we need GPRs for the abort code.
	 */
	gprs = (sample_type & PERF_SAMPLE_REGS_INTR) &&
	       (attr->sample_regs_intr & PEBS_GP_REGS);

	tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT) &&
		     ((attr->config & INTEL_ARCH_EVENT_MASK) ==
		      x86_pmu.rtm_abort_event);

	if (gprs || (attr->precise_ip < 2) || tsx_weight)
		pebs_data_cfg |= PEBS_DATACFG_GP;

	if ((sample_type & PERF_SAMPLE_REGS_INTR) &&
	    (attr->sample_regs_intr & PEBS_XMM_REGS))
		pebs_data_cfg |= PEBS_DATACFG_XMMS;

	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
		/*
		 * For now always log all LBRs. Could configure this
		 * later.
		 */
		pebs_data_cfg |= PEBS_DATACFG_LBRS |
			((x86_pmu.lbr_nr-1) << PEBS_DATACFG_LBR_SHIFT);
	}

	return pebs_data_cfg;
}
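/*
 * Example: an event with precise_ip > 1 whose sample_type adds only
 * PERF_SAMPLE_ADDR or PERF_SAMPLE_DATA_SRC on top of IP/TIME resolves to
 * just PEBS_DATACFG_MEMINFO -- the GP, XMM and LBR groups stay off and
 * the record stays small. Requesting PERF_SAMPLE_REGS_INTR with general
 * purpose registers, or using precise_ip < 2, pulls in PEBS_DATACFG_GP
 * as described in the comment above.
 */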
static void
pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
		  struct perf_event *event, bool add)
{
	struct pmu *pmu = event->ctx->pmu;
	/*
	 * Make sure we get updated with the first PEBS
	 * event. It will trigger also during removal, but
	 * that does not hurt:
	 */
	bool update = cpuc->n_pebs == 1;

	if (needed_cb != pebs_needs_sched_cb(cpuc)) {
		if (!needed_cb)
			perf_sched_cb_inc(pmu);
		else
			perf_sched_cb_dec(pmu);

		update = true;
	}

	/*
	 * The PEBS record doesn't shrink on pmu::del(). Doing so would require
	 * iterating all remaining PEBS events to reconstruct the config.
	 */
	if (x86_pmu.intel_cap.pebs_baseline && add) {
		u64 pebs_data_cfg;

		/* Clear pebs_data_cfg and pebs_record_size for first PEBS. */
		if (cpuc->n_pebs == 1) {
			cpuc->pebs_data_cfg = 0;
			cpuc->pebs_record_size = sizeof(struct pebs_basic);
		}

		pebs_data_cfg = pebs_update_adaptive_cfg(event);

		/* Update pebs_record_size if new event requires more data. */
		if (pebs_data_cfg & ~cpuc->pebs_data_cfg) {
			cpuc->pebs_data_cfg |= pebs_data_cfg;
			adaptive_pebs_record_size_update();
			update = true;
		}
	}

	if (update)
		pebs_update_threshold(cpuc);
}
void intel_pmu_pebs_add(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	bool needed_cb = pebs_needs_sched_cb(cpuc);

	cpuc->n_pebs++;
	if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
		cpuc->n_large_pebs++;

	pebs_update_state(needed_cb, cpuc, event, true);
}
void intel_pmu_pebs_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	struct debug_store *ds = cpuc->ds;

	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;

	cpuc->pebs_enabled |= 1ULL << hwc->idx;

	if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && (x86_pmu.version < 5))
		cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32);
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
		cpuc->pebs_enabled |= 1ULL << 63;

	if (x86_pmu.intel_cap.pebs_baseline) {
		hwc->config |= ICL_EVENTSEL_ADAPTIVE;
		if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {
			wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg);
			cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;
		}
	}

	/*
	 * Use auto-reload if possible to save a MSR write in the PMI.
	 * This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD.
	 */
	if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
		unsigned int idx = hwc->idx;

		if (idx >= INTEL_PMC_IDX_FIXED)
			idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED);
		ds->pebs_event_reset[idx] =
			(u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
	} else {
		ds->pebs_event_reset[hwc->idx] = 0;
	}
}
void intel_pmu_pebs_del(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	bool needed_cb = pebs_needs_sched_cb(cpuc);

	cpuc->n_pebs--;
	if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
		cpuc->n_large_pebs--;

	pebs_update_state(needed_cb, cpuc, event, false);
}
void intel_pmu_pebs_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (cpuc->n_pebs == cpuc->n_large_pebs)
		intel_pmu_drain_pebs_buffer();

	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);

	if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) &&
	    (x86_pmu.version < 5))
		cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32));
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
		cpuc->pebs_enabled &= ~(1ULL << 63);

	if (cpuc->enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);

	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
}
void intel_pmu_pebs_enable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
}

void intel_pmu_pebs_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}
static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned long from = cpuc->lbr_entries[0].from;
	unsigned long old_to, to = cpuc->lbr_entries[0].to;
	unsigned long ip = regs->ip;
	int is_64bit = 0;
	void *kaddr;
	int size;

	/*
	 * We don't need to fixup if the PEBS assist is fault like
	 */
	if (!x86_pmu.intel_cap.pebs_trap)
		return 1;

	/*
	 * No LBR entry, no basic block, no rewinding
	 */
	if (!cpuc->lbr_stack.nr || !from || !to)
		return 0;

	/*
	 * Basic blocks should never cross user/kernel boundaries
	 */
	if (kernel_ip(ip) != kernel_ip(to))
		return 0;

	/*
	 * unsigned math, either ip is before the start (impossible) or
	 * the basic block is larger than 1 page (sanity)
	 */
	if ((ip - to) > PEBS_FIXUP_SIZE)
		return 0;

	/*
	 * We sampled a branch insn, rewind using the LBR stack
	 */
	if (ip == to) {
		set_linear_ip(regs, from);
		return 1;
	}

	size = ip - to;
	if (!kernel_ip(ip)) {
		int bytes;
		u8 *buf = this_cpu_read(insn_buffer);

		/* 'size' must fit our buffer, see above */
		bytes = copy_from_user_nmi(buf, (void __user *)to, size);
		if (bytes != 0)
			return 0;

		kaddr = buf;
	} else {
		kaddr = (void *)to;
	}

	do {
		struct insn insn;

		old_to = to;

#ifdef CONFIG_X86_64
		is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
#endif
		insn_init(&insn, kaddr, size, is_64bit);
		insn_get_length(&insn);
		/*
		 * Make sure there was not a problem decoding the
		 * instruction and getting the length. This is
		 * doubly important because we have an infinite
		 * loop if insn.length=0.
		 */
		if (!insn.length)
			break;

		to += insn.length;
		kaddr += insn.length;
		size -= insn.length;
	} while (to < ip);

	if (to == ip) {
		set_linear_ip(regs, old_to);
		return 1;
	}

	/*
	 * Even though we decoded the basic block, the instruction stream
	 * never matched the given IP, either the TO or the IP got corrupted.
	 */
	return 0;
}
static inline u64 intel_get_tsx_weight(u64 tsx_tuning)
{
	if (tsx_tuning) {
		union hsw_tsx_tuning tsx = { .value = tsx_tuning };
		return tsx.cycles_last_block;
	}
	return 0;
}

static inline u64 intel_get_tsx_transaction(u64 tsx_tuning, u64 ax)
{
	u64 txn = (tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;

	/* For RTM XABORTs also log the abort code from AX */
	if ((txn & PERF_TXN_TRANSACTION) && (ax & 1))
		txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
	return txn;
}
static inline u64 get_pebs_status(void *n)
{
	if (x86_pmu.intel_cap.pebs_format < 4)
		return ((struct pebs_record_nhm *)n)->status;
	return ((struct pebs_basic *)n)->applicable_counters;
}
#define PERF_X86_EVENT_PEBS_HSW_PREC \
		(PERF_X86_EVENT_PEBS_ST_HSW | \
		 PERF_X86_EVENT_PEBS_LD_HSW | \
		 PERF_X86_EVENT_PEBS_NA_HSW)
static u64 get_data_src(struct perf_event *event, u64 aux)
{
	u64 val = PERF_MEM_NA;
	int fl = event->hw.flags;
	bool fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);

	if (fl & PERF_X86_EVENT_PEBS_LDLAT)
		val = load_latency_data(aux);
	else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
		val = precise_datala_hsw(event, aux);
	else if (fst)
		val = precise_store_data(aux);
	return val;
}
static void setup_pebs_fixed_sample_data(struct perf_event *event,
				   struct pt_regs *iregs, void *__pebs,
				   struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	/*
	 * We cast to the biggest pebs_record but are careful not to
	 * unconditionally access the 'extra' entries.
	 */
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct pebs_record_skl *pebs = __pebs;
	u64 sample_type;
	int fll;

	if (pebs == NULL)
		return;

	sample_type = event->attr.sample_type;
	fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;

	perf_sample_data_init(data, 0, event->hw.last_period);

	data->period = event->hw.last_period;

	/*
	 * Use latency for weight (only avail with PEBS-LL)
	 */
	if (fll && (sample_type & PERF_SAMPLE_WEIGHT))
		data->weight = pebs->lat;

	/*
	 * data.data_src encodes the data source
	 */
	if (sample_type & PERF_SAMPLE_DATA_SRC)
		data->data_src.val = get_data_src(event, pebs->dse);

	/*
	 * We must however always use iregs for the unwinder to stay sane; the
	 * record BP,SP,IP can point into thin air when the record is from a
	 * previous PMI context or an (I)RET happened between the record and
	 * the PMI.
	 */
	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		data->callchain = perf_callchain(event, iregs);

	/*
	 * We use the interrupt regs as a base because the PEBS record does not
	 * contain a full regs set, specifically it seems to lack segment
	 * descriptors, which get used by things like user_mode().
	 *
	 * In the simple case fix up only the IP for PERF_SAMPLE_IP.
	 */
	*regs = *iregs;

	/*
	 * Initialize regs->flags from PEBS,
	 * Clear exact bit (which uses x86 EFLAGS Reserved bit 3),
	 * i.e., do not rely on it being zero:
	 */
	regs->flags = pebs->flags & ~PERF_EFLAGS_EXACT;

	if (sample_type & PERF_SAMPLE_REGS_INTR) {
		regs->ax = pebs->ax;
		regs->bx = pebs->bx;
		regs->cx = pebs->cx;
		regs->dx = pebs->dx;
		regs->si = pebs->si;
		regs->di = pebs->di;

		regs->bp = pebs->bp;
		regs->sp = pebs->sp;

#ifndef CONFIG_X86_32
		regs->r8 = pebs->r8;
		regs->r9 = pebs->r9;
		regs->r10 = pebs->r10;
		regs->r11 = pebs->r11;
		regs->r12 = pebs->r12;
		regs->r13 = pebs->r13;
		regs->r14 = pebs->r14;
		regs->r15 = pebs->r15;
#endif
	}

	if (event->attr.precise_ip > 1) {
		/*
		 * Haswell and later processors have an 'eventing IP'
		 * (real IP) which fixes the off-by-1 skid in hardware.
		 * Use it when precise_ip >= 2 :
		 */
		if (x86_pmu.intel_cap.pebs_format >= 2) {
			set_linear_ip(regs, pebs->real_ip);
			regs->flags |= PERF_EFLAGS_EXACT;
		} else {
			/* Otherwise, use PEBS off-by-1 IP: */
			set_linear_ip(regs, pebs->ip);

			/*
			 * With precise_ip >= 2, try to fix up the off-by-1 IP
			 * using the LBR. If successful, the fixup function
			 * corrects regs->ip and calls set_linear_ip() on regs:
			 */
			if (intel_pmu_pebs_fixup_ip(regs))
				regs->flags |= PERF_EFLAGS_EXACT;
		}
	} else {
		/*
		 * When precise_ip == 1, return the PEBS off-by-1 IP,
		 * no fixup attempted:
		 */
		set_linear_ip(regs, pebs->ip);
	}

	if ((sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR)) &&
	    x86_pmu.intel_cap.pebs_format >= 1)
		data->addr = pebs->dla;

	if (x86_pmu.intel_cap.pebs_format >= 2) {
		/* Only set the TSX weight when no memory weight. */
		if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll)
			data->weight = intel_get_tsx_weight(pebs->tsx_tuning);

		if (sample_type & PERF_SAMPLE_TRANSACTION)
			data->txn = intel_get_tsx_transaction(pebs->tsx_tuning,
							      pebs->ax);
	}

	/*
	 * v3 supplies an accurate time stamp, so we use that
	 * for the time stamp.
	 *
	 * We can only do this for the default trace clock.
	 */
	if (x86_pmu.intel_cap.pebs_format >= 3 &&
	    event->attr.use_clockid == 0)
		data->time = native_sched_clock_from_tsc(pebs->tsc);

	if (has_branch_stack(event))
		data->br_stack = &cpuc->lbr_stack;
}
static void adaptive_pebs_save_regs(struct pt_regs *regs,
				    struct pebs_gprs *gprs)
{
	regs->ax = gprs->ax;
	regs->bx = gprs->bx;
	regs->cx = gprs->cx;
	regs->dx = gprs->dx;
	regs->si = gprs->si;
	regs->di = gprs->di;
	regs->bp = gprs->bp;
	regs->sp = gprs->sp;
#ifndef CONFIG_X86_32
	regs->r8 = gprs->r8;
	regs->r9 = gprs->r9;
	regs->r10 = gprs->r10;
	regs->r11 = gprs->r11;
	regs->r12 = gprs->r12;
	regs->r13 = gprs->r13;
	regs->r14 = gprs->r14;
	regs->r15 = gprs->r15;
#endif
}
/*
 * With adaptive PEBS the layout depends on which fields are configured.
 */
static void setup_pebs_adaptive_sample_data(struct perf_event *event,
					    struct pt_regs *iregs, void *__pebs,
					    struct perf_sample_data *data,
					    struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct pebs_basic *basic = __pebs;
	void *next_record = basic + 1;
	u64 sample_type;
	u64 format_size;
	struct pebs_meminfo *meminfo = NULL;
	struct pebs_gprs *gprs = NULL;
	struct x86_perf_regs *perf_regs;

	if (basic == NULL)
		return;

	perf_regs = container_of(regs, struct x86_perf_regs, regs);
	perf_regs->xmm_regs = NULL;

	sample_type = event->attr.sample_type;
	format_size = basic->format_size;
	perf_sample_data_init(data, 0, event->hw.last_period);
	data->period = event->hw.last_period;

	if (event->attr.use_clockid == 0)
		data->time = native_sched_clock_from_tsc(basic->tsc);

	/*
	 * We must however always use iregs for the unwinder to stay sane; the
	 * record BP,SP,IP can point into thin air when the record is from a
	 * previous PMI context or an (I)RET happened between the record and
	 * the PMI.
	 */
	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		data->callchain = perf_callchain(event, iregs);

	*regs = *iregs;
	/* The ip in basic is EventingIP */
	set_linear_ip(regs, basic->ip);
	regs->flags = PERF_EFLAGS_EXACT;

	/*
	 * The record for MEMINFO is in front of GP
	 * But PERF_SAMPLE_TRANSACTION needs gprs->ax.
	 * Save the pointer here but process later.
	 */
	if (format_size & PEBS_DATACFG_MEMINFO) {
		meminfo = next_record;
		next_record = meminfo + 1;
	}

	if (format_size & PEBS_DATACFG_GP) {
		gprs = next_record;
		next_record = gprs + 1;

		if (event->attr.precise_ip < 2) {
			set_linear_ip(regs, gprs->ip);
			regs->flags &= ~PERF_EFLAGS_EXACT;
		}

		if (sample_type & PERF_SAMPLE_REGS_INTR)
			adaptive_pebs_save_regs(regs, gprs);
	}

	if (format_size & PEBS_DATACFG_MEMINFO) {
		if (sample_type & PERF_SAMPLE_WEIGHT)
			data->weight = meminfo->latency ?:
				intel_get_tsx_weight(meminfo->tsx_tuning);

		if (sample_type & PERF_SAMPLE_DATA_SRC)
			data->data_src.val = get_data_src(event, meminfo->aux);

		if (sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR))
			data->addr = meminfo->address;

		if (sample_type & PERF_SAMPLE_TRANSACTION)
			data->txn = intel_get_tsx_transaction(meminfo->tsx_tuning,
							      gprs ? gprs->ax : 0);
	}

	if (format_size & PEBS_DATACFG_XMMS) {
		struct pebs_xmm *xmm = next_record;

		next_record = xmm + 1;
		perf_regs->xmm_regs = xmm->xmm;
	}

	if (format_size & PEBS_DATACFG_LBRS) {
		struct pebs_lbr *lbr = next_record;
		int num_lbr = ((format_size >> PEBS_DATACFG_LBR_SHIFT)
					& 0xff) + 1;
		next_record = next_record + num_lbr * sizeof(struct pebs_lbr_entry);

		if (has_branch_stack(event)) {
			intel_pmu_store_pebs_lbrs(lbr);
			data->br_stack = &cpuc->lbr_stack;
		}
	}

	WARN_ONCE(next_record != __pebs + (format_size >> 48),
			"PEBS record size %llu, expected %llu, config %llx\n",
			format_size >> 48,
			(u64)(next_record - __pebs),
			basic->format_size);
}
static inline void *
get_next_pebs_record_by_bit(void *base, void *top, int bit)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	void *at;
	u64 pebs_status;

	/*
	 * fmt0 does not have a status bitfield (does not use
	 * perf_record_nhm format)
	 */
	if (x86_pmu.intel_cap.pebs_format < 1)
		return base;

	if (base == NULL)
		return NULL;

	for (at = base; at < top; at += cpuc->pebs_record_size) {
		unsigned long status = get_pebs_status(at);

		if (test_bit(bit, (unsigned long *)&status)) {
			/* PEBS v3 has accurate status bits */
			if (x86_pmu.intel_cap.pebs_format >= 3)
				return at;

			if (status == (1 << bit))
				return at;

			/* clear non-PEBS bit and re-check */
			pebs_status = status & cpuc->pebs_enabled;
			pebs_status &= PEBS_COUNTER_MASK;
			if (pebs_status == (1 << bit))
				return at;
		}
	}
	return NULL;
}
void intel_pmu_auto_reload_read(struct perf_event *event)
{
	WARN_ON(!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD));

	perf_pmu_disable(event->pmu);
	intel_pmu_drain_pebs_buffer();
	perf_pmu_enable(event->pmu);
}
/*
 * Special variant of intel_pmu_save_and_restart() for auto-reload.
 */
static int
intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - x86_pmu.cntval_bits;
	u64 period = hwc->sample_period;
	u64 prev_raw_count, new_raw_count;
	s64 new, old;

	WARN_ON(!period);

	/*
	 * drain_pebs() only happens when the PMU is disabled.
	 */
	WARN_ON(this_cpu_read(cpu_hw_events.enabled));

	prev_raw_count = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new_raw_count);
	local64_set(&hwc->prev_count, new_raw_count);

	/*
	 * Since the counter increments a negative counter value and
	 * overflows on the sign switch, giving the interval:
	 *
	 *   [-period, 0]
	 *
	 * the difference between two consecutive reads is:
	 *
	 *   A) value2 - value1;
	 *      when no overflows have happened in between,
	 *
	 *   B) (0 - value1) + (value2 - (-period));
	 *      when one overflow happened in between,
	 *
	 *   C) (0 - value1) + (n - 1) * (period) + (value2 - (-period));
	 *      when @n overflows happened in between.
	 *
	 * Here A) is the obvious difference, B) is the extension to the
	 * discrete interval, where the first term is to the top of the
	 * interval and the second term is from the bottom of the next
	 * interval and C) the extension to multiple intervals, where the
	 * middle term is the whole intervals covered.
	 *
	 * An equivalent of C, by reduction, is:
	 *
	 *   value2 - value1 + n * period
	 */
	new = ((s64)(new_raw_count << shift) >> shift);
	old = ((s64)(prev_raw_count << shift) >> shift);
	local64_add(new - old + count * period, &event->count);

	perf_event_update_userpage(event);

	return 0;
}
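/*
 * Worked example of the A/B/C cases above, with period = 1000: suppose
 * value1 = -300 (700 events into the interval) and the counter overflows
 * once, reloads to -1000 and reaches value2 = -900 by the second read.
 * Case B gives (0 - (-300)) + ((-900) - (-1000)) = 300 + 100 = 400
 * events, and the reduced form value2 - value1 + n * period =
 * -900 + 300 + 1 * 1000 = 400 agrees. That is exactly the quantity added
 * to event->count above, with @count supplying n.
 */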
static void __intel_pmu_pebs_event(struct perf_event *event,
				   struct pt_regs *iregs,
				   void *base, void *top,
				   int bit, int count,
				   void (*setup_sample)(struct perf_event *,
						struct pt_regs *,
						void *,
						struct perf_sample_data *,
						struct pt_regs *))
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	struct perf_sample_data data;
	struct x86_perf_regs perf_regs;
	struct pt_regs *regs = &perf_regs.regs;
	void *at = get_next_pebs_record_by_bit(base, top, bit);

	if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
		/*
		 * Now, auto-reload is only enabled in fixed period mode.
		 * The reload value is always hwc->sample_period.
		 * May need to change it, if auto-reload is enabled in
		 * freq mode later.
		 */
		intel_pmu_save_and_restart_reload(event, count);
	} else if (!intel_pmu_save_and_restart(event))
		return;

	while (count > 1) {
		setup_sample(event, iregs, at, &data, regs);
		perf_event_output(event, &data, regs);
		at += cpuc->pebs_record_size;
		at = get_next_pebs_record_by_bit(at, top, bit);
		count--;
	}

	setup_sample(event, iregs, at, &data, regs);

	/*
	 * All but the last records are processed.
	 * The last one is left to be able to call the overflow handler.
	 */
	if (perf_event_overflow(event, &data, regs)) {
		x86_pmu_stop(event, 0);
		return;
	}
}
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event = cpuc->events[0]; /* PMC0 only */
	struct pebs_record_core *at, *top;
	int n;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;

	/*
	 * Whatever else happens, drain the thing
	 */
	ds->pebs_index = ds->pebs_buffer_base;

	if (!test_bit(0, cpuc->active_mask))
		return;

	WARN_ON_ONCE(!event);

	if (!event->attr.precise_ip)
		return;

	n = top - at;
	if (n <= 0) {
		if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
			intel_pmu_save_and_restart_reload(event, 0);
		return;
	}

	__intel_pmu_pebs_event(event, iregs, at, top, 0, n,
			       setup_pebs_fixed_sample_data);
}
static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size)
{
	struct perf_event *event;
	int bit;

	/*
	 * The drain_pebs() could be called twice in a short period
	 * for auto-reload event in pmu::read(). No overflows have
	 * happened in between, but intel_pmu_save_and_restart_reload()
	 * still needs to be called to update event->count for this case.
	 */
	for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, size) {
		event = cpuc->events[bit];
		if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
			intel_pmu_save_and_restart_reload(event, 0);
	}
}
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event;
	void *base, *at, *top;
	short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
	short error[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
	int bit, i, size;
	u64 mask;

	if (!x86_pmu.pebs_active)
		return;

	base = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;

	ds->pebs_index = ds->pebs_buffer_base;

	mask = (1ULL << x86_pmu.max_pebs_events) - 1;
	size = x86_pmu.max_pebs_events;
	if (x86_pmu.flags & PMU_FL_PEBS_ALL) {
		mask |= ((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;
		size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
	}

	if (unlikely(base >= top)) {
		intel_pmu_pebs_event_update_no_drain(cpuc, size);
		return;
	}

	for (at = base; at < top; at += x86_pmu.pebs_record_size) {
		struct pebs_record_nhm *p = at;
		u64 pebs_status;

		pebs_status = p->status & cpuc->pebs_enabled;
		pebs_status &= mask;

		/* PEBS v3 has more accurate status bits */
		if (x86_pmu.intel_cap.pebs_format >= 3) {
			for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
				counts[bit]++;

			continue;
		}

		/*
		 * On some CPUs the PEBS status can be zero when PEBS is
		 * racing with clearing of GLOBAL_STATUS.
		 *
		 * Normally we would drop that record, but in the
		 * case when there is only a single active PEBS event
		 * we can assume it's for that event.
		 */
		if (!pebs_status && cpuc->pebs_enabled &&
			!(cpuc->pebs_enabled & (cpuc->pebs_enabled-1)))
			pebs_status = cpuc->pebs_enabled;

		bit = find_first_bit((unsigned long *)&pebs_status,
					x86_pmu.max_pebs_events);
		if (bit >= x86_pmu.max_pebs_events)
			continue;

		/*
		 * The PEBS hardware does not deal well with the situation
		 * when events happen near to each other and multiple bits
		 * are set. But it should happen rarely.
		 *
		 * If these events include one PEBS and multiple non-PEBS
		 * events, it doesn't impact PEBS record. The record will
		 * be handled normally. (slow path)
		 *
		 * If these events include two or more PEBS events, the
		 * records for the events can be collapsed into a single
		 * one, and it's not possible to reconstruct all events
		 * that caused the PEBS record. It's called collision.
		 * If collision happened, the record will be dropped.
		 */
		if (p->status != (1ULL << bit)) {
			for_each_set_bit(i, (unsigned long *)&pebs_status, size)
				error[i]++;
			continue;
		}

		counts[bit]++;
	}

	for_each_set_bit(bit, (unsigned long *)&mask, size) {
		if ((counts[bit] == 0) && (error[bit] == 0))
			continue;

		event = cpuc->events[bit];
		if (WARN_ON_ONCE(!event))
			continue;

		if (WARN_ON_ONCE(!event->attr.precise_ip))
			continue;

		/* log dropped samples number */
		if (error[bit]) {
			perf_log_lost_samples(event, error[bit]);

			if (perf_event_account_interrupt(event))
				x86_pmu_stop(event, 0);
		}

		if (counts[bit]) {
			__intel_pmu_pebs_event(event, iregs, base,
					       top, bit, counts[bit],
					       setup_pebs_fixed_sample_data);
		}
	}
}
static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs)
{
	short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event;
	void *base, *at, *top;
	int bit, size;
	u64 mask;

	if (!x86_pmu.pebs_active)
		return;

	base = (struct pebs_basic *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_basic *)(unsigned long)ds->pebs_index;

	ds->pebs_index = ds->pebs_buffer_base;

	mask = ((1ULL << x86_pmu.max_pebs_events) - 1) |
	       (((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED);
	size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;

	if (unlikely(base >= top)) {
		intel_pmu_pebs_event_update_no_drain(cpuc, size);
		return;
	}

	for (at = base; at < top; at += cpuc->pebs_record_size) {
		u64 pebs_status;

		pebs_status = get_pebs_status(at) & cpuc->pebs_enabled;
		pebs_status &= mask;

		for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
			counts[bit]++;
	}

	for_each_set_bit(bit, (unsigned long *)&mask, size) {
		if (counts[bit] == 0)
			continue;

		event = cpuc->events[bit];
		if (WARN_ON_ONCE(!event))
			continue;

		if (WARN_ON_ONCE(!event->attr.precise_ip))
			continue;

		__intel_pmu_pebs_event(event, iregs, base,
				       top, bit, counts[bit],
				       setup_pebs_adaptive_sample_data);
	}
}
/*
 * BTS, PEBS probe and setup
 */

void __init intel_ds_init(void)
{
	/*
	 * No support for 32bit formats
	 */
	if (!boot_cpu_has(X86_FEATURE_DTES64))
		return;

	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
	x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
	if (x86_pmu.version <= 4) {
		x86_pmu.pebs_no_isolation = 1;
		x86_pmu.pebs_no_xmm_regs = 1;
	}

	if (x86_pmu.pebs) {
		char pebs_type = x86_pmu.intel_cap.pebs_trap ?  '+' : '-';
		char *pebs_qual = "";
		int format = x86_pmu.intel_cap.pebs_format;

		if (format < 4)
			x86_pmu.intel_cap.pebs_baseline = 0;

		switch (format) {
		case 0:
			pr_cont("PEBS fmt0%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
			/*
			 * Using >PAGE_SIZE buffers makes the WRMSR to
			 * PERF_GLOBAL_CTRL in intel_pmu_enable_all()
			 * mysteriously hang on Core2.
			 *
			 * As a workaround, we don't do this.
			 */
			x86_pmu.pebs_buffer_size = PAGE_SIZE;
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
			break;

		case 1:
			pr_cont("PEBS fmt1%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			break;

		case 2:
			pr_cont("PEBS fmt2%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_hsw);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			break;

		case 3:
			pr_cont("PEBS fmt3%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_skl);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME;
			break;

		case 4:
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_icl;
			x86_pmu.pebs_record_size = sizeof(struct pebs_basic);
			if (x86_pmu.intel_cap.pebs_baseline) {
				x86_pmu.large_pebs_flags |=
					PERF_SAMPLE_BRANCH_STACK |
					PERF_SAMPLE_TIME;
				x86_pmu.flags |= PMU_FL_PEBS_ALL;
				pebs_qual = "-baseline";
				x86_get_pmu()->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
			} else {
				/* Only basic record supported */
				x86_pmu.pebs_no_xmm_regs = 1;
				x86_pmu.large_pebs_flags &=
					~(PERF_SAMPLE_ADDR |
					  PERF_SAMPLE_TIME |
					  PERF_SAMPLE_DATA_SRC |
					  PERF_SAMPLE_TRANSACTION |
					  PERF_SAMPLE_REGS_USER |
					  PERF_SAMPLE_REGS_INTR);
			}
			pr_cont("PEBS fmt4%c%s, ", pebs_type, pebs_qual);
			break;

		default:
			pr_cont("no PEBS fmt%d%c, ", format, pebs_type);
			x86_pmu.pebs = 0;
		}
	}
}
void perf_restore_debug_store(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);
}