/*
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <asm/hardirq.h>

#include "perf_event.h"
/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
};
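/*
 * Note: each entry above is a raw PERFEVTSEL encoding with the event
 * select in bits 0-7 and the unit mask in bits 8-15; e.g. 0x412e is
 * event 0x2e with umask 0x41 (LLC misses).  0x0300 is not a real
 * encoding and is handled as a pseudo-event for fixed counter 2.
 */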
static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};

static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};

static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

EVENT_ATTR_STR(mem-loads,	mem_ld_nhm,	"event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads,	mem_ld_snb,	"event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores,	mem_st_snb,	"event=0xcd,umask=0x2");

struct attribute *nhm_events_attrs[] = {
	EVENT_PTR(mem_ld_nhm),
	NULL,
};

struct attribute *snb_events_attrs[] = {
	EVENT_PTR(mem_ld_snb),
	EVENT_PTR(mem_st_snb),
	NULL,
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_DMND_RFO		(1ULL << 1)
#define SNB_DMND_IFETCH		(1ULL << 2)
#define SNB_DMND_WB		(1ULL << 3)
#define SNB_PF_DATA_RD		(1ULL << 4)
#define SNB_PF_RFO		(1ULL << 5)
#define SNB_PF_IFETCH		(1ULL << 6)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LLC_RFO		(1ULL << 8)
#define SNB_LLC_IFETCH		(1ULL << 9)
#define SNB_BUS_LOCKS		(1ULL << 10)
#define SNB_STRM_ST		(1ULL << 11)
#define SNB_OTHER		(1ULL << 15)
#define SNB_RESP_ANY		(1ULL << 16)
#define SNB_NO_SUPP		(1ULL << 17)
#define SNB_LLC_HITM		(1ULL << 18)
#define SNB_LLC_HITE		(1ULL << 19)
#define SNB_LLC_HITS		(1ULL << 20)
#define SNB_LLC_HITF		(1ULL << 21)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
				 SNB_HITM)

#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS		SNB_RESP_ANY
#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)
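/*
 * The request-type bits (low word) and response/snoop bits (upper bits)
 * above are OR-ed together to form the value written into an
 * MSR_OFFCORE_RSP_x register; e.g. a demand-data-read LLC miss is
 * roughly SNB_DMND_READ|SNB_L3_MISS, which is exactly how the extra-reg
 * table below composes its entries.
 */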
static __initconst const u64 snb_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
	},
 },
};
static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */

#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
				/* reserved */
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
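/*
 * As with the SNB bits above, a cache-event entry for this PMU is built
 * by OR-ing a request type with a response set, e.g.
 * NHM_DMND_READ|NHM_L3_MISS for demand-read LLC misses; the value is
 * then programmed into MSR_OFFCORE_RSP_0 by the extra-reg machinery.
 */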
static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
	},
 },
};
static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
{
	/* user explicitly requested branch sampling */
	if (has_branch_stack(event))
		return true;

	/* implicit branch sampling to correct PEBS skid */
	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		return true;

	return false;
}
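/*
 * precise_ip > 1 asks for reduced/zero-skid samples; on PMUs where the
 * PEBS assist traps (intel_cap.pebs_trap), the LBR is used to walk back
 * from the trap address to the instruction that actually overflowed the
 * counter, which is why such events implicitly need LBR sampling.
 */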
static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();

	intel_pmu_pebs_disable_all();
	intel_pmu_lbr_disable_all();
}

static void intel_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all();
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
			x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[INTEL_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}
/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

	/*
	 * The Errata requires below steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we choose are a little different from above.
	 * A) To reduce MSR operations, we don't run step 1) as they
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with magic number;
	 * C) With step 5), we do clear only when the PERFEVTSELx is
	 *    not used currently.
	 * D) Call x86_perf_event_set_period to restore PMCx;
	 */

	/* We always operate 4 pairs of PERF Counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}

static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}
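/*
 * Note: by the time intel_pmu_nhm_enable_all() runs, the counters have
 * already been globally disabled (step 1 of the erratum sequence), which
 * is why the workaround itself skips clearing GLOBAL_CTRL and only
 * toggles it around the magic-event programming.
 */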
static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}
static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);

	/*
	 * must disable before any actual event
	 * because any event may be combined with LBR
	 */
	if (intel_pmu_needs_lbr_smpl(event))
		intel_pmu_lbr_disable(event);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);
}
static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	wrmsrl(hwc->config_base, ctrl_val);
}
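/*
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL packs one 4-bit control field per
 * fixed counter: bit 0 enables ring-0 counting, bit 1 ring-3 counting,
 * bit 2 the "any thread" qualifier (v3+) and bit 3 PMI on overflow;
 * hence the 0xfULL << (idx * 4) mask used in enable/disable above.
 */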
static void intel_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		if (!__this_cpu_read(cpu_hw_events.enabled))
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}

	/*
	 * must enable before any actual event
	 * because any event may be combined with LBR
	 */
	if (intel_pmu_needs_lbr_smpl(event))
		intel_pmu_lbr_enable(event);

	if (event->attr.exclude_host)
		cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
	if (event->attr.exclude_guest)
		cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc);
		return;
	}

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_enable(event);

	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
int intel_pmu_save_and_restart(struct perf_event *event)
{
	x86_perf_event_update(event);
	return x86_perf_event_set_period(event);
}

static void intel_pmu_reset(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
		wrmsrl_safe(x86_pmu_event_addr(idx),  0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
		wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 status;
	int handled;

	cpuc = &__get_cpu_var(cpu_hw_events);

	/*
	 * Some chipsets need to unmask the LVTPC in a particular spot
	 * inside the nmi handler.  As a result, the unmasking was pushed
	 * into all the nmi handlers.
	 *
	 * This handler doesn't seem to have any issues with the unmasking
	 * so it was left at the top.
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);

	intel_pmu_disable_all();
	handled = intel_pmu_drain_bts_buffer();
	status = intel_pmu_get_status();
	if (!status) {
		intel_pmu_enable_all(0);
		return handled;
	}

	loops = 0;
again:
	intel_pmu_ack_status(status);
	if (++loops > 100) {
		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
		perf_event_print_debug();
		intel_pmu_reset();
		goto done;
	}

	inc_irq_stat(apic_perf_irqs);

	intel_pmu_lbr_read();

	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
		handled++;
		x86_pmu.drain_pebs(regs);
	}

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		perf_sample_data_init(&data, 0, event->hw.last_period);

		if (has_branch_stack(event))
			data.br_stack = &cpuc->lbr_stack;

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

done:
	intel_pmu_enable_all(0);
	return handled;
}
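/*
 * Bit 62 of GLOBAL_STATUS is the PEBS buffer-overflow indicator; it is
 * handled separately above because PEBS records are drained from the DS
 * area rather than being attributed to an individual counter bit.
 */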
static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	if (event->attr.freq)
		return NULL;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
		return &bts_constraint;

	return NULL;
}

static int intel_alt_er(int idx)
{
	if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
		return idx;

	if (idx == EXTRA_REG_RSP_0)
		return EXTRA_REG_RSP_1;

	if (idx == EXTRA_REG_RSP_1)
		return EXTRA_REG_RSP_0;

	return idx;
}
static void intel_fixup_er(struct perf_event *event, int idx)
{
	event->hw.extra_reg.idx = idx;

	if (idx == EXTRA_REG_RSP_0) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= 0x01b7;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
	} else if (idx == EXTRA_REG_RSP_1) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= 0x01bb;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
	}
}
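/*
 * 0x01b7 and 0x01bb are the event/umask encodings of OFFCORE_RESPONSE_0
 * and OFFCORE_RESPONSE_1; intel_fixup_er() rewrites the event so that it
 * matches whichever MSR_OFFCORE_RSP_x was actually allocated for it.
 */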
/*
 * manage allocation of shared extra msr for certain events
 *
 * sharing can be:
 * per-cpu: to be shared between the various events on a single PMU
 * per-core: per-cpu + shared by HT threads
 */
static struct event_constraint *
__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
				   struct perf_event *event,
				   struct hw_perf_event_extra *reg)
{
	struct event_constraint *c = &emptyconstraint;
	struct er_account *era;
	unsigned long flags;
	int idx = reg->idx;

	/*
	 * reg->alloc can be set due to existing state, so for fake cpuc we
	 * need to ignore this, otherwise we might fail to allocate proper fake
	 * state for this extra reg constraint. Also see the comment below.
	 */
	if (reg->alloc && !cpuc->is_fake)
		return NULL; /* call x86_get_event_constraint() */

again:
	era = &cpuc->shared_regs->regs[idx];
	/*
	 * we use spin_lock_irqsave() to avoid lockdep issues when
	 * passing a fake cpuc
	 */
	raw_spin_lock_irqsave(&era->lock, flags);

	if (!atomic_read(&era->ref) || era->config == reg->config) {

		/*
		 * If its a fake cpuc -- as per validate_{group,event}() we
		 * shouldn't touch event state and we can avoid doing so
		 * since both will only call get_event_constraints() once
		 * on each event, this avoids the need for reg->alloc.
		 *
		 * Not doing the ER fixup will only result in era->reg being
		 * wrong, but since we won't actually try and program hardware
		 * this isn't a problem either.
		 */
		if (!cpuc->is_fake) {
			if (idx != reg->idx)
				intel_fixup_er(event, idx);

			/*
			 * x86_schedule_events() can call get_event_constraints()
			 * multiple times on events in the case of incremental
			 * scheduling(). reg->alloc ensures we only do the ER
			 * allocation once.
			 */
			reg->alloc = 1;
		}

		/* lock in msr value */
		era->config = reg->config;
		era->reg = reg->reg;

		/* one more user */
		atomic_inc(&era->ref);

		/*
		 * need to call x86_get_event_constraint()
		 * to check if associated event has constraints
		 */
		c = NULL;
	} else {
		idx = intel_alt_er(idx);
		if (idx != reg->idx) {
			raw_spin_unlock_irqrestore(&era->lock, flags);
			goto again;
		}
	}
	raw_spin_unlock_irqrestore(&era->lock, flags);

	return c;
}
static void
__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
				   struct hw_perf_event_extra *reg)
{
	struct er_account *era;

	/*
	 * Only put constraint if extra reg was actually allocated. Also takes
	 * care of event which do not use an extra shared reg.
	 *
	 * Also, if this is a fake cpuc we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
	 * either since it'll be thrown out.
	 */
	if (!reg->alloc || cpuc->is_fake)
		return;

	era = &cpuc->shared_regs->regs[reg->idx];

	/* one fewer user */
	atomic_dec(&era->ref);

	/* allocate again next time */
	reg->alloc = 0;
}

static struct event_constraint *
intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
			      struct perf_event *event)
{
	struct event_constraint *c = NULL, *d;
	struct hw_perf_event_extra *xreg, *breg;

	xreg = &event->hw.extra_reg;
	if (xreg->idx != EXTRA_REG_NONE) {
		c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
		if (c == &emptyconstraint)
			return c;
	}
	breg = &event->hw.branch_reg;
	if (breg->idx != EXTRA_REG_NONE) {
		d = __intel_shared_reg_get_constraints(cpuc, event, breg);
		if (d == &emptyconstraint) {
			__intel_shared_reg_put_constraints(cpuc, xreg);
			c = d;
		}
	}
	return c;
}
struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if ((event->hw.config & c->cmask) == c->code) {
				/* hw.flags zeroed at initialization */
				event->hw.flags |= c->flags;
				return c;
			}
		}
	}

	return &unconstrained;
}

static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	c = intel_bts_constraints(event);
	if (c)
		return c;

	c = intel_pebs_constraints(event);
	if (c)
		return c;

	c = intel_shared_regs_constraints(cpuc, event);
	if (c)
		return c;

	return x86_get_event_constraints(cpuc, event);
}
static void
intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	struct hw_perf_event_extra *reg;

	reg = &event->hw.extra_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);

	reg = &event->hw.branch_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);
}

static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	event->hw.flags = 0;
	intel_put_shared_regs_event_constraints(cpuc, event);
}
static void intel_pebs_aliases_core2(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use INST_RETIRED.ANY_P
		 * (0x00c0), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * INST_RETIRED.ANY_P counts the number of cycles that retires
		 * CNTMASK instructions. By setting CNTMASK to a value (16)
		 * larger than the maximum number of instructions that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or less instructions, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}

static void intel_pebs_aliases_snb(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use UOPS_RETIRED.ALL
		 * (0x01c2), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * UOPS_RETIRED.ALL counts the number of cycles that retires
		 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
		 * larger than the maximum number of micro-ops that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or less micro-ops, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}
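/*
 * The rewritten encoding is equivalent to what a user could request by
 * hand with the format attributes defined below, e.g. (illustrative
 * only):
 *	perf record -e cpu/event=0xc2,umask=0x1,inv=1,cmask=16/pp ...
 * Doing the rewrite here lets a plain "cycles:pp" request work
 * transparently on these PMUs.
 */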
static int intel_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.precise_ip && x86_pmu.pebs_aliases)
		x86_pmu.pebs_aliases(event);

	if (intel_pmu_needs_lbr_smpl(event)) {
		ret = intel_pmu_setup_lbr_filter(event);
		if (ret)
			return ret;
	}

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
		return 0;

	if (x86_pmu.version < 3)
		return -EINVAL;

	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

	return 0;
}

struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	if (x86_pmu.guest_get_msrs)
		return x86_pmu.guest_get_msrs(nr);
	*nr = 0;
	return NULL;
}
EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;

	arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
	arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
	arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
	/*
	 * If PMU counter has PEBS enabled it is not enough to disable counter
	 * on a guest entry since PEBS memory write can overshoot guest entry
	 * and corrupt guest memory. Disabling PEBS solves the problem.
	 */
	arr[1].msr = MSR_IA32_PEBS_ENABLE;
	arr[1].host = cpuc->pebs_enabled;
	arr[1].guest = 0;

	*nr = 2;
	return arr;
}
static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++)  {
		struct perf_event *event = cpuc->events[idx];

		arr[idx].msr = x86_pmu_config_addr(idx);
		arr[idx].host = arr[idx].guest = 0;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		arr[idx].host = arr[idx].guest =
			event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;

		if (event->attr.exclude_host)
			arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		else if (event->attr.exclude_guest)
			arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
	}

	*nr = x86_pmu.num_counters;
	return arr;
}
static void core_pmu_enable_event(struct perf_event *event)
{
	if (!event->attr.exclude_host)
		x86_pmu_enable_event(event);
}

static void core_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

		if (!test_bit(idx, cpuc->active_mask) ||
				cpuc->events[idx]->attr.exclude_host)
			continue;

		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
	}
}
PMU_FORMAT_ATTR(event,	"config:0-7"	);
PMU_FORMAT_ATTR(umask,	"config:8-15"	);
PMU_FORMAT_ATTR(edge,	"config:18"	);
PMU_FORMAT_ATTR(pc,	"config:19"	);
PMU_FORMAT_ATTR(any,	"config:21"	); /* v3 + */
PMU_FORMAT_ATTR(inv,	"config:23"	);
PMU_FORMAT_ATTR(cmask,	"config:24-31"	);

static struct attribute *intel_arch_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_pc.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	NULL,
};

ssize_t intel_event_sysfs_show(char *page, u64 config)
{
	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);

	return x86_event_sysfs_show(page, config, event);
}
static __initconst const struct x86_pmu core_pmu = {
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= core_pmu_enable_all,
	.enable			= core_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.event_constraints	= intel_core_event_constraints,
	.guest_get_msrs		= core_guest_get_msrs,
	.format_attrs		= intel_arch_formats_attr,
	.events_sysfs_show	= intel_event_sysfs_show,
};
struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	struct intel_shared_regs *regs;
	int i;

	regs = kzalloc_node(sizeof(struct intel_shared_regs),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (regs) {
		/*
		 * initialize the locks to keep lockdep happy
		 */
		for (i = 0; i < EXTRA_REG_MAX; i++)
			raw_spin_lock_init(&regs->regs[i].lock);

		regs->core_id = -1;
	}
	return regs;
}

static int intel_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map))
		return NOTIFY_OK;

	cpuc->shared_regs = allocate_shared_regs(cpu);
	if (!cpuc->shared_regs)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}
static void intel_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	int core_id = topology_core_id(cpu);
	int i;

	init_debug_store_on_cpu(cpu);
	/*
	 * Deal with CPUs that don't clear their LBRs on power-up.
	 */
	intel_pmu_lbr_reset();

	cpuc->lbr_sel = NULL;

	if (!cpuc->shared_regs)
		return;

	if (!(x86_pmu.er_flags & ERF_NO_HT_SHARING)) {
		for_each_cpu(i, topology_thread_cpumask(cpu)) {
			struct intel_shared_regs *pc;

			pc = per_cpu(cpu_hw_events, i).shared_regs;
			if (pc && pc->core_id == core_id) {
				cpuc->kfree_on_online = cpuc->shared_regs;
				cpuc->shared_regs = pc;
				break;
			}
		}
		cpuc->shared_regs->core_id = core_id;
		cpuc->shared_regs->refcnt++;
	}

	if (x86_pmu.lbr_sel_map)
		cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
}

static void intel_pmu_cpu_dying(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct intel_shared_regs *pc;

	pc = cpuc->shared_regs;
	if (pc) {
		if (pc->core_id == -1 || --pc->refcnt == 0)
			kfree(pc);
		cpuc->shared_regs = NULL;
	}

	fini_debug_store_on_cpu(cpu);
}
static void intel_pmu_flush_branch_stack(void)
{
	/*
	 * Intel LBR does not tag entries with the
	 * PID of the current task, so we need to
	 * flush it on context switch.
	 * For now, we simply reset it.
	 */
	intel_pmu_lbr_reset();
}

PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");

PMU_FORMAT_ATTR(ldlat, "config1:0-15");

static struct attribute *intel_arch3_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_pc.attr,
	&format_attr_any.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,

	&format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
	&format_attr_ldlat.attr, /* PEBS load latency */
	NULL,
};
static __initconst const struct x86_pmu intel_pmu = {
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.hw_config		= intel_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.pebs_aliases		= intel_pebs_aliases_core2,

	.format_attrs		= intel_arch3_formats_attr,
	.events_sysfs_show	= intel_event_sysfs_show,

	.cpu_prepare		= intel_pmu_cpu_prepare,
	.cpu_starting		= intel_pmu_cpu_starting,
	.cpu_dying		= intel_pmu_cpu_dying,
	.guest_get_msrs		= intel_guest_get_msrs,
	.flush_branch_stack	= intel_pmu_flush_branch_stack,
};
static __init void intel_clovertown_quirk(void)
{
	/*
	 * PEBS is unreliable due to:
	 *
	 *   AJ67  - PEBS may experience CPL leaks
	 *   AJ68  - PEBS PMI may be delayed by one event
	 *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
	 *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
	 *
	 * AJ67 could be worked around by restricting the OS/USR flags.
	 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
	 *
	 * AJ106 could possibly be worked around by not allowing LBR
	 *       usage from PEBS, including the fixup.
	 * AJ68  could possibly be worked around by always programming
	 *       a pebs_event_reset[0] value and coping with the lost events.
	 *
	 * But taken together it might just make sense to not enable PEBS on
	 * these chips.
	 */
	pr_warn("PEBS disabled due to CPU errata\n");
	x86_pmu.pebs = 0;
	x86_pmu.pebs_constraints = NULL;
}

static int intel_snb_pebs_broken(int cpu)
{
	u32 rev = UINT_MAX; /* default to broken for unknown models */

	switch (cpu_data(cpu).x86_model) {
	case 42: /* SNB */
		rev = 0x28;
		break;

	case 45: /* SNB-EP */
		switch (cpu_data(cpu).x86_mask) {
		case 6: rev = 0x618; break;
		case 7: rev = 0x70c; break;
		}
	}

	return (cpu_data(cpu).microcode < rev);
}
static void intel_snb_check_microcode(void)
{
	int pebs_broken = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		if ((pebs_broken = intel_snb_pebs_broken(cpu)))
			break;
	}
	put_online_cpus();

	if (pebs_broken == x86_pmu.pebs_broken)
		return;

	/*
	 * Serialized by the microcode lock..
	 */
	if (x86_pmu.pebs_broken) {
		pr_info("PEBS enabled due to microcode update\n");
		x86_pmu.pebs_broken = 0;
	} else {
		pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
		x86_pmu.pebs_broken = 1;
	}
}

static __init void intel_sandybridge_quirk(void)
{
	x86_pmu.check_microcode = intel_snb_check_microcode;
	intel_snb_check_microcode();
}
static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
	{ PERF_COUNT_HW_CPU_CYCLES,		"cpu cycles" },
	{ PERF_COUNT_HW_INSTRUCTIONS,		"instructions" },
	{ PERF_COUNT_HW_BUS_CYCLES,		"bus cycles" },
	{ PERF_COUNT_HW_CACHE_REFERENCES,	"cache references" },
	{ PERF_COUNT_HW_CACHE_MISSES,		"cache misses" },
	{ PERF_COUNT_HW_BRANCH_INSTRUCTIONS,	"branch instructions" },
	{ PERF_COUNT_HW_BRANCH_MISSES,		"branch misses" },
};

static __init void intel_arch_events_quirk(void)
{
	int bit;

	/* disable events that are reported as not present by cpuid */
	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
		intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
		pr_warn("CPUID marked event: \'%s\' unavailable\n",
			intel_arch_events_map[bit].name);
	}
}
static __init void intel_nehalem_quirk(void)
{
	union cpuid10_ebx ebx;

	ebx.full = x86_pmu.events_maskl;
	if (ebx.split.no_branch_misses_retired) {
		/*
		 * Erratum AAJ80 detected, we work it around by using
		 * the BR_MISP_EXEC.ANY event. This will over-count
		 * branch-misses, but it's still much better than the
		 * architectural event which is often completely bogus:
		 */
		intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
		ebx.split.no_branch_misses_retired = 0;
		x86_pmu.events_maskl = ebx.full;
		pr_info("CPU erratum AAJ80 worked around\n");
	}
}
__init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	union cpuid10_ebx ebx;
	struct event_constraint *c;
	unsigned int unused;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
		case 0xb:
			return knc_pmu_init();
		case 0xf:
			return p4_pmu_init();
		}
		return -ENODEV;
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version			= version;
	x86_pmu.num_counters		= eax.split.num_counters;
	x86_pmu.cntval_bits		= eax.split.bit_width;
	x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1;

	x86_pmu.events_maskl		= ebx.full;
	x86_pmu.events_mask_len		= eax.split.mask_length;

	x86_pmu.max_pebs_events		= min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	/*
	 * v2 and above have a perf capabilities MSR
	 */
	if (version > 1) {
		u64 capabilities;

		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
		x86_pmu.intel_cap.capabilities = capabilities;
	}

	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65 nm core solo/duo, "Yonah" */
		pr_cont("Core events, ");
		break;

	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
		x86_add_quirk(intel_clovertown_quirk);
	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 29: /* six-core 45 nm xeon "Dunnington" */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
		pr_cont("Core2 events, ");
		break;

	case 26: /* 45 nm nehalem, "Bloomfield" */
	case 30: /* 45 nm nehalem, "Lynnfield" */
	case 46: /* 45 nm nehalem-ex, "Beckton" */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.extra_regs = intel_nehalem_extra_regs;

		x86_pmu.cpu_events = nhm_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		x86_add_quirk(intel_nehalem_quirk);

		pr_cont("Nehalem events, ");
		break;

	case 28: /* Atom */
	case 38: /* Lincroft */
	case 39: /* Penwell */
	case 53: /* Cloverview */
	case 54: /* Cedarview */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		pr_cont("Atom events, ");
		break;
	case 37: /* 32 nm nehalem, "Clarkdale" */
	case 44: /* 32 nm nehalem, "Gulftown" */
	case 47: /* 32 nm Xeon E7 */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;
		x86_pmu.er_flags |= ERF_HAS_RSP_1;

		x86_pmu.cpu_events = nhm_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		pr_cont("Westmere events, ");
		break;
	case 42: /* SandyBridge */
	case 45: /* SandyBridge, "Romley-EP" */
		x86_add_quirk(intel_sandybridge_quirk);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		x86_pmu.cpu_events = snb_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("SandyBridge events, ");
		break;
	case 58: /* IvyBridge */
	case 62: /* IvyBridge EP */
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_ivb_event_constraints;
		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		x86_pmu.cpu_events = snb_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("IvyBridge events, ");
		break;
	default:
		switch (x86_pmu.version) {
		case 1:
			x86_pmu.event_constraints = intel_v1_event_constraints;
			pr_cont("generic architected perfmon v1, ");
			break;
		default:
			/*
			 * default constraints for v2 and up
			 */
			x86_pmu.event_constraints = intel_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			break;
		}
	}

	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
	}
	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;

	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
	}

	x86_pmu.intel_ctrl |=
		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
	if (x86_pmu.event_constraints) {
		/*
		 * event on fixed counter2 (REF_CYCLES) only works on this
		 * counter, so do not extend mask to generic counters
		 */
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if (c->cmask != X86_RAW_EVENT_MASK
			    || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
				continue;
			}

			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
			c->weight += x86_pmu.num_counters;