/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <asm/hardirq.h>
#include <asm/apic.h>

#include "perf_event.h"

/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
};

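/*
 * Each entry above is a raw PERFEVTSEL encoding: the low byte is the
 * event select and the second byte the unit mask, so e.g. 0x412e is
 * event 0x2e with umask 0x41 (architectural LLC-misses). 0x0300 is not
 * a real encoding but a pseudo-encoding that the fixed-counter
 * constraints below map onto fixed counter 2 (CPU_CLK_UNHALTED.REF).
 */
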
static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};

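/*
 * In the constraint tables above and below, the second macro argument
 * is a counter bitmask: INTEL_EVENT_CONSTRAINT(0x12, 0x2) means event
 * 0x12 may only run on general-purpose counter 1 (bit 1), 0x3 allows
 * counters 0-1, 0xf counters 0-3, and 0x0 forbids the event entirely.
 * FIXED_EVENT_CONSTRAINT(code, n) pins an encoding to fixed counter n.
 */
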
static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};

static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	/*
	 * Errata BV98 -- MEM_*_RETIRED events can leak between counters of SMT
	 * siblings; disable these events because they can corrupt unrelated
	 * counters.
	 */
	INTEL_EVENT_CONSTRAINT(0xd0, 0x0), /* MEM_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0x0), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0x0), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd3, 0x0), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};

static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

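/*
 * The third argument of INTEL_EVENT_EXTRA_REG() is the mask of bits a
 * user may set in the extra MSR (passed via attr.config1): NHM and WSM
 * only implement the low 16 offcore-response bits, while the SNB/SNB-EP
 * masks above also admit the supplier/snoop bits. Having both RSP_0 and
 * RSP_1 lets two offcore-response events run at once, with intel_alt_er()
 * further down retargeting one of them.
 */
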
EVENT_ATTR_STR(mem-loads,  mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads,  mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");

struct attribute *nhm_events_attrs[] = {
	EVENT_PTR(mem_ld_nhm),
	NULL,
};

struct attribute *snb_events_attrs[] = {
	EVENT_PTR(mem_ld_snb),
	EVENT_PTR(mem_st_snb),
	NULL,
};

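/*
 * These sysfs event strings give tools a symbolic name for the PEBS
 * load-latency/store events, so instead of the raw encoding one can,
 * for example, run:
 *
 *	perf record -e cpu/mem-loads,ldlat=30/p ...
 *
 * (illustrative invocation; the default ldlat threshold above is 3)
 */
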
static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}

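/*
 * Sandy Bridge MSR_OFFCORE_RESPONSE bits: roughly, bits 0-15 select the
 * request type, bits 16-21 the supplier/response info, bits 22-30 local
 * vs. remote DRAM and bits 31-37 the snoop info; a usable offcore_rsp
 * value combines at least one request bit with response/snoop bits, as
 * the composite masks below do. (Summary per the Intel SDM; compare the
 * NHM/WSM bit layout further down.)
 */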
#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_DMND_RFO		(1ULL << 1)
#define SNB_DMND_IFETCH		(1ULL << 2)
#define SNB_DMND_WB		(1ULL << 3)
#define SNB_PF_DATA_RD		(1ULL << 4)
#define SNB_PF_RFO		(1ULL << 5)
#define SNB_PF_IFETCH		(1ULL << 6)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LLC_RFO		(1ULL << 8)
#define SNB_LLC_IFETCH		(1ULL << 9)
#define SNB_BUS_LOCKS		(1ULL << 10)
#define SNB_STRM_ST		(1ULL << 11)
#define SNB_OTHER		(1ULL << 15)
#define SNB_RESP_ANY		(1ULL << 16)
#define SNB_NO_SUPP		(1ULL << 17)
#define SNB_LLC_HITM		(1ULL << 18)
#define SNB_LLC_HITE		(1ULL << 19)
#define SNB_LLC_HITS		(1ULL << 20)
#define SNB_LLC_HITF		(1ULL << 21)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
				 SNB_HITM)

#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS		SNB_RESP_ANY
#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)

static __initconst const u64 snb_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
	},
 },
};

static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};

static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};

/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */

#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
				/* reserved */
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)

static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
	},
 },
};

static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};

static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
{
	/* user explicitly requested branch sampling */
	if (has_branch_stack(event))
		return true;

	/* implicit branch sampling to correct PEBS skid */
	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		return true;

	return false;
}

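/*
 * MSR_CORE_PERF_GLOBAL_CTRL carries one enable bit per general-purpose
 * counter (bits 0..N-1) and per fixed counter (from bit 32 up), so a
 * single write of 0 silences the whole PMU; x86_pmu.intel_ctrl caches
 * the set of implemented enable bits.
 */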
static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();

	intel_pmu_pebs_disable_all();
	intel_pmu_lbr_disable_all();
}

static void intel_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all();
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
			x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[INTEL_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}

/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

	/*
	 * The errata requires the following steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we choose are a little different from above.
	 * A) To reduce MSR operations, we don't run step 1) as the
	 *    registers are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with the magic number;
	 * C) For step 5), we only clear a PERFEVTSELx when it is not
	 *    currently in use;
	 * D) Call x86_perf_event_set_period to restore PMCx.
	 */

	/* We always operate on 4 pairs of PERF Counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}

static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}

static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

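/*
 * Each fixed counter owns a 4-bit field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL:
 * bit 0 enables ring-0 counting, bit 1 ring-3 counting, bit 2 the
 * any-thread qualifier (v3+) and bit 3 PMI on overflow. Disabling a
 * fixed counter thus only clears its nibble; intel_pmu_enable_fixed()
 * below shows how the bits are composed.
 */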
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);

	/*
	 * The LBR must be disabled before the actual event,
	 * because any event may be combined with LBR.
	 */
	if (intel_pmu_needs_lbr_smpl(event))
		intel_pmu_lbr_disable(event);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);
}

static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		if (!__this_cpu_read(cpu_hw_events.enabled))
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}
	/*
	 * The LBR must be enabled before the actual event,
	 * because any event may be combined with LBR.
	 */
	if (intel_pmu_needs_lbr_smpl(event))
		intel_pmu_lbr_enable(event);

	if (event->attr.exclude_host)
		cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
	if (event->attr.exclude_guest)
		cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc);
		return;
	}

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_enable(event);

	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
int intel_pmu_save_and_restart(struct perf_event *event)
{
	x86_perf_event_update(event);
	return x86_perf_event_set_period(event);
}

static void intel_pmu_reset(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
		wrmsrl_safe(x86_pmu_event_addr(idx),  0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
		wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 status;
	int handled;

	cpuc = &__get_cpu_var(cpu_hw_events);

	/*
	 * Some chipsets need to unmask the LVTPC in a particular spot
	 * inside the nmi handler.  As a result, the unmasking was pushed
	 * into all the nmi handlers.
	 *
	 * This handler doesn't seem to have any issues with the unmasking
	 * so it was left at the top.
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);

	intel_pmu_disable_all();
	handled = intel_pmu_drain_bts_buffer();
	status = intel_pmu_get_status();
	if (!status) {
		intel_pmu_enable_all(0);
		return handled;
	}

	loops = 0;
again:
	intel_pmu_ack_status(status);
	if (++loops > 100) {
		static bool warned = false;
		if (!warned) {
			WARN(1, "perfevents: irq loop stuck!\n");
			perf_event_print_debug();
			warned = true;
		}
		intel_pmu_reset();
		goto done;
	}

	inc_irq_stat(apic_perf_irqs);

	intel_pmu_lbr_read();

	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
		handled++;
		x86_pmu.drain_pebs(regs);
	}

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		perf_sample_data_init(&data, 0, event->hw.last_period);

		if (has_branch_stack(event))
			data.br_stack = &cpuc->lbr_stack;

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

done:
	intel_pmu_enable_all(0);
	return handled;
}

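/*
 * A branch-instructions event with a fixed sample_period of 1 can only
 * be serviced by the BTS (Branch Trace Store) facility, so steer it to
 * the dedicated BTS constraint; frequency-based events can never use BTS.
 */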
static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	if (event->attr.freq)
		return NULL;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
		return &bts_constraint;

	return NULL;
}

static int intel_alt_er(int idx)
{
	if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
		return idx;

	if (idx == EXTRA_REG_RSP_0)
		return EXTRA_REG_RSP_1;

	if (idx == EXTRA_REG_RSP_1)
		return EXTRA_REG_RSP_0;

	return idx;
}

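/*
 * Rewrite an offcore-response event to match the extra register it was
 * (re)allocated to: OFFCORE_RSP_0 is programmed through event 0x01b7 and
 * MSR_OFFCORE_RSP_0, OFFCORE_RSP_1 through event 0x01bb and
 * MSR_OFFCORE_RSP_1. Together with intel_alt_er() this is what allows
 * two offcore-response events to coexist on one core.
 */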
static void intel_fixup_er(struct perf_event *event, int idx)
{
	event->hw.extra_reg.idx = idx;

	if (idx == EXTRA_REG_RSP_0) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= 0x01b7;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
	} else if (idx == EXTRA_REG_RSP_1) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= 0x01bb;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
	}
}

/*
 * manage allocation of the shared extra msr for certain events
 *
 * sharing can be:
 * per-cpu: to be shared between the various events on a single PMU
 * per-core: per-cpu + shared by HT threads
 */
static struct event_constraint *
__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
				   struct perf_event *event,
				   struct hw_perf_event_extra *reg)
{
	struct event_constraint *c = &emptyconstraint;
	struct er_account *era;
	unsigned long flags;
	int idx = reg->idx;

	/*
	 * reg->alloc can be set due to existing state, so for fake cpuc we
	 * need to ignore this, otherwise we might fail to allocate proper fake
	 * state for this extra reg constraint. Also see the comment below.
	 */
	if (reg->alloc && !cpuc->is_fake)
		return NULL; /* call x86_get_event_constraints() */

again:
	era = &cpuc->shared_regs->regs[idx];
	/*
	 * we use spin_lock_irqsave() to avoid lockdep issues when
	 * passing a fake cpuc
	 */
	raw_spin_lock_irqsave(&era->lock, flags);

	if (!atomic_read(&era->ref) || era->config == reg->config) {

		/*
		 * If it's a fake cpuc -- as per validate_{group,event}() we
		 * shouldn't touch event state and we can avoid doing so
		 * since both will only call get_event_constraints() once
		 * on each event, this avoids the need for reg->alloc.
		 *
		 * Not doing the ER fixup will only result in era->reg being
		 * wrong, but since we won't actually try and program hardware
		 * this isn't a problem either.
		 */
		if (!cpuc->is_fake) {
			if (idx != reg->idx)
				intel_fixup_er(event, idx);

			/*
			 * x86_schedule_events() can call get_event_constraints()
			 * multiple times on events in the case of incremental
			 * scheduling. reg->alloc ensures we only do the ER
			 * allocation once.
			 */
			reg->alloc = 1;
		}

		/* lock in msr value */
		era->config = reg->config;
		era->reg = reg->reg;

		/* one more user */
		atomic_inc(&era->ref);

		/*
		 * need to call x86_get_event_constraints()
		 * to check if the associated event has constraints
		 */
		c = NULL;
	} else {
		idx = intel_alt_er(idx);
		if (idx != reg->idx) {
			raw_spin_unlock_irqrestore(&era->lock, flags);
			goto again;
		}
	}
	raw_spin_unlock_irqrestore(&era->lock, flags);

	return c;
}

static void
__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
				   struct hw_perf_event_extra *reg)
{
	struct er_account *era;

	/*
	 * Only put the constraint if the extra reg was actually allocated.
	 * This also takes care of events which do not use an extra shared reg.
	 *
	 * Also, if this is a fake cpuc we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
	 * either since it'll be thrown out.
	 */
	if (!reg->alloc || cpuc->is_fake)
		return;

	era = &cpuc->shared_regs->regs[reg->idx];

	/* one fewer user */
	atomic_dec(&era->ref);

	/* allocate again next time */
	reg->alloc = 0;
}

static struct event_constraint *
intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
			      struct perf_event *event)
{
	struct event_constraint *c = NULL, *d;
	struct hw_perf_event_extra *xreg, *breg;

	xreg = &event->hw.extra_reg;
	if (xreg->idx != EXTRA_REG_NONE) {
		c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
		if (c == &emptyconstraint)
			return c;
	}
	breg = &event->hw.branch_reg;
	if (breg->idx != EXTRA_REG_NONE) {
		d = __intel_shared_reg_get_constraints(cpuc, event, breg);
		if (d == &emptyconstraint) {
			__intel_shared_reg_put_constraints(cpuc, xreg);
			c = d;
		}
	}
	return c;
}

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if ((event->hw.config & c->cmask) == c->code) {
				/* hw.flags zeroed at initialization */
				event->hw.flags |= c->flags;
				return c;
			}
		}
	}

	return &unconstrained;
}

static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	c = intel_bts_constraints(event);
	if (c)
		return c;

	c = intel_pebs_constraints(event);
	if (c)
		return c;

	c = intel_shared_regs_constraints(cpuc, event);
	if (c)
		return c;

	return x86_get_event_constraints(cpuc, event);
}

static void
intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	struct hw_perf_event_extra *reg;

	reg = &event->hw.extra_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);

	reg = &event->hw.branch_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);
}

static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	event->hw.flags = 0;
	intel_put_shared_regs_event_constraints(cpuc, event);
}

static void intel_pebs_aliases_core2(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use INST_RETIRED.ANY_P
		 * (0x00c0), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * INST_RETIRED.ANY_P counts the number of cycles that retire
		 * CNTMASK instructions. By setting CNTMASK to a value (16)
		 * larger than the maximum number of instructions that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or fewer instructions, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}

static void intel_pebs_aliases_snb(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use UOPS_RETIRED.ALL
		 * (0x01c2), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * UOPS_RETIRED.ALL counts the number of cycles that retire
		 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
		 * larger than the maximum number of micro-ops that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or fewer micro-ops, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}

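/*
 * Net effect of the aliasing above, expressed as the equivalent raw
 * event string (illustrative only): a precise cycles request such as
 * "perf record -e cycles:pp" ends up programmed on SNB as the
 * PEBS-capable encoding cpu/event=0xc2,umask=0x1,inv,cmask=16/ instead
 * of the non-PEBS 0x003c encoding.
 */
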
static int intel_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.precise_ip && x86_pmu.pebs_aliases)
		x86_pmu.pebs_aliases(event);

	if (intel_pmu_needs_lbr_smpl(event)) {
		ret = intel_pmu_setup_lbr_filter(event);
		if (ret)
			return ret;
	}

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
		return 0;

	if (x86_pmu.version < 3)
		return -EINVAL;

	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

	return 0;
}

struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	if (x86_pmu.guest_get_msrs)
		return x86_pmu.guest_get_msrs(nr);
	*nr = 0;
	return NULL;
}
EXPORT_SYMBOL_GPL(perf_guest_get_msrs);

static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;

	arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
	arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
	arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
	/*
	 * If a PMU counter has PEBS enabled it is not enough to disable the
	 * counter on a guest entry, since a PEBS memory write can overshoot
	 * the guest entry and corrupt guest memory. Disabling PEBS solves
	 * the problem.
	 */
	arr[1].msr = MSR_IA32_PEBS_ENABLE;
	arr[1].host = cpuc->pebs_enabled;
	arr[1].guest = 0;

	*nr = 2;
	return arr;
}

static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_event *event = cpuc->events[idx];

		arr[idx].msr = x86_pmu_config_addr(idx);
		arr[idx].host = arr[idx].guest = 0;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		arr[idx].host = arr[idx].guest =
			event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;

		if (event->attr.exclude_host)
			arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		else if (event->attr.exclude_guest)
			arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
	}

	*nr = x86_pmu.num_counters;
	return arr;
}

static void core_pmu_enable_event(struct perf_event *event)
{
	if (!event->attr.exclude_host)
		x86_pmu_enable_event(event);
}

static void core_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

		if (!test_bit(idx, cpuc->active_mask) ||
				cpuc->events[idx]->attr.exclude_host)
			continue;

		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
	}
}

PMU_FORMAT_ATTR(event,	"config:0-7"	);
PMU_FORMAT_ATTR(umask,	"config:8-15"	);
PMU_FORMAT_ATTR(edge,	"config:18"	);
PMU_FORMAT_ATTR(pc,	"config:19"	);
PMU_FORMAT_ATTR(any,	"config:21"	); /* v3 + */
PMU_FORMAT_ATTR(inv,	"config:23"	);
PMU_FORMAT_ATTR(cmask,	"config:24-31"	);

static struct attribute *intel_arch_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_pc.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	NULL,
};

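/*
 * PMU_FORMAT_ATTR() exports each field under
 * /sys/bus/event_source/devices/cpu/format/, which is what lets the
 * perf tool parse raw event syntax such as:
 *
 *	perf stat -e cpu/event=0x3c,umask=0x00,cmask=1,inv=1/ ...
 */
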
ssize_t intel_event_sysfs_show(char *page, u64 config)
{
	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);

	return x86_event_sysfs_show(page, config, event);
}

static __initconst const struct x86_pmu core_pmu = {
	.name			= "core",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= core_pmu_enable_all,
	.enable			= core_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	.apic			= 1,
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.event_constraints	= intel_core_event_constraints,
	.guest_get_msrs		= core_guest_get_msrs,
	.format_attrs		= intel_arch_formats_attr,
	.events_sysfs_show	= intel_event_sysfs_show,
};

struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	struct intel_shared_regs *regs;
	int i;

	regs = kzalloc_node(sizeof(struct intel_shared_regs),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (regs) {
		/*
		 * initialize the locks to keep lockdep happy
		 */
		for (i = 0; i < EXTRA_REG_MAX; i++)
			raw_spin_lock_init(&regs->regs[i].lock);

		regs->core_id = -1;
	}
	return regs;
}

static int intel_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map))
		return NOTIFY_OK;

	cpuc->shared_regs = allocate_shared_regs(cpu);
	if (!cpuc->shared_regs)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static void intel_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	int core_id = topology_core_id(cpu);
	int i;

	init_debug_store_on_cpu(cpu);
	/*
	 * Deal with CPUs that don't clear their LBRs on power-up.
	 */
	intel_pmu_lbr_reset();

	cpuc->lbr_sel = NULL;

	if (!cpuc->shared_regs)
		return;

	if (!(x86_pmu.er_flags & ERF_NO_HT_SHARING)) {
		for_each_cpu(i, topology_thread_cpumask(cpu)) {
			struct intel_shared_regs *pc;

			pc = per_cpu(cpu_hw_events, i).shared_regs;
			if (pc && pc->core_id == core_id) {
				cpuc->kfree_on_online = cpuc->shared_regs;
				cpuc->shared_regs = pc;
				break;
			}
		}
		cpuc->shared_regs->core_id = core_id;
		cpuc->shared_regs->refcnt++;
	}

	if (x86_pmu.lbr_sel_map)
		cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
}

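/*
 * Editor's note: a hedged user-space sketch (hypothetical types, not
 * kernel code) of the sibling pairing above. The second hyper-thread
 * of a core finds the shared_regs its sibling already published for
 * the same core_id, adopts it, and bumps the refcount; its own
 * allocation is handed back (kfree_on_online in the kernel).
 */
#include <stdio.h>
#include <stdlib.h>

struct shared_regs { int core_id; int refcnt; };

static struct shared_regs *per_cpu_regs[2];	/* two HT siblings */

static void cpu_starting(int cpu, int core_id)
{
	struct shared_regs *mine = per_cpu_regs[cpu];
	int i;

	for (i = 0; i < 2; i++) {
		struct shared_regs *pc = per_cpu_regs[i];

		if (pc && pc != mine && pc->core_id == core_id) {
			free(mine);	/* queued as kfree_on_online above */
			per_cpu_regs[cpu] = pc;
			break;
		}
	}
	per_cpu_regs[cpu]->core_id = core_id;
	per_cpu_regs[cpu]->refcnt++;
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < 2; cpu++) {
		per_cpu_regs[cpu] = calloc(1, sizeof(struct shared_regs));
		per_cpu_regs[cpu]->core_id = -1; /* as in allocate_shared_regs() */
	}
	for (cpu = 0; cpu < 2; cpu++)
		cpu_starting(cpu, 3);

	/* prints: shared=1 refcnt=2 */
	printf("shared=%d refcnt=%d\n",
	       per_cpu_regs[0] == per_cpu_regs[1], per_cpu_regs[0]->refcnt);
	return 0;
}
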
static void intel_pmu_cpu_dying(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct intel_shared_regs *pc;

	pc = cpuc->shared_regs;
	if (pc) {
		if (pc->core_id == -1 || --pc->refcnt == 0)
			kfree(pc);
		cpuc->shared_regs = NULL;
	}

	fini_debug_store_on_cpu(cpu);
}

static void intel_pmu_flush_branch_stack(void)
{
	/*
	 * The Intel LBR does not tag entries with the PID of the
	 * current task, so we need to flush it on a context switch.
	 * For now, we simply reset it.
	 */
	if (x86_pmu.lbr_nr)
		intel_pmu_lbr_reset();
}

PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");

PMU_FORMAT_ATTR(ldlat, "config1:0-15");

static struct attribute *intel_arch3_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_pc.attr,
	&format_attr_any.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,

	&format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
	&format_attr_ldlat.attr, /* PEBS load latency */
	NULL,
};

static __initconst const struct x86_pmu intel_pmu = {
	.name			= "Intel",
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.hw_config		= intel_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	.apic			= 1,
	/*
	 * Intel PMCs cannot be accessed sanely above 32-bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.pebs_aliases		= intel_pebs_aliases_core2,

	.format_attrs		= intel_arch3_formats_attr,
	.events_sysfs_show	= intel_event_sysfs_show,

	.cpu_prepare		= intel_pmu_cpu_prepare,
	.cpu_starting		= intel_pmu_cpu_starting,
	.cpu_dying		= intel_pmu_cpu_dying,
	.guest_get_msrs		= intel_guest_get_msrs,
	.flush_branch_stack	= intel_pmu_flush_branch_stack,
};

static __init void intel_clovertown_quirk(void)
{
	/*
	 * PEBS is unreliable due to:
	 *
	 * AJ67  - PEBS may experience CPL leaks
	 * AJ68  - PEBS PMI may be delayed by one event
	 * AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
	 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
	 *
	 * AJ67 could be worked around by restricting the OS/USR flags.
	 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
	 *
	 * AJ106 could possibly be worked around by not allowing LBR
	 *       usage from PEBS, including the fixup.
	 * AJ68  could possibly be worked around by always programming
	 *	 a pebs_event_reset[0] value and coping with the lost events.
	 *
	 * But taken together it might just make sense to not enable PEBS on
	 * these chips.
	 */
	pr_warn("PEBS disabled due to CPU errata\n");
	x86_pmu.pebs = 0;
	x86_pmu.pebs_constraints = NULL;
}

static int intel_snb_pebs_broken(int cpu)
{
	u32 rev = UINT_MAX; /* default to broken for unknown models */

	switch (cpu_data(cpu).x86_model) {
	case 42: /* SNB */
		rev = 0x28;
		break;

	case 45: /* SNB-EP */
		switch (cpu_data(cpu).x86_mask) {
		case 6: rev = 0x618; break;
		case 7: rev = 0x70c; break;
		}
	}

	return (cpu_data(cpu).microcode < rev);
}

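/*
 * Editor's note: a hedged user-space mirror of the revision table
 * above, to make the decision rule explicit: PEBS is considered broken
 * unless the running microcode is at or above the model/stepping
 * specific minimum, and unknown models default to broken. The sample
 * values in main() are hypothetical.
 */
#include <limits.h>
#include <stdio.h>

static unsigned int snb_min_microcode(int model, int stepping)
{
	switch (model) {
	case 42:			/* SNB */
		return 0x28;
	case 45:			/* SNB-EP */
		if (stepping == 6)
			return 0x618;
		if (stepping == 7)
			return 0x70c;
	}
	return UINT_MAX;		/* unknown: assume broken */
}

int main(void)
{
	/* hypothetical SNB-EP stepping 7 running microcode 0x70b */
	unsigned int microcode = 0x70b;
	int broken = microcode < snb_min_microcode(45, 7);

	printf("PEBS %s\n", broken ? "broken" : "usable");
	return 0;
}
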
static void intel_snb_check_microcode(void)
{
	int pebs_broken = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		if ((pebs_broken = intel_snb_pebs_broken(cpu)))
			break;
	}
	put_online_cpus();

	if (pebs_broken == x86_pmu.pebs_broken)
		return;

	/*
	 * Serialized by the microcode lock.
	 */
	if (x86_pmu.pebs_broken) {
		pr_info("PEBS enabled due to microcode update\n");
		x86_pmu.pebs_broken = 0;
	} else {
		pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
		x86_pmu.pebs_broken = 1;
	}
}

static __init void intel_sandybridge_quirk(void)
{
	x86_pmu.check_microcode = intel_snb_check_microcode;
	intel_snb_check_microcode();
}

static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
	{ PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
	{ PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
	{ PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
	{ PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
	{ PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
	{ PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
	{ PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
};

static __init void intel_arch_events_quirk(void)
{
	int bit;

	/* disable events that CPUID reports as not present */
	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
		intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
		pr_warn("CPUID marked event: \'%s\' unavailable\n",
			intel_arch_events_map[bit].name);
	}
}

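/*
 * Editor's note: a hedged user-space sketch of the CPUID check that
 * feeds this quirk. Leaf 0xA reports the architected-event bit vector
 * in EBX (a set bit means the event is NOT available) and its length
 * in EAX[31:24]; the names below follow intel_arch_events_map. Uses
 * __get_cpuid() from <cpuid.h> (GCC/Clang).
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	static const char * const names[] = {
		"cpu cycles", "instructions", "bus cycles",
		"cache references", "cache misses",
		"branch instructions", "branch misses",
	};
	unsigned int eax, ebx, ecx, edx, i, mask_len;

	if (!__get_cpuid(0xa, &eax, &ebx, &ecx, &edx))
		return 1;			/* no arch perfmon */

	mask_len = (eax >> 24) & 0xff;		/* EAX[31:24] */
	for (i = 0; i < mask_len && i < 7; i++)
		printf("%-19s: %s\n", names[i],
		       (ebx >> i) & 1 ? "unavailable" : "available");
	return 0;
}
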
static __init void intel_nehalem_quirk(void)
{
	union cpuid10_ebx ebx;

	ebx.full = x86_pmu.events_maskl;
	if (ebx.split.no_branch_misses_retired) {
		/*
		 * Erratum AAJ80 detected; work around it by using
		 * the BR_MISP_EXEC.ANY event. This will over-count
		 * branch-misses, but it's still much better than the
		 * architectural event which is often completely bogus:
		 */
		intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
		ebx.split.no_branch_misses_retired = 0;
		x86_pmu.events_maskl = ebx.full;
		pr_info("CPU erratum AAJ80 worked around\n");
	}
}

__init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	union cpuid10_ebx ebx;
	struct event_constraint *c;
	unsigned int unused;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
		case 0xb:
			return knc_pmu_init();
		case 0xf:
			return p4_pmu_init();
		}
		return -ENODEV;
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * the Branch Misses Retired hw_event.
	 */
	cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version = version;
	x86_pmu.num_counters = eax.split.num_counters;
	x86_pmu.cntval_bits = eax.split.bit_width;
	x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;

	x86_pmu.events_maskl = ebx.full;
	x86_pmu.events_mask_len = eax.split.mask_length;

	x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	/*
	 * v2 and above have a perf capabilities MSR
	 */
	if (version > 1) {
		u64 capabilities;

		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
		x86_pmu.intel_cap.capabilities = capabilities;
	}

	intel_ds_init();

	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65 nm core solo/duo, "Yonah" */
		pr_cont("Core events, ");
		break;

	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
		x86_add_quirk(intel_clovertown_quirk);
	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 29: /* six-core 45 nm xeon "Dunnington" */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
		pr_cont("Core2 events, ");
		break;

	case 26: /* 45 nm nehalem, "Bloomfield" */
	case 30: /* 45 nm nehalem, "Lynnfield" */
	case 46: /* 45 nm nehalem-ex, "Beckton" */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.extra_regs = intel_nehalem_extra_regs;

		x86_pmu.cpu_events = nhm_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		x86_add_quirk(intel_nehalem_quirk);

		pr_cont("Nehalem events, ");
		break;

	case 28: /* Atom */
	case 38: /* Lincroft */
	case 39: /* Penwell */
	case 53: /* Cloverview */
	case 54: /* Cedarview */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		pr_cont("Atom events, ");
		break;

	case 37: /* 32 nm nehalem, "Clarkdale" */
	case 44: /* 32 nm nehalem, "Gulftown" */
	case 47: /* 32 nm Xeon E7 */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;
		x86_pmu.er_flags |= ERF_HAS_RSP_1;

		x86_pmu.cpu_events = nhm_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		pr_cont("Westmere events, ");
		break;

	case 42: /* SandyBridge */
	case 45: /* SandyBridge, "Romley-EP" */
		x86_add_quirk(intel_sandybridge_quirk);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		if (boot_cpu_data.x86_model == 45)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		x86_pmu.cpu_events = snb_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("SandyBridge events, ");
		break;
	case 58: /* IvyBridge */
	case 62: /* IvyBridge EP */
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_ivb_event_constraints;
		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		if (boot_cpu_data.x86_model == 62)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		x86_pmu.cpu_events = snb_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("IvyBridge events, ");
		break;

	default:
		switch (x86_pmu.version) {
		case 1:
			x86_pmu.event_constraints = intel_v1_event_constraints;
			pr_cont("generic architected perfmon v1, ");
			break;
		default:
			/*
			 * default constraints for v2 and up
			 */
			x86_pmu.event_constraints = intel_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			break;
		}
	}

	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
	}
	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;

	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
	}

	x86_pmu.intel_ctrl |=
		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;

	if (x86_pmu.event_constraints) {
		/*
		 * The REF_CYCLES event on fixed counter 2 only works on
		 * that counter, so do not extend its mask to the generic
		 * counters.
		 */
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if (c->cmask != X86_RAW_EVENT_MASK
			    || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
				continue;
			}

			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
			c->weight += x86_pmu.num_counters;
		}
	}

	return 0;
}
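
/*
 * Editor's note: a hedged sketch of the intel_ctrl mask built above,
 * with hypothetical counts (4 generic + 3 fixed counters). Generic
 * counters occupy the low bits of MSR_CORE_PERF_GLOBAL_CTRL and fixed
 * counters start at bit 32 (INTEL_PMC_IDX_FIXED), so the enable mask
 * comes out to 0x70000000f.
 */
#include <stdio.h>

#define INTEL_PMC_IDX_FIXED	32

int main(void)
{
	int num_counters = 4, num_counters_fixed = 3;
	unsigned long long intel_ctrl;

	intel_ctrl  = (1ULL << num_counters) - 1;
	intel_ctrl |= ((1ULL << num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;

	printf("intel_ctrl = 0x%llx\n", intel_ctrl);	/* 0x70000000f */
	return 0;
}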