arch/x86/kernel/cpu/perf_event_intel.c
1 /*
2 * Per core/cpu state
3 *
4 * Used to coordinate shared registers between HT threads or
5 * among events on a single PMU.
6 */
7
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10 #include <linux/stddef.h>
11 #include <linux/types.h>
12 #include <linux/init.h>
13 #include <linux/slab.h>
14 #include <linux/export.h>
15
16 #include <asm/cpufeature.h>
17 #include <asm/hardirq.h>
18 #include <asm/apic.h>
19
20 #include "perf_event.h"
21
22 /*
23 * Intel PerfMon, used on Core and later.
24 */
25 static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
26 {
27 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
28 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
29 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
30 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
31 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
32 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
33 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
34 [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
35 };
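/*
 * Note: each entry above is a raw PERFEVTSEL encoding with the event
 * select in bits 0-7 and the unit mask in bits 8-15 (the "event" and
 * "umask" format attributes defined later in this file). For example,
 * 0x4f2e is event 0x2e with umask 0x4f (LLC references) and 0x412e is the
 * same event with umask 0x41 (LLC misses). 0x0300 is the pseudo-encoding
 * mentioned above; it is mapped onto fixed counter 2 via
 * FIXED_EVENT_CONSTRAINT(0x0300, 2) in the constraint tables below.
 */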
36
37 static struct event_constraint intel_core_event_constraints[] __read_mostly =
38 {
39 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
40 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
41 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
42 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
43 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
44 INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
45 EVENT_CONSTRAINT_END
46 };
47
48 static struct event_constraint intel_core2_event_constraints[] __read_mostly =
49 {
50 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
51 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
52 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
53 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
54 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
55 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
56 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
57 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
58 INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
59 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
60 INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
61 INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
62 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
63 EVENT_CONSTRAINT_END
64 };
65
66 static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
67 {
68 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
69 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
70 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
71 INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
72 INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
73 INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
74 INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
75 INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
76 INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
77 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
78 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
79 EVENT_CONSTRAINT_END
80 };
81
82 static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
83 {
84 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
85 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
86 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
87 EVENT_EXTRA_END
88 };
89
90 static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
91 {
92 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
93 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
94 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
95 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
96 INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
97 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
98 INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
99 EVENT_CONSTRAINT_END
100 };
101
102 static struct event_constraint intel_snb_event_constraints[] __read_mostly =
103 {
104 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
105 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
106 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
107 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
108 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
109 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
110 INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
111 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
112 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
113 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
114 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
115 INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
116 EVENT_CONSTRAINT_END
117 };
118
119 static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
120 {
121 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
122 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
123 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
124 INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
125 INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
126 INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
127 INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
128 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
129 INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
130 INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
131 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
132 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
133 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
134 /*
135 * Errata BV98 -- MEM_*_RETIRED events can leak between counters of SMT
136 * siblings; disable these events because they can corrupt unrelated
137 * counters.
138 */
139 INTEL_EVENT_CONSTRAINT(0xd0, 0x0), /* MEM_UOPS_RETIRED.* */
140 INTEL_EVENT_CONSTRAINT(0xd1, 0x0), /* MEM_LOAD_UOPS_RETIRED.* */
141 INTEL_EVENT_CONSTRAINT(0xd2, 0x0), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
142 INTEL_EVENT_CONSTRAINT(0xd3, 0x0), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
143 EVENT_CONSTRAINT_END
144 };
145
146 static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
147 {
148 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
149 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
150 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
151 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
152 EVENT_EXTRA_END
153 };
154
155 static struct event_constraint intel_v1_event_constraints[] __read_mostly =
156 {
157 EVENT_CONSTRAINT_END
158 };
159
160 static struct event_constraint intel_gen_event_constraints[] __read_mostly =
161 {
162 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
163 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
164 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
165 EVENT_CONSTRAINT_END
166 };
167
168 static struct event_constraint intel_slm_event_constraints[] __read_mostly =
169 {
170 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
171 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
172 FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF */
173 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
174 EVENT_CONSTRAINT_END
175 };
176
177 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
178 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
179 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
180 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
181 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
182 EVENT_EXTRA_END
183 };
184
185 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
186 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
187 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
188 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
189 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
190 EVENT_EXTRA_END
191 };
192
193 EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
194 EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
195 EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
196
197 struct attribute *nhm_events_attrs[] = {
198 EVENT_PTR(mem_ld_nhm),
199 NULL,
200 };
201
202 struct attribute *snb_events_attrs[] = {
203 EVENT_PTR(mem_ld_snb),
204 EVENT_PTR(mem_st_snb),
205 NULL,
206 };
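/*
 * The EVENT_ATTR_STR() strings above are exported through sysfs
 * (typically /sys/bus/event_source/devices/cpu/events/mem-loads etc.), so
 * the symbolic names can be used directly from the perf tool; a rough
 * usage sketch:
 *
 *	perf record -d -e cpu/mem-loads/ -- <workload>
 *
 * which on a SNB-class PMU resolves to "event=0xcd,umask=0x1,ldlat=3",
 * i.e. MEM_TRANS_RETIRED.LOAD_LATENCY with a load-latency threshold of 3.
 * The command line is illustrative only.
 */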
207
208 static struct event_constraint intel_hsw_event_constraints[] = {
209 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
210 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
211 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
212 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
213 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
214 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
215 /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
216 INTEL_EVENT_CONSTRAINT(0x08a3, 0x4),
217 /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
218 INTEL_EVENT_CONSTRAINT(0x0ca3, 0x4),
219 /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
220 INTEL_EVENT_CONSTRAINT(0x04a3, 0xf),
221 EVENT_CONSTRAINT_END
222 };
223
224 static u64 intel_pmu_event_map(int hw_event)
225 {
226 return intel_perfmon_event_map[hw_event];
227 }
228
229 #define SNB_DMND_DATA_RD (1ULL << 0)
230 #define SNB_DMND_RFO (1ULL << 1)
231 #define SNB_DMND_IFETCH (1ULL << 2)
232 #define SNB_DMND_WB (1ULL << 3)
233 #define SNB_PF_DATA_RD (1ULL << 4)
234 #define SNB_PF_RFO (1ULL << 5)
235 #define SNB_PF_IFETCH (1ULL << 6)
236 #define SNB_LLC_DATA_RD (1ULL << 7)
237 #define SNB_LLC_RFO (1ULL << 8)
238 #define SNB_LLC_IFETCH (1ULL << 9)
239 #define SNB_BUS_LOCKS (1ULL << 10)
240 #define SNB_STRM_ST (1ULL << 11)
241 #define SNB_OTHER (1ULL << 15)
242 #define SNB_RESP_ANY (1ULL << 16)
243 #define SNB_NO_SUPP (1ULL << 17)
244 #define SNB_LLC_HITM (1ULL << 18)
245 #define SNB_LLC_HITE (1ULL << 19)
246 #define SNB_LLC_HITS (1ULL << 20)
247 #define SNB_LLC_HITF (1ULL << 21)
248 #define SNB_LOCAL (1ULL << 22)
249 #define SNB_REMOTE (0xffULL << 23)
250 #define SNB_SNP_NONE (1ULL << 31)
251 #define SNB_SNP_NOT_NEEDED (1ULL << 32)
252 #define SNB_SNP_MISS (1ULL << 33)
253 #define SNB_NO_FWD (1ULL << 34)
254 #define SNB_SNP_FWD (1ULL << 35)
255 #define SNB_HITM (1ULL << 36)
256 #define SNB_NON_DRAM (1ULL << 37)
257
258 #define SNB_DMND_READ (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
259 #define SNB_DMND_WRITE (SNB_DMND_RFO|SNB_LLC_RFO)
260 #define SNB_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
261
262 #define SNB_SNP_ANY (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
263 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
264 SNB_HITM)
265
266 #define SNB_DRAM_ANY (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
267 #define SNB_DRAM_REMOTE (SNB_REMOTE|SNB_SNP_ANY)
268
269 #define SNB_L3_ACCESS SNB_RESP_ANY
270 #define SNB_L3_MISS (SNB_DRAM_ANY|SNB_NON_DRAM)
271
272 static __initconst const u64 snb_hw_cache_extra_regs
273 [PERF_COUNT_HW_CACHE_MAX]
274 [PERF_COUNT_HW_CACHE_OP_MAX]
275 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
276 {
277 [ C(LL ) ] = {
278 [ C(OP_READ) ] = {
279 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
280 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS,
281 },
282 [ C(OP_WRITE) ] = {
283 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
284 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS,
285 },
286 [ C(OP_PREFETCH) ] = {
287 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
288 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
289 },
290 },
291 [ C(NODE) ] = {
292 [ C(OP_READ) ] = {
293 [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
294 [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
295 },
296 [ C(OP_WRITE) ] = {
297 [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
298 [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
299 },
300 [ C(OP_PREFETCH) ] = {
301 [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
302 [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
303 },
304 },
305 };
306
307 static __initconst const u64 snb_hw_cache_event_ids
308 [PERF_COUNT_HW_CACHE_MAX]
309 [PERF_COUNT_HW_CACHE_OP_MAX]
310 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
311 {
312 [ C(L1D) ] = {
313 [ C(OP_READ) ] = {
314 [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
315 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
316 },
317 [ C(OP_WRITE) ] = {
318 [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
319 [ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
320 },
321 [ C(OP_PREFETCH) ] = {
322 [ C(RESULT_ACCESS) ] = 0x0,
323 [ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
324 },
325 },
326 [ C(L1I ) ] = {
327 [ C(OP_READ) ] = {
328 [ C(RESULT_ACCESS) ] = 0x0,
329 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
330 },
331 [ C(OP_WRITE) ] = {
332 [ C(RESULT_ACCESS) ] = -1,
333 [ C(RESULT_MISS) ] = -1,
334 },
335 [ C(OP_PREFETCH) ] = {
336 [ C(RESULT_ACCESS) ] = 0x0,
337 [ C(RESULT_MISS) ] = 0x0,
338 },
339 },
340 [ C(LL ) ] = {
341 [ C(OP_READ) ] = {
342 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
343 [ C(RESULT_ACCESS) ] = 0x01b7,
344 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
345 [ C(RESULT_MISS) ] = 0x01b7,
346 },
347 [ C(OP_WRITE) ] = {
348 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
349 [ C(RESULT_ACCESS) ] = 0x01b7,
350 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
351 [ C(RESULT_MISS) ] = 0x01b7,
352 },
353 [ C(OP_PREFETCH) ] = {
354 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
355 [ C(RESULT_ACCESS) ] = 0x01b7,
356 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
357 [ C(RESULT_MISS) ] = 0x01b7,
358 },
359 },
360 [ C(DTLB) ] = {
361 [ C(OP_READ) ] = {
362 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
363 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
364 },
365 [ C(OP_WRITE) ] = {
366 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
367 [ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
368 },
369 [ C(OP_PREFETCH) ] = {
370 [ C(RESULT_ACCESS) ] = 0x0,
371 [ C(RESULT_MISS) ] = 0x0,
372 },
373 },
374 [ C(ITLB) ] = {
375 [ C(OP_READ) ] = {
376 [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
377 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
378 },
379 [ C(OP_WRITE) ] = {
380 [ C(RESULT_ACCESS) ] = -1,
381 [ C(RESULT_MISS) ] = -1,
382 },
383 [ C(OP_PREFETCH) ] = {
384 [ C(RESULT_ACCESS) ] = -1,
385 [ C(RESULT_MISS) ] = -1,
386 },
387 },
388 [ C(BPU ) ] = {
389 [ C(OP_READ) ] = {
390 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
391 [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
392 },
393 [ C(OP_WRITE) ] = {
394 [ C(RESULT_ACCESS) ] = -1,
395 [ C(RESULT_MISS) ] = -1,
396 },
397 [ C(OP_PREFETCH) ] = {
398 [ C(RESULT_ACCESS) ] = -1,
399 [ C(RESULT_MISS) ] = -1,
400 },
401 },
402 [ C(NODE) ] = {
403 [ C(OP_READ) ] = {
404 [ C(RESULT_ACCESS) ] = 0x01b7,
405 [ C(RESULT_MISS) ] = 0x01b7,
406 },
407 [ C(OP_WRITE) ] = {
408 [ C(RESULT_ACCESS) ] = 0x01b7,
409 [ C(RESULT_MISS) ] = 0x01b7,
410 },
411 [ C(OP_PREFETCH) ] = {
412 [ C(RESULT_ACCESS) ] = 0x01b7,
413 [ C(RESULT_MISS) ] = 0x01b7,
414 },
415 },
416
417 };
418
419 static __initconst const u64 westmere_hw_cache_event_ids
420 [PERF_COUNT_HW_CACHE_MAX]
421 [PERF_COUNT_HW_CACHE_OP_MAX]
422 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
423 {
424 [ C(L1D) ] = {
425 [ C(OP_READ) ] = {
426 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
427 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
428 },
429 [ C(OP_WRITE) ] = {
430 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
431 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
432 },
433 [ C(OP_PREFETCH) ] = {
434 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
435 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
436 },
437 },
438 [ C(L1I ) ] = {
439 [ C(OP_READ) ] = {
440 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
441 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
442 },
443 [ C(OP_WRITE) ] = {
444 [ C(RESULT_ACCESS) ] = -1,
445 [ C(RESULT_MISS) ] = -1,
446 },
447 [ C(OP_PREFETCH) ] = {
448 [ C(RESULT_ACCESS) ] = 0x0,
449 [ C(RESULT_MISS) ] = 0x0,
450 },
451 },
452 [ C(LL ) ] = {
453 [ C(OP_READ) ] = {
454 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
455 [ C(RESULT_ACCESS) ] = 0x01b7,
456 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
457 [ C(RESULT_MISS) ] = 0x01b7,
458 },
459 /*
460 * Use RFO, not WRITEBACK, because a write miss would typically occur
461 * on RFO.
462 */
463 [ C(OP_WRITE) ] = {
464 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
465 [ C(RESULT_ACCESS) ] = 0x01b7,
466 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
467 [ C(RESULT_MISS) ] = 0x01b7,
468 },
469 [ C(OP_PREFETCH) ] = {
470 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
471 [ C(RESULT_ACCESS) ] = 0x01b7,
472 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
473 [ C(RESULT_MISS) ] = 0x01b7,
474 },
475 },
476 [ C(DTLB) ] = {
477 [ C(OP_READ) ] = {
478 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
479 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
480 },
481 [ C(OP_WRITE) ] = {
482 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
483 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
484 },
485 [ C(OP_PREFETCH) ] = {
486 [ C(RESULT_ACCESS) ] = 0x0,
487 [ C(RESULT_MISS) ] = 0x0,
488 },
489 },
490 [ C(ITLB) ] = {
491 [ C(OP_READ) ] = {
492 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
493 [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
494 },
495 [ C(OP_WRITE) ] = {
496 [ C(RESULT_ACCESS) ] = -1,
497 [ C(RESULT_MISS) ] = -1,
498 },
499 [ C(OP_PREFETCH) ] = {
500 [ C(RESULT_ACCESS) ] = -1,
501 [ C(RESULT_MISS) ] = -1,
502 },
503 },
504 [ C(BPU ) ] = {
505 [ C(OP_READ) ] = {
506 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
507 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
508 },
509 [ C(OP_WRITE) ] = {
510 [ C(RESULT_ACCESS) ] = -1,
511 [ C(RESULT_MISS) ] = -1,
512 },
513 [ C(OP_PREFETCH) ] = {
514 [ C(RESULT_ACCESS) ] = -1,
515 [ C(RESULT_MISS) ] = -1,
516 },
517 },
518 [ C(NODE) ] = {
519 [ C(OP_READ) ] = {
520 [ C(RESULT_ACCESS) ] = 0x01b7,
521 [ C(RESULT_MISS) ] = 0x01b7,
522 },
523 [ C(OP_WRITE) ] = {
524 [ C(RESULT_ACCESS) ] = 0x01b7,
525 [ C(RESULT_MISS) ] = 0x01b7,
526 },
527 [ C(OP_PREFETCH) ] = {
528 [ C(RESULT_ACCESS) ] = 0x01b7,
529 [ C(RESULT_MISS) ] = 0x01b7,
530 },
531 },
532 };
533
534 /*
535 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
536 * See IA32 SDM Vol 3B 30.6.1.3
537 */
538
539 #define NHM_DMND_DATA_RD (1 << 0)
540 #define NHM_DMND_RFO (1 << 1)
541 #define NHM_DMND_IFETCH (1 << 2)
542 #define NHM_DMND_WB (1 << 3)
543 #define NHM_PF_DATA_RD (1 << 4)
544 #define NHM_PF_DATA_RFO (1 << 5)
545 #define NHM_PF_IFETCH (1 << 6)
546 #define NHM_OFFCORE_OTHER (1 << 7)
547 #define NHM_UNCORE_HIT (1 << 8)
548 #define NHM_OTHER_CORE_HIT_SNP (1 << 9)
549 #define NHM_OTHER_CORE_HITM (1 << 10)
550 /* reserved */
551 #define NHM_REMOTE_CACHE_FWD (1 << 12)
552 #define NHM_REMOTE_DRAM (1 << 13)
553 #define NHM_LOCAL_DRAM (1 << 14)
554 #define NHM_NON_DRAM (1 << 15)
555
556 #define NHM_LOCAL (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
557 #define NHM_REMOTE (NHM_REMOTE_DRAM)
558
559 #define NHM_DMND_READ (NHM_DMND_DATA_RD)
560 #define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
561 #define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
562
563 #define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
564 #define NHM_L3_MISS (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
565 #define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)
566
567 static __initconst const u64 nehalem_hw_cache_extra_regs
568 [PERF_COUNT_HW_CACHE_MAX]
569 [PERF_COUNT_HW_CACHE_OP_MAX]
570 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
571 {
572 [ C(LL ) ] = {
573 [ C(OP_READ) ] = {
574 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
575 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
576 },
577 [ C(OP_WRITE) ] = {
578 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
579 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
580 },
581 [ C(OP_PREFETCH) ] = {
582 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
583 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
584 },
585 },
586 [ C(NODE) ] = {
587 [ C(OP_READ) ] = {
588 [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
589 [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
590 },
591 [ C(OP_WRITE) ] = {
592 [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
593 [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
594 },
595 [ C(OP_PREFETCH) ] = {
596 [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
597 [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
598 },
599 },
600 };
601
602 static __initconst const u64 nehalem_hw_cache_event_ids
603 [PERF_COUNT_HW_CACHE_MAX]
604 [PERF_COUNT_HW_CACHE_OP_MAX]
605 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
606 {
607 [ C(L1D) ] = {
608 [ C(OP_READ) ] = {
609 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
610 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
611 },
612 [ C(OP_WRITE) ] = {
613 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
614 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
615 },
616 [ C(OP_PREFETCH) ] = {
617 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
618 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
619 },
620 },
621 [ C(L1I ) ] = {
622 [ C(OP_READ) ] = {
623 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
624 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
625 },
626 [ C(OP_WRITE) ] = {
627 [ C(RESULT_ACCESS) ] = -1,
628 [ C(RESULT_MISS) ] = -1,
629 },
630 [ C(OP_PREFETCH) ] = {
631 [ C(RESULT_ACCESS) ] = 0x0,
632 [ C(RESULT_MISS) ] = 0x0,
633 },
634 },
635 [ C(LL ) ] = {
636 [ C(OP_READ) ] = {
637 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
638 [ C(RESULT_ACCESS) ] = 0x01b7,
639 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
640 [ C(RESULT_MISS) ] = 0x01b7,
641 },
642 /*
643 * Use RFO, not WRITEBACK, because a write miss would typically occur
644 * on RFO.
645 */
646 [ C(OP_WRITE) ] = {
647 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
648 [ C(RESULT_ACCESS) ] = 0x01b7,
649 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
650 [ C(RESULT_MISS) ] = 0x01b7,
651 },
652 [ C(OP_PREFETCH) ] = {
653 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
654 [ C(RESULT_ACCESS) ] = 0x01b7,
655 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
656 [ C(RESULT_MISS) ] = 0x01b7,
657 },
658 },
659 [ C(DTLB) ] = {
660 [ C(OP_READ) ] = {
661 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
662 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
663 },
664 [ C(OP_WRITE) ] = {
665 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
666 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
667 },
668 [ C(OP_PREFETCH) ] = {
669 [ C(RESULT_ACCESS) ] = 0x0,
670 [ C(RESULT_MISS) ] = 0x0,
671 },
672 },
673 [ C(ITLB) ] = {
674 [ C(OP_READ) ] = {
675 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
676 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
677 },
678 [ C(OP_WRITE) ] = {
679 [ C(RESULT_ACCESS) ] = -1,
680 [ C(RESULT_MISS) ] = -1,
681 },
682 [ C(OP_PREFETCH) ] = {
683 [ C(RESULT_ACCESS) ] = -1,
684 [ C(RESULT_MISS) ] = -1,
685 },
686 },
687 [ C(BPU ) ] = {
688 [ C(OP_READ) ] = {
689 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
690 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
691 },
692 [ C(OP_WRITE) ] = {
693 [ C(RESULT_ACCESS) ] = -1,
694 [ C(RESULT_MISS) ] = -1,
695 },
696 [ C(OP_PREFETCH) ] = {
697 [ C(RESULT_ACCESS) ] = -1,
698 [ C(RESULT_MISS) ] = -1,
699 },
700 },
701 [ C(NODE) ] = {
702 [ C(OP_READ) ] = {
703 [ C(RESULT_ACCESS) ] = 0x01b7,
704 [ C(RESULT_MISS) ] = 0x01b7,
705 },
706 [ C(OP_WRITE) ] = {
707 [ C(RESULT_ACCESS) ] = 0x01b7,
708 [ C(RESULT_MISS) ] = 0x01b7,
709 },
710 [ C(OP_PREFETCH) ] = {
711 [ C(RESULT_ACCESS) ] = 0x01b7,
712 [ C(RESULT_MISS) ] = 0x01b7,
713 },
714 },
715 };
716
717 static __initconst const u64 core2_hw_cache_event_ids
718 [PERF_COUNT_HW_CACHE_MAX]
719 [PERF_COUNT_HW_CACHE_OP_MAX]
720 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
721 {
722 [ C(L1D) ] = {
723 [ C(OP_READ) ] = {
724 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
725 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
726 },
727 [ C(OP_WRITE) ] = {
728 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
729 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
730 },
731 [ C(OP_PREFETCH) ] = {
732 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
733 [ C(RESULT_MISS) ] = 0,
734 },
735 },
736 [ C(L1I ) ] = {
737 [ C(OP_READ) ] = {
738 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
739 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
740 },
741 [ C(OP_WRITE) ] = {
742 [ C(RESULT_ACCESS) ] = -1,
743 [ C(RESULT_MISS) ] = -1,
744 },
745 [ C(OP_PREFETCH) ] = {
746 [ C(RESULT_ACCESS) ] = 0,
747 [ C(RESULT_MISS) ] = 0,
748 },
749 },
750 [ C(LL ) ] = {
751 [ C(OP_READ) ] = {
752 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
753 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
754 },
755 [ C(OP_WRITE) ] = {
756 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
757 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
758 },
759 [ C(OP_PREFETCH) ] = {
760 [ C(RESULT_ACCESS) ] = 0,
761 [ C(RESULT_MISS) ] = 0,
762 },
763 },
764 [ C(DTLB) ] = {
765 [ C(OP_READ) ] = {
766 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
767 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
768 },
769 [ C(OP_WRITE) ] = {
770 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
771 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
772 },
773 [ C(OP_PREFETCH) ] = {
774 [ C(RESULT_ACCESS) ] = 0,
775 [ C(RESULT_MISS) ] = 0,
776 },
777 },
778 [ C(ITLB) ] = {
779 [ C(OP_READ) ] = {
780 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
781 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
782 },
783 [ C(OP_WRITE) ] = {
784 [ C(RESULT_ACCESS) ] = -1,
785 [ C(RESULT_MISS) ] = -1,
786 },
787 [ C(OP_PREFETCH) ] = {
788 [ C(RESULT_ACCESS) ] = -1,
789 [ C(RESULT_MISS) ] = -1,
790 },
791 },
792 [ C(BPU ) ] = {
793 [ C(OP_READ) ] = {
794 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
795 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
796 },
797 [ C(OP_WRITE) ] = {
798 [ C(RESULT_ACCESS) ] = -1,
799 [ C(RESULT_MISS) ] = -1,
800 },
801 [ C(OP_PREFETCH) ] = {
802 [ C(RESULT_ACCESS) ] = -1,
803 [ C(RESULT_MISS) ] = -1,
804 },
805 },
806 };
807
808 static __initconst const u64 atom_hw_cache_event_ids
809 [PERF_COUNT_HW_CACHE_MAX]
810 [PERF_COUNT_HW_CACHE_OP_MAX]
811 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
812 {
813 [ C(L1D) ] = {
814 [ C(OP_READ) ] = {
815 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
816 [ C(RESULT_MISS) ] = 0,
817 },
818 [ C(OP_WRITE) ] = {
819 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
820 [ C(RESULT_MISS) ] = 0,
821 },
822 [ C(OP_PREFETCH) ] = {
823 [ C(RESULT_ACCESS) ] = 0x0,
824 [ C(RESULT_MISS) ] = 0,
825 },
826 },
827 [ C(L1I ) ] = {
828 [ C(OP_READ) ] = {
829 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
830 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
831 },
832 [ C(OP_WRITE) ] = {
833 [ C(RESULT_ACCESS) ] = -1,
834 [ C(RESULT_MISS) ] = -1,
835 },
836 [ C(OP_PREFETCH) ] = {
837 [ C(RESULT_ACCESS) ] = 0,
838 [ C(RESULT_MISS) ] = 0,
839 },
840 },
841 [ C(LL ) ] = {
842 [ C(OP_READ) ] = {
843 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
844 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
845 },
846 [ C(OP_WRITE) ] = {
847 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
848 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
849 },
850 [ C(OP_PREFETCH) ] = {
851 [ C(RESULT_ACCESS) ] = 0,
852 [ C(RESULT_MISS) ] = 0,
853 },
854 },
855 [ C(DTLB) ] = {
856 [ C(OP_READ) ] = {
857 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
858 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
859 },
860 [ C(OP_WRITE) ] = {
861 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
862 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
863 },
864 [ C(OP_PREFETCH) ] = {
865 [ C(RESULT_ACCESS) ] = 0,
866 [ C(RESULT_MISS) ] = 0,
867 },
868 },
869 [ C(ITLB) ] = {
870 [ C(OP_READ) ] = {
871 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
872 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
873 },
874 [ C(OP_WRITE) ] = {
875 [ C(RESULT_ACCESS) ] = -1,
876 [ C(RESULT_MISS) ] = -1,
877 },
878 [ C(OP_PREFETCH) ] = {
879 [ C(RESULT_ACCESS) ] = -1,
880 [ C(RESULT_MISS) ] = -1,
881 },
882 },
883 [ C(BPU ) ] = {
884 [ C(OP_READ) ] = {
885 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
886 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
887 },
888 [ C(OP_WRITE) ] = {
889 [ C(RESULT_ACCESS) ] = -1,
890 [ C(RESULT_MISS) ] = -1,
891 },
892 [ C(OP_PREFETCH) ] = {
893 [ C(RESULT_ACCESS) ] = -1,
894 [ C(RESULT_MISS) ] = -1,
895 },
896 },
897 };
898
899 static struct extra_reg intel_slm_extra_regs[] __read_mostly =
900 {
901 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
902 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
903 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x768005ffffull, RSP_1),
904 EVENT_EXTRA_END
905 };
906
907 #define SLM_DMND_READ SNB_DMND_DATA_RD
908 #define SLM_DMND_WRITE SNB_DMND_RFO
909 #define SLM_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
910
911 #define SLM_SNP_ANY (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
912 #define SLM_LLC_ACCESS SNB_RESP_ANY
913 #define SLM_LLC_MISS (SLM_SNP_ANY|SNB_NON_DRAM)
914
915 static __initconst const u64 slm_hw_cache_extra_regs
916 [PERF_COUNT_HW_CACHE_MAX]
917 [PERF_COUNT_HW_CACHE_OP_MAX]
918 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
919 {
920 [ C(LL ) ] = {
921 [ C(OP_READ) ] = {
922 [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
923 [ C(RESULT_MISS) ] = SLM_DMND_READ|SLM_LLC_MISS,
924 },
925 [ C(OP_WRITE) ] = {
926 [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
927 [ C(RESULT_MISS) ] = SLM_DMND_WRITE|SLM_LLC_MISS,
928 },
929 [ C(OP_PREFETCH) ] = {
930 [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
931 [ C(RESULT_MISS) ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
932 },
933 },
934 };
935
936 static __initconst const u64 slm_hw_cache_event_ids
937 [PERF_COUNT_HW_CACHE_MAX]
938 [PERF_COUNT_HW_CACHE_OP_MAX]
939 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
940 {
941 [ C(L1D) ] = {
942 [ C(OP_READ) ] = {
943 [ C(RESULT_ACCESS) ] = 0,
944 [ C(RESULT_MISS) ] = 0x0104, /* LD_DCU_MISS */
945 },
946 [ C(OP_WRITE) ] = {
947 [ C(RESULT_ACCESS) ] = 0,
948 [ C(RESULT_MISS) ] = 0,
949 },
950 [ C(OP_PREFETCH) ] = {
951 [ C(RESULT_ACCESS) ] = 0,
952 [ C(RESULT_MISS) ] = 0,
953 },
954 },
955 [ C(L1I ) ] = {
956 [ C(OP_READ) ] = {
957 [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
958 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
959 },
960 [ C(OP_WRITE) ] = {
961 [ C(RESULT_ACCESS) ] = -1,
962 [ C(RESULT_MISS) ] = -1,
963 },
964 [ C(OP_PREFETCH) ] = {
965 [ C(RESULT_ACCESS) ] = 0,
966 [ C(RESULT_MISS) ] = 0,
967 },
968 },
969 [ C(LL ) ] = {
970 [ C(OP_READ) ] = {
971 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
972 [ C(RESULT_ACCESS) ] = 0x01b7,
973 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
974 [ C(RESULT_MISS) ] = 0x01b7,
975 },
976 [ C(OP_WRITE) ] = {
977 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
978 [ C(RESULT_ACCESS) ] = 0x01b7,
979 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
980 [ C(RESULT_MISS) ] = 0x01b7,
981 },
982 [ C(OP_PREFETCH) ] = {
983 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
984 [ C(RESULT_ACCESS) ] = 0x01b7,
985 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
986 [ C(RESULT_MISS) ] = 0x01b7,
987 },
988 },
989 [ C(DTLB) ] = {
990 [ C(OP_READ) ] = {
991 [ C(RESULT_ACCESS) ] = 0,
992 [ C(RESULT_MISS) ] = 0x0804, /* LD_DTLB_MISS */
993 },
994 [ C(OP_WRITE) ] = {
995 [ C(RESULT_ACCESS) ] = 0,
996 [ C(RESULT_MISS) ] = 0,
997 },
998 [ C(OP_PREFETCH) ] = {
999 [ C(RESULT_ACCESS) ] = 0,
1000 [ C(RESULT_MISS) ] = 0,
1001 },
1002 },
1003 [ C(ITLB) ] = {
1004 [ C(OP_READ) ] = {
1005 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1006 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
1007 },
1008 [ C(OP_WRITE) ] = {
1009 [ C(RESULT_ACCESS) ] = -1,
1010 [ C(RESULT_MISS) ] = -1,
1011 },
1012 [ C(OP_PREFETCH) ] = {
1013 [ C(RESULT_ACCESS) ] = -1,
1014 [ C(RESULT_MISS) ] = -1,
1015 },
1016 },
1017 [ C(BPU ) ] = {
1018 [ C(OP_READ) ] = {
1019 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1020 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1021 },
1022 [ C(OP_WRITE) ] = {
1023 [ C(RESULT_ACCESS) ] = -1,
1024 [ C(RESULT_MISS) ] = -1,
1025 },
1026 [ C(OP_PREFETCH) ] = {
1027 [ C(RESULT_ACCESS) ] = -1,
1028 [ C(RESULT_MISS) ] = -1,
1029 },
1030 },
1031 };
1032
1033 static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
1034 {
1035 /* user explicitly requested branch sampling */
1036 if (has_branch_stack(event))
1037 return true;
1038
1039 /* implicit branch sampling to correct PEBS skid */
1040 if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1 &&
1041 x86_pmu.intel_cap.pebs_format < 2)
1042 return true;
1043
1044 return false;
1045 }
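/*
 * Example of the implicit case: on a PMU with pebs_trap and
 * pebs_format < 2, a request such as "cycles:pp" (precise_ip == 2) with no
 * explicit branch stack lands here and turns on LBR sampling, which is
 * later used to correct the PEBS skid back to the precise instruction.
 */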
1046
1047 static void intel_pmu_disable_all(void)
1048 {
1049 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1050
1051 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1052
1053 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1054 intel_pmu_disable_bts();
1055
1056 intel_pmu_pebs_disable_all();
1057 intel_pmu_lbr_disable_all();
1058 }
1059
1060 static void intel_pmu_enable_all(int added)
1061 {
1062 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1063
1064 intel_pmu_pebs_enable_all();
1065 intel_pmu_lbr_enable_all();
1066 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
1067 x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
1068
1069 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1070 struct perf_event *event =
1071 cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
1072
1073 if (WARN_ON_ONCE(!event))
1074 return;
1075
1076 intel_pmu_enable_bts(event->hw.config);
1077 }
1078 }
1079
1080 /*
1081 * Workaround for:
1082 * Intel Errata AAK100 (model 26)
1083 * Intel Errata AAP53 (model 30)
1084 * Intel Errata BD53 (model 44)
1085 *
1086 * The official story:
1087 * These chips need to be 'reset' when adding counters by programming the
1088 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
1089 * in sequence on the same PMC or on different PMCs.
1090 *
1091 * In practice it appears some of these events do in fact count, and
1092 * we need to program all 4 events.
1093 */
1094 static void intel_pmu_nhm_workaround(void)
1095 {
1096 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1097 static const unsigned long nhm_magic[4] = {
1098 0x4300B5,
1099 0x4300D2,
1100 0x4300B1,
1101 0x4300B1
1102 };
1103 struct perf_event *event;
1104 int i;
1105
1106 /*
1107 * The errata requires the following steps:
1108 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
1109 * 2) Configure 4 PERFEVTSELx with the magic events and clear
1110 * the corresponding PMCx;
1111 * 3) Set bits 0-3 of MSR_CORE_PERF_GLOBAL_CTRL;
1112 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
1113 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
1114 */
1115
1116 /*
1117 * The real steps we choose are a little different from above.
1118 * A) To reduce MSR operations, we don't run step 1) as they
1119 * are already cleared before this function is called;
1120 * B) Call x86_perf_event_update to save PMCx before configuring
1121 * PERFEVTSELx with magic number;
1122 * C) For step 5), we clear a PERFEVTSELx/PMCx pair only when it
1123 * is not currently in use.
1124 * D) Call x86_perf_event_set_period to restore PMCx;
1125 */
1126
1127 /* We always operate on 4 pairs of PERF counters */
1128 for (i = 0; i < 4; i++) {
1129 event = cpuc->events[i];
1130 if (event)
1131 x86_perf_event_update(event);
1132 }
1133
1134 for (i = 0; i < 4; i++) {
1135 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
1136 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
1137 }
1138
1139 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
1140 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
1141
1142 for (i = 0; i < 4; i++) {
1143 event = cpuc->events[i];
1144
1145 if (event) {
1146 x86_perf_event_set_period(event);
1147 __x86_pmu_enable_event(&event->hw,
1148 ARCH_PERFMON_EVENTSEL_ENABLE);
1149 } else
1150 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
1151 }
1152 }
1153
1154 static void intel_pmu_nhm_enable_all(int added)
1155 {
1156 if (added)
1157 intel_pmu_nhm_workaround();
1158 intel_pmu_enable_all(added);
1159 }
1160
1161 static inline u64 intel_pmu_get_status(void)
1162 {
1163 u64 status;
1164
1165 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1166
1167 return status;
1168 }
1169
1170 static inline void intel_pmu_ack_status(u64 ack)
1171 {
1172 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
1173 }
1174
1175 static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
1176 {
1177 int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
1178 u64 ctrl_val, mask;
1179
1180 mask = 0xfULL << (idx * 4);
1181
1182 rdmsrl(hwc->config_base, ctrl_val);
1183 ctrl_val &= ~mask;
1184 wrmsrl(hwc->config_base, ctrl_val);
1185 }
1186
1187 static inline bool event_is_checkpointed(struct perf_event *event)
1188 {
1189 return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
1190 }
1191
1192 static void intel_pmu_disable_event(struct perf_event *event)
1193 {
1194 struct hw_perf_event *hwc = &event->hw;
1195 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1196
1197 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
1198 intel_pmu_disable_bts();
1199 intel_pmu_drain_bts_buffer();
1200 return;
1201 }
1202
1203 cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
1204 cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
1205 cpuc->intel_cp_status &= ~(1ull << hwc->idx);
1206
1207 /*
1208 * LBR must be disabled before the actual event is disabled,
1209 * because any event may be combined with LBR
1210 */
1211 if (intel_pmu_needs_lbr_smpl(event))
1212 intel_pmu_lbr_disable(event);
1213
1214 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1215 intel_pmu_disable_fixed(hwc);
1216 return;
1217 }
1218
1219 x86_pmu_disable_event(event);
1220
1221 if (unlikely(event->attr.precise_ip))
1222 intel_pmu_pebs_disable(event);
1223 }
1224
1225 static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
1226 {
1227 int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
1228 u64 ctrl_val, bits, mask;
1229
1230 /*
1231 * Enable IRQ generation (0x8),
1232 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1233 * if requested:
1234 */
1235 bits = 0x8ULL;
1236 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1237 bits |= 0x2;
1238 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1239 bits |= 0x1;
1240
1241 /*
1242 * ANY bit is supported in v3 and up
1243 */
1244 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
1245 bits |= 0x4;
1246
1247 bits <<= (idx * 4);
1248 mask = 0xfULL << (idx * 4);
1249
1250 rdmsrl(hwc->config_base, ctrl_val);
1251 ctrl_val &= ~mask;
1252 ctrl_val |= bits;
1253 wrmsrl(hwc->config_base, ctrl_val);
1254 }
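/*
 * Worked example of the layout above: each fixed counter owns a 4-bit
 * field at (idx * 4) in MSR_ARCH_PERFMON_FIXED_CTR_CTRL. For fixed
 * counter 1 (CPU_CLK_UNHALTED.CORE) counting in ring 0 and ring 3 with
 * PMI enabled, bits = 0x8 | 0x2 | 0x1 = 0xb, so the function writes
 * 0xb << 4 = 0xb0 under the mask 0xf << 4 = 0xf0.
 */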
1255
1256 static void intel_pmu_enable_event(struct perf_event *event)
1257 {
1258 struct hw_perf_event *hwc = &event->hw;
1259 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1260
1261 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
1262 if (!__this_cpu_read(cpu_hw_events.enabled))
1263 return;
1264
1265 intel_pmu_enable_bts(hwc->config);
1266 return;
1267 }
1268 /*
1269 * LBR must be enabled before the actual event is enabled,
1270 * because any event may be combined with LBR
1271 */
1272 if (intel_pmu_needs_lbr_smpl(event))
1273 intel_pmu_lbr_enable(event);
1274
1275 if (event->attr.exclude_host)
1276 cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
1277 if (event->attr.exclude_guest)
1278 cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
1279
1280 if (unlikely(event_is_checkpointed(event)))
1281 cpuc->intel_cp_status |= (1ull << hwc->idx);
1282
1283 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1284 intel_pmu_enable_fixed(hwc);
1285 return;
1286 }
1287
1288 if (unlikely(event->attr.precise_ip))
1289 intel_pmu_pebs_enable(event);
1290
1291 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
1292 }
1293
1294 /*
1295 * Save and restart an expired event. Called by NMI contexts,
1296 * so it has to be careful about preempting normal event ops:
1297 */
1298 int intel_pmu_save_and_restart(struct perf_event *event)
1299 {
1300 x86_perf_event_update(event);
1301 /*
1302 * For a checkpointed counter, always reset back to 0. This
1303 * avoids a situation where the counter overflows, aborts the
1304 * transaction, and is then set back to a value shortly before the
1305 * overflow, so it overflows and aborts again.
1306 */
1307 if (unlikely(event_is_checkpointed(event))) {
1308 /* No race with NMIs because the counter should not be armed */
1309 wrmsrl(event->hw.event_base, 0);
1310 local64_set(&event->hw.prev_count, 0);
1311 }
1312 return x86_perf_event_set_period(event);
1313 }
1314
1315 static void intel_pmu_reset(void)
1316 {
1317 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
1318 unsigned long flags;
1319 int idx;
1320
1321 if (!x86_pmu.num_counters)
1322 return;
1323
1324 local_irq_save(flags);
1325
1326 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
1327
1328 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1329 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
1330 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
1331 }
1332 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
1333 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1334
1335 if (ds)
1336 ds->bts_index = ds->bts_buffer_base;
1337
1338 local_irq_restore(flags);
1339 }
1340
1341 /*
1342 * This handler is triggered by the local APIC, so the APIC IRQ handling
1343 * rules apply:
1344 */
1345 static int intel_pmu_handle_irq(struct pt_regs *regs)
1346 {
1347 struct perf_sample_data data;
1348 struct cpu_hw_events *cpuc;
1349 int bit, loops;
1350 u64 status;
1351 int handled;
1352
1353 cpuc = &__get_cpu_var(cpu_hw_events);
1354
1355 /*
1356 * No known reason not to always do late ACK,
1357 * but just in case make it opt-in.
1358 */
1359 if (!x86_pmu.late_ack)
1360 apic_write(APIC_LVTPC, APIC_DM_NMI);
1361 intel_pmu_disable_all();
1362 handled = intel_pmu_drain_bts_buffer();
1363 status = intel_pmu_get_status();
1364 if (!status)
1365 goto done;
1366
1367 loops = 0;
1368 again:
1369 intel_pmu_ack_status(status);
1370 if (++loops > 100) {
1371 static bool warned = false;
1372 if (!warned) {
1373 WARN(1, "perfevents: irq loop stuck!\n");
1374 perf_event_print_debug();
1375 warned = true;
1376 }
1377 intel_pmu_reset();
1378 goto done;
1379 }
1380
1381 inc_irq_stat(apic_perf_irqs);
1382
1383 intel_pmu_lbr_read();
1384
1385 /*
1386 * PEBS overflow sets bit 62 in the global status register
1387 */
1388 if (__test_and_clear_bit(62, (unsigned long *)&status)) {
1389 handled++;
1390 x86_pmu.drain_pebs(regs);
1391 }
1392
1393 /*
1394 * Checkpointed counters can lead to 'spurious' PMIs because the
1395 * rollback caused by the PMI will have cleared the overflow status
1396 * bit. Therefore always force probe these counters.
1397 */
1398 status |= cpuc->intel_cp_status;
1399
1400 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
1401 struct perf_event *event = cpuc->events[bit];
1402
1403 handled++;
1404
1405 if (!test_bit(bit, cpuc->active_mask))
1406 continue;
1407
1408 if (!intel_pmu_save_and_restart(event))
1409 continue;
1410
1411 perf_sample_data_init(&data, 0, event->hw.last_period);
1412
1413 if (has_branch_stack(event))
1414 data.br_stack = &cpuc->lbr_stack;
1415
1416 if (perf_event_overflow(event, &data, regs))
1417 x86_pmu_stop(event, 0);
1418 }
1419
1420 /*
1421 * Repeat if there is more work to be done:
1422 */
1423 status = intel_pmu_get_status();
1424 if (status)
1425 goto again;
1426
1427 done:
1428 intel_pmu_enable_all(0);
1429 /*
1430 * Only unmask the NMI after the overflow counters
1431 * have been reset. This avoids spurious NMIs on
1432 * Haswell CPUs.
1433 */
1434 if (x86_pmu.late_ack)
1435 apic_write(APIC_LVTPC, APIC_DM_NMI);
1436 return handled;
1437 }
1438
1439 static struct event_constraint *
1440 intel_bts_constraints(struct perf_event *event)
1441 {
1442 struct hw_perf_event *hwc = &event->hw;
1443 unsigned int hw_event, bts_event;
1444
1445 if (event->attr.freq)
1446 return NULL;
1447
1448 hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
1449 bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
1450
1451 if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
1452 return &bts_constraint;
1453
1454 return NULL;
1455 }
1456
1457 static int intel_alt_er(int idx)
1458 {
1459 if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
1460 return idx;
1461
1462 if (idx == EXTRA_REG_RSP_0)
1463 return EXTRA_REG_RSP_1;
1464
1465 if (idx == EXTRA_REG_RSP_1)
1466 return EXTRA_REG_RSP_0;
1467
1468 return idx;
1469 }
1470
1471 static void intel_fixup_er(struct perf_event *event, int idx)
1472 {
1473 event->hw.extra_reg.idx = idx;
1474
1475 if (idx == EXTRA_REG_RSP_0) {
1476 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
1477 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
1478 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
1479 } else if (idx == EXTRA_REG_RSP_1) {
1480 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
1481 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
1482 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
1483 }
1484 }
1485
1486 /*
1487 * manage allocation of shared extra msr for certain events
1488 *
1489 * sharing can be:
1490 * per-cpu: to be shared between the various events on a single PMU
1491 * per-core: per-cpu + shared by HT threads
1492 */
1493 static struct event_constraint *
1494 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
1495 struct perf_event *event,
1496 struct hw_perf_event_extra *reg)
1497 {
1498 struct event_constraint *c = &emptyconstraint;
1499 struct er_account *era;
1500 unsigned long flags;
1501 int idx = reg->idx;
1502
1503 /*
1504 * reg->alloc can be set due to existing state, so for fake cpuc we
1505 * need to ignore this, otherwise we might fail to allocate proper fake
1506 * state for this extra reg constraint. Also see the comment below.
1507 */
1508 if (reg->alloc && !cpuc->is_fake)
1509 return NULL; /* call x86_get_event_constraint() */
1510
1511 again:
1512 era = &cpuc->shared_regs->regs[idx];
1513 /*
1514 * we use spin_lock_irqsave() to avoid lockdep issues when
1515 * passing a fake cpuc
1516 */
1517 raw_spin_lock_irqsave(&era->lock, flags);
1518
1519 if (!atomic_read(&era->ref) || era->config == reg->config) {
1520
1521 /*
1522 * If it's a fake cpuc -- as per validate_{group,event}() we
1523 * shouldn't touch event state and we can avoid doing so
1524 * since both will only call get_event_constraints() once
1525 * on each event, this avoids the need for reg->alloc.
1526 *
1527 * Not doing the ER fixup will only result in era->reg being
1528 * wrong, but since we won't actually try and program hardware
1529 * this isn't a problem either.
1530 */
1531 if (!cpuc->is_fake) {
1532 if (idx != reg->idx)
1533 intel_fixup_er(event, idx);
1534
1535 /*
1536 * x86_schedule_events() can call get_event_constraints()
1537 * multiple times on events in the case of incremental
1538 * scheduling. reg->alloc ensures we only do the ER
1539 * allocation once.
1540 */
1541 reg->alloc = 1;
1542 }
1543
1544 /* lock in msr value */
1545 era->config = reg->config;
1546 era->reg = reg->reg;
1547
1548 /* one more user */
1549 atomic_inc(&era->ref);
1550
1551 /*
1552 * need to call x86_get_event_constraint()
1553 * to check if associated event has constraints
1554 */
1555 c = NULL;
1556 } else {
1557 idx = intel_alt_er(idx);
1558 if (idx != reg->idx) {
1559 raw_spin_unlock_irqrestore(&era->lock, flags);
1560 goto again;
1561 }
1562 }
1563 raw_spin_unlock_irqrestore(&era->lock, flags);
1564
1565 return c;
1566 }
1567
1568 static void
1569 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
1570 struct hw_perf_event_extra *reg)
1571 {
1572 struct er_account *era;
1573
1574 /*
1575 * Only put the constraint if the extra reg was actually allocated. Also
1576 * takes care of events which do not use an extra shared reg.
1577 *
1578 * Also, if this is a fake cpuc we shouldn't touch any event state
1579 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
1580 * either since it'll be thrown out.
1581 */
1582 if (!reg->alloc || cpuc->is_fake)
1583 return;
1584
1585 era = &cpuc->shared_regs->regs[reg->idx];
1586
1587 /* one fewer user */
1588 atomic_dec(&era->ref);
1589
1590 /* allocate again next time */
1591 reg->alloc = 0;
1592 }
1593
1594 static struct event_constraint *
1595 intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
1596 struct perf_event *event)
1597 {
1598 struct event_constraint *c = NULL, *d;
1599 struct hw_perf_event_extra *xreg, *breg;
1600
1601 xreg = &event->hw.extra_reg;
1602 if (xreg->idx != EXTRA_REG_NONE) {
1603 c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
1604 if (c == &emptyconstraint)
1605 return c;
1606 }
1607 breg = &event->hw.branch_reg;
1608 if (breg->idx != EXTRA_REG_NONE) {
1609 d = __intel_shared_reg_get_constraints(cpuc, event, breg);
1610 if (d == &emptyconstraint) {
1611 __intel_shared_reg_put_constraints(cpuc, xreg);
1612 c = d;
1613 }
1614 }
1615 return c;
1616 }
1617
1618 struct event_constraint *
1619 x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1620 {
1621 struct event_constraint *c;
1622
1623 if (x86_pmu.event_constraints) {
1624 for_each_event_constraint(c, x86_pmu.event_constraints) {
1625 if ((event->hw.config & c->cmask) == c->code) {
1626 event->hw.flags |= c->flags;
1627 return c;
1628 }
1629 }
1630 }
1631
1632 return &unconstrained;
1633 }
1634
1635 static struct event_constraint *
1636 intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1637 {
1638 struct event_constraint *c;
1639
1640 c = intel_bts_constraints(event);
1641 if (c)
1642 return c;
1643
1644 c = intel_pebs_constraints(event);
1645 if (c)
1646 return c;
1647
1648 c = intel_shared_regs_constraints(cpuc, event);
1649 if (c)
1650 return c;
1651
1652 return x86_get_event_constraints(cpuc, event);
1653 }
1654
1655 static void
1656 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
1657 struct perf_event *event)
1658 {
1659 struct hw_perf_event_extra *reg;
1660
1661 reg = &event->hw.extra_reg;
1662 if (reg->idx != EXTRA_REG_NONE)
1663 __intel_shared_reg_put_constraints(cpuc, reg);
1664
1665 reg = &event->hw.branch_reg;
1666 if (reg->idx != EXTRA_REG_NONE)
1667 __intel_shared_reg_put_constraints(cpuc, reg);
1668 }
1669
1670 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
1671 struct perf_event *event)
1672 {
1673 intel_put_shared_regs_event_constraints(cpuc, event);
1674 }
1675
1676 static void intel_pebs_aliases_core2(struct perf_event *event)
1677 {
1678 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
1679 /*
1680 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
1681 * (0x003c) so that we can use it with PEBS.
1682 *
1683 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
1684 * PEBS capable. However we can use INST_RETIRED.ANY_P
1685 * (0x00c0), which is a PEBS capable event, to get the same
1686 * count.
1687 *
1688 * INST_RETIRED.ANY_P counts the number of cycles that retire
1689 * CNTMASK instructions. By setting CNTMASK to a value (16)
1690 * larger than the maximum number of instructions that can be
1691 * retired per cycle (4) and then inverting the condition, we
1692 * count all cycles that retire 16 or less instructions, which
1693 * is every cycle.
1694 *
1695 * Thereby we gain a PEBS capable cycle counter.
1696 */
1697 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
1698
1699 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
1700 event->hw.config = alt_config;
1701 }
1702 }
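/*
 * Given the format fields defined later in this file (event:0-7,
 * umask:8-15, inv:23, cmask:24-31), the alternative encoding built above
 * works out to
 *
 *	X86_CONFIG(.event = 0xc0, .inv = 1, .cmask = 16)
 *	= (16ULL << 24) | (1ULL << 23) | 0xc0 = 0x108000c0
 *
 * i.e. "cycles in which 16 or fewer instructions retire", which (with at
 * most 4 retired per cycle) is every cycle, but via a PEBS-capable event.
 */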
1703
1704 static void intel_pebs_aliases_snb(struct perf_event *event)
1705 {
1706 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
1707 /*
1708 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
1709 * (0x003c) so that we can use it with PEBS.
1710 *
1711 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
1712 * PEBS capable. However we can use UOPS_RETIRED.ALL
1713 * (0x01c2), which is a PEBS capable event, to get the same
1714 * count.
1715 *
1716 * UOPS_RETIRED.ALL counts the number of cycles that retire
1717 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
1718 * larger than the maximum number of micro-ops that can be
1719 * retired per cycle (4) and then inverting the condition, we
1720 * count all cycles that retire 16 or less micro-ops, which
1721 * is every cycle.
1722 *
1723 * Thereby we gain a PEBS capable cycle counter.
1724 */
1725 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
1726
1727 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
1728 event->hw.config = alt_config;
1729 }
1730 }
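/*
 * Analogous to the core2 alias above, this encoding works out to
 * (16ULL << 24) | (1ULL << 23) | (0x01ULL << 8) | 0xc2 = 0x108001c2,
 * using the same event/umask/inv/cmask format fields.
 */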
1731
1732 static int intel_pmu_hw_config(struct perf_event *event)
1733 {
1734 int ret = x86_pmu_hw_config(event);
1735
1736 if (ret)
1737 return ret;
1738
1739 if (event->attr.precise_ip && x86_pmu.pebs_aliases)
1740 x86_pmu.pebs_aliases(event);
1741
1742 if (intel_pmu_needs_lbr_smpl(event)) {
1743 ret = intel_pmu_setup_lbr_filter(event);
1744 if (ret)
1745 return ret;
1746 }
1747
1748 if (event->attr.type != PERF_TYPE_RAW)
1749 return 0;
1750
1751 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
1752 return 0;
1753
1754 if (x86_pmu.version < 3)
1755 return -EINVAL;
1756
1757 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1758 return -EACCES;
1759
1760 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
1761
1762 return 0;
1763 }
1764
1765 struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
1766 {
1767 if (x86_pmu.guest_get_msrs)
1768 return x86_pmu.guest_get_msrs(nr);
1769 *nr = 0;
1770 return NULL;
1771 }
1772 EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
1773
1774 static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
1775 {
1776 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1777 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
1778
1779 arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
1780 arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
1781 arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
1782 /*
1783 * If a PMU counter has PEBS enabled, it is not enough to disable the
1784 * counter on guest entry, since a PEBS memory write can overshoot the
1785 * guest entry and corrupt guest memory. Disabling PEBS solves the problem.
1786 */
1787 arr[1].msr = MSR_IA32_PEBS_ENABLE;
1788 arr[1].host = cpuc->pebs_enabled;
1789 arr[1].guest = 0;
1790
1791 *nr = 2;
1792 return arr;
1793 }
1794
1795 static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
1796 {
1797 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1798 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
1799 int idx;
1800
1801 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1802 struct perf_event *event = cpuc->events[idx];
1803
1804 arr[idx].msr = x86_pmu_config_addr(idx);
1805 arr[idx].host = arr[idx].guest = 0;
1806
1807 if (!test_bit(idx, cpuc->active_mask))
1808 continue;
1809
1810 arr[idx].host = arr[idx].guest =
1811 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
1812
1813 if (event->attr.exclude_host)
1814 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
1815 else if (event->attr.exclude_guest)
1816 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
1817 }
1818
1819 *nr = x86_pmu.num_counters;
1820 return arr;
1821 }
1822
1823 static void core_pmu_enable_event(struct perf_event *event)
1824 {
1825 if (!event->attr.exclude_host)
1826 x86_pmu_enable_event(event);
1827 }
1828
1829 static void core_pmu_enable_all(int added)
1830 {
1831 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1832 int idx;
1833
1834 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1835 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
1836
1837 if (!test_bit(idx, cpuc->active_mask) ||
1838 cpuc->events[idx]->attr.exclude_host)
1839 continue;
1840
1841 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
1842 }
1843 }
1844
1845 static int hsw_hw_config(struct perf_event *event)
1846 {
1847 int ret = intel_pmu_hw_config(event);
1848
1849 if (ret)
1850 return ret;
1851 if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
1852 return 0;
1853 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
1854
1855 /*
1856 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
1857 	 * PEBS or in ANY-thread mode. Since the results are nonsensical,
1858 	 * forbid this combination.
1859 */
1860 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
1861 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
1862 event->attr.precise_ip > 0))
1863 return -EOPNOTSUPP;
1864
1865 if (event_is_checkpointed(event)) {
1866 /*
1867 * Sampling of checkpointed events can cause situations where
1868 		 * the CPU constantly aborts because of an overflow, which is
1869 * then checkpointed back and ignored. Forbid checkpointing
1870 * for sampling.
1871 *
1872 * But still allow a long sampling period, so that perf stat
1873 * from KVM works.
1874 */
1875 if (event->attr.sample_period > 0 &&
1876 event->attr.sample_period < 0x7fffffff)
1877 return -EOPNOTSUPP;
1878 }
1879 return 0;
1880 }
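/*
 * Usage sketch (illustrative, not from this file): with the in_tx/in_tx_cp
 * format bits exported further down, transactional cycles can be requested
 * through the raw PMU syntax of the perf tool, for example:
 *
 *	perf stat -e cpu/event=0x3c,in_tx=1/ ...
 *	perf stat -e cpu/event=0x3c,in_tx=1,in_tx_cp=1/ ...
 *
 * Combining these bits with precise_ip (PEBS) or the ANY bit is rejected
 * above with -EOPNOTSUPP, and a checkpointed event may only be counted or
 * sampled with a very large period.
 */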
1881
1882 static struct event_constraint counter2_constraint =
1883 EVENT_CONSTRAINT(0, 0x4, 0);
1884
1885 static struct event_constraint *
1886 hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1887 {
1888 struct event_constraint *c = intel_get_event_constraints(cpuc, event);
1889
1890 /* Handle special quirk on in_tx_checkpointed only in counter 2 */
1891 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
1892 if (c->idxmsk64 & (1U << 2))
1893 return &counter2_constraint;
1894 return &emptyconstraint;
1895 }
1896
1897 return c;
1898 }
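/*
 * Illustrative note (not in the original file): counter2_constraint has an
 * index mask of 0x4, i.e. only bit 2 set, so a checkpointed (IN_TX_CP)
 * event is forced onto general-purpose counter 2. If the event's normal
 * constraint does not include counter 2, the empty constraint is returned
 * and scheduling the event fails.
 */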
1899
1900 PMU_FORMAT_ATTR(event, "config:0-7" );
1901 PMU_FORMAT_ATTR(umask, "config:8-15" );
1902 PMU_FORMAT_ATTR(edge, "config:18" );
1903 PMU_FORMAT_ATTR(pc, "config:19" );
1904 PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
1905 PMU_FORMAT_ATTR(inv, "config:23" );
1906 PMU_FORMAT_ATTR(cmask, "config:24-31" );
1907 PMU_FORMAT_ATTR(in_tx, "config:32");
1908 PMU_FORMAT_ATTR(in_tx_cp, "config:33");
1909
1910 static struct attribute *intel_arch_formats_attr[] = {
1911 &format_attr_event.attr,
1912 &format_attr_umask.attr,
1913 &format_attr_edge.attr,
1914 &format_attr_pc.attr,
1915 &format_attr_inv.attr,
1916 &format_attr_cmask.attr,
1917 NULL,
1918 };
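/*
 * Illustrative note (not in the original file): every PMU_FORMAT_ATTR above
 * becomes a file under /sys/bus/event_source/devices/cpu/format/, e.g.
 *
 *	$ cat /sys/bus/event_source/devices/cpu/format/umask
 *	config:8-15
 *
 * which is how the perf tool knows to pack "event=0xd0,umask=0x82" into a
 * raw config value of 0x82d0.
 */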
1919
1920 ssize_t intel_event_sysfs_show(char *page, u64 config)
1921 {
1922 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
1923
1924 return x86_event_sysfs_show(page, config, event);
1925 }
1926
1927 static __initconst const struct x86_pmu core_pmu = {
1928 .name = "core",
1929 .handle_irq = x86_pmu_handle_irq,
1930 .disable_all = x86_pmu_disable_all,
1931 .enable_all = core_pmu_enable_all,
1932 .enable = core_pmu_enable_event,
1933 .disable = x86_pmu_disable_event,
1934 .hw_config = x86_pmu_hw_config,
1935 .schedule_events = x86_schedule_events,
1936 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
1937 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
1938 .event_map = intel_pmu_event_map,
1939 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
1940 .apic = 1,
1941 /*
1942 * Intel PMCs cannot be accessed sanely above 32 bit width,
1943 * so we install an artificial 1<<31 period regardless of
1944 * the generic event period:
1945 */
1946 .max_period = (1ULL << 31) - 1,
1947 .get_event_constraints = intel_get_event_constraints,
1948 .put_event_constraints = intel_put_event_constraints,
1949 .event_constraints = intel_core_event_constraints,
1950 .guest_get_msrs = core_guest_get_msrs,
1951 .format_attrs = intel_arch_formats_attr,
1952 .events_sysfs_show = intel_event_sysfs_show,
1953 };
1954
1955 struct intel_shared_regs *allocate_shared_regs(int cpu)
1956 {
1957 struct intel_shared_regs *regs;
1958 int i;
1959
1960 regs = kzalloc_node(sizeof(struct intel_shared_regs),
1961 GFP_KERNEL, cpu_to_node(cpu));
1962 if (regs) {
1963 /*
1964 * initialize the locks to keep lockdep happy
1965 */
1966 for (i = 0; i < EXTRA_REG_MAX; i++)
1967 raw_spin_lock_init(&regs->regs[i].lock);
1968
1969 regs->core_id = -1;
1970 }
1971 return regs;
1972 }
1973
1974 static int intel_pmu_cpu_prepare(int cpu)
1975 {
1976 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1977
1978 if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map))
1979 return NOTIFY_OK;
1980
1981 cpuc->shared_regs = allocate_shared_regs(cpu);
1982 if (!cpuc->shared_regs)
1983 return NOTIFY_BAD;
1984
1985 return NOTIFY_OK;
1986 }
1987
1988 static void intel_pmu_cpu_starting(int cpu)
1989 {
1990 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1991 int core_id = topology_core_id(cpu);
1992 int i;
1993
1994 init_debug_store_on_cpu(cpu);
1995 /*
1996 * Deal with CPUs that don't clear their LBRs on power-up.
1997 */
1998 intel_pmu_lbr_reset();
1999
2000 cpuc->lbr_sel = NULL;
2001
2002 if (!cpuc->shared_regs)
2003 return;
2004
2005 if (!(x86_pmu.er_flags & ERF_NO_HT_SHARING)) {
2006 for_each_cpu(i, topology_thread_cpumask(cpu)) {
2007 struct intel_shared_regs *pc;
2008
2009 pc = per_cpu(cpu_hw_events, i).shared_regs;
2010 if (pc && pc->core_id == core_id) {
2011 cpuc->kfree_on_online = cpuc->shared_regs;
2012 cpuc->shared_regs = pc;
2013 break;
2014 }
2015 }
2016 cpuc->shared_regs->core_id = core_id;
2017 cpuc->shared_regs->refcnt++;
2018 }
2019
2020 if (x86_pmu.lbr_sel_map)
2021 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
2022 }
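/*
 * Illustrative walk-through (not in the original file): on a two-thread
 * core, the first sibling to come online keeps the intel_shared_regs it
 * allocated in intel_pmu_cpu_prepare() and stamps it with its core_id.
 * When the second sibling starts, the loop above finds that structure via
 * the matching core_id, queues its own copy for freeing through
 * kfree_on_online, points shared_regs at the shared copy and bumps refcnt
 * to 2. With ERF_NO_HT_SHARING set, each thread keeps its private copy.
 */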
2023
2024 static void intel_pmu_cpu_dying(int cpu)
2025 {
2026 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2027 struct intel_shared_regs *pc;
2028
2029 pc = cpuc->shared_regs;
2030 if (pc) {
2031 if (pc->core_id == -1 || --pc->refcnt == 0)
2032 kfree(pc);
2033 cpuc->shared_regs = NULL;
2034 }
2035
2036 fini_debug_store_on_cpu(cpu);
2037 }
2038
2039 static void intel_pmu_flush_branch_stack(void)
2040 {
2041 /*
2042 	 * The Intel LBR does not tag entries with the
2043 	 * PID of the current task, so we need to
2044 	 * flush it on a context switch.
2045 	 * For now, we simply reset it.
2046 */
2047 if (x86_pmu.lbr_nr)
2048 intel_pmu_lbr_reset();
2049 }
2050
2051 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
2052
2053 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
2054
2055 static struct attribute *intel_arch3_formats_attr[] = {
2056 &format_attr_event.attr,
2057 &format_attr_umask.attr,
2058 &format_attr_edge.attr,
2059 &format_attr_pc.attr,
2060 &format_attr_any.attr,
2061 &format_attr_inv.attr,
2062 &format_attr_cmask.attr,
2063 &format_attr_in_tx.attr,
2064 &format_attr_in_tx_cp.attr,
2065
2066 &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
2067 &format_attr_ldlat.attr, /* PEBS load latency */
2068 NULL,
2069 };
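/*
 * Usage sketch (illustrative, not from this file): the two extra-register
 * formats map onto config1, so the perf tool can program them directly.
 * The event/umask/value encodings below are examples only and vary by
 * model:
 *
 *	perf stat   -e cpu/event=0xb7,umask=0x01,offcore_rsp=0x8fff/ ...
 *	perf record -e cpu/event=0xcd,umask=0x01,ldlat=30/pp ...
 */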
2070
2071 static __initconst const struct x86_pmu intel_pmu = {
2072 .name = "Intel",
2073 .handle_irq = intel_pmu_handle_irq,
2074 .disable_all = intel_pmu_disable_all,
2075 .enable_all = intel_pmu_enable_all,
2076 .enable = intel_pmu_enable_event,
2077 .disable = intel_pmu_disable_event,
2078 .hw_config = intel_pmu_hw_config,
2079 .schedule_events = x86_schedule_events,
2080 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
2081 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
2082 .event_map = intel_pmu_event_map,
2083 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
2084 .apic = 1,
2085 /*
2086 * Intel PMCs cannot be accessed sanely above 32 bit width,
2087 * so we install an artificial 1<<31 period regardless of
2088 * the generic event period:
2089 */
2090 .max_period = (1ULL << 31) - 1,
2091 .get_event_constraints = intel_get_event_constraints,
2092 .put_event_constraints = intel_put_event_constraints,
2093 .pebs_aliases = intel_pebs_aliases_core2,
2094
2095 .format_attrs = intel_arch3_formats_attr,
2096 .events_sysfs_show = intel_event_sysfs_show,
2097
2098 .cpu_prepare = intel_pmu_cpu_prepare,
2099 .cpu_starting = intel_pmu_cpu_starting,
2100 .cpu_dying = intel_pmu_cpu_dying,
2101 .guest_get_msrs = intel_guest_get_msrs,
2102 .flush_branch_stack = intel_pmu_flush_branch_stack,
2103 };
2104
2105 static __init void intel_clovertown_quirk(void)
2106 {
2107 /*
2108 * PEBS is unreliable due to:
2109 *
2110 * AJ67 - PEBS may experience CPL leaks
2111 * AJ68 - PEBS PMI may be delayed by one event
2112 	 * AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12] is set
2113 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
2114 *
2115 * AJ67 could be worked around by restricting the OS/USR flags.
2116 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
2117 *
2118 * AJ106 could possibly be worked around by not allowing LBR
2119 * usage from PEBS, including the fixup.
2120 * AJ68 could possibly be worked around by always programming
2121 * a pebs_event_reset[0] value and coping with the lost events.
2122 *
2123 * But taken together it might just make sense to not enable PEBS on
2124 * these chips.
2125 */
2126 pr_warn("PEBS disabled due to CPU errata\n");
2127 x86_pmu.pebs = 0;
2128 x86_pmu.pebs_constraints = NULL;
2129 }
2130
2131 static int intel_snb_pebs_broken(int cpu)
2132 {
2133 u32 rev = UINT_MAX; /* default to broken for unknown models */
2134
2135 switch (cpu_data(cpu).x86_model) {
2136 case 42: /* SNB */
2137 rev = 0x28;
2138 break;
2139
2140 case 45: /* SNB-EP */
2141 switch (cpu_data(cpu).x86_mask) {
2142 case 6: rev = 0x618; break;
2143 case 7: rev = 0x70c; break;
2144 }
2145 }
2146
2147 return (cpu_data(cpu).microcode < rev);
2148 }
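/*
 * Worked example (illustrative): a model 42 (SNB) part running microcode
 * revision 0x25 is reported as broken (0x25 < 0x28), while revision 0x28
 * or later is treated as fixed. Unknown models keep rev == UINT_MAX and
 * are therefore conservatively reported as broken as well.
 */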
2149
2150 static void intel_snb_check_microcode(void)
2151 {
2152 int pebs_broken = 0;
2153 int cpu;
2154
2155 get_online_cpus();
2156 for_each_online_cpu(cpu) {
2157 if ((pebs_broken = intel_snb_pebs_broken(cpu)))
2158 break;
2159 }
2160 put_online_cpus();
2161
2162 if (pebs_broken == x86_pmu.pebs_broken)
2163 return;
2164
2165 /*
2166 	 * Serialized by the microcode lock.
2167 */
2168 if (x86_pmu.pebs_broken) {
2169 pr_info("PEBS enabled due to microcode update\n");
2170 x86_pmu.pebs_broken = 0;
2171 } else {
2172 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
2173 x86_pmu.pebs_broken = 1;
2174 }
2175 }
2176
2177 static __init void intel_sandybridge_quirk(void)
2178 {
2179 x86_pmu.check_microcode = intel_snb_check_microcode;
2180 intel_snb_check_microcode();
2181 }
2182
2183 static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
2184 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
2185 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
2186 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
2187 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
2188 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
2189 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
2190 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
2191 };
2192
2193 static __init void intel_arch_events_quirk(void)
2194 {
2195 int bit;
2196
2197 	/* disable events that CPUID reports as not present */
2198 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
2199 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
2200 pr_warn("CPUID marked event: \'%s\' unavailable\n",
2201 intel_arch_events_map[bit].name);
2202 }
2203 }
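/*
 * Note (illustrative, not in the original file): x86_pmu.events_mask is
 * loaded from CPUID leaf 0xA EBX, where a set bit means the corresponding
 * architectural event is *not* available, which is why each set bit gets
 * its intel_perfmon_event_map entry cleared here.
 */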
2204
2205 static __init void intel_nehalem_quirk(void)
2206 {
2207 union cpuid10_ebx ebx;
2208
2209 ebx.full = x86_pmu.events_maskl;
2210 if (ebx.split.no_branch_misses_retired) {
2211 /*
2212 * Erratum AAJ80 detected, we work it around by using
2213 * the BR_MISP_EXEC.ANY event. This will over-count
2214 * branch-misses, but it's still much better than the
2215 * architectural event which is often completely bogus:
2216 */
2217 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
2218 ebx.split.no_branch_misses_retired = 0;
2219 x86_pmu.events_maskl = ebx.full;
2220 pr_info("CPU erratum AAJ80 worked around\n");
2221 }
2222 }
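/*
 * Illustrative note (not in the original file): 0x7f89 packs event select
 * 0x89 with umask 0x7f, i.e. BR_MISP_EXEC counting all mispredicted
 * executed branches, which is why it over-counts relative to the retired
 * branch-miss event but remains usable as a substitute.
 */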
2223
2224 EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
2225 EVENT_ATTR_STR(mem-stores,	mem_st_hsw,	"event=0xd0,umask=0x82");
2226
2227 /* Haswell special events */
2228 EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1");
2229 EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2");
2230 EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4");
2231 EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
2232 EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
2233 EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1");
2234 EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2");
2235 EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4");
2236 EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
2237 EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
2238 EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
2239 EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
2240
2241 static struct attribute *hsw_events_attrs[] = {
2242 EVENT_PTR(tx_start),
2243 EVENT_PTR(tx_commit),
2244 EVENT_PTR(tx_abort),
2245 EVENT_PTR(tx_capacity),
2246 EVENT_PTR(tx_conflict),
2247 EVENT_PTR(el_start),
2248 EVENT_PTR(el_commit),
2249 EVENT_PTR(el_abort),
2250 EVENT_PTR(el_capacity),
2251 EVENT_PTR(el_conflict),
2252 EVENT_PTR(cycles_t),
2253 EVENT_PTR(cycles_ct),
2254 EVENT_PTR(mem_ld_hsw),
2255 EVENT_PTR(mem_st_hsw),
2256 NULL
2257 };
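/*
 * Usage sketch (illustrative, not from this file): each entry above is
 * exported under /sys/bus/event_source/devices/cpu/events/, so the perf
 * tool can use the symbolic names directly, for example:
 *
 *	perf stat -e tx-start,tx-commit,tx-abort,cycles-t,cycles-ct ./workload
 *
 * which expands to the event=/umask=/in_tx= strings defined above.
 */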
2258
2259 __init int intel_pmu_init(void)
2260 {
2261 union cpuid10_edx edx;
2262 union cpuid10_eax eax;
2263 union cpuid10_ebx ebx;
2264 struct event_constraint *c;
2265 unsigned int unused;
2266 int version;
2267
2268 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2269 switch (boot_cpu_data.x86) {
2270 case 0x6:
2271 return p6_pmu_init();
2272 case 0xb:
2273 return knc_pmu_init();
2274 case 0xf:
2275 return p4_pmu_init();
2276 }
2277 return -ENODEV;
2278 }
2279
2280 /*
2281 * Check whether the Architectural PerfMon supports
2282 * Branch Misses Retired hw_event or not.
2283 */
2284 cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
2285 if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
2286 return -ENODEV;
2287
2288 version = eax.split.version_id;
2289 if (version < 2)
2290 x86_pmu = core_pmu;
2291 else
2292 x86_pmu = intel_pmu;
2293
2294 x86_pmu.version = version;
2295 x86_pmu.num_counters = eax.split.num_counters;
2296 x86_pmu.cntval_bits = eax.split.bit_width;
2297 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
2298
2299 x86_pmu.events_maskl = ebx.full;
2300 x86_pmu.events_mask_len = eax.split.mask_length;
2301
2302 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
2303
2304 /*
2305 * Quirk: v2 perfmon does not report fixed-purpose events, so
2306 * assume at least 3 events:
2307 */
2308 if (version > 1)
2309 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
2310
2311 if (boot_cpu_has(X86_FEATURE_PDCM)) {
2312 u64 capabilities;
2313
2314 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
2315 x86_pmu.intel_cap.capabilities = capabilities;
2316 }
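	/*
	 * Note (illustrative, not in the original file): per the SDM,
	 * MSR_IA32_PERF_CAPABILITIES exists only when CPUID advertises PDCM.
	 * Bit 13 of it (full-width counter writes) is what the
	 * full_width_write check near the end of this function keys off to
	 * switch the counter base to MSR_IA32_PMC0.
	 */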
2317
2318 intel_ds_init();
2319
2320 x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
2321
2322 /*
2323 * Install the hw-cache-events table:
2324 */
2325 switch (boot_cpu_data.x86_model) {
2326 case 14: /* 65 nm core solo/duo, "Yonah" */
2327 pr_cont("Core events, ");
2328 break;
2329
2330 case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
2331 x86_add_quirk(intel_clovertown_quirk);
2332 case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2333 case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2334 case 29: /* six-core 45 nm xeon "Dunnington" */
2335 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
2336 sizeof(hw_cache_event_ids));
2337
2338 intel_pmu_lbr_init_core();
2339
2340 x86_pmu.event_constraints = intel_core2_event_constraints;
2341 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
2342 pr_cont("Core2 events, ");
2343 break;
2344
2345 case 26: /* 45 nm nehalem, "Bloomfield" */
2346 case 30: /* 45 nm nehalem, "Lynnfield" */
2347 case 46: /* 45 nm nehalem-ex, "Beckton" */
2348 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
2349 sizeof(hw_cache_event_ids));
2350 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
2351 sizeof(hw_cache_extra_regs));
2352
2353 intel_pmu_lbr_init_nhm();
2354
2355 x86_pmu.event_constraints = intel_nehalem_event_constraints;
2356 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
2357 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
2358 x86_pmu.extra_regs = intel_nehalem_extra_regs;
2359
2360 x86_pmu.cpu_events = nhm_events_attrs;
2361
2362 /* UOPS_ISSUED.STALLED_CYCLES */
2363 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2364 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
2365 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
2366 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
2367 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
2368
2369 x86_add_quirk(intel_nehalem_quirk);
2370
2371 pr_cont("Nehalem events, ");
2372 break;
2373
2374 case 28: /* Atom */
2375 case 38: /* Lincroft */
2376 case 39: /* Penwell */
2377 case 53: /* Cloverview */
2378 case 54: /* Cedarview */
2379 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
2380 sizeof(hw_cache_event_ids));
2381
2382 intel_pmu_lbr_init_atom();
2383
2384 x86_pmu.event_constraints = intel_gen_event_constraints;
2385 x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
2386 pr_cont("Atom events, ");
2387 break;
2388
2389 case 55: /* Atom 22nm "Silvermont" */
2390 case 77: /* Avoton "Silvermont" */
2391 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
2392 sizeof(hw_cache_event_ids));
2393 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
2394 sizeof(hw_cache_extra_regs));
2395
2396 intel_pmu_lbr_init_atom();
2397
2398 x86_pmu.event_constraints = intel_slm_event_constraints;
2399 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
2400 x86_pmu.extra_regs = intel_slm_extra_regs;
2401 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2402 pr_cont("Silvermont events, ");
2403 break;
2404
2405 case 37: /* 32 nm nehalem, "Clarkdale" */
2406 case 44: /* 32 nm nehalem, "Gulftown" */
2407 case 47: /* 32 nm Xeon E7 */
2408 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
2409 sizeof(hw_cache_event_ids));
2410 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
2411 sizeof(hw_cache_extra_regs));
2412
2413 intel_pmu_lbr_init_nhm();
2414
2415 x86_pmu.event_constraints = intel_westmere_event_constraints;
2416 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
2417 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
2418 x86_pmu.extra_regs = intel_westmere_extra_regs;
2419 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2420
2421 x86_pmu.cpu_events = nhm_events_attrs;
2422
2423 /* UOPS_ISSUED.STALLED_CYCLES */
2424 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2425 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
2426 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
2427 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
2428 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
2429
2430 pr_cont("Westmere events, ");
2431 break;
2432
2433 case 42: /* SandyBridge */
2434 	case 45: /* SandyBridge, "Romley-EP" */
2435 x86_add_quirk(intel_sandybridge_quirk);
2436 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
2437 sizeof(hw_cache_event_ids));
2438 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
2439 sizeof(hw_cache_extra_regs));
2440
2441 intel_pmu_lbr_init_snb();
2442
2443 x86_pmu.event_constraints = intel_snb_event_constraints;
2444 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
2445 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
2446 if (boot_cpu_data.x86_model == 45)
2447 x86_pmu.extra_regs = intel_snbep_extra_regs;
2448 else
2449 x86_pmu.extra_regs = intel_snb_extra_regs;
2450 /* all extra regs are per-cpu when HT is on */
2451 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2452 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
2453
2454 x86_pmu.cpu_events = snb_events_attrs;
2455
2456 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
2457 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2458 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
2459 		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
2460 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
2461 X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
2462
2463 pr_cont("SandyBridge events, ");
2464 break;
2465 case 58: /* IvyBridge */
2466 case 62: /* IvyBridge EP */
2467 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
2468 sizeof(hw_cache_event_ids));
2469 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
2470 sizeof(hw_cache_extra_regs));
2471
2472 intel_pmu_lbr_init_snb();
2473
2474 x86_pmu.event_constraints = intel_ivb_event_constraints;
2475 x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
2476 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
2477 if (boot_cpu_data.x86_model == 62)
2478 x86_pmu.extra_regs = intel_snbep_extra_regs;
2479 else
2480 x86_pmu.extra_regs = intel_snb_extra_regs;
2481 /* all extra regs are per-cpu when HT is on */
2482 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2483 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
2484
2485 x86_pmu.cpu_events = snb_events_attrs;
2486
2487 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
2488 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2489 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
2490
2491 pr_cont("IvyBridge events, ");
2492 break;
2493
2494
2495 case 60: /* Haswell Client */
2496 case 70:
2497 case 71:
2498 case 63:
2499 case 69:
2500 x86_pmu.late_ack = true;
2501 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
2502 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
2503
2504 intel_pmu_lbr_init_snb();
2505
2506 x86_pmu.event_constraints = intel_hsw_event_constraints;
2507 x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
2508 x86_pmu.extra_regs = intel_snb_extra_regs;
2509 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
2510 /* all extra regs are per-cpu when HT is on */
2511 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2512 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
2513
2514 x86_pmu.hw_config = hsw_hw_config;
2515 x86_pmu.get_event_constraints = hsw_get_event_constraints;
2516 x86_pmu.cpu_events = hsw_events_attrs;
2517 x86_pmu.lbr_double_abort = true;
2518 pr_cont("Haswell events, ");
2519 break;
2520
2521 default:
2522 switch (x86_pmu.version) {
2523 case 1:
2524 x86_pmu.event_constraints = intel_v1_event_constraints;
2525 pr_cont("generic architected perfmon v1, ");
2526 break;
2527 default:
2528 /*
2529 * default constraints for v2 and up
2530 */
2531 x86_pmu.event_constraints = intel_gen_event_constraints;
2532 pr_cont("generic architected perfmon, ");
2533 break;
2534 }
2535 }
2536
2537 if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
2538 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2539 x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
2540 x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
2541 }
2542 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
2543
2544 if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
2545 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2546 x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
2547 x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
2548 }
2549
2550 x86_pmu.intel_ctrl |=
2551 ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
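	/*
	 * Worked example (illustrative): with 4 generic and 3 fixed counters,
	 * intel_ctrl ends up as ((1 << 4) - 1) | (((1ULL << 3) - 1) << 32),
	 * i.e. 0x70000000f: enable bits 0-3 for the generic counters and
	 * bits 32-34 for the fixed ones (INTEL_PMC_IDX_FIXED is 32).
	 */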
2552
2553 if (x86_pmu.event_constraints) {
2554 /*
2555 		 * The event on fixed counter 2 (REF_CYCLES) only works on that
2556 		 * counter, so do not extend its mask to the generic counters.
2557 */
2558 for_each_event_constraint(c, x86_pmu.event_constraints) {
2559 if (c->cmask != FIXED_EVENT_FLAGS
2560 || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
2561 continue;
2562 }
2563
2564 c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
2565 c->weight += x86_pmu.num_counters;
2566 }
2567 }
2568
2569 /* Support full width counters using alternative MSR range */
2570 if (x86_pmu.intel_cap.full_width_write) {
2571 x86_pmu.max_period = x86_pmu.cntval_mask;
2572 x86_pmu.perfctr = MSR_IA32_PMC0;
2573 pr_cont("full-width counters, ");
2574 }
2575
2576 return 0;
2577 }