#ifdef CONFIG_CPU_SUP_INTEL

#define MAX_EXTRA_REGS 2

/*
 * Per register state.
 */
struct er_account {
	int			ref;		/* reference count */
	unsigned int		extra_reg;	/* extra MSR number */
	u64			extra_config;	/* extra MSR config */
};

/*
 * Per core state
 * This is used to coordinate shared registers for HT threads.
 */
struct intel_percore {
	raw_spinlock_t		lock;		/* protect structure */
	struct er_account	regs[MAX_EXTRA_REGS];
	int			refcnt;		/* number of threads */
	unsigned		core_id;
};
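
/*
 * As the CPU hotplug callbacks below arrange, the HT siblings of a
 * physical core end up sharing one struct intel_percore; the er_account
 * reference counts arbitrate the core-shared extra MSRs (e.g.
 * OFFCORE_RESPONSE) between the sibling threads.
 */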

/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
};
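
/*
 * Each entry is (unit mask << 8) | event select. For example,
 * PERF_COUNT_HW_CPU_CYCLES maps to 0x003c: event select 0x3c with unit
 * mask 0x00, the architectural UnHalted Core Cycles event.
 */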

static struct event_constraint intel_core_event_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};
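
/*
 * The second argument of INTEL_EVENT_CONSTRAINT() is a bitmask of the
 * general-purpose counters the event may be scheduled on: 0x1 pins the
 * event to PMC0, 0x2 to PMC1, 0x3 would allow either of the two.
 */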

static struct event_constraint intel_core2_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/*
	 * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
	 * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
	 * ratio between these counters.
	 */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};
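
/*
 * FIXED_EVENT_CONSTRAINT(code, n) binds the architectural encoding
 * 'code' to fixed-function counter n, so INST_RETIRED.ANY (0x00c0) and
 * CPU_CLK_UNHALTED.CORE (0x003c) are steered to fixed counters 0 and 1
 * rather than occupying general-purpose counters.
 */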

static struct event_constraint intel_nehalem_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_nehalem_extra_regs[] =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff),
	EVENT_EXTRA_END
};
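
/*
 * INTEL_EVENT_EXTRA_REG(event, msr, vm): event 0xb7 (OFFCORE_RESPONSE)
 * takes its request/response selection from a companion MSR; 'vm' is
 * the mask of bits that may validly be written into that MSR.
 */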

static struct event_constraint intel_nehalem_percore_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0xb7, 0),
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_westmere_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_snb_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_EVENT_CONSTRAINT(0xb7, 0x1), /* OFF_CORE_RESPONSE_0 */
	INTEL_EVENT_CONSTRAINT(0xbb, 0x8), /* OFF_CORE_RESPONSE_1 */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_westmere_extra_regs[] =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff),
	EVENT_EXTRA_END
};

static struct event_constraint intel_westmere_percore_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0xb7, 0),
	INTEL_EVENT_CONSTRAINT(0xbb, 0),
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}

static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	/*
	 * TBD: Need Off-core Response Performance Monitoring support
	 */
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01bb,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE_0.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE_1.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01bb,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01bb,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01bb,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE_1.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01bb,
		/* OFFCORE_RESPONSE_0.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01bb,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

/*
 * OFFCORE_RESPONSE MSR bits (subset), see IA32 SDM Vol 3 30.6.1.3
 */

#define DMND_DATA_RD	(1 << 0)
#define DMND_RFO	(1 << 1)
#define DMND_WB		(1 << 3)
#define PF_DATA_RD	(1 << 4)
#define PF_DATA_RFO	(1 << 5)
#define RESP_UNCORE_HIT	(1 << 8)
#define RESP_MISS	(0xf600)	/* non uncore hit */
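
/*
 * A value for the OFFCORE_RSP MSR combines a request-type mask with a
 * response-type mask. For example, DMND_DATA_RD|RESP_UNCORE_HIT ==
 * 0x101 counts demand data reads that hit in the uncore, while
 * DMND_DATA_RD|RESP_MISS counts the non-uncore-hit (miss) cases.
 */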

static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = DMND_DATA_RD|RESP_UNCORE_HIT,
		[ C(RESULT_MISS)   ] = DMND_DATA_RD|RESP_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = DMND_RFO|DMND_WB|RESP_UNCORE_HIT,
		[ C(RESULT_MISS)   ] = DMND_RFO|DMND_WB|RESP_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_UNCORE_HIT,
		[ C(RESULT_MISS)   ] = PF_DATA_RD|PF_DATA_RFO|RESP_MISS,
	},
 }
};

static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();

	intel_pmu_pebs_disable_all();
	intel_pmu_lbr_disable_all();
}

static void intel_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all();
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[X86_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}

/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;
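
	/*
	 * Each nhm_magic value is an event select in the low byte plus
	 * 0x43 in bits 16-23, i.e. ARCH_PERFMON_EVENTSEL_USR |
	 * ARCH_PERFMON_EVENTSEL_OS | ARCH_PERFMON_EVENTSEL_ENABLE:
	 * 0x4300B5 programs event 0xB5, counting in both rings, enabled.
	 */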

	/*
	 * The errata requires the following steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) Set bits 0-3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we choose are a little different from above.
	 * A) To reduce MSR operations, we don't run step 1) as they
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with the magic number;
	 * C) With step 5), we do clear only when the PERFEVTSELx is
	 *    not used currently.
	 * D) Call x86_perf_event_set_period to restore PMCx;
	 */

	/* We always operate on 4 pairs of PERF counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}

static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}

static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);
}

static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);
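
	/*
	 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL holds one 4-bit control field
	 * per fixed counter, so counter idx owns bits [idx*4+3:idx*4].
	 * E.g. user+kernel counting with PMI on fixed counter 1 yields
	 * bits = (0x8|0x2|0x1) << 4 = 0xb0.
	 */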

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
		if (!__this_cpu_read(cpu_hw_events.enabled))
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc);
		return;
	}

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_enable(event);

	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
static int intel_pmu_save_and_restart(struct perf_event *event)
{
	x86_perf_event_update(event);
	return x86_perf_event_set_period(event);
}

static void intel_pmu_reset(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	printk("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
		checking_wrmsrl(x86_pmu_event_addr(idx), 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 status;
	int handled;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	/*
	 * Some chipsets need to unmask the LVTPC in a particular spot
	 * inside the nmi handler. As a result, the unmasking was pushed
	 * into all the nmi handlers.
	 *
	 * This handler doesn't seem to have any issues with the unmasking
	 * so it was left at the top.
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);

	intel_pmu_disable_all();
	handled = intel_pmu_drain_bts_buffer();
	status = intel_pmu_get_status();
	if (!status) {
		intel_pmu_enable_all(0);
		return handled;
	}

	loops = 0;
again:
	intel_pmu_ack_status(status);
	if (++loops > 100) {
		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
		perf_event_print_debug();
		intel_pmu_reset();
		goto done;
	}

	inc_irq_stat(apic_perf_irqs);

	intel_pmu_lbr_read();

	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
		handled++;
		x86_pmu.drain_pebs(regs);
	}

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		data.period = event->hw.last_period;

		if (perf_event_overflow(event, 1, &data, regs))
			x86_pmu_stop(event, 0);
	}

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

done:
	intel_pmu_enable_all(0);
	return handled;
}

static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	if (event->attr.freq)
		return NULL;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
		return &bts_constraint;

	return NULL;
}
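
/*
 * I.e. a fixed-period branch-instructions event with period 1 (e.g.
 * what "perf record -e branches -c 1" requests) is exactly the
 * workload BTS can service, so it is steered to the BTS "counter"
 * instead of consuming a generic PMC.
 */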

static struct event_constraint *
intel_percore_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int e = hwc->config & ARCH_PERFMON_EVENTSEL_EVENT;
	struct event_constraint *c;
	struct intel_percore *pc;
	struct er_account *era;
	int i;
	int free_slot;
	int found;

	if (!x86_pmu.percore_constraints || hwc->extra_alloc)
		return NULL;

	for (c = x86_pmu.percore_constraints; c->cmask; c++) {
		if (e != c->code)
			continue;

		/*
		 * Allocate resource per core.
		 */
		pc = cpuc->per_core;
		if (!pc)
			break;
		c = &emptyconstraint;
		raw_spin_lock(&pc->lock);
		free_slot = -1;
		found = 0;
		for (i = 0; i < MAX_EXTRA_REGS; i++) {
			era = &pc->regs[i];
			if (era->ref > 0 && hwc->extra_reg == era->extra_reg) {
				/* Allow sharing same config */
				if (hwc->extra_config == era->extra_config) {
					era->ref++;
					cpuc->percore_used = 1;
					hwc->extra_alloc = 1;
					c = NULL;
				}
				/* else conflict */
				found = 1;
				break;
			} else if (era->ref == 0 && free_slot == -1)
				free_slot = i;
		}
		if (!found && free_slot != -1) {
			era = &pc->regs[free_slot];
			era->ref = 1;
			era->extra_reg = hwc->extra_reg;
			era->extra_config = hwc->extra_config;
			cpuc->percore_used = 1;
			hwc->extra_alloc = 1;
			c = NULL;
		}
		raw_spin_unlock(&pc->lock);
		return c;
	}

	return NULL;
}

static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	c = intel_bts_constraints(event);
	if (c)
		return c;

	c = intel_pebs_constraints(event);
	if (c)
		return c;

	c = intel_percore_constraints(cpuc, event);
	if (c)
		return c;

	return x86_get_event_constraints(cpuc, event);
}

static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	struct extra_reg *er;
	struct intel_percore *pc;
	struct er_account *era;
	struct hw_perf_event *hwc = &event->hw;
	int i, allref;

	if (!cpuc->percore_used)
		return;

	for (er = x86_pmu.extra_regs; er->msr; er++) {
		if (er->event != (hwc->config & er->config_mask))
			continue;

		pc = cpuc->per_core;
		raw_spin_lock(&pc->lock);
		for (i = 0; i < MAX_EXTRA_REGS; i++) {
			era = &pc->regs[i];
			if (era->ref > 0 &&
			    era->extra_config == hwc->extra_config &&
			    era->extra_reg == er->msr) {
				era->ref--;
				hwc->extra_alloc = 0;
				break;
			}
		}
		allref = 0;
		for (i = 0; i < MAX_EXTRA_REGS; i++)
			allref += pc->regs[i].ref;
		if (allref == 0)
			cpuc->percore_used = 0;
		raw_spin_unlock(&pc->lock);
		break;
	}
}

static int intel_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.precise_ip &&
	    (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use INST_RETIRED.ANY_P
		 * (0x00c0), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * INST_RETIRED.ANY_P counts the number of cycles that retire
		 * CNTMASK instructions. By setting CNTMASK to a value (16)
		 * larger than the maximum number of instructions that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or fewer instructions, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = 0x108000c0; /* INST_RETIRED.TOTAL_CYCLES */
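
		/*
		 * Decoding 0x108000c0: event select 0xc0
		 * (INST_RETIRED.ANY_P), CNTMASK (bits 24-31) = 0x10 = 16,
		 * and the INV bit (bit 23) set, which gives the "cycles
		 * retiring fewer than 16 instructions" condition above.
		 */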

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
		return 0;

	if (x86_pmu.version < 3)
		return -EINVAL;

	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

	return 0;
}

static __initconst const struct x86_pmu core_pmu = {
	.name			= "core",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	.apic			= 1,
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.event_constraints	= intel_core_event_constraints,
};

static int intel_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	if (!cpu_has_ht_siblings())
		return NOTIFY_OK;

	cpuc->per_core = kzalloc_node(sizeof(struct intel_percore),
				      GFP_KERNEL, cpu_to_node(cpu));
	if (!cpuc->per_core)
		return NOTIFY_BAD;

	raw_spin_lock_init(&cpuc->per_core->lock);
	cpuc->per_core->core_id = -1;
	return NOTIFY_OK;
}

static void intel_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	int core_id = topology_core_id(cpu);
	int i;

	init_debug_store_on_cpu(cpu);
	/*
	 * Deal with CPUs that don't clear their LBRs on power-up.
	 */
	intel_pmu_lbr_reset();

	if (!cpu_has_ht_siblings())
		return;

	for_each_cpu(i, topology_thread_cpumask(cpu)) {
		struct intel_percore *pc = per_cpu(cpu_hw_events, i).per_core;

		if (pc && pc->core_id == core_id) {
			kfree(cpuc->per_core);
			cpuc->per_core = pc;
			break;
		}
	}

	cpuc->per_core->core_id = core_id;
	cpuc->per_core->refcnt++;
}

static void intel_pmu_cpu_dying(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct intel_percore *pc = cpuc->per_core;

	if (pc) {
		if (pc->core_id == -1 || --pc->refcnt == 0)
			kfree(pc);
		cpuc->per_core = NULL;
	}

	fini_debug_store_on_cpu(cpu);
}

static __initconst const struct x86_pmu intel_pmu = {
	.name			= "Intel",
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.hw_config		= intel_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	.apic			= 1,
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,

	.cpu_prepare		= intel_pmu_cpu_prepare,
	.cpu_starting		= intel_pmu_cpu_starting,
	.cpu_dying		= intel_pmu_cpu_dying,
};

static void intel_clovertown_quirks(void)
{
	/*
	 * PEBS is unreliable due to:
	 *
	 *   AJ67  - PEBS may experience CPL leaks
	 *   AJ68  - PEBS PMI may be delayed by one event
	 *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
	 *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
	 *
	 * AJ67 could be worked around by restricting the OS/USR flags.
	 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
	 *
	 * AJ106 could possibly be worked around by not allowing LBR
	 * usage from PEBS, including the fixup.
	 * AJ68 could possibly be worked around by always programming
	 * a pebs_event_reset[0] value and coping with the lost events.
	 *
	 * But taken together it might just make sense to not enable PEBS on
	 * these chips.
	 */
	printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
	x86_pmu.pebs = 0;
	x86_pmu.pebs_constraints = NULL;
}

static __init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
		case 0xf:
			return p4_pmu_init();
		}
		return -ENODEV;
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * the Branch Misses Retired hw_event.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version		= version;
	x86_pmu.num_counters	= eax.split.num_counters;
	x86_pmu.cntval_bits	= eax.split.bit_width;
	x86_pmu.cntval_mask	= (1ULL << eax.split.bit_width) - 1;

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	/*
	 * v2 and above have a perf capabilities MSR
	 */
	if (version > 1) {
		u64 capabilities;

		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
		x86_pmu.intel_cap.capabilities = capabilities;
	}

	intel_ds_init();

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65 nm core solo/duo, "Yonah" */
		pr_cont("Core events, ");
		break;

	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
		x86_pmu.quirks = intel_clovertown_quirks;
		/* fall through */
	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 29: /* six-core 45 nm xeon "Dunnington" */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
		pr_cont("Core2 events, ");
		break;

	case 26: /* 45 nm nehalem, "Bloomfield" */
	case 30: /* 45 nm nehalem, "Lynnfield" */
	case 46: /* 45 nm nehalem-ex, "Beckton" */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
		x86_pmu.percore_constraints = intel_nehalem_percore_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.extra_regs = intel_nehalem_extra_regs;

		if (ebx & 0x40) {
			/*
			 * Erratum AAJ80 detected, we work it around by using
			 * the BR_MISP_EXEC.ANY event. This will over-count
			 * branch-misses, but it's still much better than the
			 * architectural event which is often completely bogus:
			 */
			intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;

			pr_cont("erratum AAJ80 worked around, ");
		}
		pr_cont("Nehalem events, ");
		break;

	case 28: /* Atom */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		pr_cont("Atom events, ");
		break;

	case 37: /* 32 nm nehalem, "Clarkdale" */
	case 44: /* 32 nm nehalem, "Gulftown" */
	case 47: /* 32 nm Xeon E7 */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.percore_constraints = intel_westmere_percore_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;
		pr_cont("Westmere events, ");
		break;

	case 42: /* SandyBridge */
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_events;
		pr_cont("SandyBridge events, ");
		break;

	default:
		/*
		 * default constraints for v2 and up
		 */
		x86_pmu.event_constraints = intel_gen_event_constraints;
		pr_cont("generic architected perfmon, ");
	}
	return 0;
}

#else /* CONFIG_CPU_SUP_INTEL */

static int intel_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_INTEL */