#include <linux/perf_event.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/apicdef.h>

#include "../perf_event.h"

static __initconst const u64 amd_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
                [ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
                [ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
        },
 },
 [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
                [ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(LL ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
                [ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
                [ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
                [ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
                [ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(NODE) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
                [ C(RESULT_MISS)   ] = 0x98e9, /* CPU Request to Memory, r */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
};

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
        [PERF_COUNT_HW_CPU_CYCLES]              = 0x0076,
        [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x077d,
        [PERF_COUNT_HW_CACHE_MISSES]            = 0x077e,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c2,
        [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c3,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = 0x00d1, /* "Dispatch stalls" event */
};

static u64 amd_pmu_event_map(int hw_event)
{
        return amd_perfmon_event_map[hw_event];
}

/*
 * Previously calculated offsets
 */
static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;

/*
 * Legacy CPUs:
 *   4 counters starting at 0xc0010000 each offset by 1
 *
 * CPUs with core performance counter extensions:
 *   6 counters starting at 0xc0010200 each offset by 2
 */
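/*
 * Worked example of the mapping above: for counter index 2 the event-select
 * MSR is 0xc0010000 + 2 = 0xc0010002 on legacy CPUs, but 0xc0010200 +
 * (2 << 1) = 0xc0010204 with the core extensions, where control and counter
 * MSRs alternate and hence the per-index stride is 2.
 */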
static inline int amd_pmu_addr_offset(int index, bool eventsel)
{
        int offset;

        if (!index)
                return index;

        if (eventsel)
                offset = event_offsets[index];
        else
                offset = count_offsets[index];

        if (offset)
                return offset;

        if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
                offset = index;
        else
                offset = index << 1;

        if (eventsel)
                event_offsets[index] = offset;
        else
                count_offsets[index] = offset;

        return offset;
}

static int amd_core_hw_config(struct perf_event *event)
{
        if (event->attr.exclude_host && event->attr.exclude_guest)
                /*
                 * When HO == GO == 1 the hardware treats that as GO == HO == 0
                 * and will count in both modes. We don't want to count in that
                 * case so we emulate no-counting by setting US = OS = 0.
                 */
                event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
                                      ARCH_PERFMON_EVENTSEL_OS);
        else if (event->attr.exclude_host)
                event->hw.config |= AMD64_EVENTSEL_GUESTONLY;
        else if (event->attr.exclude_guest)
                event->hw.config |= AMD64_EVENTSEL_HOSTONLY;

        return 0;
}

/*
 * AMD64 events are detected based on their event codes.
 */
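/*
 * The event select is 12 bits wide, split across config bits [7:0] and
 * [35:32] (cf. the "event" format attribute defined later in this file);
 * amd_get_event_code() below reassembles the two halves into one value.
 */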
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
        return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}

static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
        return (hwc->config & 0xe0) == 0xe0;
}

static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
        struct amd_nb *nb = cpuc->amd_nb;

        return nb && nb->nb_id != -1;
}

static int amd_pmu_hw_config(struct perf_event *event)
{
        int ret;

        /* pass precise event sampling to ibs: */
        if (event->attr.precise_ip && get_ibs_caps())
                return -ENOENT;

        if (has_branch_stack(event))
                return -EOPNOTSUPP;

        ret = x86_pmu_hw_config(event);
        if (ret)
                return ret;

        if (event->attr.type == PERF_TYPE_RAW)
                event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

        return amd_core_hw_config(event);
}

static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
                                           struct perf_event *event)
{
        struct amd_nb *nb = cpuc->amd_nb;
        int i;

        /*
         * need to scan whole list because event may not have
         * been assigned during scheduling
         *
         * no race condition possible because event can only
         * be removed on one CPU at a time AND PMU is disabled
         * when we come here
         */
        for (i = 0; i < x86_pmu.num_counters; i++) {
                if (cmpxchg(nb->owners + i, event, NULL) == event)
                        break;
        }
}

/*
 * AMD64 NorthBridge events need special treatment because
 * counter access needs to be synchronized across all cores
 * of a package. Refer to BKDG section 3.12
 *
 * NB events are events measuring L3 cache, HyperTransport
 * traffic. They are identified by an event code >= 0xe00.
 * They measure events on the NorthBridge which is shared
 * by all cores on a package. NB events are counted on a
 * shared set of counters. When an NB event is programmed
 * in a counter, the data actually comes from a shared
 * counter. Thus, access to those counters needs to be
 * synchronized.
 *
 * We implement the synchronization such that no two cores
 * can be measuring NB events using the same counters. Thus,
 * we maintain a per-NB allocation table. The available slot
 * is propagated using the event_constraint structure.
 *
 * We provide only one choice for each NB event based on
 * the fact that only NB events have restrictions. Consequently,
 * if a counter is available, there is a guarantee the NB event
 * will be assigned to it. If no slot is available, an empty
 * constraint is returned and scheduling will eventually fail
 * for this event.
 *
 * Note that all cores attached to the same NB compete for the same
 * counters to host NB events, which is why we use atomic ops. Some
 * multi-chip CPUs may have more than one NB.
 *
 * Given that resources are allocated (cmpxchg), they must be
 * eventually freed for others to use. This is accomplished by
 * calling __amd_put_nb_event_constraints().
 *
 * Non-NB events are not impacted by this restriction.
 */
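/*
 * Concretely, the allocation table is the owners[] array of struct amd_nb
 * (one slot per counter): a slot is claimed with cmpxchg(NULL -> event)
 * below and released with cmpxchg(event -> NULL) in
 * __amd_put_nb_event_constraints().
 */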
static struct event_constraint *
__amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
                               struct event_constraint *c)
{
        struct hw_perf_event *hwc = &event->hw;
        struct amd_nb *nb = cpuc->amd_nb;
        struct perf_event *old;
        int idx, new = -1;

        if (!c)
                c = &unconstrained;

        if (cpuc->is_fake)
                return c;

        /*
         * detect if already present, if so reuse
         *
         * cannot merge with actual allocation
         * because of possible holes
         *
         * event can already be present yet not assigned (in hwc->idx)
         * because of successive calls to x86_schedule_events() from
         * hw_perf_group_sched_in() without hw_perf_enable()
         */
        for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
                if (new == -1 || hwc->idx == idx)
                        /* assign free slot, prefer hwc->idx */
                        old = cmpxchg(nb->owners + idx, NULL, event);
                else if (nb->owners[idx] == event)
                        /* event already present */
                        old = event;
                else
                        continue;

                if (old && old != event)
                        continue;

                /* reassign to this slot */
                if (new != -1)
                        cmpxchg(nb->owners + new, event, NULL);
                new = idx;

                /* already present, reuse */
                if (old == event)
                        break;
        }

        if (new == -1)
                return &emptyconstraint;

        return &nb->event_constraints[new];
}

static struct amd_nb *amd_alloc_nb(int cpu)
{
        struct amd_nb *nb;
        int i;

        nb = kzalloc_node(sizeof(struct amd_nb), GFP_KERNEL, cpu_to_node(cpu));
        if (!nb)
                return NULL;

        nb->nb_id = -1;

        /*
         * initialize all possible NB constraints
         */
        for (i = 0; i < x86_pmu.num_counters; i++) {
                __set_bit(i, nb->event_constraints[i].idxmsk);
                nb->event_constraints[i].weight = 1;
        }
        return nb;
}

static int amd_pmu_cpu_prepare(int cpu)
{
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

        WARN_ON_ONCE(cpuc->amd_nb);

        if (!x86_pmu.amd_nb_constraints)
                return 0;

        cpuc->amd_nb = amd_alloc_nb(cpu);
        if (!cpuc->amd_nb)
                return -ENOMEM;

        return 0;
}

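/*
 * On CPU online: if another online CPU already owns an amd_nb for this
 * node (same nb_id), share it and queue the copy allocated in
 * amd_pmu_cpu_prepare() for freeing; otherwise keep using our own.
 */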
static void amd_pmu_cpu_starting(int cpu)
{
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
        void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
        struct amd_nb *nb;
        int i, nb_id;

        cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;

        if (!x86_pmu.amd_nb_constraints)
                return;

        nb_id = amd_get_nb_id(cpu);
        WARN_ON_ONCE(nb_id == BAD_APICID);

        for_each_online_cpu(i) {
                nb = per_cpu(cpu_hw_events, i).amd_nb;
                if (WARN_ON_ONCE(!nb))
                        continue;

                if (nb->nb_id == nb_id) {
                        *onln = cpuc->amd_nb;
                        cpuc->amd_nb = nb;
                        break;
                }
        }

        cpuc->amd_nb->nb_id = nb_id;
        cpuc->amd_nb->refcnt++;
}

static void amd_pmu_cpu_dead(int cpu)
{
        struct cpu_hw_events *cpuhw;

        if (!x86_pmu.amd_nb_constraints)
                return;

        cpuhw = &per_cpu(cpu_hw_events, cpu);

        if (cpuhw->amd_nb) {
                struct amd_nb *nb = cpuhw->amd_nb;

                if (nb->nb_id == -1 || --nb->refcnt == 0)
                        kfree(nb);

                cpuhw->amd_nb = NULL;
        }
}

/*
 * When a PMC counter overflows, an NMI is used to process the event and
 * reset the counter. NMI latency can result in the counter being updated
 * before the NMI can run, which can lead to what appear to be spurious
 * NMIs. This function is intended to wait for the NMI to run and reset
 * the counter to avoid possible unhandled NMI messages.
 */
#define OVERFLOW_WAIT_COUNT     50

static void amd_pmu_wait_on_overflow(int idx)
{
        unsigned int i;
        u64 counter;

        /*
         * Wait for the counter to be reset if it has overflowed. This loop
         * should exit very, very quickly, but just in case, don't wait
         * forever...
         */
        for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
                rdmsrl(x86_pmu_event_addr(idx), counter);
                if (counter & (1ULL << (x86_pmu.cntval_bits - 1)))
                        break;

                /* Might be in IRQ context, so can't sleep */
                udelay(1);
        }
}
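
/*
 * Note on the break condition above: counters are programmed so that the
 * highest bit stays set while counting (see the "use highest bit to detect
 * overflow" note on max_period below); a clear top bit therefore means the
 * counter has overflowed and has not yet been reset by the NMI handler.
 */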

static void amd_pmu_disable_all(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int idx;

        x86_pmu_disable_all();

        /*
         * This shouldn't be called from NMI context, but add a safeguard here
         * to return, since if we're in NMI context we can't wait for an NMI
         * to reset an overflowed counter value.
         */
        if (in_nmi())
                return;

        /*
         * Check each counter for overflow and wait for it to be reset by the
         * NMI if it has overflowed. This relies on the fact that all active
         * counters are always enabled when this function is called and
         * ARCH_PERFMON_EVENTSEL_INT is always set.
         */
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                if (!test_bit(idx, cpuc->active_mask))
                        continue;

                amd_pmu_wait_on_overflow(idx);
        }
}

static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                          struct perf_event *event)
{
        /*
         * if not NB event or no NB, then no constraints
         */
        if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
                return &unconstrained;

        return __amd_get_nb_event_constraints(cpuc, event, NULL);
}

static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
                                      struct perf_event *event)
{
        if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
                __amd_put_nb_event_constraints(cpuc, event);
}

PMU_FORMAT_ATTR(event, "config:0-7,32-35");
PMU_FORMAT_ATTR(umask, "config:8-15");
PMU_FORMAT_ATTR(edge,  "config:18");
PMU_FORMAT_ATTR(inv,   "config:23");
PMU_FORMAT_ATTR(cmask, "config:24-31");

static struct attribute *amd_format_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_cmask.attr,
        NULL,
};
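
/*
 * These format attributes define the raw event syntax exposed via sysfs.
 * As an illustration (assuming the usual perf tool syntax), something like
 * "perf stat -e cpu/event=0x76,umask=0x00/" would program raw event 0x076,
 * i.e. the CPU-cycles event listed in amd_perfmon_event_map above.
 */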

/* AMD Family 15h */

#define AMD_EVENT_TYPE_MASK     0x000000F0ULL

#define AMD_EVENT_FP            0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS            0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC            0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU            0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE         0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS         0x000000C0ULL
#define AMD_EVENT_DE            0x000000D0ULL
#define AMD_EVENT_NB            0x000000E0ULL ... 0x000000F0ULL

/*
 * AMD family 15h event code/PMC mappings:
 *
 * type = event_code & 0x0F0:
 *
 * 0x000  FP     PERF_CTL[5:3]
 * 0x010  FP     PERF_CTL[5:3]
 * 0x020  LS     PERF_CTL[5:0]
 * 0x030  LS     PERF_CTL[5:0]
 * 0x040  DC     PERF_CTL[5:0]
 * 0x050  DC     PERF_CTL[5:0]
 * 0x060  CU     PERF_CTL[2:0]
 * 0x070  CU     PERF_CTL[2:0]
 * 0x080  IC/DE  PERF_CTL[2:0]
 * 0x090  IC/DE  PERF_CTL[2:0]
 * 0x0A0  ---
 * 0x0B0  ---
 * 0x0C0  EX/LS  PERF_CTL[5:0]
 * 0x0D0  DE     PERF_CTL[2:0]
 * 0x0E0  NB     NB_PERF_CTL[3:0]
 * 0x0F0  NB     NB_PERF_CTL[3:0]
 *
 * Exceptions:
 *
 * 0x000  FP     PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x003  FP     PERF_CTL[3]
 * 0x004  FP     PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x00B  FP     PERF_CTL[3]
 * 0x00D  FP     PERF_CTL[3]
 * 0x023  DE     PERF_CTL[2:0]
 * 0x02D  LS     PERF_CTL[3]
 * 0x02E  LS     PERF_CTL[3,0]
 * 0x031  LS     PERF_CTL[2:0] (**)
 * 0x043  CU     PERF_CTL[2:0]
 * 0x045  CU     PERF_CTL[2:0]
 * 0x046  CU     PERF_CTL[2:0]
 * 0x054  CU     PERF_CTL[2:0]
 * 0x055  CU     PERF_CTL[2:0]
 * 0x08F  IC     PERF_CTL[0]
 * 0x187  DE     PERF_CTL[0]
 * 0x188  DE     PERF_CTL[0]
 * 0x0DB  EX     PERF_CTL[5:0]
 * 0x0DC  LS     PERF_CTL[5:0]
 * 0x0DD  LS     PERF_CTL[5:0]
 * 0x0DE  LS     PERF_CTL[5:0]
 * 0x0DF  LS     PERF_CTL[5:0]
 * 0x1C0  EX     PERF_CTL[5:3]
 * 0x1D6  EX     PERF_CTL[5:0]
 * 0x1D8  EX     PERF_CTL[5:0]
 *
 * (*)  depending on the umask all FPU counters may be used
 * (**) only one unitmask enabled at a time
 */
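/*
 * The constraints below encode the table above as counter bitmasks: the
 * second argument of EVENT_CONSTRAINT() has bit i set when PERF_CTL[i] may
 * host the event, e.g. 0x07 corresponds to PERF_CTL[2:0] (amd_f15_PMC20)
 * and 0x38 to PERF_CTL[5:3] (amd_f15_PMC53).
 */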

static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);

static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx,
                               struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        unsigned int event_code = amd_get_event_code(hwc);

        switch (event_code & AMD_EVENT_TYPE_MASK) {
        case AMD_EVENT_FP:
                switch (event_code) {
                case 0x000:
                        if (!(hwc->config & 0x0000F000ULL))
                                break;
                        if (!(hwc->config & 0x00000F00ULL))
                                break;
                        return &amd_f15_PMC3;
                case 0x004:
                        if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
                                break;
                        return &amd_f15_PMC3;
                case 0x003:
                case 0x00B:
                case 0x00D:
                        return &amd_f15_PMC3;
                }
                return &amd_f15_PMC53;
        case AMD_EVENT_LS:
        case AMD_EVENT_DC:
        case AMD_EVENT_EX_LS:
                switch (event_code) {
                case 0x023:
                case 0x043:
                case 0x045:
                case 0x046:
                case 0x054:
                case 0x055:
                        return &amd_f15_PMC20;
                case 0x02D:
                        return &amd_f15_PMC3;
                case 0x02E:
                        return &amd_f15_PMC30;
                case 0x031:
                        if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
                                return &amd_f15_PMC20;
                        return &emptyconstraint;
                case 0x1C0:
                        return &amd_f15_PMC53;
                default:
                        return &amd_f15_PMC50;
                }
        case AMD_EVENT_CU:
        case AMD_EVENT_IC_DE:
        case AMD_EVENT_DE:
                switch (event_code) {
                case 0x08F:
                case 0x187:
                case 0x188:
                        return &amd_f15_PMC0;
                case 0x0DB ... 0x0DF:
                case 0x1D6:
                case 0x1D8:
                        return &amd_f15_PMC50;
                default:
                        return &amd_f15_PMC20;
                }
        case AMD_EVENT_NB:
                /* moved to uncore.c */
                return &emptyconstraint;
        default:
                return &emptyconstraint;
        }
}

static ssize_t amd_event_sysfs_show(char *page, u64 config)
{
        u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) |
                    (config & AMD64_EVENTSEL_EVENT) >> 24;

        return x86_event_sysfs_show(page, config, event);
}

static __initconst const struct x86_pmu amd_pmu = {
        .name                   = "AMD",
        .handle_irq             = x86_pmu_handle_irq,
        .disable_all            = amd_pmu_disable_all,
        .enable_all             = x86_pmu_enable_all,
        .enable                 = x86_pmu_enable_event,
        .disable                = x86_pmu_disable_event,
        .hw_config              = amd_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_K7_EVNTSEL0,
        .perfctr                = MSR_K7_PERFCTR0,
        .addr_offset            = amd_pmu_addr_offset,
        .event_map              = amd_pmu_event_map,
        .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
        .num_counters           = AMD64_NUM_COUNTERS,
        .cntval_bits            = 48,
        .cntval_mask            = (1ULL << 48) - 1,
        .apic                   = 1,
        /* use highest bit to detect overflow */
        .max_period             = (1ULL << 47) - 1,
        .get_event_constraints  = amd_get_event_constraints,
        .put_event_constraints  = amd_put_event_constraints,

        .format_attrs           = amd_format_attr,
        .events_sysfs_show      = amd_event_sysfs_show,

        .cpu_prepare            = amd_pmu_cpu_prepare,
        .cpu_starting           = amd_pmu_cpu_starting,
        .cpu_dead               = amd_pmu_cpu_dead,

        .amd_nb_constraints     = 1,
};

static int __init amd_core_pmu_init(void)
{
        if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
                return 0;

        switch (boot_cpu_data.x86) {
        case 0x15:
                pr_cont("Fam15h ");
                x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
                break;
        case 0x17:
                pr_cont("Fam17h ");
                /*
                 * Family 17h has no event constraints in the PMC hardware,
                 * so we fall back to the default amd_get_event_constraints.
                 */
                break;
        case 0x18:
                pr_cont("Fam18h ");
                /* Using default amd_get_event_constraints. */
                break;
        default:
                pr_err("core perfctr but no constraints; unknown hardware!\n");
                return -ENODEV;
        }

        /*
         * If core performance counter extensions exist, we must use
         * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also
         * amd_pmu_addr_offset().
         */
        x86_pmu.eventsel        = MSR_F15H_PERF_CTL;
        x86_pmu.perfctr         = MSR_F15H_PERF_CTR;
        x86_pmu.num_counters    = AMD64_NUM_COUNTERS_CORE;
        /*
         * AMD Core perfctr has separate MSRs for the NB events, see
         * the amd/uncore.c driver.
         */
        x86_pmu.amd_nb_constraints = 0;

        pr_cont("core perfctr, ");
        return 0;
}

__init int amd_pmu_init(void)
{
        int ret;

        /* Performance-monitoring supported from K7 and later: */
        if (boot_cpu_data.x86 < 6)
                return -ENODEV;

        x86_pmu = amd_pmu;

        ret = amd_core_pmu_init();
        if (ret)
                return ret;

        if (num_possible_cpus() == 1) {
                /*
                 * No point in allocating data structures to serialize
                 * against other CPUs, when there is only the one CPU.
                 */
                x86_pmu.amd_nb_constraints = 0;
        }

        /* Events are common for all AMDs */
        memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
               sizeof(hw_cache_event_ids));

        return 0;
}

void amd_pmu_enable_virt(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        cpuc->perf_ctr_virt_mask = 0;

        /* Reload all events */
        amd_pmu_disable_all();
        x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);

void amd_pmu_disable_virt(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        /*
         * We only mask out the Host-only bit so that host-only counting works
         * when SVM is disabled. If someone sets up a guest-only counter when
         * SVM is disabled, the Guest-only bit still gets set and the counter
         * will not count anything.
         */
        cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;

        /* Reload all events */
        amd_pmu_disable_all();
        x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);