/*
 * Performance counter x86 architecture code
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>

static u64 perf_counter_mask __read_mostly;

struct cpu_hw_counters {
	struct perf_counter	*counters[X86_PMC_IDX_MAX];
	unsigned long		used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		interrupts;
	int			enabled;
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(void);
	void		(*enable)(struct hw_perf_counter *, int);
	void		(*disable)(struct hw_perf_counter *, int);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	u64		(*raw_event)(u64);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		counter_bits;
	u64		counter_mask;
	u64		max_period;
	u64		intel_ctrl;
};

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
	.enabled = 1,
};

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static const u64 intel_perfmon_event_map[] =
{
  [PERF_COUNT_CPU_CYCLES]		= 0x003c,
  [PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_CACHE_REFERENCES]		= 0x4f2e,
  [PERF_COUNT_CACHE_MISSES]		= 0x412e,
  [PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
  [PERF_COUNT_BUS_CYCLES]		= 0x013c,
};

static u64 intel_pmu_event_map(int event)
{
	return intel_perfmon_event_map[event];
}

/*
 * Generalized hw caching related event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'event makes no sense on
 * this CPU', any other value means the raw event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

static const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0480, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(L2  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
		[ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
		[ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0xc024, /* L2_RQSTS.PREFETCHES */
		[ C(RESULT_MISS)   ] = 0x8024, /* L2_RQSTS.PREFETCH_MISS */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L2  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2241, /* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L2  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

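/*
 * Raw events pass the user-supplied config through, but only the
 * architecturally defined EVNTSEL fields (event select, unit mask,
 * edge detect, invert and counter mask) are kept; every other bit,
 * notably the enable, INT and OS/USR bits that the core code manages
 * itself, is filtered out by the per-vendor raw_event() masks below.
 */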
static u64 intel_pmu_raw_event(u64 event)
{
#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK		0x00040000ULL
#define CORE_EVNTSEL_INV_MASK		0x00800000ULL
#define CORE_EVNTSEL_COUNTER_MASK	0xFF000000ULL

#define CORE_EVNTSEL_MASK		\
	(CORE_EVNTSEL_EVENT_MASK |	\
	 CORE_EVNTSEL_UNIT_MASK  |	\
	 CORE_EVNTSEL_EDGE_MASK  |	\
	 CORE_EVNTSEL_INV_MASK   |	\
	 CORE_EVNTSEL_COUNTER_MASK)

	return event & CORE_EVNTSEL_MASK;
}

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
  [PERF_COUNT_CPU_CYCLES]		= 0x0076,
  [PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_CACHE_REFERENCES]		= 0x0080,
  [PERF_COUNT_CACHE_MISSES]		= 0x0081,
  [PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
};

static u64 amd_pmu_event_map(int event)
{
	return amd_perfmon_event_map[event];
}

static u64 amd_pmu_raw_event(u64 event)
{
#define K7_EVNTSEL_EVENT_MASK	0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
#define K7_EVNTSEL_EDGE_MASK	0x000040000ULL
#define K7_EVNTSEL_INV_MASK	0x000800000ULL
#define K7_EVNTSEL_COUNTER_MASK	0x0FF000000ULL

#define K7_EVNTSEL_MASK			\
	(K7_EVNTSEL_EVENT_MASK |	\
	 K7_EVNTSEL_UNIT_MASK  |	\
	 K7_EVNTSEL_EDGE_MASK  |	\
	 K7_EVNTSEL_INV_MASK   |	\
	 K7_EVNTSEL_COUNTER_MASK)

	return event & K7_EVNTSEL_MASK;
}

/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_counter_update(struct perf_counter *counter,
			struct hw_perf_counter *hwc, int idx)
{
	int shift = 64 - x86_pmu.counter_bits;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

	/*
	 * Careful: an NMI might modify the previous counter value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->counter_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static atomic_t active_counters;
static DEFINE_MUTEX(pmc_reserve_mutex);

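/*
 * The perfctr/eventsel MSRs are shared with the NMI watchdog: take the
 * LAPIC NMI watchdog down and reserve every counter/eventsel pair before
 * touching them, undoing the reservations again on any failure.
 */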
static bool reserve_pmc_hardware(void)
{
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_counters;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
}

static void release_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
}

static void hw_perf_counter_destroy(struct perf_counter *counter)
{
	if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

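/*
 * PERF_TYPE_HW_CACHE attrs encode the cache id in bits 0-7, the
 * operation (read/write/prefetch) in bits 8-15 and the result
 * (access/miss) in bits 16-23 of attr->config; the triplet indexes
 * hw_cache_event_ids[] to obtain the model-specific raw event.
 */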
static inline int
set_ext_hw_attr(struct hw_perf_counter *hwc, struct perf_counter_attr *attr)
{
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;

	return 0;
}

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __hw_perf_counter_init(struct perf_counter *counter)
{
	struct perf_counter_attr *attr = &counter->attr;
	struct hw_perf_counter *hwc = &counter->hw;
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_counters)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_counters) == 0 && !reserve_pmc_hardware())
			err = -EBUSY;
		else
			atomic_inc(&active_counters);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!attr->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!attr->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	if (!hwc->sample_period)
		hwc->sample_period = x86_pmu.max_period;

	atomic64_set(&hwc->period_left, hwc->sample_period);
	counter->destroy = hw_perf_counter_destroy;

	/*
	 * Raw event types provide the config in the event structure
	 */
	if (attr->type == PERF_TYPE_RAW) {
		hwc->config |= x86_pmu.raw_event(attr->config);
		return 0;
	}

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, attr);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;
	/*
	 * The generic map:
	 */
	hwc->config |= x86_pmu.event_map(attr->config);

	return 0;
}

static void intel_pmu_disable_all(void)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
}

static void amd_pmu_disable_all(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int idx;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	/*
	 * ensure we write the disable before we start disabling the
	 * counters proper, so that amd_pmu_enable_counter() does the
	 * right thing.
	 */
	barrier();

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

void hw_perf_disable(void)
{
	if (!x86_pmu_initialized())
		return;
	x86_pmu.disable_all();
}

static void intel_pmu_enable_all(void)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
}

static void amd_pmu_enable_all(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int idx;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
			continue;
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

void hw_perf_enable(void)
{
	if (!x86_pmu_initialized())
		return;
	x86_pmu.enable_all();
}

static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	int err;
	err = checking_wrmsrl(hwc->config_base + idx,
			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}

static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	int err;
	err = checking_wrmsrl(hwc->config_base + idx,
			      hwc->config);
}

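/*
 * Fixed-purpose counters are controlled through 4-bit fields in
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL, one nibble per counter: disabling
 * clears the whole nibble, enabling rebuilds it from the requested
 * PMI/ring-3/ring-0 bits.
 */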
static inline void
intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;
	int err;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static inline void
intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc, idx);
		return;
	}

	x86_pmu_disable_counter(hwc, idx);
}

static inline void
amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	x86_pmu_disable_counter(hwc, idx);
}

static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static int
x86_perf_counter_set_period(struct perf_counter *counter,
			     struct hw_perf_counter *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int err, ret = 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	per_cpu(prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw counter starts counting from this counter offset,
	 * mark it to be able to extract future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)-left);

	err = checking_wrmsrl(hwc->counter_base + idx,
			     (u64)(-left) & x86_pmu.counter_mask);

	return ret;
}

static inline void
intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;
	int err;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;
	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc, idx);
		return;
	}

	x86_pmu_enable_counter(hwc, idx);
}

static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

	if (cpuc->enabled)
		x86_pmu_enable_counter(hwc, idx);
	else
		x86_pmu_disable_counter(hwc, idx);
}

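/*
 * Map the generic instructions/cycles/bus-cycles events onto the
 * corresponding fixed-purpose counter index, or return -1 so the
 * caller falls back to a generic counter.
 */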
static int
fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
{
	unsigned int event;

	if (!x86_pmu.num_counters_fixed)
		return -1;

	event = hwc->config & ARCH_PERFMON_EVENT_MASK;

	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_INSTRUCTIONS)))
		return X86_PMC_IDX_FIXED_INSTRUCTIONS;
	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_CPU_CYCLES)))
		return X86_PMC_IDX_FIXED_CPU_CYCLES;
	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_BUS_CYCLES)))
		return X86_PMC_IDX_FIXED_BUS_CYCLES;

	return -1;
}

/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
static int x86_pmu_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx;

	idx = fixed_mode_idx(counter, hwc);
	if (idx >= 0) {
		/*
		 * Try to get the fixed counter, if that is already taken
		 * then try to get a generic counter:
		 */
		if (test_and_set_bit(idx, cpuc->used_mask))
			goto try_generic;

		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that counter_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->counter_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
		hwc->idx = idx;
	} else {
		idx = hwc->idx;
		/* Try to get the previous generic counter again */
		if (test_and_set_bit(idx, cpuc->used_mask)) {
try_generic:
			idx = find_first_zero_bit(cpuc->used_mask,
						  x86_pmu.num_counters);
			if (idx == x86_pmu.num_counters)
				return -EAGAIN;

			set_bit(idx, cpuc->used_mask);
			hwc->idx = idx;
		}
		hwc->config_base = x86_pmu.eventsel;
		hwc->counter_base = x86_pmu.perfctr;
	}

	perf_counters_lapic_init();

	x86_pmu.disable(hwc, idx);

	cpuc->counters[idx] = counter;
	set_bit(idx, cpuc->active_mask);

	x86_perf_counter_set_period(counter, hwc, idx);
	x86_pmu.enable(hwc, idx);

	return 0;
}

static void x86_pmu_unthrottle(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;

	if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
				cpuc->counters[hwc->idx] != counter))
		return;

	x86_pmu.enable(hwc, hwc->idx);
}

void perf_counter_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	struct cpu_hw_counters *cpuc;
	unsigned long flags;
	int cpu, idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

		pr_info("\n");
		pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status: %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
	}
	pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
		rdmsrl(x86_pmu.perfctr + idx, pmc_count);

		prev_left = per_cpu(prev_left[idx], cpu);

		pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_restore(flags);
}

static void x86_pmu_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	/*
	 * Must be done before we disable, otherwise the nmi handler
	 * could reenable again:
	 */
	clear_bit(idx, cpuc->active_mask);
	x86_pmu.disable(hwc, idx);

	/*
	 * Make sure the cleared pointer becomes visible before we
	 * (potentially) free the counter:
	 */
	barrier();

	/*
	 * Drain the remaining delta count out of a counter
	 * that we are disabling:
	 */
	x86_perf_counter_update(counter, hwc, idx);
	cpuc->counters[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);
}

/*
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static int intel_pmu_save_and_restart(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;
	int ret;

	x86_perf_counter_update(counter, hwc, idx);
	ret = x86_perf_counter_set_period(counter, hwc, idx);

	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		intel_pmu_enable_counter(hwc, idx);

	return ret;
}

static void intel_pmu_reset(void)
{
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	printk("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
		checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
	}

	local_irq_restore(flags);
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct cpu_hw_counters *cpuc;
	int bit, cpu, loops;
	u64 ack, status;

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	perf_disable();
	status = intel_pmu_get_status();
	if (!status) {
		perf_enable();
		return 0;
	}

	loops = 0;
again:
	if (++loops > 100) {
		WARN_ONCE(1, "perfcounters: irq loop stuck!\n");
		perf_counter_print_debug();
		intel_pmu_reset();
		perf_enable();
		return 1;
	}

	inc_irq_stat(apic_perf_irqs);
	ack = status;
	for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_counter *counter = cpuc->counters[bit];

		clear_bit(bit, (unsigned long *) &status);
		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(counter))
			continue;

		if (perf_counter_overflow(counter, 1, regs, 0))
			intel_pmu_disable_counter(&counter->hw, bit);
	}

	intel_pmu_ack_status(ack);

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

	perf_enable();

	return 1;
}

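/*
 * AMD has no global overflow status register, so poll every active
 * counter: counters are programmed with a negative value, and the top
 * bit still being set means no overflow has happened yet.
 */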
static int amd_pmu_handle_irq(struct pt_regs *regs)
{
	int cpu, idx, handled = 0;
	struct cpu_hw_counters *cpuc;
	struct perf_counter *counter;
	struct hw_perf_counter *hwc;
	u64 val;

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		counter = cpuc->counters[idx];
		hwc = &counter->hw;

		val = x86_perf_counter_update(counter, hwc, idx);
		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
			continue;

		/* counter overflow */
		handled = 1;
		inc_irq_stat(apic_perf_irqs);
		if (!x86_perf_counter_set_period(counter, hwc, idx))
			continue;

		if (perf_counter_overflow(counter, 1, regs, 0))
			amd_pmu_disable_counter(hwc, idx);
	}

	return handled;
}

void smp_perf_pending_interrupt(struct pt_regs *regs)
{
	irq_enter();
	ack_APIC_irq();
	inc_irq_stat(apic_pending_irqs);
	perf_counter_do_pending();
	irq_exit();
}

void set_perf_counter_pending(void)
{
	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
}

void perf_counters_lapic_init(void)
{
	if (!x86_pmu_initialized())
		return;

	/*
	 * Always use NMI for PMU
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static int __kprobes
perf_counter_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;

	if (!atomic_read(&active_counters))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
	case DIE_NMI_IPI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	apic_write(APIC_LVTPC, APIC_DM_NMI);
	/*
	 * Can't rely on the handled return value to say it was our NMI, two
	 * counters could trigger 'simultaneously' raising two back-to-back NMIs.
	 *
	 * If the first NMI handles both, the latter will be empty and daze
	 * the CPU.
	 */
	x86_pmu.handle_irq(regs);

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
	.notifier_call	= perf_counter_nmi_handler,
	.next		= NULL,
	.priority	= 1
};

static struct x86_pmu intel_pmu = {
	.name		= "Intel",
	.handle_irq	= intel_pmu_handle_irq,
	.disable_all	= intel_pmu_disable_all,
	.enable_all	= intel_pmu_enable_all,
	.enable		= intel_pmu_enable_counter,
	.disable	= intel_pmu_disable_counter,
	.eventsel	= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr	= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map	= intel_pmu_event_map,
	.raw_event	= intel_pmu_raw_event,
	.max_events	= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic counter period:
	 */
	.max_period	= (1ULL << 31) - 1,
};

static struct x86_pmu amd_pmu = {
	.name		= "AMD",
	.handle_irq	= amd_pmu_handle_irq,
	.disable_all	= amd_pmu_disable_all,
	.enable_all	= amd_pmu_enable_all,
	.enable		= amd_pmu_enable_counter,
	.disable	= amd_pmu_disable_counter,
	.eventsel	= MSR_K7_EVNTSEL0,
	.perfctr	= MSR_K7_PERFCTR0,
	.event_map	= amd_pmu_event_map,
	.raw_event	= amd_pmu_raw_event,
	.max_events	= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters	= 4,
	.counter_bits	= 48,
	.counter_mask	= (1ULL << 48) - 1,
	/* use highest bit to detect overflow */
	.max_period	= (1ULL << 47) - 1,
};

static int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
		return -ENODEV;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired Event or not.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		return -ENODEV;

	x86_pmu = intel_pmu;
	x86_pmu.version = version;
	x86_pmu.num_counters = eax.split.num_counters;

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose counters, so
	 * assume at least 3 counters:
	 */
	x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	x86_pmu.counter_bits = eax.split.bit_width;
	x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1;

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	/*
	 * Install the model-specific hw-cache event tables:
	 */
	switch (boot_cpu_data.x86_model) {
	case 17:
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(u64)*PERF_COUNT_HW_CACHE_MAX*
		       PERF_COUNT_HW_CACHE_OP_MAX*PERF_COUNT_HW_CACHE_RESULT_MAX);

		pr_info("... installed Core2 event tables\n");
		break;
	default:
	case 26:
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(u64)*PERF_COUNT_HW_CACHE_MAX*
		       PERF_COUNT_HW_CACHE_OP_MAX*PERF_COUNT_HW_CACHE_RESULT_MAX);

		pr_info("... installed Nehalem/Corei7 event tables\n");
		break;
	case 28:
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(u64)*PERF_COUNT_HW_CACHE_MAX*
		       PERF_COUNT_HW_CACHE_OP_MAX*PERF_COUNT_HW_CACHE_RESULT_MAX);

		pr_info("... installed Atom event tables\n");
		break;
	}
	return 0;
}

static int amd_pmu_init(void)
{
	x86_pmu = amd_pmu;
	return 0;
}

void __init init_hw_perf_counters(void)
{
	int err;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		err = intel_pmu_init();
		break;
	case X86_VENDOR_AMD:
		err = amd_pmu_init();
		break;
	default:
		return;
	}
	if (err != 0)
		return;

	pr_info("%s Performance Monitoring support detected.\n", x86_pmu.name);
	pr_info("... version: %d\n", x86_pmu.version);
	pr_info("... bit width: %d\n", x86_pmu.counter_bits);

	pr_info("... num counters: %d\n", x86_pmu.num_counters);
	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
		     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
	}
	perf_counter_mask = (1 << x86_pmu.num_counters) - 1;
	perf_max_counters = x86_pmu.num_counters;

	pr_info("... value mask: %016Lx\n", x86_pmu.counter_mask);
	pr_info("... max period: %016Lx\n", x86_pmu.max_period);

	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
		WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
	}
	pr_info("... fixed counters: %d\n", x86_pmu.num_counters_fixed);

	perf_counter_mask |=
		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;

	pr_info("... counter mask: %016Lx\n", perf_counter_mask);

	perf_counters_lapic_init();
	register_die_notifier(&perf_counter_nmi_notifier);
}

static inline void x86_pmu_read(struct perf_counter *counter)
{
	x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}

static const struct pmu pmu = {
	.enable		= x86_pmu_enable,
	.disable	= x86_pmu_disable,
	.read		= x86_pmu_read,
	.unthrottle	= x86_pmu_unthrottle,
};

const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	int err;

	err = __hw_perf_counter_init(counter);
	if (err)
		return ERR_PTR(err);

	return &pmu;
}

/*
 * callchain support
 */

static inline
void callchain_store(struct perf_callchain_entry *entry, unsigned long ip)
{
	if (entry->nr < MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);

static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
	/* Don't bother with IRQ stacks for now */
	return -1;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	if (reliable)
		callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.warning	= backtrace_warning,
	.warning_symbol	= backtrace_warning_symbol,
	.stack		= backtrace_stack,
	.address	= backtrace_address,
};

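/*
 * Kernel callchains reuse the generic stack dumper: backtrace_ops feeds
 * every reliable return address into the entry via callchain_store(),
 * and entry->kernel records how many entries were added.
 */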
static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	unsigned long bp;
	char *stack;
	int nr = entry->nr;

	callchain_store(entry, instruction_pointer(regs));

	stack = ((char *)regs + sizeof(struct pt_regs));
#ifdef CONFIG_FRAME_POINTER
	bp = frame_pointer(regs);
#else
	bp = 0;
#endif

	dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, entry);

	entry->kernel = entry->nr - nr;
}

struct stack_frame {
	const void __user	*next_fp;
	unsigned long		return_address;
};

static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

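/*
 * User callchains are walked by following the frame-pointer chain on the
 * user stack, copying each frame with copy_stack_frame(), which tolerates
 * faults via pagefault_disable().
 */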
static void
perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	struct stack_frame frame;
	const void __user *fp;
	int nr = entry->nr;

	regs = (struct pt_regs *)current->thread.sp0 - 1;
	fp = (void __user *)regs->bp;

	callchain_store(entry, regs->ip);

	while (entry->nr < MAX_STACK_DEPTH) {
		frame.next_fp = NULL;
		frame.return_address = 0;

		if (!copy_stack_frame(fp, &frame))
			break;

		if ((unsigned long)fp < user_stack_pointer(regs))
			break;

		callchain_store(entry, frame.return_address);
		fp = frame.next_fp;
	}

	entry->user = entry->nr - nr;
}

static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

	if (!current || current->pid == 0)
		return;

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user)
		perf_callchain_kernel(regs, entry);

	if (current->mm)
		perf_callchain_user(regs, entry);
}

struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry;

	if (in_nmi())
		entry = &__get_cpu_var(nmi_entry);
	else
		entry = &__get_cpu_var(irq_entry);

	entry->nr = 0;
	entry->hv = 0;
	entry->kernel = 0;
	entry->user = 0;

	perf_do_callchain(regs, entry);

	return entry;
}