Commit | Line | Data |
---|---|---|
241771ef IM |
1 | /* |
2 | * Performance counter x86 architecture code | |
3 | * | |
4 | * Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de> | |
5 | * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar | |
6 | * | |
7 | * For licencing details see kernel-base/COPYING | |
8 | */ | |
9 | ||
10 | #include <linux/perf_counter.h> | |
11 | #include <linux/capability.h> | |
12 | #include <linux/notifier.h> | |
13 | #include <linux/hardirq.h> | |
14 | #include <linux/kprobes.h> | |
4ac13294 | 15 | #include <linux/module.h> |
241771ef IM |
16 | #include <linux/kdebug.h> |
17 | #include <linux/sched.h> | |
18 | ||
5c167b85 | 19 | #include <asm/perf_counter.h> |
241771ef IM |
20 | #include <asm/apic.h> |
21 | ||
22 | static bool perf_counters_initialized __read_mostly; | |
23 | ||
24 | /* | |
25 | * Number of (generic) HW counters: | |
26 | */ | |
27 | static int nr_hw_counters __read_mostly; | |
28 | static u32 perf_counter_mask __read_mostly; | |
29 | ||
241771ef | 30 | struct cpu_hw_counters { |
eb2b8618 IM |
31 | struct perf_counter *generic[X86_PMC_MAX_GENERIC]; |
32 | unsigned long used[BITS_TO_LONGS(X86_PMC_MAX_GENERIC)]; | |
33 | ||
34 | struct perf_counter *fixed[X86_PMC_MAX_FIXED]; | |
35 | unsigned long used_fixed[BITS_TO_LONGS(X86_PMC_MAX_FIXED)]; | |
241771ef IM |
36 | }; |
37 | ||
38 | /* | |
39 | * Intel PerfMon v3. Used on Core2 and later. | |
40 | */ | |
41 | static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters); | |
42 | ||
94c46572 | 43 | static const int intel_perfmon_event_map[] = |
241771ef IM |
44 | { |
45 | [PERF_COUNT_CYCLES] = 0x003c, | |
46 | [PERF_COUNT_INSTRUCTIONS] = 0x00c0, | |
47 | [PERF_COUNT_CACHE_REFERENCES] = 0x4f2e, | |
48 | [PERF_COUNT_CACHE_MISSES] = 0x412e, | |
49 | [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x00c4, | |
50 | [PERF_COUNT_BRANCH_MISSES] = 0x00c5, | |
51 | }; | |
52 | ||
94c46572 | 53 | static const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map); |
241771ef | 54 | |
ee06094f IM |
55 | /* |
56 | * Propagate counter elapsed time into the generic counter. | |
57 | * Can only be executed on the CPU where the counter is active. | |
58 | * The delta of events processed is accumulated into the counter. | |
59 | */ | |
60 | static void | |
61 | x86_perf_counter_update(struct perf_counter *counter, | |
62 | struct hw_perf_counter *hwc, int idx) | |
63 | { | |
64 | u64 prev_raw_count, new_raw_count, delta; | |
65 | ||
ee06094f IM |
66 | /* |
67 | * Careful: an NMI might modify the previous counter value. | |
68 | * | |
69 | * Our tactic to handle this is to first atomically read and | |
70 | * exchange a new raw count - then add that new-prev delta | |
71 | * count to the generic counter atomically: | |
72 | */ | |
73 | again: | |
74 | prev_raw_count = atomic64_read(&hwc->prev_count); | |
75 | rdmsrl(hwc->counter_base + idx, new_raw_count); | |
76 | ||
77 | if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, | |
78 | new_raw_count) != prev_raw_count) | |
79 | goto again; | |
80 | ||
81 | /* | |
82 | * Now we have the new raw value and have updated the prev | |
83 | * timestamp already. We can now calculate the elapsed delta | |
84 | * (counter-)time and add that to the generic counter. | |
85 | * | |
86 | * Careful, not all hw sign-extends above the physical width | |
87 | * of the count, so we do that by clipping the delta to 32 bits: | |
88 | */ | |
89 | delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count); | |
ee06094f IM |
90 | |
91 | atomic64_add(delta, &counter->count); | |
92 | atomic64_sub(delta, &hwc->period_left); | |
93 | } | |
94 | ||
241771ef IM |
95 | /* |
96 | * Set up the hardware configuration for a given hw_event_type | |
97 | */ | |
621a01ea | 98 | static int __hw_perf_counter_init(struct perf_counter *counter) |
241771ef | 99 | { |
9f66a381 | 100 | struct perf_counter_hw_event *hw_event = &counter->hw_event; |
241771ef IM |
101 | struct hw_perf_counter *hwc = &counter->hw; |
102 | ||
103 | if (unlikely(!perf_counters_initialized)) | |
104 | return -EINVAL; | |
105 | ||
106 | /* | |
107 | * Count user events, and generate PMC IRQs: | |
108 | * (keep 'enabled' bit clear for now) | |
109 | */ | |
110 | hwc->config = ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_INT; | |
111 | ||
112 | /* | |
113 | * If privileged enough, count OS events too, and allow | |
114 | * NMI events as well: | |
115 | */ | |
116 | hwc->nmi = 0; | |
117 | if (capable(CAP_SYS_ADMIN)) { | |
118 | hwc->config |= ARCH_PERFMON_EVENTSEL_OS; | |
9f66a381 | 119 | if (hw_event->nmi) |
241771ef IM |
120 | hwc->nmi = 1; |
121 | } | |
122 | ||
9f66a381 IM |
123 | hwc->config_base = MSR_ARCH_PERFMON_EVENTSEL0; |
124 | hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0; | |
241771ef | 125 | |
9f66a381 | 126 | hwc->irq_period = hw_event->irq_period; |
241771ef IM |
127 | /* |
128 | * Intel PMCs cannot be accessed sanely above 32 bit width, | |
129 | * so we install an artificial 1<<31 period regardless of | |
130 | * the generic counter period: | |
131 | */ | |
ee06094f | 132 | if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF) |
241771ef IM |
133 | hwc->irq_period = 0x7FFFFFFF; |
134 | ||
ee06094f | 135 | atomic64_set(&hwc->period_left, hwc->irq_period); |
241771ef IM |
136 | |
137 | /* | |
dfa7c899 | 138 | * Raw event types provide the config in the event structure |
241771ef | 139 | */ |
9f66a381 IM |
140 | if (hw_event->raw) { |
141 | hwc->config |= hw_event->type; | |
241771ef | 142 | } else { |
9f66a381 | 143 | if (hw_event->type >= max_intel_perfmon_events) |
241771ef IM |
144 | return -EINVAL; |
145 | /* | |
146 | * The generic map: | |
147 | */ | |
9f66a381 | 148 | hwc->config |= intel_perfmon_event_map[hw_event->type]; |
241771ef | 149 | } |
241771ef IM |
150 | counter->wakeup_pending = 0; |
151 | ||
152 | return 0; | |
153 | } | |
154 | ||
241771ef IM |
155 | void hw_perf_enable_all(void) |
156 | { | |
2b9ff0db IM |
157 | if (unlikely(!perf_counters_initialized)) |
158 | return; | |
159 | ||
43874d23 | 160 | wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0); |
241771ef IM |
161 | } |
162 | ||
01b2838c | 163 | u64 hw_perf_save_disable(void) |
4ac13294 TG |
164 | { |
165 | u64 ctrl; | |
166 | ||
2b9ff0db IM |
167 | if (unlikely(!perf_counters_initialized)) |
168 | return 0; | |
169 | ||
4ac13294 | 170 | rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); |
241771ef | 171 | wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0); |
2b9ff0db | 172 | |
4ac13294 | 173 | return ctrl; |
241771ef | 174 | } |
01b2838c | 175 | EXPORT_SYMBOL_GPL(hw_perf_save_disable); |
241771ef | 176 | |
ee06094f IM |
177 | void hw_perf_restore(u64 ctrl) |
178 | { | |
2b9ff0db IM |
179 | if (unlikely(!perf_counters_initialized)) |
180 | return; | |
181 | ||
ee06094f IM |
182 | wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0); |
183 | } | |
184 | EXPORT_SYMBOL_GPL(hw_perf_restore); | |
185 | ||
7e2ae347 | 186 | static inline void |
eb2b8618 | 187 | __pmc_generic_disable(struct perf_counter *counter, |
ee06094f | 188 | struct hw_perf_counter *hwc, unsigned int idx) |
7e2ae347 | 189 | { |
ee06094f IM |
190 | int err; |
191 | ||
192 | err = wrmsr_safe(hwc->config_base + idx, hwc->config, 0); | |
7e2ae347 IM |
193 | } |
194 | ||
eb2b8618 | 195 | static DEFINE_PER_CPU(u64, prev_left[X86_PMC_MAX_GENERIC]); |
241771ef | 196 | |
ee06094f IM |
197 | /* |
198 | * Set the next IRQ period, based on the hwc->period_left value. | |
199 | * To be called with the counter disabled in hw: | |
200 | */ | |
201 | static void | |
202 | __hw_perf_counter_set_period(struct perf_counter *counter, | |
203 | struct hw_perf_counter *hwc, int idx) | |
241771ef | 204 | { |
ee06094f IM |
205 | s32 left = atomic64_read(&hwc->period_left); |
206 | s32 period = hwc->irq_period; | |
207 | ||
ee06094f IM |
208 | /* |
209 | * If we are way outside a reasonable range then just skip forward: | |
210 | */ | |
211 | if (unlikely(left <= -period)) { | |
212 | left = period; | |
213 | atomic64_set(&hwc->period_left, left); | |
214 | } | |
215 | ||
216 | if (unlikely(left <= 0)) { | |
217 | left += period; | |
218 | atomic64_set(&hwc->period_left, left); | |
219 | } | |
241771ef | 220 | |
ee06094f IM |
221 | per_cpu(prev_left[idx], smp_processor_id()) = left; |
222 | ||
223 | /* | |
224 | * The hw counter starts counting from this counter offset, | |
225 | * mark it to be able to extract future deltas: | |
226 | */ | |
227 | atomic64_set(&hwc->prev_count, (u64)(s64)-left); | |
228 | ||
229 | wrmsr(hwc->counter_base + idx, -left, 0); | |
7e2ae347 IM |
230 | } |
231 | ||
ee06094f | 232 | static void |
eb2b8618 | 233 | __pmc_generic_enable(struct perf_counter *counter, |
ee06094f | 234 | struct hw_perf_counter *hwc, int idx) |
7e2ae347 IM |
235 | { |
236 | wrmsr(hwc->config_base + idx, | |
237 | hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0); | |
241771ef IM |
238 | } |
239 | ||
ee06094f IM |
240 | /* |
241 | * Find a PMC slot for the freshly enabled / scheduled in counter: | |
242 | */ | |
eb2b8618 | 243 | static void pmc_generic_enable(struct perf_counter *counter) |
241771ef IM |
244 | { |
245 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | |
246 | struct hw_perf_counter *hwc = &counter->hw; | |
247 | int idx = hwc->idx; | |
248 | ||
249 | /* Try to get the previous counter again */ | |
250 | if (test_and_set_bit(idx, cpuc->used)) { | |
251 | idx = find_first_zero_bit(cpuc->used, nr_hw_counters); | |
252 | set_bit(idx, cpuc->used); | |
253 | hwc->idx = idx; | |
254 | } | |
255 | ||
256 | perf_counters_lapic_init(hwc->nmi); | |
257 | ||
eb2b8618 | 258 | __pmc_generic_disable(counter, hwc, idx); |
241771ef | 259 | |
eb2b8618 | 260 | cpuc->generic[idx] = counter; |
7e2ae347 | 261 | |
ee06094f | 262 | __hw_perf_counter_set_period(counter, hwc, idx); |
eb2b8618 | 263 | __pmc_generic_enable(counter, hwc, idx); |
241771ef IM |
264 | } |
265 | ||
266 | void perf_counter_print_debug(void) | |
267 | { | |
ee06094f | 268 | u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left; |
1e125676 IM |
269 | int cpu, idx; |
270 | ||
271 | if (!nr_hw_counters) | |
272 | return; | |
241771ef IM |
273 | |
274 | local_irq_disable(); | |
275 | ||
276 | cpu = smp_processor_id(); | |
277 | ||
1e125676 IM |
278 | rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); |
279 | rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); | |
280 | rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow); | |
241771ef IM |
281 | |
282 | printk(KERN_INFO "\n"); | |
283 | printk(KERN_INFO "CPU#%d: ctrl: %016llx\n", cpu, ctrl); | |
284 | printk(KERN_INFO "CPU#%d: status: %016llx\n", cpu, status); | |
285 | printk(KERN_INFO "CPU#%d: overflow: %016llx\n", cpu, overflow); | |
286 | ||
287 | for (idx = 0; idx < nr_hw_counters; idx++) { | |
1e125676 IM |
288 | rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl); |
289 | rdmsrl(MSR_ARCH_PERFMON_PERFCTR0 + idx, pmc_count); | |
241771ef | 290 | |
ee06094f | 291 | prev_left = per_cpu(prev_left[idx], cpu); |
241771ef IM |
292 | |
293 | printk(KERN_INFO "CPU#%d: PMC%d ctrl: %016llx\n", | |
294 | cpu, idx, pmc_ctrl); | |
295 | printk(KERN_INFO "CPU#%d: PMC%d count: %016llx\n", | |
296 | cpu, idx, pmc_count); | |
ee06094f IM |
297 | printk(KERN_INFO "CPU#%d: PMC%d left: %016llx\n", |
298 | cpu, idx, prev_left); | |
241771ef IM |
299 | } |
300 | local_irq_enable(); | |
301 | } | |
302 | ||
eb2b8618 | 303 | static void pmc_generic_disable(struct perf_counter *counter) |
241771ef IM |
304 | { |
305 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | |
306 | struct hw_perf_counter *hwc = &counter->hw; | |
307 | unsigned int idx = hwc->idx; | |
308 | ||
eb2b8618 | 309 | __pmc_generic_disable(counter, hwc, idx); |
241771ef IM |
310 | |
311 | clear_bit(idx, cpuc->used); | |
eb2b8618 | 312 | cpuc->generic[idx] = NULL; |
241771ef | 313 | |
ee06094f IM |
314 | /* |
315 | * Drain the remaining delta count out of a counter | |
316 | * that we are disabling: | |
317 | */ | |
318 | x86_perf_counter_update(counter, hwc, idx); | |
241771ef IM |
319 | } |
320 | ||
321 | static void perf_store_irq_data(struct perf_counter *counter, u64 data) | |
322 | { | |
323 | struct perf_data *irqdata = counter->irqdata; | |
324 | ||
325 | if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) { | |
326 | irqdata->overrun++; | |
327 | } else { | |
328 | u64 *p = (u64 *) &irqdata->data[irqdata->len]; | |
329 | ||
330 | *p = data; | |
331 | irqdata->len += sizeof(u64); | |
332 | } | |
333 | } | |
334 | ||
7e2ae347 | 335 | /* |
ee06094f IM |
336 | * Save and restart an expired counter. Called by NMI contexts, |
337 | * so it has to be careful about preempting normal counter ops: | |
7e2ae347 | 338 | */ |
241771ef IM |
339 | static void perf_save_and_restart(struct perf_counter *counter) |
340 | { | |
341 | struct hw_perf_counter *hwc = &counter->hw; | |
342 | int idx = hwc->idx; | |
7e2ae347 | 343 | u64 pmc_ctrl; |
241771ef | 344 | |
1e125676 | 345 | rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl); |
241771ef | 346 | |
ee06094f IM |
347 | x86_perf_counter_update(counter, hwc, idx); |
348 | __hw_perf_counter_set_period(counter, hwc, idx); | |
7e2ae347 IM |
349 | |
350 | if (pmc_ctrl & ARCH_PERFMON_EVENTSEL0_ENABLE) | |
eb2b8618 | 351 | __pmc_generic_enable(counter, hwc, idx); |
241771ef IM |
352 | } |
353 | ||
354 | static void | |
04289bb9 | 355 | perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown) |
241771ef | 356 | { |
04289bb9 | 357 | struct perf_counter *counter, *group_leader = sibling->group_leader; |
241771ef | 358 | |
04289bb9 | 359 | /* |
ee06094f | 360 | * Store sibling timestamps (if any): |
04289bb9 IM |
361 | */ |
362 | list_for_each_entry(counter, &group_leader->sibling_list, list_entry) { | |
ee06094f | 363 | x86_perf_counter_update(counter, &counter->hw, counter->hw.idx); |
04289bb9 | 364 | perf_store_irq_data(sibling, counter->hw_event.type); |
ee06094f | 365 | perf_store_irq_data(sibling, atomic64_read(&counter->count)); |
241771ef IM |
366 | } |
367 | } | |
368 | ||
369 | /* | |
370 | * This handler is triggered by the local APIC, so the APIC IRQ handling | |
371 | * rules apply: | |
372 | */ | |
373 | static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi) | |
374 | { | |
375 | int bit, cpu = smp_processor_id(); | |
43874d23 | 376 | u64 ack, status, saved_global; |
241771ef | 377 | struct cpu_hw_counters *cpuc; |
43874d23 IM |
378 | |
379 | rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global); | |
241771ef | 380 | |
241771ef IM |
381 | /* Disable counters globally */ |
382 | wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0); | |
383 | ack_APIC_irq(); | |
384 | ||
385 | cpuc = &per_cpu(cpu_hw_counters, cpu); | |
386 | ||
87b9cf46 IM |
387 | rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); |
388 | if (!status) | |
389 | goto out; | |
390 | ||
241771ef IM |
391 | again: |
392 | ack = status; | |
393 | for_each_bit(bit, (unsigned long *) &status, nr_hw_counters) { | |
eb2b8618 | 394 | struct perf_counter *counter = cpuc->generic[bit]; |
241771ef IM |
395 | |
396 | clear_bit(bit, (unsigned long *) &status); | |
397 | if (!counter) | |
398 | continue; | |
399 | ||
400 | perf_save_and_restart(counter); | |
401 | ||
9f66a381 | 402 | switch (counter->hw_event.record_type) { |
241771ef IM |
403 | case PERF_RECORD_SIMPLE: |
404 | continue; | |
405 | case PERF_RECORD_IRQ: | |
406 | perf_store_irq_data(counter, instruction_pointer(regs)); | |
407 | break; | |
408 | case PERF_RECORD_GROUP: | |
241771ef IM |
409 | perf_handle_group(counter, &status, &ack); |
410 | break; | |
411 | } | |
412 | /* | |
413 | * From NMI context we cannot call into the scheduler to | |
eb2b8618 | 414 | * do a task wakeup - but we mark these generic counters as |
241771ef IM |
415 | * wakeup_pending and initiate a wakeup callback: |
416 | */ | |
417 | if (nmi) { | |
418 | counter->wakeup_pending = 1; | |
419 | set_tsk_thread_flag(current, TIF_PERF_COUNTERS); | |
420 | } else { | |
421 | wake_up(&counter->waitq); | |
422 | } | |
423 | } | |
424 | ||
425 | wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack, 0); | |
426 | ||
427 | /* | |
428 | * Repeat if there is more work to be done: | |
429 | */ | |
430 | rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); | |
431 | if (status) | |
432 | goto again; | |
87b9cf46 | 433 | out: |
241771ef | 434 | /* |
43874d23 | 435 | * Restore - do not reenable when global enable is off: |
241771ef | 436 | */ |
43874d23 | 437 | wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, saved_global, 0); |
241771ef IM |
438 | } |
439 | ||
440 | void smp_perf_counter_interrupt(struct pt_regs *regs) | |
441 | { | |
442 | irq_enter(); | |
92bf73e9 | 443 | inc_irq_stat(apic_perf_irqs); |
241771ef IM |
444 | apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR); |
445 | __smp_perf_counter_interrupt(regs, 0); | |
446 | ||
447 | irq_exit(); | |
448 | } | |
449 | ||
450 | /* | |
451 | * This handler is triggered by NMI contexts: | |
452 | */ | |
453 | void perf_counter_notify(struct pt_regs *regs) | |
454 | { | |
455 | struct cpu_hw_counters *cpuc; | |
456 | unsigned long flags; | |
457 | int bit, cpu; | |
458 | ||
459 | local_irq_save(flags); | |
460 | cpu = smp_processor_id(); | |
461 | cpuc = &per_cpu(cpu_hw_counters, cpu); | |
462 | ||
463 | for_each_bit(bit, cpuc->used, nr_hw_counters) { | |
eb2b8618 | 464 | struct perf_counter *counter = cpuc->generic[bit]; |
241771ef IM |
465 | |
466 | if (!counter) | |
467 | continue; | |
468 | ||
469 | if (counter->wakeup_pending) { | |
470 | counter->wakeup_pending = 0; | |
471 | wake_up(&counter->waitq); | |
472 | } | |
473 | } | |
474 | ||
475 | local_irq_restore(flags); | |
476 | } | |
477 | ||
478 | void __cpuinit perf_counters_lapic_init(int nmi) | |
479 | { | |
480 | u32 apic_val; | |
481 | ||
482 | if (!perf_counters_initialized) | |
483 | return; | |
484 | /* | |
485 | * Enable the performance counter vector in the APIC LVT: | |
486 | */ | |
487 | apic_val = apic_read(APIC_LVTERR); | |
488 | ||
489 | apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED); | |
490 | if (nmi) | |
491 | apic_write(APIC_LVTPC, APIC_DM_NMI); | |
492 | else | |
493 | apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR); | |
494 | apic_write(APIC_LVTERR, apic_val); | |
495 | } | |
496 | ||
497 | static int __kprobes | |
498 | perf_counter_nmi_handler(struct notifier_block *self, | |
499 | unsigned long cmd, void *__args) | |
500 | { | |
501 | struct die_args *args = __args; | |
502 | struct pt_regs *regs; | |
503 | ||
504 | if (likely(cmd != DIE_NMI_IPI)) | |
505 | return NOTIFY_DONE; | |
506 | ||
507 | regs = args->regs; | |
508 | ||
509 | apic_write(APIC_LVTPC, APIC_DM_NMI); | |
510 | __smp_perf_counter_interrupt(regs, 1); | |
511 | ||
512 | return NOTIFY_STOP; | |
513 | } | |
514 | ||
515 | static __read_mostly struct notifier_block perf_counter_nmi_notifier = { | |
516 | .notifier_call = perf_counter_nmi_handler | |
517 | }; | |
518 | ||
519 | void __init init_hw_perf_counters(void) | |
520 | { | |
521 | union cpuid10_eax eax; | |
522 | unsigned int unused; | |
523 | unsigned int ebx; | |
524 | ||
525 | if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) | |
526 | return; | |
527 | ||
528 | /* | |
529 | * Check whether the Architectural PerfMon supports | |
530 | * Branch Misses Retired Event or not. | |
531 | */ | |
532 | cpuid(10, &(eax.full), &ebx, &unused, &unused); | |
533 | if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED) | |
534 | return; | |
535 | ||
536 | printk(KERN_INFO "Intel Performance Monitoring support detected.\n"); | |
537 | ||
538 | printk(KERN_INFO "... version: %d\n", eax.split.version_id); | |
539 | printk(KERN_INFO "... num_counters: %d\n", eax.split.num_counters); | |
540 | nr_hw_counters = eax.split.num_counters; | |
eb2b8618 IM |
541 | if (nr_hw_counters > X86_PMC_MAX_GENERIC) { |
542 | nr_hw_counters = X86_PMC_MAX_GENERIC; | |
241771ef | 543 | WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!", |
eb2b8618 | 544 | nr_hw_counters, X86_PMC_MAX_GENERIC); |
241771ef IM |
545 | } |
546 | perf_counter_mask = (1 << nr_hw_counters) - 1; | |
547 | perf_max_counters = nr_hw_counters; | |
548 | ||
549 | printk(KERN_INFO "... bit_width: %d\n", eax.split.bit_width); | |
550 | printk(KERN_INFO "... mask_length: %d\n", eax.split.mask_length); | |
551 | ||
75f224cf IM |
552 | perf_counters_initialized = true; |
553 | ||
241771ef IM |
554 | perf_counters_lapic_init(0); |
555 | register_die_notifier(&perf_counter_nmi_notifier); | |
241771ef | 556 | } |
621a01ea | 557 | |
eb2b8618 | 558 | static void pmc_generic_read(struct perf_counter *counter) |
ee06094f IM |
559 | { |
560 | x86_perf_counter_update(counter, &counter->hw, counter->hw.idx); | |
561 | } | |
562 | ||
5c92d124 | 563 | static const struct hw_perf_counter_ops x86_perf_counter_ops = { |
eb2b8618 IM |
564 | .hw_perf_counter_enable = pmc_generic_enable, |
565 | .hw_perf_counter_disable = pmc_generic_disable, | |
566 | .hw_perf_counter_read = pmc_generic_read, | |
621a01ea IM |
567 | }; |
568 | ||
5c92d124 IM |
569 | const struct hw_perf_counter_ops * |
570 | hw_perf_counter_init(struct perf_counter *counter) | |
621a01ea IM |
571 | { |
572 | int err; | |
573 | ||
574 | err = __hw_perf_counter_init(counter); | |
575 | if (err) | |
576 | return NULL; | |
577 | ||
578 | return &x86_perf_counter_ops; | |
579 | } |
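
A few stand-alone sketches of the techniques used in the listing follow; they model the logic in plain user-space C and are not kernel code. The first mirrors the read/cmpxchg/add tactic of x86_perf_counter_update(): the hardware count is re-read until the previous value can be swapped atomically, and only then is the 32-bit-clipped delta accumulated. `read_hw_counter()` and the two atomics are made-up stand-ins for rdmsrl(), hwc->prev_count and counter->count.

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t prev_count;   /* models hwc->prev_count */
static _Atomic uint64_t total_count;  /* models counter->count  */
static uint64_t fake_pmc;             /* stands in for the live PMC MSR */

static uint64_t read_hw_counter(void)
{
	return fake_pmc;
}

static void counter_update(void)
{
	uint64_t prev, cur;

	do {
		prev = atomic_load(&prev_count);
		cur  = read_hw_counter();
		/*
		 * If someone (in the kernel: an NMI) updated prev_count
		 * between the load and the exchange, retry with the
		 * fresh value - the equivalent of the 'goto again'.
		 */
	} while (!atomic_compare_exchange_weak(&prev_count, &prev, cur));

	/* Clip the delta to 32 bits, as the listing does for the PMC width. */
	uint64_t delta = (uint64_t)(uint32_t)((int32_t)cur - (int32_t)prev);

	atomic_fetch_add(&total_count, delta);
}

int main(void)
{
	fake_pmc = 100;
	counter_update();
	fake_pmc = 250;
	counter_update();

	printf("accumulated: %llu\n",
	       (unsigned long long)atomic_load(&total_count));
	return 0;
}
```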
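
A second sketch, assuming nothing beyond the listing itself, walks the period arithmetic of __hw_perf_counter_set_period(): clamp a badly lagging period_left, catch up by one period when it has gone non-positive, then program the negated value so the upward-counting PMC overflows after exactly `left` events. `next_pmc_value()` is a hypothetical helper name used only here.

```c
#include <stdint.h>
#include <stdio.h>

static uint32_t next_pmc_value(int32_t *period_left, int32_t period)
{
	int32_t left = *period_left;

	/* Way outside a reasonable range: skip forward a whole period. */
	if (left <= -period)
		left = period;

	/* Lagging a little (an overflow was handled late): catch up. */
	if (left <= 0)
		left += period;

	*period_left = left;

	/* Program -left so the counter overflows after 'left' increments. */
	return (uint32_t)-left;
}

int main(void)
{
	int32_t left = -3;              /* 3 events over the last period */
	uint32_t val = next_pmc_value(&left, 10000);

	printf("left=%d, programmed=0x%08x\n", left, (unsigned)val);
	return 0;
}
```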
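
The next sketch shows the two paths __hw_perf_counter_init() takes when building the EVENTSEL config: raw events are OR-ed in verbatim, generic event types go through the Intel event map, and the OS bit is only set for privileged callers. The bit positions follow the architectural EVENTSEL layout; `build_config()` itself is an illustrative helper, not a kernel function.

```c
#include <stdint.h>
#include <stdio.h>

#define EVENTSEL_USR  (1ULL << 16)   /* count user-mode events   */
#define EVENTSEL_OS   (1ULL << 17)   /* count kernel-mode events */
#define EVENTSEL_INT  (1ULL << 20)   /* generate PMC interrupts  */

enum { CYCLES, INSTRUCTIONS, CACHE_REFS, CACHE_MISSES,
       BRANCHES, BRANCH_MISSES, MAX_EVENTS };

static const int intel_event_map[MAX_EVENTS] = {
	[CYCLES]        = 0x003c,
	[INSTRUCTIONS]  = 0x00c0,
	[CACHE_REFS]    = 0x4f2e,
	[CACHE_MISSES]  = 0x412e,
	[BRANCHES]      = 0x00c4,
	[BRANCH_MISSES] = 0x00c5,
};

static int64_t build_config(uint64_t type, int raw, int privileged)
{
	uint64_t config = EVENTSEL_USR | EVENTSEL_INT;

	if (privileged)
		config |= EVENTSEL_OS;

	if (raw)
		return config | type;        /* raw hardware event code */

	if (type >= MAX_EVENTS)
		return -1;                   /* -EINVAL in the original */

	return config | intel_event_map[type];
}

int main(void)
{
	printf("cycles config: 0x%llx\n",
	       (unsigned long long)build_config(CYCLES, 0, 1));
	return 0;
}
```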
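
This sketch models the PMC slot allocation of pmc_generic_enable(): try to re-claim the slot the counter used last time, and fall back to the first free bit in the `used` bitmap otherwise. Plain, non-atomic bit operations are used purely for illustration; the kernel uses test_and_set_bit()/find_first_zero_bit().

```c
#include <stdio.h>

#define NR_SLOTS 8

static unsigned long used;              /* models cpuc->used */

static int test_and_set_slot(int idx)
{
	int was_set = !!(used & (1UL << idx));

	used |= 1UL << idx;
	return was_set;
}

static int alloc_slot(int prev_idx)
{
	int idx = prev_idx;

	/* Try to get the previous slot again: */
	if (test_and_set_slot(idx)) {
		/* Taken - scan for the first free slot instead. */
		for (idx = 0; idx < NR_SLOTS; idx++)
			if (!test_and_set_slot(idx))
				break;
	}
	return idx;
}

int main(void)
{
	int a = alloc_slot(0);   /* gets slot 0 back          */
	int b = alloc_slot(0);   /* slot 0 taken, falls to 1  */

	printf("a=%d b=%d\n", a, b);
	return 0;
}
```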
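
Finally, a minimal model of the overflow loop in __smp_perf_counter_interrupt(): read the global status word, handle every set bit, acknowledge exactly what was handled, and re-read in case more overflows arrived in the meantime. The "MSRs" here are ordinary variables and handle_overflow() is a placeholder.

```c
#include <stdint.h>
#include <stdio.h>

static uint64_t global_status;  /* stands in for MSR_CORE_PERF_GLOBAL_STATUS */

static void handle_overflow(int idx)
{
	printf("counter %d overflowed\n", idx);
}

static void ack_status(uint64_t mask)
{
	/* models the write to MSR_CORE_PERF_GLOBAL_OVF_CTRL */
	global_status &= ~mask;
}

static void overflow_loop(void)
{
	uint64_t status = global_status;

	while (status) {
		uint64_t ack = status;
		int bit;

		for (bit = 0; bit < 64; bit++) {
			if (!(status & (1ULL << bit)))
				continue;
			handle_overflow(bit);
		}

		ack_status(ack);

		/* Repeat if more work arrived while we were handling: */
		status = global_status;
	}
}

int main(void)
{
	global_status = (1ULL << 0) | (1ULL << 2);
	overflow_loop();
	return 0;
}
```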