/*
 * @file op_model_ppro.h
 * Family 6 perfmon and architectural perfmon MSR operations
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Copyright 2008 Intel Corporation
 * @remark Read the file COPYING
 *
 * @author John Levon
 * @author Philippe Elie
 * @author Graydon Hoare
 * @author Andi Kleen
 * @author Robert Richter <robert.richter@amd.com>
 */

#include <linux/oprofile.h>
#include <linux/slab.h>
#include <asm/ptrace.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include <asm/nmi.h>

#include "op_x86_model.h"
#include "op_counter.h"

b9917028 AK |
26 | static int num_counters = 2; |
27 | static int counter_width = 32; | |
1da177e4 | 28 | |
3370d358 | 29 | #define MSR_PPRO_EVENTSEL_RESERVED ((0xFFFFFFFFULL<<32)|(1ULL<<21)) |
1da177e4 | 30 | |
b9917028 | 31 | static u64 *reset_value; |
8b45b72b | 32 | |
1da177e4 LT |
33 | static void ppro_fill_in_addresses(struct op_msrs * const msrs) |
34 | { | |
cb9c448c DZ |
35 | int i; |
36 | ||
b9917028 | 37 | for (i = 0; i < num_counters; i++) { |
cb9c448c DZ |
38 | if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i)) |
39 | msrs->counters[i].addr = MSR_P6_PERFCTR0 + i; | |
40 | else | |
41 | msrs->counters[i].addr = 0; | |
42 | } | |
8b45b72b | 43 | |
b9917028 | 44 | for (i = 0; i < num_counters; i++) { |
cb9c448c DZ |
45 | if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) |
46 | msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i; | |
47 | else | |
48 | msrs->controls[i].addr = 0; | |
49 | } | |
1da177e4 LT |
50 | } |
51 | ||
52 | ||
ef8828dd RR |
53 | static void ppro_setup_ctrs(struct op_x86_model_spec const *model, |
54 | struct op_msrs const * const msrs) | |
1da177e4 | 55 | { |
3370d358 | 56 | u64 val; |
1da177e4 LT |
57 | int i; |
58 | ||
b9917028 | 59 | if (!reset_value) { |
a4a16bea | 60 | reset_value = kmalloc(sizeof(reset_value[0]) * num_counters, |
b9917028 AK |
61 | GFP_ATOMIC); |
62 | if (!reset_value) | |
63 | return; | |
64 | } | |
65 | ||
66 | if (cpu_has_arch_perfmon) { | |
67 | union cpuid10_eax eax; | |
68 | eax.full = cpuid_eax(0xa); | |
780eef94 TB |
69 | |
70 | /* | |
71 | * For Core2 (family 6, model 15), don't reset the | |
72 | * counter width: | |
73 | */ | |
74 | if (!(eax.split.version_id == 0 && | |
75 | current_cpu_data.x86 == 6 && | |
76 | current_cpu_data.x86_model == 15)) { | |
77 | ||
78 | if (counter_width < eax.split.bit_width) | |
79 | counter_width = eax.split.bit_width; | |
80 | } | |
b9917028 AK |
81 | } |
82 | ||
1da177e4 | 83 | /* clear all counters */ |
6e63ea4b | 84 | for (i = 0; i < num_counters; ++i) { |
217d3cfb | 85 | if (unlikely(!msrs->controls[i].addr)) |
cb9c448c | 86 | continue; |
3370d358 RR |
87 | rdmsrl(msrs->controls[i].addr, val); |
88 | val &= model->reserved; | |
89 | wrmsrl(msrs->controls[i].addr, val); | |
1da177e4 | 90 | } |
8b45b72b | 91 | |
1da177e4 | 92 | /* avoid a false detection of ctr overflows in NMI handler */ |
b9917028 | 93 | for (i = 0; i < num_counters; ++i) { |
217d3cfb | 94 | if (unlikely(!msrs->counters[i].addr)) |
cb9c448c | 95 | continue; |
b9917028 | 96 | wrmsrl(msrs->counters[i].addr, -1LL); |
1da177e4 LT |
97 | } |
98 | ||
99 | /* enable active counters */ | |
b9917028 | 100 | for (i = 0; i < num_counters; ++i) { |
217d3cfb | 101 | if (counter_config[i].enabled && msrs->counters[i].addr) { |
1da177e4 | 102 | reset_value[i] = counter_config[i].count; |
b9917028 | 103 | wrmsrl(msrs->counters[i].addr, -reset_value[i]); |
3370d358 RR |
104 | rdmsrl(msrs->controls[i].addr, val); |
105 | val &= model->reserved; | |
106 | val |= op_x86_get_ctrl(model, &counter_config[i]); | |
107 | wrmsrl(msrs->controls[i].addr, val); | |
cb9c448c DZ |
108 | } else { |
109 | reset_value[i] = 0; | |
1da177e4 LT |
110 | } |
111 | } | |
112 | } | |
113 | ||
8b45b72b | 114 | |
1da177e4 LT |
115 | static int ppro_check_ctrs(struct pt_regs * const regs, |
116 | struct op_msrs const * const msrs) | |
117 | { | |
7c64ade5 | 118 | u64 val; |
1da177e4 | 119 | int i; |
8b45b72b | 120 | |
82aa9a18 IM |
121 | /* |
122 | * This can happen if perf counters are in use when | |
123 | * we steal the die notifier NMI. | |
124 | */ | |
125 | if (unlikely(!reset_value)) | |
126 | goto out; | |
127 | ||
6e63ea4b | 128 | for (i = 0; i < num_counters; ++i) { |
cb9c448c DZ |
129 | if (!reset_value[i]) |
130 | continue; | |
7c64ade5 | 131 | rdmsrl(msrs->counters[i].addr, val); |
42399adb RR |
132 | if (val & (1ULL << (counter_width - 1))) |
133 | continue; | |
134 | oprofile_add_sample(regs, i); | |
135 | wrmsrl(msrs->counters[i].addr, -reset_value[i]); | |
1da177e4 LT |
136 | } |
137 | ||
82aa9a18 | 138 | out: |
1da177e4 LT |
139 | /* Only P6 based Pentium M need to re-unmask the apic vector but it |
140 | * doesn't hurt other P6 variant */ | |
141 | apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED); | |
142 | ||
143 | /* We can't work out if we really handled an interrupt. We | |
144 | * might have caught a *second* counter just after overflowing | |
145 | * the interrupt for this counter then arrives | |
146 | * and we don't find a counter that's overflowed, so we | |
147 | * would return 0 and get dazed + confused. Instead we always | |
148 | * assume we found an overflow. This sucks. | |
149 | */ | |
150 | return 1; | |
151 | } | |
152 | ||
8b45b72b | 153 | |
1da177e4 LT |
154 | static void ppro_start(struct op_msrs const * const msrs) |
155 | { | |
dea3766c | 156 | u64 val; |
6b77df08 | 157 | int i; |
cb9c448c | 158 | |
9ea84ad7 ED |
159 | if (!reset_value) |
160 | return; | |
b9917028 | 161 | for (i = 0; i < num_counters; ++i) { |
6b77df08 | 162 | if (reset_value[i]) { |
dea3766c RR |
163 | rdmsrl(msrs->controls[i].addr, val); |
164 | val |= ARCH_PERFMON_EVENTSEL0_ENABLE; | |
165 | wrmsrl(msrs->controls[i].addr, val); | |
6b77df08 | 166 | } |
cb9c448c | 167 | } |
1da177e4 LT |
168 | } |
169 | ||
170 | ||
171 | static void ppro_stop(struct op_msrs const * const msrs) | |
172 | { | |
dea3766c | 173 | u64 val; |
6b77df08 | 174 | int i; |
cb9c448c | 175 | |
9ea84ad7 ED |
176 | if (!reset_value) |
177 | return; | |
b9917028 | 178 | for (i = 0; i < num_counters; ++i) { |
6b77df08 AS |
179 | if (!reset_value[i]) |
180 | continue; | |
dea3766c RR |
181 | rdmsrl(msrs->controls[i].addr, val); |
182 | val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; | |
183 | wrmsrl(msrs->controls[i].addr, val); | |
cb9c448c DZ |
184 | } |
185 | } | |
186 | ||
187 | static void ppro_shutdown(struct op_msrs const * const msrs) | |
188 | { | |
189 | int i; | |
190 | ||
6e63ea4b | 191 | for (i = 0; i < num_counters; ++i) { |
217d3cfb | 192 | if (msrs->counters[i].addr) |
cb9c448c DZ |
193 | release_perfctr_nmi(MSR_P6_PERFCTR0 + i); |
194 | } | |
6e63ea4b | 195 | for (i = 0; i < num_counters; ++i) { |
217d3cfb | 196 | if (msrs->controls[i].addr) |
cb9c448c DZ |
197 | release_evntsel_nmi(MSR_P6_EVNTSEL0 + i); |
198 | } | |
b9917028 AK |
199 | if (reset_value) { |
200 | kfree(reset_value); | |
201 | reset_value = NULL; | |
202 | } | |
1da177e4 LT |
203 | } |
204 | ||
205 | ||
259a83a8 | 206 | struct op_x86_model_spec op_ppro_spec = { |
849620fa RR |
207 | .num_counters = 2, |
208 | .num_controls = 2, | |
3370d358 | 209 | .reserved = MSR_PPRO_EVENTSEL_RESERVED, |
5a289395 RR |
210 | .fill_in_addresses = &ppro_fill_in_addresses, |
211 | .setup_ctrs = &ppro_setup_ctrs, | |
212 | .check_ctrs = &ppro_check_ctrs, | |
213 | .start = &ppro_start, | |
214 | .stop = &ppro_stop, | |
215 | .shutdown = &ppro_shutdown | |
b9917028 AK |
216 | }; |
217 | ||
/*
 * Architectural performance monitoring.
 *
 * Newer Intel CPUs (Core1+) have support for architectural
 * events described in CPUID 0xA. See the IA32 SDM Vol3b.18 for details.
 * The advantage of this is that it can be done without knowing about
 * the specific CPU.
 */
226 | ||
e419294e | 227 | static void arch_perfmon_setup_counters(void) |
b9917028 AK |
228 | { |
229 | union cpuid10_eax eax; | |
230 | ||
231 | eax.full = cpuid_eax(0xa); | |
232 | ||
233 | /* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */ | |
234 | if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 && | |
235 | current_cpu_data.x86_model == 15) { | |
236 | eax.split.version_id = 2; | |
237 | eax.split.num_counters = 2; | |
238 | eax.split.bit_width = 40; | |
239 | } | |
240 | ||
241 | num_counters = eax.split.num_counters; | |
242 | ||
243 | op_arch_perfmon_spec.num_counters = num_counters; | |
244 | op_arch_perfmon_spec.num_controls = num_counters; | |
245 | } | |
246 | ||
e419294e RR |
247 | static int arch_perfmon_init(struct oprofile_operations *ignore) |
248 | { | |
249 | arch_perfmon_setup_counters(); | |
250 | return 0; | |
251 | } | |
252 | ||
b9917028 | 253 | struct op_x86_model_spec op_arch_perfmon_spec = { |
3370d358 | 254 | .reserved = MSR_PPRO_EVENTSEL_RESERVED, |
e419294e | 255 | .init = &arch_perfmon_init, |
b9917028 | 256 | /* num_counters/num_controls filled in at runtime */ |
c92960fc | 257 | .fill_in_addresses = &ppro_fill_in_addresses, |
b9917028 | 258 | /* user space does the cpuid check for available events */ |
c92960fc RR |
259 | .setup_ctrs = &ppro_setup_ctrs, |
260 | .check_ctrs = &ppro_check_ctrs, | |
261 | .start = &ppro_start, | |
262 | .stop = &ppro_stop, | |
263 | .shutdown = &ppro_shutdown | |
1da177e4 | 264 | }; |