]>
Commit | Line | Data |
---|---|---|
8b45b72b | 1 | /* |
1da177e4 | 2 | * @file op_model_ppro.h |
b9917028 | 3 | * Family 6 perfmon and architectural perfmon MSR operations |
1da177e4 LT |
4 | * |
5 | * @remark Copyright 2002 OProfile authors | |
b9917028 | 6 | * @remark Copyright 2008 Intel Corporation |
1da177e4 LT |
7 | * @remark Read the file COPYING |
8 | * | |
9 | * @author John Levon | |
10 | * @author Philippe Elie | |
11 | * @author Graydon Hoare | |
b9917028 | 12 | * @author Andi Kleen |
1da177e4 LT |
13 | */ |
14 | ||
15 | #include <linux/oprofile.h> | |
b9917028 | 16 | #include <linux/slab.h> |
1da177e4 LT |
17 | #include <asm/ptrace.h> |
18 | #include <asm/msr.h> | |
19 | #include <asm/apic.h> | |
3e4ff115 | 20 | #include <asm/nmi.h> |
8b45b72b | 21 | |
1da177e4 LT |
22 | #include "op_x86_model.h" |
23 | #include "op_counter.h" | |
24 | ||
b9917028 AK |
/* Number of active counters; 2 on classic P6, overridden at runtime from
 * CPUID 0xA by arch_perfmon_setup_counters() when arch perfmon exists. */
static int num_counters = 2;
/* Counter bit width; 32 on classic P6, possibly widened from CPUID 0xA
 * in ppro_setup_ctrs(). */
static int counter_width = 32;

/* Counters are programmed with -reset_value and count upward, so an
 * overflow has occurred once the top (sign) bit of the counter clears. */
#define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1))))
/* Clears the event-select word, keeping only bit 21.
 * NOTE(review): presumably a must-preserve/reserved EVNTSEL bit — confirm
 * against the Intel SDM. */
#define CTRL_CLEAR(x) (x &= (1<<21))
#define CTRL_SET_EVENT(val, e) (val |= e)

/* Per-counter reload values; allocated lazily in ppro_setup_ctrs(),
 * freed in ppro_shutdown(). NULL until first setup or on alloc failure. */
static u64 *reset_value;
8b45b72b | 33 | |
1da177e4 LT |
34 | static void ppro_fill_in_addresses(struct op_msrs * const msrs) |
35 | { | |
cb9c448c DZ |
36 | int i; |
37 | ||
b9917028 | 38 | for (i = 0; i < num_counters; i++) { |
cb9c448c DZ |
39 | if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i)) |
40 | msrs->counters[i].addr = MSR_P6_PERFCTR0 + i; | |
41 | else | |
42 | msrs->counters[i].addr = 0; | |
43 | } | |
8b45b72b | 44 | |
b9917028 | 45 | for (i = 0; i < num_counters; i++) { |
cb9c448c DZ |
46 | if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) |
47 | msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i; | |
48 | else | |
49 | msrs->controls[i].addr = 0; | |
50 | } | |
1da177e4 LT |
51 | } |
52 | ||
53 | ||
/*
 * Program the reserved control/counter MSRs for a profiling session:
 * lazily allocate reset_value, detect the real counter width on arch
 * perfmon CPUs, then clear -> park -> enable each counter in that order
 * so no stale state can raise a spurious overflow NMI.
 */
static void ppro_setup_ctrs(struct op_msrs const * const msrs)
{
	unsigned int low, high;
	int i;

	/* Lazy allocation; GFP_ATOMIC because the caller may not sleep.
	 * On failure we simply bail — ppro_start/ppro_stop check for a
	 * NULL reset_value and do nothing. */
	if (!reset_value) {
		reset_value = kmalloc(sizeof(reset_value[0]) * num_counters,
					GFP_ATOMIC);
		if (!reset_value)
			return;
	}

	/* Architectural perfmon advertises the true counter width in
	 * CPUID 0xA; prefer it over the 32-bit P6 default. */
	if (cpu_has_arch_perfmon) {
		union cpuid10_eax eax;
		eax.full = cpuid_eax(0xa);

		/*
		 * For Core2 (family 6, model 15), don't reset the
		 * counter width:
		 */
		if (!(eax.split.version_id == 0 &&
			current_cpu_data.x86 == 6 &&
			current_cpu_data.x86_model == 15)) {

			if (counter_width < eax.split.bit_width)
				counter_width = eax.split.bit_width;
		}
	}

	/* clear all counters */
	for (i = 0 ; i < num_counters; ++i) {
		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
			continue;	/* reservation failed; MSR not ours */
		rdmsr(msrs->controls[i].addr, low, high);
		CTRL_CLEAR(low);
		wrmsr(msrs->controls[i].addr, low, high);
	}

	/* avoid a false detection of ctr overflows in NMI handler */
	for (i = 0; i < num_counters; ++i) {
		if (unlikely(!CTR_IS_RESERVED(msrs, i)))
			continue;
		/* -1 keeps the top bit set, so CTR_OVERFLOWED() is false */
		wrmsrl(msrs->counters[i].addr, -1LL);
	}

	/* enable active counters */
	for (i = 0; i < num_counters; ++i) {
		if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
			reset_value[i] = counter_config[i].count;

			/* Counter counts up from -count toward overflow. */
			wrmsrl(msrs->counters[i].addr, -reset_value[i]);

			rdmsr(msrs->controls[i].addr, low, high);
			CTRL_CLEAR(low);
			CTRL_SET_ENABLE(low);
			CTRL_SET_USR(low, counter_config[i].user);
			CTRL_SET_KERN(low, counter_config[i].kernel);
			CTRL_SET_UM(low, counter_config[i].unit_mask);
			CTRL_SET_EVENT(low, counter_config[i].event);
			wrmsr(msrs->controls[i].addr, low, high);
		} else {
			/* 0 marks the counter inactive for the NMI path */
			reset_value[i] = 0;
		}
	}
}
119 | ||
8b45b72b | 120 | |
1da177e4 LT |
121 | static int ppro_check_ctrs(struct pt_regs * const regs, |
122 | struct op_msrs const * const msrs) | |
123 | { | |
7c64ade5 | 124 | u64 val; |
1da177e4 | 125 | int i; |
8b45b72b | 126 | |
b9917028 | 127 | for (i = 0 ; i < num_counters; ++i) { |
cb9c448c DZ |
128 | if (!reset_value[i]) |
129 | continue; | |
7c64ade5 AK |
130 | rdmsrl(msrs->counters[i].addr, val); |
131 | if (CTR_OVERFLOWED(val)) { | |
1da177e4 | 132 | oprofile_add_sample(regs, i); |
b9917028 | 133 | wrmsrl(msrs->counters[i].addr, -reset_value[i]); |
1da177e4 LT |
134 | } |
135 | } | |
136 | ||
137 | /* Only P6 based Pentium M need to re-unmask the apic vector but it | |
138 | * doesn't hurt other P6 variant */ | |
139 | apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED); | |
140 | ||
141 | /* We can't work out if we really handled an interrupt. We | |
142 | * might have caught a *second* counter just after overflowing | |
143 | * the interrupt for this counter then arrives | |
144 | * and we don't find a counter that's overflowed, so we | |
145 | * would return 0 and get dazed + confused. Instead we always | |
146 | * assume we found an overflow. This sucks. | |
147 | */ | |
148 | return 1; | |
149 | } | |
150 | ||
8b45b72b | 151 | |
1da177e4 LT |
152 | static void ppro_start(struct op_msrs const * const msrs) |
153 | { | |
8b45b72b | 154 | unsigned int low, high; |
6b77df08 | 155 | int i; |
cb9c448c | 156 | |
9ea84ad7 ED |
157 | if (!reset_value) |
158 | return; | |
b9917028 | 159 | for (i = 0; i < num_counters; ++i) { |
6b77df08 | 160 | if (reset_value[i]) { |
74c9a5c3 | 161 | rdmsr(msrs->controls[i].addr, low, high); |
6b77df08 | 162 | CTRL_SET_ACTIVE(low); |
74c9a5c3 | 163 | wrmsr(msrs->controls[i].addr, low, high); |
6b77df08 | 164 | } |
cb9c448c | 165 | } |
1da177e4 LT |
166 | } |
167 | ||
168 | ||
169 | static void ppro_stop(struct op_msrs const * const msrs) | |
170 | { | |
8b45b72b | 171 | unsigned int low, high; |
6b77df08 | 172 | int i; |
cb9c448c | 173 | |
9ea84ad7 ED |
174 | if (!reset_value) |
175 | return; | |
b9917028 | 176 | for (i = 0; i < num_counters; ++i) { |
6b77df08 AS |
177 | if (!reset_value[i]) |
178 | continue; | |
74c9a5c3 | 179 | rdmsr(msrs->controls[i].addr, low, high); |
cb9c448c | 180 | CTRL_SET_INACTIVE(low); |
74c9a5c3 | 181 | wrmsr(msrs->controls[i].addr, low, high); |
cb9c448c DZ |
182 | } |
183 | } | |
184 | ||
185 | static void ppro_shutdown(struct op_msrs const * const msrs) | |
186 | { | |
187 | int i; | |
188 | ||
b9917028 | 189 | for (i = 0 ; i < num_counters ; ++i) { |
8b45b72b | 190 | if (CTR_IS_RESERVED(msrs, i)) |
cb9c448c DZ |
191 | release_perfctr_nmi(MSR_P6_PERFCTR0 + i); |
192 | } | |
b9917028 | 193 | for (i = 0 ; i < num_counters ; ++i) { |
8b45b72b | 194 | if (CTRL_IS_RESERVED(msrs, i)) |
cb9c448c DZ |
195 | release_evntsel_nmi(MSR_P6_EVNTSEL0 + i); |
196 | } | |
b9917028 AK |
197 | if (reset_value) { |
198 | kfree(reset_value); | |
199 | reset_value = NULL; | |
200 | } | |
1da177e4 LT |
201 | } |
202 | ||
203 | ||
849620fa RR |
/*
 * Classic P6-family model: a fixed pair of counters/controls.  All
 * handlers are shared with the architectural-perfmon model.
 */
struct op_x86_model_spec const op_ppro_spec = {
	.num_counters = 2,
	.num_controls = 2,
	.fill_in_addresses = &ppro_fill_in_addresses,
	.setup_ctrs = &ppro_setup_ctrs,
	.check_ctrs = &ppro_check_ctrs,
	.start = &ppro_start,
	.stop = &ppro_stop,
	.shutdown = &ppro_shutdown
};
214 | ||
215 | /* | |
216 | * Architectural performance monitoring. | |
217 | * | |
218 | * Newer Intel CPUs (Core1+) have support for architectural | |
219 | * events described in CPUID 0xA. See the IA32 SDM Vol3b.18 for details. | |
220 | * The advantage of this is that it can be done without knowing about | |
221 | * the specific CPU. | |
222 | */ | |
223 | ||
e419294e | 224 | static void arch_perfmon_setup_counters(void) |
b9917028 AK |
225 | { |
226 | union cpuid10_eax eax; | |
227 | ||
228 | eax.full = cpuid_eax(0xa); | |
229 | ||
230 | /* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */ | |
231 | if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 && | |
232 | current_cpu_data.x86_model == 15) { | |
233 | eax.split.version_id = 2; | |
234 | eax.split.num_counters = 2; | |
235 | eax.split.bit_width = 40; | |
236 | } | |
237 | ||
238 | num_counters = eax.split.num_counters; | |
239 | ||
240 | op_arch_perfmon_spec.num_counters = num_counters; | |
241 | op_arch_perfmon_spec.num_controls = num_counters; | |
242 | } | |
243 | ||
e419294e RR |
/*
 * Model init hook: counter counts come from CPUID, not from the ops
 * table, so the oprofile_operations argument is unused.  Returns 0.
 */
static int arch_perfmon_init(struct oprofile_operations *ignore)
{
	arch_perfmon_setup_counters();

	return 0;
}
249 | ||
/* Deliberately non-const: num_counters/num_controls are patched at init
 * time by arch_perfmon_setup_counters(). */
struct op_x86_model_spec op_arch_perfmon_spec = {
	.init = &arch_perfmon_init,
	/* num_counters/num_controls filled in at runtime */
	.fill_in_addresses = &ppro_fill_in_addresses,
	/* user space does the cpuid check for available events */
	.setup_ctrs = &ppro_setup_ctrs,
	.check_ctrs = &ppro_check_ctrs,
	.start = &ppro_start,
	.stop = &ppro_stop,
	.shutdown = &ppro_shutdown
};