/*
 * check TSC synchronization.
 *
 * Copyright (C) 2006, Red Hat, Inc., Ingo Molnar
 *
 * We check whether all boot CPUs have their TSC's synchronized,
 * print a warning if not and turn off the TSC clock-source.
 *
 * The warp-check is point-to-point between two CPUs, the CPU
 * initiating the bootup is the 'source CPU', the freshly booting
 * CPU is the 'target CPU'.
 *
 * Only two CPUs may participate - they can enter in any order.
 * ( The serial nature of the boot logic and the CPU hotplug lock
 *   protects against more than 2 CPUs entering this code. )
 */
#include <linux/topology.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <asm/tsc.h>

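/*
 * Per-CPU bookkeeping for the TSC_ADJUST MSR. Per the Intel SDM, writing
 * a delta to this MSR shifts the TSC by the same delta, so the TSC can
 * be moved without being stopped.
 *
 * @bootval:	TSC_ADJUST value found at boot (set by the firmware)
 * @adjusted:	TSC_ADJUST value this kernel expects to be in effect
 * @nextcheck:	jiffies stamp to rate limit the periodic verification
 * @warned:	one-shot flag to avoid flooding the log
 */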
struct tsc_adjust {
        s64             bootval;
        s64             adjusted;
        unsigned long   nextcheck;
        bool            warned;
};

static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);

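/*
 * Verify that the TSC_ADJUST MSR still contains the value the kernel
 * expects, and restore it if something (e.g. BIOS or SMM code) changed
 * it behind the kernel's back. @resume bypasses the rate limit and
 * forces the warning to be emitted again, e.g. after a suspend/resume
 * cycle.
 */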
void tsc_verify_tsc_adjust(bool resume)
{
        struct tsc_adjust *adj = this_cpu_ptr(&tsc_adjust);
        s64 curval;

        if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
                return;

        /* Rate limit the MSR check */
        if (!resume && time_before(jiffies, adj->nextcheck))
                return;

        adj->nextcheck = jiffies + HZ;

        rdmsrl(MSR_IA32_TSC_ADJUST, curval);
        if (adj->adjusted == curval)
                return;

        /* Restore the original value */
        wrmsrl(MSR_IA32_TSC_ADJUST, adj->adjusted);

        if (!adj->warned || resume) {
                pr_warn(FW_BUG "TSC ADJUST differs: CPU%u %lld --> %lld. Restoring\n",
                        smp_processor_id(), adj->adjusted, curval);
                adj->warned = true;
        }
}

static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
                                   unsigned int cpu, bool bootcpu)
{
        /*
         * The first online CPU in a package stores the boot value in the
         * adjustment value. This value might change later via the sync
         * mechanism. If that fails we can still yell about boot values not
         * being consistent.
         *
         * On the boot CPU we just force-set the ADJUST value to 0 if it's
         * non-zero. We don't do that on non-boot CPUs because physical
         * hotplug should have set the ADJUST register to a value > 0, so
         * the TSC is in sync with the already running CPUs.
         */
        if (bootcpu && bootval != 0) {
                pr_warn(FW_BUG "TSC ADJUST: CPU%u: %lld force to 0\n", cpu,
                        bootval);
                wrmsrl(MSR_IA32_TSC_ADJUST, 0);
                bootval = 0;
        }
        cur->adjusted = bootval;
}

#ifndef CONFIG_SMP
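/*
 * On UP there is no second CPU to synchronize with; just store and
 * sanitize the boot value of the MSR.
 */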
bool __init tsc_store_and_check_tsc_adjust(bool bootcpu)
{
        struct tsc_adjust *cur = this_cpu_ptr(&tsc_adjust);
        s64 bootval;

        if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
                return false;

        rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
        cur->bootval = bootval;
        cur->nextcheck = jiffies + HZ;
        tsc_sanitize_first_cpu(cur, bootval, smp_processor_id(), bootcpu);
        return false;
}

#else /* !CONFIG_SMP */

/*
 * Store and check the TSC ADJUST MSR if available
 */
bool tsc_store_and_check_tsc_adjust(bool bootcpu)
{
        struct tsc_adjust *ref, *cur = this_cpu_ptr(&tsc_adjust);
        unsigned int refcpu, cpu = smp_processor_id();
        struct cpumask *mask;
        s64 bootval;

        if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
                return false;

        rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
        cur->bootval = bootval;
        cur->nextcheck = jiffies + HZ;
        cur->warned = false;

        /*
         * Check whether this CPU is the first in a package to come up. In
         * this case do not check the boot value against another package,
         * because the new package might have been physically hotplugged,
         * in which case TSC_ADJUST is expected to differ. When called on
         * the boot CPU topology_core_cpumask() might not be available yet.
         */
        mask = topology_core_cpumask(cpu);
        refcpu = mask ? cpumask_any_but(mask, cpu) : nr_cpu_ids;

        if (refcpu >= nr_cpu_ids) {
                tsc_sanitize_first_cpu(cur, bootval, cpu, bootcpu);
                return false;
        }

        ref = per_cpu_ptr(&tsc_adjust, refcpu);
        /*
         * Compare the boot value and complain if it differs in the
         * package.
         */
        if (bootval != ref->bootval) {
                pr_warn(FW_BUG "TSC ADJUST differs: Reference CPU%u: %lld CPU%u: %lld\n",
                        refcpu, ref->bootval, cpu, bootval);
        }
        /*
         * The TSC_ADJUST values in a package must be the same. If the boot
         * value on this newly upcoming CPU differs from the adjustment
         * value of the already online CPU in this package, set it to that
         * adjusted value.
         */
        if (bootval != ref->adjusted) {
                pr_warn("TSC ADJUST synchronize: Reference CPU%u: %lld CPU%u: %lld\n",
                        refcpu, ref->adjusted, cpu, bootval);
                cur->adjusted = ref->adjusted;
                wrmsrl(MSR_IA32_TSC_ADJUST, ref->adjusted);
        }
        /*
         * The TSCs are now forced to be in sync in this package. Skip the
         * sync test:
         */
        return true;
}

/*
 * Entry/exit counters that make sure that both CPUs
 * run the measurement code at once:
 */
static atomic_t start_count;
static atomic_t stop_count;
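/*
 * skip_test is raised by the target CPU when TSC_ADJUST handling (or a
 * reliable TSC) makes the warp test unnecessary; test_runs bounds how
 * many measurement rounds the CPU pair may attempt.
 */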
static atomic_t skip_test;
static atomic_t test_runs;

/*
 * We use a raw spinlock in this exceptional case, because
 * we want to have the fastest, inlined, non-debug version
 * of a critical section, to be able to prove TSC time-warps:
 */
static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;

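/* Warp bookkeeping shared by both CPUs, protected by sync_lock: */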
static cycles_t last_tsc;
static cycles_t max_warp;
static int nr_warps;
static int random_warps;

/*
 * TSC-warp measurement loop running on both CPUs. This is not called
 * if there is no TSC.
 */
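/*
 * Both CPUs funnel their TSC reads through sync_lock, so the values
 * stored in last_tsc form a single, globally ordered sequence. If a CPU
 * reads a TSC value smaller than the one recorded before it (possibly
 * by the other CPU), the TSCs have warped.
 */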
static cycles_t check_tsc_warp(unsigned int timeout)
{
        cycles_t start, now, prev, end, cur_max_warp = 0;
        int i, cur_warps = 0;

        start = rdtsc_ordered();
        /*
         * The measurement runs for 'timeout' msecs:
         */
        end = start + (cycles_t) tsc_khz * timeout;
        now = start;

        for (i = 0; ; i++) {
                /*
                 * We take the global lock, measure TSC, save the
                 * previous TSC that was measured (possibly on
                 * another CPU) and update the previous TSC timestamp.
                 */
                arch_spin_lock(&sync_lock);
                prev = last_tsc;
                now = rdtsc_ordered();
                last_tsc = now;
                arch_spin_unlock(&sync_lock);

                /*
                 * Be nice every now and then (and also check whether
                 * the measurement is done [we also insert a 10 million
                 * loops safety exit, so we don't lock up in case the
                 * TSC readout is totally broken]):
                 */
                if (unlikely(!(i & 7))) {
                        if (now > end || i > 10000000)
                                break;
                        cpu_relax();
                        touch_nmi_watchdog();
                }
                /*
                 * Outside the critical section we can now see whether
                 * we saw a time-warp of the TSC going backwards:
                 */
                if (unlikely(prev > now)) {
                        arch_spin_lock(&sync_lock);
                        max_warp = max(max_warp, prev - now);
                        cur_max_warp = max_warp;
                        /*
                         * Check whether this bounces back and forth. Only
                         * one CPU should observe time going backwards.
                         */
                        if (cur_warps != nr_warps)
                                random_warps++;
                        nr_warps++;
                        cur_warps = nr_warps;
                        arch_spin_unlock(&sync_lock);
                }
        }
        WARN(!(now-start),
             "Warning: zero tsc calibration delta: %Ld [max: %Ld]\n",
             now-start, end-start);
        return cur_max_warp;
}

/*
 * If the target CPU coming online doesn't have any of its core-siblings
 * online, a timeout of 20msec will be used for the TSC-warp measurement
 * loop. Otherwise a smaller timeout of 2msec will be used, as we have some
 * information about this socket already (and this information grows as we
 * have more and more logical-siblings in that socket).
 *
 * Ideally we should be able to skip the TSC sync check on the other
 * core-siblings if the first logical CPU in a socket passed the sync test.
 * But as the TSC is per-logical CPU and can potentially be modified wrongly
 * by the BIOS, a TSC sync test of shorter duration should still be able
 * to catch such errors. It also catches the condition where all the
 * cores in the socket don't get reset at the same time.
 */
static inline unsigned int loop_timeout(int cpu)
{
        return (cpumask_weight(topology_core_cpumask(cpu)) > 1) ? 2 : 20;
}

/*
 * The source CPU calls into this - it waits for the freshly booted
 * target CPU to arrive and then starts the measurement:
 */
void check_tsc_sync_source(int cpu)
{
        int cpus = 2;

        /*
         * No need to check if we already know that the TSC is not
         * synchronized or if we have no TSC.
         */
        if (unsynchronized_tsc())
                return;

        /*
         * Set the maximum number of test runs to
         *  1 if the CPU does not provide the TSC_ADJUST MSR
         *  3 if the MSR is available, so the target can try to adjust
         */
        if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
                atomic_set(&test_runs, 1);
        else
                atomic_set(&test_runs, 3);
retry:
        /*
         * Wait for the target to start or to skip the test:
         */
        while (atomic_read(&start_count) != cpus - 1) {
                if (atomic_read(&skip_test) > 0) {
                        atomic_set(&skip_test, 0);
                        return;
                }
                cpu_relax();
        }

        /*
         * Trigger the target to continue into the measurement too:
         */
        atomic_inc(&start_count);

        check_tsc_warp(loop_timeout(cpu));

        while (atomic_read(&stop_count) != cpus - 1)
                cpu_relax();

        /*
         * If the test was successful set the number of runs to zero and
         * stop. If not, decrement the number of runs and check if we can
         * retry. In case of random warps no retry is attempted.
         */
        if (!nr_warps) {
                atomic_set(&test_runs, 0);

                pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed\n",
                        smp_processor_id(), cpu);

        } else if (atomic_dec_and_test(&test_runs) || random_warps) {
                /* Force it to 0 if random warps brought us here */
                atomic_set(&test_runs, 0);

                pr_warning("TSC synchronization [CPU#%d -> CPU#%d]:\n",
                           smp_processor_id(), cpu);
                pr_warning("Measured %Ld cycles TSC warp between CPUs, turning off TSC clock.\n",
                           max_warp);
                if (random_warps)
                        pr_warning("TSC warped randomly between CPUs\n");
                mark_tsc_unstable("check_tsc_sync_source failed");
        }

        /*
         * Reset it - just in case we boot another CPU later:
         */
        atomic_set(&start_count, 0);
        random_warps = 0;
        nr_warps = 0;
        max_warp = 0;
        last_tsc = 0;

        /*
         * Let the target continue with the bootup:
         */
        atomic_inc(&stop_count);

        /*
         * Retry, if there is a chance to do so.
         */
        if (atomic_read(&test_runs) > 0)
                goto retry;
}

/*
 * Freshly booted CPUs call into this:
 */
void check_tsc_sync_target(void)
{
        struct tsc_adjust *cur = this_cpu_ptr(&tsc_adjust);
        unsigned int cpu = smp_processor_id();
        cycles_t cur_max_warp, gbl_max_warp;
        int cpus = 2;

        /* Also aborts if there is no TSC. */
        if (unsynchronized_tsc())
                return;

        /*
         * Store, verify and sanitize the TSC adjust register. If
         * successful skip the test.
         *
         * The test is also skipped when the TSC is marked reliable. This
         * is true for SoCs which have no fallback clocksource. On these
         * SoCs the TSC is frequency synchronized, but the TSC ADJUST
         * register might still have been wrecked by the BIOS.
         */
        if (tsc_store_and_check_tsc_adjust(false) || tsc_clocksource_reliable) {
                atomic_inc(&skip_test);
                return;
        }

retry:
        /*
         * Register this CPU's participation and wait for the
         * source CPU to start the measurement:
         */
        atomic_inc(&start_count);
        while (atomic_read(&start_count) != cpus)
                cpu_relax();

        cur_max_warp = check_tsc_warp(loop_timeout(cpu));

        /*
         * Store the maximum observed warp value for a potential retry:
         */
        gbl_max_warp = max_warp;

        /*
         * Ok, we are done:
         */
        atomic_inc(&stop_count);

        /*
         * Wait for the source CPU to print stuff:
         */
        while (atomic_read(&stop_count) != cpus)
                cpu_relax();

        /*
         * Reset it for the next sync test:
         */
        atomic_set(&stop_count, 0);

        /*
         * Check the number of remaining test runs. If not zero, the test
         * failed and a retry with adjusted TSC is possible. If zero the
         * test was either successful or failed terminally.
         */
        if (!atomic_read(&test_runs))
                return;

        /*
         * If the warp value of this CPU is 0, then the other CPU
         * observed time going backwards, so this TSC was ahead and
         * needs to move backwards.
         */
        if (!cur_max_warp)
                cur_max_warp = -gbl_max_warp;

        /*
         * Add the result to the previous adjustment value.
         *
         * The adjustment value is slightly off by the overhead of the
         * sync mechanism (observed values are ~200 TSC cycles), but this
         * really depends on CPU, node distance and frequency. So
         * compensating for this is hard to get right. Experiments show
         * that the warp is no longer detectable when the observed warp
         * value is used. In the worst case the adjustment needs to go
         * through a 3rd run for fine tuning.
         */
        cur->adjusted += cur_max_warp;

        pr_warn("TSC ADJUST compensate: CPU%u observed %lld warp. Adjust: %lld\n",
                cpu, cur_max_warp, cur->adjusted);

        wrmsrl(MSR_IA32_TSC_ADJUST, cur->adjusted);
        goto retry;
}

#endif /* CONFIG_SMP */