#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/acpi_pmtmr.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/percpu.h>
#include <linux/timex.h>
#include <linux/static_key.h>

#include <asm/hpet.h>
#include <asm/timer.h>
#include <asm/vgtod.h>
#include <asm/time.h>
#include <asm/delay.h>
#include <asm/hypervisor.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
#include <asm/geode.h>

unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);

unsigned int __read_mostly tsc_khz;
EXPORT_SYMBOL(tsc_khz);

/*
 * TSC can be unstable due to cpufreq or due to unsynced TSCs
 */
static int __read_mostly tsc_unstable;

/* native_sched_clock() is called before tsc_init(), so
   we must start with the TSC soft disabled to prevent
   erroneous rdtsc usage on !boot_cpu_has(X86_FEATURE_TSC) processors */
static int __read_mostly tsc_disabled = -1;

static DEFINE_STATIC_KEY_FALSE(__use_tsc);

int tsc_clocksource_reliable;

static u32 art_to_tsc_numerator;
static u32 art_to_tsc_denominator;
static u64 art_to_tsc_offset;
struct clocksource *art_related_clocksource;

/*
 * Use a ring-buffer like data structure, where a writer advances the head by
 * writing a new data entry and a reader advances the tail when it observes a
 * new entry.
 *
 * Writers are made to wait on readers until there's space to write a new
 * entry.
 *
 * This means that we can always use an {offset, mul} pair to compute a ns
 * value that is 'roughly' in the right direction, even if we're writing a new
 * {offset, mul} pair during the clock read.
 *
 * The down-side is that we can no longer guarantee strict monotonicity
 * (assuming the TSC was monotonic to begin with), because while we compute
 * the intersection point of the two clock slopes and make sure the time is
 * continuous at the point of switching, we can no longer guarantee a reader
 * is strictly before or after the switch point.
 *
 * It does mean a reader no longer needs to disable IRQs in order to avoid
 * CPU-Freq updates messing with its times, and similarly an NMI reader will
 * no longer run the risk of hitting half-written state.
 */

struct cyc2ns {
	struct cyc2ns_data data[2];	/*  0 + 2*24 = 48 */
	struct cyc2ns_data *head;	/* 48 + 8    = 56 */
	struct cyc2ns_data *tail;	/* 56 + 8    = 64 */
}; /* exactly fits one cacheline */

static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);

struct cyc2ns_data *cyc2ns_read_begin(void)
{
	struct cyc2ns_data *head;

	preempt_disable();

	head = this_cpu_read(cyc2ns.head);
	/*
	 * Ensure we observe the entry when we observe the pointer to it.
	 * Matches the wmb from cyc2ns_write_end().
	 */
	smp_read_barrier_depends();
	head->__count++;
	barrier();

	return head;
}

void cyc2ns_read_end(struct cyc2ns_data *head)
{
	barrier();
	/*
	 * If we're the outermost nested read, update the tail pointer
	 * when we're done. This notifies possible pending writers
	 * that we've observed the head pointer and that the other
	 * entry is now free.
	 */
	if (!--head->__count) {
		/*
		 * x86-TSO does not reorder writes with older reads;
		 * therefore once this write becomes visible to another
		 * cpu, we must be finished reading the cyc2ns_data.
		 *
		 * Matches with cyc2ns_write_begin().
		 */
		this_cpu_write(cyc2ns.tail, head);
	}
	preempt_enable();
}

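/*
 * Illustrative read-side usage -- a sketch of how cycles_2_ns() below
 * consumes this pair, not an additional kernel API:
 *
 *	struct cyc2ns_data *data = cyc2ns_read_begin();
 *
 *	ns = data->cyc2ns_offset +
 *	     mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
 *
 *	cyc2ns_read_end(data);
 */
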
/*
 * Begin writing a new @data entry for @cpu.
 *
 * Assumes some sort of write side lock; currently 'provided' by the assumption
 * that cpufreq will call its notifiers sequentially.
 */
static struct cyc2ns_data *cyc2ns_write_begin(int cpu)
{
	struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
	struct cyc2ns_data *data = c2n->data;

	if (data == c2n->head)
		data++;

	/* XXX send an IPI to @cpu in order to guarantee a read? */

	/*
	 * When we observe the tail write from cyc2ns_read_end(),
	 * the cpu must be done with that entry and it's safe
	 * to start writing to it.
	 */
	while (c2n->tail == data)
		cpu_relax();

	return data;
}

static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
{
	struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);

	/*
	 * Ensure the @data writes are visible before we publish the
	 * entry. Matches the data-dependency in cyc2ns_read_begin().
	 */
	smp_wmb();

	ACCESS_ONCE(c2n->head) = data;
}

/*
 * Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 *  basic equation:
 *              ns = cycles / (freq / ns_per_sec)
 *              ns = cycles * (ns_per_sec / freq)
 *              ns = cycles * (10^9 / (cpu_khz * 10^3))
 *              ns = cycles * (10^6 / cpu_khz)
 *
 *      Then we use scaling math (suggested by george@mvista.com) to get:
 *              ns = cycles * (10^6 * SC / cpu_khz) / SC
 *              ns = cycles * cyc2ns_scale / SC
 *
 *      And since SC is a constant power of two, we can convert the div
 *  into a shift. The larger SC is, the more accurate the conversion, but
 *  cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication
 *  (64-bit result) can be used.
 *
 *  We use a khz divisor instead of mhz to keep better precision.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *                      -johnstul@us.ibm.com "math is hard, lets go shopping!"
 */

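/*
 * Worked example with illustrative numbers: for a 2 GHz TSC, cpu_khz =
 * 2000000. Choosing SC = 2^10 gives cyc2ns_scale = 10^6 * 1024 / 2000000
 * = 512, so:
 *
 *	ns = (cycles * 512) >> 10  ==  cycles / 2
 *
 * i.e. two cycles of a 2 GHz clock per nanosecond, as expected.
 */
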
static void cyc2ns_data_init(struct cyc2ns_data *data)
{
	data->cyc2ns_mul = 0;
	data->cyc2ns_shift = 0;
	data->cyc2ns_offset = 0;
	data->__count = 0;
}

static void cyc2ns_init(int cpu)
{
	struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);

	cyc2ns_data_init(&c2n->data[0]);
	cyc2ns_data_init(&c2n->data[1]);

	c2n->head = c2n->data;
	c2n->tail = c2n->data;
}

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	struct cyc2ns_data *data, *tail;
	unsigned long long ns;

	/*
	 * See cyc2ns_read_*() for details; replicated in order to avoid
	 * an extra few instructions that came with the abstraction.
	 * Notably, it allows us to only do the __count and tail update
	 * dance when it's actually needed.
	 */

	preempt_disable_notrace();
	data = this_cpu_read(cyc2ns.head);
	tail = this_cpu_read(cyc2ns.tail);

	if (likely(data == tail)) {
		ns = data->cyc2ns_offset;
		ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
	} else {
		data->__count++;

		barrier();

		ns = data->cyc2ns_offset;
		ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);

		barrier();

		if (!--data->__count)
			this_cpu_write(cyc2ns.tail, data);
	}
	preempt_enable_notrace();

	return ns;
}

static void set_cyc2ns_scale(unsigned long khz, int cpu)
{
	unsigned long long tsc_now, ns_now;
	struct cyc2ns_data *data;
	unsigned long flags;

	local_irq_save(flags);
	sched_clock_idle_sleep_event();

	if (!khz)
		goto done;

	data = cyc2ns_write_begin(cpu);

	tsc_now = rdtsc();
	ns_now = cycles_2_ns(tsc_now);

	/*
	 * Compute a new multiplier as per the above comment and ensure our
	 * time function is continuous; see the comment near struct
	 * cyc2ns_data.
	 */
	clocks_calc_mult_shift(&data->cyc2ns_mul, &data->cyc2ns_shift, khz,
			       NSEC_PER_MSEC, 0);

	/*
	 * cyc2ns_shift is exported via arch_perf_update_userpage() where it is
	 * not expected to be greater than 31 due to the original published
	 * conversion algorithm shifting a 32-bit value (now specifies a 64-bit
	 * value) - refer to the perf_event_mmap_page documentation in
	 * perf_event.h.
	 */
	if (data->cyc2ns_shift == 32) {
		data->cyc2ns_shift = 31;
		data->cyc2ns_mul >>= 1;
	}

	data->cyc2ns_offset = ns_now -
		mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, data->cyc2ns_shift);

	cyc2ns_write_end(cpu, data);

done:
	sched_clock_idle_wakeup_event(0);
	local_irq_restore(flags);
}
/*
 * Scheduler clock - returns current time in nanosec units.
 */
u64 native_sched_clock(void)
{
	if (static_branch_likely(&__use_tsc)) {
		u64 tsc_now = rdtsc();

		/* return the value in ns */
		return cycles_2_ns(tsc_now);
	}

	/*
	 * Fall back to jiffies if there's no TSC available:
	 * ( But note that we still use it if the TSC is marked
	 *   unstable. We do this because unlike Time Of Day,
	 *   the scheduler clock tolerates small errors and it's
	 *   very important for it to be as fast as the platform
	 *   can achieve it. )
	 */

	/* No locking but a rare wrong value is not a big deal: */
	return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
}

/*
 * Generate a sched_clock if you already have a TSC value.
 */
u64 native_sched_clock_from_tsc(u64 tsc)
{
	return cycles_2_ns(tsc);
}

/* We need to define a real function for sched_clock, to override the
   weak default version */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}
#else
unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock")));
#endif

int check_tsc_unstable(void)
{
	return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
	pr_warn("Kernel compiled with CONFIG_X86_TSC, cannot disable TSC completely\n");
	tsc_disabled = 1;
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
int __init notsc_setup(char *str)
{
	setup_clear_cpu_cap(X86_FEATURE_TSC);
	return 1;
}
#endif

__setup("notsc", notsc_setup);

static int no_sched_irq_time;

static int __init tsc_setup(char *str)
{
	if (!strcmp(str, "reliable"))
		tsc_clocksource_reliable = 1;
	if (!strncmp(str, "noirqtime", 9))
		no_sched_irq_time = 1;
	return 1;
}

__setup("tsc=", tsc_setup);

#define MAX_RETRIES	5
#define SMI_TRESHOLD	50000

/*
 * Read TSC and the reference counters. Take care of SMI disturbance
 */
static u64 tsc_read_refs(u64 *p, int hpet)
{
	u64 t1, t2;
	int i;

	for (i = 0; i < MAX_RETRIES; i++) {
		t1 = get_cycles();
		if (hpet)
			*p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
		else
			*p = acpi_pm_read_early();
		t2 = get_cycles();
		if ((t2 - t1) < SMI_TRESHOLD)
			return t2;
	}
	return ULLONG_MAX;
}

/*
 * Calculate the TSC frequency from HPET reference
 */
static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
{
	u64 tmp;

	if (hpet2 < hpet1)
		hpet2 += 0x100000000ULL;
	hpet2 -= hpet1;
	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
	do_div(tmp, 1000000);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}

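/*
 * Illustrative numbers: HPET_PERIOD is in femtoseconds per tick, so
 * "tmp" above ends up as the elapsed time in ns. The callers pass
 * deltatsc pre-multiplied by 10^6, so e.g. 10^8 TSC cycles over 50ms
 * (5*10^7 ns) of HPET time yield 10^14 / (5*10^7) = 2*10^6 kHz, i.e.
 * a 2 GHz TSC.
 */
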
/*
 * Calculate the TSC frequency from PMTimer reference
 */
static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
{
	u64 tmp;

	if (!pm1 && !pm2)
		return ULONG_MAX;

	if (pm2 < pm1)
		pm2 += (u64)ACPI_PM_OVRRUN;
	pm2 -= pm1;
	tmp = pm2 * 1000000000LL;
	do_div(tmp, PMTMR_TICKS_PER_SEC);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}

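/*
 * calc_pmtimer_ref() has the same structure as calc_hpet_ref(): the
 * ACPI PM timer runs at PMTMR_TICKS_PER_SEC (3.579545 MHz), so "tmp"
 * again becomes the elapsed time in ns and the pre-scaled
 * deltatsc / tmp is the TSC rate in kHz.
 */
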
#define CAL_MS		10
#define CAL_LATCH	(PIT_TICK_RATE / (1000 / CAL_MS))
#define CAL_PIT_LOOPS	1000

#define CAL2_MS		50
#define CAL2_LATCH	(PIT_TICK_RATE / (1000 / CAL2_MS))
#define CAL2_PIT_LOOPS	5000

/*
 * Try to calibrate the TSC against the Programmable
 * Interrupt Timer and return the frequency of the TSC
 * in kHz.
 *
 * Return ULONG_MAX on failure to calibrate.
 */
static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
{
	u64 tsc, t1, t2, delta;
	unsigned long tscmin, tscmax;
	int pitcnt;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Setup CTC channel 2 for mode 0, (interrupt on terminal
	 * count mode), binary count. Set the latch register to the
	 * requested timeout (LSB then MSB) to begin countdown.
	 */
	outb(0xb0, 0x43);
	outb(latch & 0xff, 0x42);
	outb(latch >> 8, 0x42);

	tsc = t1 = t2 = get_cycles();

	pitcnt = 0;
	tscmax = 0;
	tscmin = ULONG_MAX;
	while ((inb(0x61) & 0x20) == 0) {
		t2 = get_cycles();
		delta = t2 - tsc;
		tsc = t2;
		if ((unsigned long) delta < tscmin)
			tscmin = (unsigned int) delta;
		if ((unsigned long) delta > tscmax)
			tscmax = (unsigned int) delta;
		pitcnt++;
	}

	/*
	 * Sanity checks:
	 *
	 * If we were not able to read the PIT more than loopmin
	 * times, then we have been hit by a massive SMI.
	 *
	 * If the maximum is 10 times larger than the minimum,
	 * then we got hit by an SMI as well.
	 */
	if (pitcnt < loopmin || tscmax > 10 * tscmin)
		return ULONG_MAX;

	/* Calculate the PIT value */
	delta = t2 - t1;
	do_div(delta, ms);
	return delta;
}

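/*
 * Note, for illustration: t2 - t1 is the TSC delta over "ms"
 * milliseconds, so delta / ms is cycles per millisecond, which is the
 * TSC frequency in kHz directly -- e.g. 10^8 cycles over 50ms gives
 * 2000000 kHz (2 GHz).
 */
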
/*
 * This reads the current MSB of the PIT counter, and
 * checks if we are running on sufficiently fast and
 * non-virtualized hardware.
 *
 * Our expectations are:
 *
 *  - the PIT is running at roughly 1.19MHz
 *
 *  - each IO is going to take about 1us on real hardware,
 *    but we allow it to be much faster (by a factor of 10) or
 *    _slightly_ slower (ie we allow up to a 2us read+counter
 *    update - anything else implies an unacceptably slow CPU
 *    or PIT for the fast calibration to work).
 *
 *  - with 256 PIT ticks to read the value, we have 214us to
 *    see the same MSB (and overhead like doing a single TSC
 *    read per MSB value etc).
 *
 *  - We're doing 2 reads per loop (LSB, MSB), and we expect
 *    them each to take about a microsecond on real hardware.
 *    So we expect a count value of around 100. But we'll be
 *    generous, and accept anything over 50.
 *
 *  - if the PIT is stuck, and we see *many* more reads, we
 *    return early (and the next caller of pit_expect_msb()
 *    will then consider it a failure when they don't see the
 *    next expected value).
 *
 * These expectations mean that we know that we have seen the
 * transition from one expected value to another with a fairly
 * high accuracy, and we didn't miss any events. We can thus
 * use the TSC value at the transitions to calculate a pretty
 * good value for the TSC frequency.
 */
static inline int pit_verify_msb(unsigned char val)
{
	/* Ignore LSB */
	inb(0x42);
	return inb(0x42) == val;
}

static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
{
	int count;
	u64 tsc = 0, prev_tsc = 0;

	for (count = 0; count < 50000; count++) {
		if (!pit_verify_msb(val))
			break;
		prev_tsc = tsc;
		tsc = get_cycles();
	}
	*deltap = get_cycles() - prev_tsc;
	*tscp = tsc;

	/*
	 * We require _some_ success, but the quality control
	 * will be based on the error terms on the TSC values.
	 */
	return count > 5;
}

/*
 * How many MSB values do we want to see? We aim for
 * a maximum error rate of 500ppm (in practice the
 * real error is much smaller), but refuse to spend
 * more than 50ms on it.
 */
#define MAX_QUICK_PIT_MS 50
#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)

static unsigned long quick_pit_calibrate(void)
{
	int i;
	u64 tsc, delta;
	unsigned long d1, d2;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Counter 2, mode 0 (one-shot), binary count
	 *
	 * NOTE! Mode 2 decrements by two (and then the
	 * output is flipped each time, giving the same
	 * final output frequency as a decrement-by-one),
	 * so mode 0 is much better when looking at the
	 * individual counts.
	 */
	outb(0xb0, 0x43);

	/* Start at 0xffff */
	outb(0xff, 0x42);
	outb(0xff, 0x42);

	/*
	 * The PIT starts counting at the next edge, so we
	 * need to delay for a microsecond. The easiest way
	 * to do that is to just read back the 16-bit counter
	 * once from the PIT.
	 */
	pit_verify_msb(0);

	if (pit_expect_msb(0xff, &tsc, &d1)) {
		for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
			if (!pit_expect_msb(0xff-i, &delta, &d2))
				break;

			delta -= tsc;

			/*
			 * Extrapolate the error and fail fast if the error will
			 * never be below 500 ppm.
			 */
			if (i == 1 &&
			    d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
				return 0;

			/*
			 * Iterate until the error is less than 500 ppm
			 */
			if (d1+d2 >= delta >> 11)
				continue;

			/*
			 * Check the PIT one more time to verify that
			 * all TSC reads were stable wrt the PIT.
			 *
			 * This also guarantees serialization of the
			 * last cycle read ('d2') in pit_expect_msb.
			 */
			if (!pit_verify_msb(0xfe - i))
				break;
			goto success;
		}
	}
	pr_info("Fast TSC calibration failed\n");
	return 0;

success:
	/*
	 * Ok, if we get here, then we've seen the
	 * MSB of the PIT decrement 'i' times, and the
	 * error has shrunk to less than 500 ppm.
	 *
	 * As a result, we can depend on there not being
	 * any odd delays anywhere, and the TSC reads are
	 * reliable (within the error).
	 *
	 * kHz = ticks / time-in-seconds / 1000;
	 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
	 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
	 */
	delta *= PIT_TICK_RATE;
	do_div(delta, i*256*1000);
	pr_info("Fast TSC calibration using PIT\n");
	return delta;
}

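/*
 * Worked example (illustrative): each MSB step is 256 PIT ticks, i.e.
 * 256 / PIT_TICK_RATE ~= 214.6us. With i = 100 steps and a measured
 * delta of ~42920000 TSC cycles, the math above gives
 * 42920000 * 1193182 / (100 * 256 * 1000) ~= 2.0 * 10^6 kHz,
 * i.e. a ~2 GHz TSC.
 */
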
/**
 * native_calibrate_tsc
 * Determine TSC frequency via CPUID, else return 0.
 */
unsigned long native_calibrate_tsc(void)
{
	unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
	unsigned int crystal_khz;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (boot_cpu_data.cpuid_level < 0x15)
		return 0;

	eax_denominator = ebx_numerator = ecx_hz = edx = 0;

	/* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
	cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);

	if (ebx_numerator == 0 || eax_denominator == 0)
		return 0;

	crystal_khz = ecx_hz / 1000;

	if (crystal_khz == 0) {
		switch (boot_cpu_data.x86_model) {
		case 0x4E:	/* SKL */
		case 0x5E:	/* SKL */
			crystal_khz = 24000;	/* 24.0 MHz */
			break;
		case 0x5C:	/* BXT */
			crystal_khz = 19200;	/* 19.2 MHz */
			break;
		}
	}

	return crystal_khz * ebx_numerator / eax_denominator;
}

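/*
 * Illustrative values for the CPUID 15H math above: a part with a
 * 24 MHz crystal (crystal_khz = 24000) and a TSC/crystal ratio of
 * 292/2 would report 24000 * 292 / 2 = 3504000 kHz, i.e. a 3.5 GHz TSC.
 */
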
static unsigned long cpu_khz_from_cpuid(void)
{
	unsigned int eax_base_mhz, ebx_max_mhz, ecx_bus_mhz, edx;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (boot_cpu_data.cpuid_level < 0x16)
		return 0;

	eax_base_mhz = ebx_max_mhz = ecx_bus_mhz = edx = 0;

	cpuid(0x16, &eax_base_mhz, &ebx_max_mhz, &ecx_bus_mhz, &edx);

	return eax_base_mhz * 1000;
}

/**
 * native_calibrate_cpu - calibrate the cpu on boot
 */
unsigned long native_calibrate_cpu(void)
{
	u64 tsc1, tsc2, delta, ref1, ref2;
	unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
	unsigned long flags, latch, ms, fast_calibrate;
	int hpet = is_hpet_enabled(), i, loopmin;

	fast_calibrate = cpu_khz_from_cpuid();
	if (fast_calibrate)
		return fast_calibrate;

	fast_calibrate = cpu_khz_from_msr();
	if (fast_calibrate)
		return fast_calibrate;

	local_irq_save(flags);
	fast_calibrate = quick_pit_calibrate();
	local_irq_restore(flags);
	if (fast_calibrate)
		return fast_calibrate;

	/*
	 * Run 5 calibration loops to get the lowest frequency value
	 * (the best estimate). We use two different calibration modes
	 * here:
	 *
	 * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
	 * load a timeout of 50ms. We read the time right after we
	 * started the timer and wait until the PIT count down reaches
	 * zero. In each wait loop iteration we read the TSC and check
	 * the delta to the previous read. We keep track of the min
	 * and max values of that delta. The delta is mostly defined
	 * by the IO time of the PIT access, so we can detect when an
	 * SMI/SMM disturbance happened between the two reads. If the
	 * maximum time is significantly larger than the minimum time,
	 * then we discard the result and have another try.
	 *
	 * 2) Reference counter. If available we use the HPET or the
	 * PMTIMER as a reference to check the sanity of that value.
	 * We use separate TSC readouts and check inside of the
	 * reference read for an SMI/SMM disturbance. We discard
	 * disturbed values here as well. We do that around the PIT
	 * calibration delay loop as we have to wait for a certain
	 * amount of time anyway.
	 */

	/* Preset PIT loop values */
	latch = CAL_LATCH;
	ms = CAL_MS;
	loopmin = CAL_PIT_LOOPS;

	for (i = 0; i < 3; i++) {
		unsigned long tsc_pit_khz;

		/*
		 * Read the start value and the reference count of
		 * hpet/pmtimer when available. Then do the PIT
		 * calibration, which will take at least 50ms, and
		 * read the end value.
		 */
		local_irq_save(flags);
		tsc1 = tsc_read_refs(&ref1, hpet);
		tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
		tsc2 = tsc_read_refs(&ref2, hpet);
		local_irq_restore(flags);

		/* Pick the lowest PIT TSC calibration so far */
		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);

		/* hpet or pmtimer available ? */
		if (ref1 == ref2)
			continue;

		/* Check whether the sampling was disturbed by an SMI */
		if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
			continue;

		tsc2 = (tsc2 - tsc1) * 1000000LL;
		if (hpet)
			tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
		else
			tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);

		tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);

		/* Check the reference deviation */
		delta = ((u64) tsc_pit_min) * 100;
		do_div(delta, tsc_ref_min);

		/*
		 * If both calibration results are inside a 10% window
		 * then we can be sure that the calibration
		 * succeeded. We break out of the loop right away. We
		 * use the reference value, as it is more precise.
		 */
		if (delta >= 90 && delta <= 110) {
			pr_info("PIT calibration matches %s. %d loops\n",
				hpet ? "HPET" : "PMTIMER", i + 1);
			return tsc_ref_min;
		}

		/*
		 * Check whether PIT failed more than once. This
		 * happens in virtualized environments. We need to
		 * give the virtual PC a slightly longer timeframe for
		 * the HPET/PMTIMER to make the result precise.
		 */
		if (i == 1 && tsc_pit_min == ULONG_MAX) {
			latch = CAL2_LATCH;
			ms = CAL2_MS;
			loopmin = CAL2_PIT_LOOPS;
		}
	}

	/*
	 * Now check the results.
	 */
	if (tsc_pit_min == ULONG_MAX) {
		/* PIT gave no useful value */
		pr_warn("Unable to calibrate against PIT\n");

		/* We don't have an alternative source, disable TSC */
		if (!hpet && !ref1 && !ref2) {
			pr_notice("No reference (HPET/PMTIMER) available\n");
			return 0;
		}

		/* The alternative source failed as well, disable TSC */
		if (tsc_ref_min == ULONG_MAX) {
			pr_warn("HPET/PMTIMER calibration failed\n");
			return 0;
		}

		/* Use the alternative source */
		pr_info("using %s reference calibration\n",
			hpet ? "HPET" : "PMTIMER");

		return tsc_ref_min;
	}

	/* We don't have an alternative source, use the PIT calibration value */
	if (!hpet && !ref1 && !ref2) {
		pr_info("Using PIT calibration value\n");
		return tsc_pit_min;
	}

	/* The alternative source failed, use the PIT calibration value */
	if (tsc_ref_min == ULONG_MAX) {
		pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
		return tsc_pit_min;
	}

	/*
	 * The calibration values differ too much. In doubt, we use
	 * the PIT value as we know that there are PMTIMERs around
	 * running at double speed. At least we let the user know:
	 */
	pr_warn("PIT calibration deviates from %s: %lu %lu\n",
		hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
	pr_info("Using PIT calibration value\n");
	return tsc_pit_min;
}

int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (!boot_cpu_has(X86_FEATURE_TSC))
		return -ENODEV;

	cpu_khz = x86_platform.calibrate_cpu();
	tsc_khz = x86_platform.calibrate_tsc();
	if (tsc_khz == 0)
		tsc_khz = cpu_khz;
	else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
		cpu_khz = tsc_khz;
	cpu_data(0).loops_per_jiffy = cpufreq_scale(cpu_data(0).loops_per_jiffy,
						    cpu_khz_old, cpu_khz);

	return 0;
#else
	return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);

static unsigned long long cyc2ns_suspend;

void tsc_save_sched_clock_state(void)
{
	if (!sched_clock_stable())
		return;

	cyc2ns_suspend = sched_clock();
}

/*
 * Even on processors with invariant TSC, the TSC gets reset in some of the
 * ACPI system sleep states. And in some systems the BIOS seems to reinit the
 * TSC to an arbitrary value (still sync'd across cpus) during resume from
 * such sleep states. To cope with this, recompute the cyc2ns_offset for each
 * cpu so that sched_clock() continues from the point where it was left off
 * during suspend.
 */
void tsc_restore_sched_clock_state(void)
{
	unsigned long long offset;
	unsigned long flags;
	int cpu;

	if (!sched_clock_stable())
		return;

	local_irq_save(flags);

	/*
	 * We're coming out of suspend, there's no concurrency yet; don't
	 * bother being nice about the RCU stuff, just write to both
	 * data fields.
	 */

	this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0);
	this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0);

	offset = cyc2ns_suspend - sched_clock();

	for_each_possible_cpu(cpu) {
		per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
		per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
	}

	local_irq_restore(flags);
}

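/*
 * Sanity check on the offset math above: right after the loop,
 * sched_clock() evaluates to cycles_2_ns(rdtsc()) + offset
 * = sched_clock()-at-resume + (cyc2ns_suspend - sched_clock()-at-resume)
 * = cyc2ns_suspend, i.e. the scheduler clock picks up exactly where
 * tsc_save_sched_clock_state() captured it.
 */
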
#ifdef CONFIG_CPU_FREQ

/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
 * changes.
 *
 * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
 * not that important because current Opteron setups do not support
 * scaling on SMP anyway.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */

static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj;

	lpj = &boot_cpu_data.loops_per_jiffy;
#ifdef CONFIG_SMP
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
		lpj = &cpu_data(freq->cpu).loops_per_jiffy;
#endif

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		tsc_khz_ref = tsc_khz;
	}
	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			mark_tsc_unstable("cpufreq changes");

		set_cyc2ns_scale(tsc_khz, freq->cpu);
	}

	return 0;
}

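/*
 * For illustration: cpufreq_scale(old, ref, new) computes old * new / ref,
 * so a tsc_khz_ref of 2000000 measured at ref_freq = 2000000 kHz that is
 * throttled to freq->new = 1000000 kHz rescales tsc_khz to 1000000.
 */
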
static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call = time_cpufreq_notifier
};

static int __init cpufreq_register_tsc_scaling(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC))
		return 0;
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;
	cpufreq_register_notifier(&time_cpufreq_notifier_block,
				  CPUFREQ_TRANSITION_NOTIFIER);
	return 0;
}

core_initcall(cpufreq_register_tsc_scaling);

#endif /* CONFIG_CPU_FREQ */

#define ART_CPUID_LEAF (0x15)
#define ART_MIN_DENOMINATOR (1)


/*
 * If ART is present detect the numerator:denominator to convert to TSC
 */
static void detect_art(void)
{
	unsigned int unused[2];

	if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF)
		return;

	cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator,
	      &art_to_tsc_numerator, unused, unused+1);

	/* Don't enable ART in a VM, non-stop TSC required */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR) ||
	    !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
	    art_to_tsc_denominator < ART_MIN_DENOMINATOR)
		return;

	if (rdmsrl_safe(MSR_IA32_TSC_ADJUST, &art_to_tsc_offset))
		return;

	/* Make this sticky over multiple CPU init calls */
	setup_force_cpu_cap(X86_FEATURE_ART);
}

/* clocksource code */

static struct clocksource clocksource_tsc;

/*
 * We used to compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp. This can be observed in a
 * very small window right after one CPU updated cycle_last under
 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
 * is smaller than the cycle_last reference value due to a TSC which
 * is slightly behind. This delta is nowhere else observable, but in
 * that case it results in a forward time jump in the range of hours
 * due to the unsigned delta calculation of the time keeping core
 * code, which is necessary to support wrapping clocksources like pm
 * timer.
 *
 * This sanity check is now done in the core timekeeping code, by
 * checking the result of read_tsc() - cycle_last for being negative.
 * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
 */
static cycle_t read_tsc(struct clocksource *cs)
{
	return (cycle_t)rdtsc_ordered();
}

/*
 * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
 */
static struct clocksource clocksource_tsc = {
	.name			= "tsc",
	.rating			= 300,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_MUST_VERIFY,
	.archdata		= { .vclock_mode = VCLOCK_TSC },
};

void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		clear_sched_clock_stable();
		disable_sched_clock_irqtime();
		pr_info("Marking TSC unstable due to %s\n", reason);
		/* Change only the rating, when not registered */
		if (clocksource_tsc.mult)
			clocksource_mark_unstable(&clocksource_tsc);
		else {
			clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
			clocksource_tsc.rating = 0;
		}
	}
}

EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static void __init check_system_tsc_reliable(void)
{
#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
	if (is_geode_lx()) {
		/* RTSC counts during suspend */
#define RTSC_SUSP 0x100
		unsigned long res_low, res_high;

		rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
		/* Geode_LX - the OLPC CPU has a very reliable TSC */
		if (res_low & RTSC_SUSP)
			tsc_clocksource_reliable = 1;
	}
#endif
	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
		tsc_clocksource_reliable = 1;
}

1132 * Make an educated guess if the TSC is trustworthy and synchronized
1133 * over all CPUs.
1134 */
148f9bb8 1135int unsynchronized_tsc(void)
8fbbc4b4 1136{
59e21e3d 1137 if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_unstable)
8fbbc4b4
AK
1138 return 1;
1139
3e5095d1 1140#ifdef CONFIG_SMP
8fbbc4b4
AK
1141 if (apic_is_clustered_box())
1142 return 1;
1143#endif
1144
1145 if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
1146 return 0;
d3b8f889
JS
1147
1148 if (tsc_clocksource_reliable)
1149 return 0;
8fbbc4b4
AK
1150 /*
1151 * Intel systems are normally all synchronized.
1152 * Exceptions must mark TSC as unstable:
1153 */
1154 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
1155 /* assume multi socket systems are not synchronized: */
1156 if (num_possible_cpus() > 1)
d3b8f889 1157 return 1;
8fbbc4b4
AK
1158 }
1159
d3b8f889 1160 return 0;
8fbbc4b4
AK
1161}
1162
/*
 * Convert ART to TSC given numerator/denominator found in detect_art()
 */
struct system_counterval_t convert_art_to_tsc(cycle_t art)
{
	u64 tmp, res, rem;

	rem = do_div(art, art_to_tsc_denominator);

	res = art * art_to_tsc_numerator;
	tmp = rem * art_to_tsc_numerator;

	do_div(tmp, art_to_tsc_denominator);
	res += tmp + art_to_tsc_offset;

	return (struct system_counterval_t) {.cs = art_related_clocksource,
					     .cycles = res};
}
EXPORT_SYMBOL(convert_art_to_tsc);

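/*
 * Note on the arithmetic in convert_art_to_tsc() above (illustrative):
 * computing art * numerator / denominator directly could overflow
 * 64 bits for large ART values, so the conversion is split into
 * (art / den) * num + ((art % den) * num) / den, which is the exact
 * floor result while keeping the intermediate products small.
 */
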
static void tsc_refine_calibration_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
/**
 * tsc_refine_calibration_work - Further refine tsc freq calibration
 * @work: ignored.
 *
 * This function uses delayed work over a period of a
 * second to further refine the TSC freq value. Since this is
 * timer based, instead of loop based, we don't block the boot
 * process while this longer calibration is done.
 *
 * If there are any calibration anomalies (too many SMIs, etc),
 * or the refined calibration is off by more than 1% from the fast
 * early calibration, we throw out the new calibration and use the
 * early calibration.
 */
static void tsc_refine_calibration_work(struct work_struct *work)
{
	static u64 tsc_start = -1, ref_start;
	static int hpet;
	u64 tsc_stop, ref_stop, delta;
	unsigned long freq;

	/* Don't bother refining TSC on unstable systems */
	if (check_tsc_unstable())
		goto out;

	/*
	 * Since the work is started early in boot, we may be
	 * delayed the first time we expire. So set the workqueue
	 * again once we know timers are working.
	 */
	if (tsc_start == -1) {
		/*
		 * Only set hpet once, to avoid mixing hardware
		 * if the hpet becomes enabled later.
		 */
		hpet = is_hpet_enabled();
		schedule_delayed_work(&tsc_irqwork, HZ);
		tsc_start = tsc_read_refs(&ref_start, hpet);
		return;
	}

	tsc_stop = tsc_read_refs(&ref_stop, hpet);

	/* hpet or pmtimer available ? */
	if (ref_start == ref_stop)
		goto out;

	/* Check whether the sampling was disturbed by an SMI */
	if (tsc_start == ULLONG_MAX || tsc_stop == ULLONG_MAX)
		goto out;

	delta = tsc_stop - tsc_start;
	delta *= 1000000LL;
	if (hpet)
		freq = calc_hpet_ref(delta, ref_start, ref_stop);
	else
		freq = calc_pmtimer_ref(delta, ref_start, ref_stop);

	/* Make sure we're within 1% */
	if (abs(tsc_khz - freq) > tsc_khz/100)
		goto out;

	tsc_khz = freq;
	pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n",
		(unsigned long)tsc_khz / 1000,
		(unsigned long)tsc_khz % 1000);

out:
	if (boot_cpu_has(X86_FEATURE_ART))
		art_related_clocksource = &clocksource_tsc;
	clocksource_register_khz(&clocksource_tsc, tsc_khz);
}

static int __init init_tsc_clocksource(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_disabled > 0 || !tsc_khz)
		return 0;

	if (tsc_clocksource_reliable)
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
	/* lower the rating if we already know it's unstable: */
	if (check_tsc_unstable()) {
		clocksource_tsc.rating = 0;
		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
	}

	if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
		clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;

	/*
	 * Trust the results of the earlier calibration on systems
	 * exporting a reliable TSC.
	 */
	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
		clocksource_register_khz(&clocksource_tsc, tsc_khz);
		return 0;
	}

	schedule_delayed_work(&tsc_irqwork, 0);
	return 0;
}
/*
 * We use device_initcall here, to ensure we run after the hpet
 * is fully initialized, which may occur at fs_initcall time.
 */
device_initcall(init_tsc_clocksource);

void __init tsc_init(void)
{
	u64 lpj;
	int cpu;

	if (!boot_cpu_has(X86_FEATURE_TSC)) {
		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
		return;
	}

	cpu_khz = x86_platform.calibrate_cpu();
	tsc_khz = x86_platform.calibrate_tsc();

	/*
	 * Trust non-zero tsc_khz as authoritative,
	 * and use it to sanity check cpu_khz,
	 * which will be off if system timer is off.
	 */
	if (tsc_khz == 0)
		tsc_khz = cpu_khz;
	else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
		cpu_khz = tsc_khz;

	if (!tsc_khz) {
		mark_tsc_unstable("could not calculate TSC khz");
		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
		return;
	}

	pr_info("Detected %lu.%03lu MHz processor\n",
		(unsigned long)cpu_khz / 1000,
		(unsigned long)cpu_khz % 1000);

	/*
	 * Secondary CPUs do not run through tsc_init(), so set up
	 * all the scale factors for all CPUs, assuming the same
	 * speed as the bootup CPU. (cpufreq notifiers will fix this
	 * up if their speed diverges)
	 */
	for_each_possible_cpu(cpu) {
		cyc2ns_init(cpu);
		set_cyc2ns_scale(tsc_khz, cpu);
	}

	if (tsc_disabled > 0)
		return;

	/* now allow native_sched_clock() to use rdtsc */

	tsc_disabled = 0;
	static_branch_enable(&__use_tsc);

	if (!no_sched_irq_time)
		enable_sched_clock_irqtime();

	lpj = ((u64)tsc_khz * 1000);
	do_div(lpj, HZ);
	lpj_fine = lpj;

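	/*
	 * For illustration: lpj_fine is TSC cycles per jiffy, e.g. with
	 * tsc_khz = 2000000 (a 2 GHz TSC) and HZ = 1000 this comes to
	 * 2000000 * 1000 / 1000 = 2000000 cycles per tick.
	 */
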
	use_tsc_delay();

	if (unsynchronized_tsc())
		mark_tsc_unstable("TSCs unsynchronized");

	check_system_tsc_reliable();

	detect_art();
}

#ifdef CONFIG_SMP
/*
 * If we have a constant TSC and are using the TSC for the delay loop,
 * we can skip clock calibration if another cpu in the same socket has already
 * been calibrated. This assumes that CONSTANT_TSC applies to all
 * cpus in the socket - this should be a safe assumption.
 */
unsigned long calibrate_delay_is_known(void)
{
	int sibling, cpu = smp_processor_id();
	struct cpumask *mask = topology_core_cpumask(cpu);

	if (!tsc_disabled && !cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC))
		return 0;

	if (!mask)
		return 0;

	sibling = cpumask_any_but(mask, cpu);
	if (sibling < nr_cpu_ids)
		return cpu_data(sibling).loops_per_jiffy;
	return 0;
}
#endif