#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/timer.h>
#include <linux/acpi_pmtmr.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/percpu.h>
#include <linux/timex.h>
#include <linux/static_key.h>

#include <asm/hpet.h>
#include <asm/timer.h>
#include <asm/vgtod.h>
#include <asm/time.h>
#include <asm/delay.h>
#include <asm/hypervisor.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
#include <asm/geode.h>
#include <asm/apic.h>
#include <asm/intel-family.h>
unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);

unsigned int __read_mostly tsc_khz;
EXPORT_SYMBOL(tsc_khz);

/*
 * TSC can be unstable due to cpufreq or due to unsynced TSCs
 */
static int __read_mostly tsc_unstable;

/* native_sched_clock() is called before tsc_init(), so
   we must start with the TSC soft disabled to prevent
   erroneous rdtsc usage on !boot_cpu_has(X86_FEATURE_TSC) processors */
static int __read_mostly tsc_disabled = -1;

static DEFINE_STATIC_KEY_FALSE(__use_tsc);

int tsc_clocksource_reliable;

static u32 art_to_tsc_numerator;
static u32 art_to_tsc_denominator;
static u64 art_to_tsc_offset;
struct clocksource *art_related_clocksource;
/*
 * Use a ring-buffer like data structure, where a writer advances the head by
 * writing a new data entry and a reader advances the tail when it observes a
 * new entry.
 *
 * Writers are made to wait on readers until there's space to write a new
 * entry.
 *
 * This means that we can always use an {offset, mul} pair to compute a ns
 * value that is 'roughly' in the right direction, even if we're writing a new
 * {offset, mul} pair during the clock read.
 *
 * The down-side is that we can no longer guarantee strict monotonicity
 * (assuming the TSC was monotonic to begin with), because while we compute
 * the intersection point of the two clock slopes and make sure the time is
 * continuous at the point of switching, we can no longer guarantee a reader
 * is strictly before or after the switch point.
 *
 * It does mean a reader no longer needs to disable IRQs in order to avoid
 * CPU-Freq updates messing with its times, and similarly an NMI reader will
 * no longer run the risk of hitting half-written state.
 */

struct cyc2ns {
	struct cyc2ns_data data[2];	/*  0 + 2*24 = 48 */
	struct cyc2ns_data *head;	/* 48 + 8    = 56 */
	struct cyc2ns_data *tail;	/* 56 + 8    = 64 */
}; /* exactly fits one cacheline */

static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);
struct cyc2ns_data *cyc2ns_read_begin(void)
{
	struct cyc2ns_data *head;

	preempt_disable();

	head = this_cpu_read(cyc2ns.head);
	/*
	 * Ensure we observe the entry when we observe the pointer to it.
	 * Matches the wmb from cyc2ns_write_end().
	 */
	smp_read_barrier_depends();
	head->__count++;
	barrier();

	return head;
}

void cyc2ns_read_end(struct cyc2ns_data *head)
{
	barrier();
	/*
	 * If we're the outermost nested read; update the tail pointer
	 * when we're done. This notifies possible pending writers
	 * that we've observed the head pointer and that the other
	 * entry is now free.
	 */
	if (!--head->__count) {
		/*
		 * x86-TSO does not reorder writes with older reads;
		 * therefore once this write becomes visible to another
		 * cpu, we must be finished reading the cyc2ns_data.
		 *
		 * Matches with cyc2ns_write_begin().
		 */
		this_cpu_write(cyc2ns.tail, head);
	}

	preempt_enable();
}
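
/*
 * Illustrative usage sketch (not from the original file): a reader
 * brackets its use of the {offset, mul, shift} triplet with the pair
 * above:
 *
 *	struct cyc2ns_data *data = cyc2ns_read_begin();
 *	ns = data->cyc2ns_offset +
 *	     mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
 *	cyc2ns_read_end(data);
 *
 * cycles_2_ns() below open-codes this sequence for speed.
 */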
/*
 * Begin writing a new @data entry for @cpu.
 *
 * Assumes some sort of write side lock; currently 'provided' by the assumption
 * that cpufreq will call its notifiers sequentially.
 */
static struct cyc2ns_data *cyc2ns_write_begin(int cpu)
{
	struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
	struct cyc2ns_data *data = c2n->data;

	if (data == c2n->head)
		data++;

	/* XXX send an IPI to @cpu in order to guarantee a read? */

	/*
	 * When we observe the tail write from cyc2ns_read_end(),
	 * the cpu must be done with that entry and it's safe
	 * to start writing to it.
	 */
	while (c2n->tail == data)
		cpu_relax();

	return data;
}

static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
{
	struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);

	/*
	 * Ensure the @data writes are visible before we publish the
	 * entry. Matches the data-dependency in cyc2ns_read_begin().
	 */
	smp_wmb();

	ACCESS_ONCE(c2n->head) = data;
}
/*
 * Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 *  basic equation:
 *              ns = cycles / (freq / ns_per_sec)
 *              ns = cycles * (ns_per_sec / freq)
 *              ns = cycles * (10^9 / (cpu_khz * 10^3))
 *              ns = cycles * (10^6 / cpu_khz)
 *
 *      Then we use scaling math (suggested by george@mvista.com) to get:
 *              ns = cycles * (10^6 * SC / cpu_khz) / SC
 *              ns = cycles * cyc2ns_scale / SC
 *
 *      And since SC is a constant power of two, we can convert the div
 *  into a shift. The larger SC is, the more accurate the conversion, but
 *  cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication
 *  (64-bit result) can be used.
 *
 *  We can use khz divisor instead of mhz to keep a better precision.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *                      -johnstul@us.ibm.com "math is hard, lets go shopping!"
 */
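
/*
 * Worked example (illustrative numbers, not from the original source):
 * for a 2 GHz TSC, cpu_khz = 2000000. With SC = 2^10:
 *
 *	cyc2ns_scale = 10^6 * 1024 / 2000000 = 512
 *	ns = (cycles * 512) >> 10 = cycles / 2
 *
 * i.e. each cycle accounts for 0.5 ns, as expected at 2 GHz.
 */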
static void cyc2ns_data_init(struct cyc2ns_data *data)
{
	data->cyc2ns_mul = 0;
	data->cyc2ns_shift = 0;
	data->cyc2ns_offset = 0;
	data->__count = 0;
}

static void cyc2ns_init(int cpu)
{
	struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);

	cyc2ns_data_init(&c2n->data[0]);
	cyc2ns_data_init(&c2n->data[1]);

	c2n->head = c2n->data;
	c2n->tail = c2n->data;
}
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	struct cyc2ns_data *data, *tail;
	unsigned long long ns;

	/*
	 * See cyc2ns_read_*() for details; replicated in order to avoid
	 * an extra few instructions that came with the abstraction.
	 * Notably, it allows us to only do the __count and tail update
	 * dance when it's actually needed.
	 */

	preempt_disable_notrace();
	data = this_cpu_read(cyc2ns.head);
	tail = this_cpu_read(cyc2ns.tail);

	if (likely(data == tail)) {
		ns = data->cyc2ns_offset;
		ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
	} else {
		data->__count++;

		barrier();

		ns = data->cyc2ns_offset;
		ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);

		barrier();

		if (!--data->__count)
			this_cpu_write(cyc2ns.tail, data);
	}
	preempt_enable_notrace();

	return ns;
}
static void set_cyc2ns_scale(unsigned long khz, int cpu)
{
	unsigned long long tsc_now, ns_now;
	struct cyc2ns_data *data;
	unsigned long flags;

	local_irq_save(flags);
	sched_clock_idle_sleep_event();

	if (!khz)
		goto done;

	data = cyc2ns_write_begin(cpu);

	tsc_now = rdtsc();
	ns_now = cycles_2_ns(tsc_now);

	/*
	 * Compute a new multiplier as per the above comment and ensure our
	 * time function is continuous; see the comment near struct
	 * cyc2ns_data.
	 */
	clocks_calc_mult_shift(&data->cyc2ns_mul, &data->cyc2ns_shift, khz,
			       NSEC_PER_MSEC, 0);

	/*
	 * cyc2ns_shift is exported via arch_perf_update_userpage() where it is
	 * not expected to be greater than 31 due to the original published
	 * conversion algorithm shifting a 32-bit value (now specifies a 64-bit
	 * value) - refer perf_event_mmap_page documentation in perf_event.h.
	 */
	if (data->cyc2ns_shift == 32) {
		data->cyc2ns_shift = 31;
		data->cyc2ns_mul >>= 1;
	}

	data->cyc2ns_offset = ns_now -
		mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, data->cyc2ns_shift);

	cyc2ns_write_end(cpu, data);

done:
	sched_clock_idle_wakeup_event(0);
	local_irq_restore(flags);
}
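
/*
 * Continuity check (illustrative): at the instant of the switch,
 * cyc2ns_offset is chosen so that
 *
 *	ns_now == cyc2ns_offset + (tsc_now * cyc2ns_mul) >> cyc2ns_shift
 *
 * holds for the new pair, so a clock read just before and just after
 * the update computes (nearly) the same timestamp.
 */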
/*
 * Scheduler clock - returns current time in nanosec units.
 */
u64 native_sched_clock(void)
{
	if (static_branch_likely(&__use_tsc)) {
		u64 tsc_now = rdtsc();

		/* return the value in ns */
		return cycles_2_ns(tsc_now);
	}

	/*
	 * Fall back to jiffies if there's no TSC available:
	 * ( But note that we still use it if the TSC is marked
	 *   unstable. We do this because unlike Time Of Day,
	 *   the scheduler clock tolerates small errors and it's
	 *   very important for it to be as fast as the platform
	 *   can achieve it. )
	 */

	/* No locking but a rare wrong value is not a big deal: */
	return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
}
/*
 * Generate a sched_clock if you already have a TSC value.
 */
u64 native_sched_clock_from_tsc(u64 tsc)
{
	return cycles_2_ns(tsc);
}
/*
 * We need to define a real function for sched_clock, to override the
 * weak default version
 */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}
#else
unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock")));
#endif

int check_tsc_unstable(void)
{
	return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);
#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
	pr_warn("Kernel compiled with CONFIG_X86_TSC, cannot disable TSC completely\n");
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
int __init notsc_setup(char *str)
{
	setup_clear_cpu_cap(X86_FEATURE_TSC);
	return 1;
}
#endif

__setup("notsc", notsc_setup);
static int no_sched_irq_time;

static int __init tsc_setup(char *str)
{
	if (!strcmp(str, "reliable"))
		tsc_clocksource_reliable = 1;
	if (!strncmp(str, "noirqtime", 9))
		no_sched_irq_time = 1;
	return 1;
}

__setup("tsc=", tsc_setup);
#define MAX_RETRIES	5
#define SMI_THRESHOLD	50000

/*
 * Read TSC and the reference counters. Take care of SMI disturbance
 */
static u64 tsc_read_refs(u64 *p, int hpet)
{
	u64 t1, t2;
	int i;

	for (i = 0; i < MAX_RETRIES; i++) {
		t1 = get_cycles();
		if (hpet)
			*p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
		else
			*p = acpi_pm_read_early();
		t2 = get_cycles();
		if ((t2 - t1) < SMI_THRESHOLD)
			return t2;
	}
	return ULLONG_MAX;
}
/*
 * Calculate the TSC frequency from HPET reference
 */
static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
{
	u64 tmp;

	if (hpet2 < hpet1)
		hpet2 += 0x100000000ULL;
	hpet2 -= hpet1;
	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
	do_div(tmp, 1000000);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}
/*
 * Calculate the TSC frequency from PMTimer reference
 */
static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
{
	u64 tmp;

	if (!pm1 && !pm2)
		return ULONG_MAX;

	if (pm2 < pm1)
		pm2 += (u64)ACPI_PM_OVRRUN;
	pm2 -= pm1;
	tmp = pm2 * 1000000000LL;
	do_div(tmp, PMTMR_TICKS_PER_SEC);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}
#define CAL_MS		10
#define CAL_LATCH	(PIT_TICK_RATE / (1000 / CAL_MS))
#define CAL_PIT_LOOPS	1000

#define CAL2_MS		50
#define CAL2_LATCH	(PIT_TICK_RATE / (1000 / CAL2_MS))
#define CAL2_PIT_LOOPS	5000
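
/*
 * Illustrative: with PIT_TICK_RATE = 1193182 Hz, CAL_LATCH evaluates
 * to 1193182 / 100 = 11931 ticks (~10 ms) and CAL2_LATCH to
 * 1193182 / 20 = 59659 ticks (~50 ms).
 */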
/*
 * Try to calibrate the TSC against the Programmable
 * Interrupt Timer and return the frequency of the TSC
 * in kHz.
 *
 * Return ULONG_MAX on failure to calibrate.
 */
static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
{
	u64 tsc, t1, t2, delta;
	unsigned long tscmin, tscmax;
	int pitcnt;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Set up CTC channel 2 for mode 0 (interrupt on terminal
	 * count mode), binary count. Set the latch register to 50ms
	 * (LSB then MSB) to begin countdown.
	 */
	outb(0xb0, 0x43);
	outb(latch & 0xff, 0x42);
	outb(latch >> 8, 0x42);

	tsc = t1 = t2 = get_cycles();

	pitcnt = 0;
	tscmax = 0;
	tscmin = ULONG_MAX;
	while ((inb(0x61) & 0x20) == 0) {
		t2 = get_cycles();
		delta = t2 - tsc;
		tsc = t2;
		if ((unsigned long) delta < tscmin)
			tscmin = (unsigned int) delta;
		if ((unsigned long) delta > tscmax)
			tscmax = (unsigned int) delta;
		pitcnt++;
	}

	/*
	 * Sanity checks:
	 *
	 * If we were not able to read the PIT more than loopmin
	 * times, then we have been hit by a massive SMI
	 *
	 * If the maximum is 10 times larger than the minimum,
	 * then we got hit by an SMI as well.
	 */
	if (pitcnt < loopmin || tscmax > 10 * tscmin)
		return ULONG_MAX;

	/* Calculate the PIT value */
	delta = t2 - t1;
	do_div(delta, ms);
	return delta;
}
/*
 * This reads the current MSB of the PIT counter, and
 * checks if we are running on sufficiently fast and
 * non-virtualized hardware.
 *
 * Our expectations are:
 *
 *  - the PIT is running at roughly 1.19MHz
 *
 *  - each IO is going to take about 1us on real hardware,
 *    but we allow it to be much faster (by a factor of 10) or
 *    _slightly_ slower (ie we allow up to a 2us read+counter
 *    update - anything else implies an unacceptably slow CPU
 *    or PIT for the fast calibration to work).
 *
 *  - with 256 PIT ticks to read the value, we have 214us to
 *    see the same MSB (and overhead like doing a single TSC
 *    read per MSB value etc).
 *
 *  - We're doing 2 reads per loop (LSB, MSB), and we expect
 *    them each to take about a microsecond on real hardware.
 *    So we expect a count value of around 100. But we'll be
 *    generous, and accept anything over 50.
 *
 *  - if the PIT is stuck, and we see *many* more reads, we
 *    return early (and the next caller of pit_expect_msb()
 *    will then consider it a failure when it doesn't see the
 *    next expected value).
 *
 * These expectations mean that we know that we have seen the
 * transition from one expected value to another with a fairly
 * high accuracy, and we didn't miss any events. We can thus
 * use the TSC value at the transitions to calculate a pretty
 * good value for the TSC frequency.
 */
static inline int pit_verify_msb(unsigned char val)
{
	/* Ignore LSB */
	inb(0x42);
	return inb(0x42) == val;
}
static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
{
	int count;
	u64 tsc = 0, prev_tsc = 0;

	for (count = 0; count < 50000; count++) {
		if (!pit_verify_msb(val))
			break;
		prev_tsc = tsc;
		tsc = get_cycles();
	}
	*deltap = get_cycles() - prev_tsc;
	*tscp = tsc;

	/*
	 * We require _some_ success, but the quality control
	 * will be based on the error terms on the TSC values.
	 */
	return count > 5;
}
/*
 * How many MSB values do we want to see? We aim for
 * a maximum error rate of 500ppm (in practice the
 * real error is much smaller), but refuse to spend
 * more than 50ms on it.
 */
#define MAX_QUICK_PIT_MS 50
#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
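
/*
 * Illustrative: with PIT_TICK_RATE = 1193182 Hz this works out to
 * 50 * 1193182 / 1000 / 256 = 233 iterations, where each MSB step
 * spans 256 PIT ticks, i.e. ~214.6 us.
 */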
static unsigned long quick_pit_calibrate(void)
{
	int i;
	u64 tsc, delta;
	unsigned long d1, d2;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Counter 2, mode 0 (one-shot), binary count
	 *
	 * NOTE! Mode 2 decrements by two (and then the
	 * output is flipped each time, giving the same
	 * final output frequency as a decrement-by-one),
	 * so mode 0 is much better when looking at the
	 * individual counts.
	 */
	outb(0xb0, 0x43);

	/* Start at 0xffff */
	outb(0xff, 0x42);
	outb(0xff, 0x42);

	/*
	 * The PIT starts counting at the next edge, so we
	 * need to delay for a microsecond. The easiest way
	 * to do that is to just read back the 16-bit counter
	 * once from the PIT.
	 */
	pit_verify_msb(0);

	if (pit_expect_msb(0xff, &tsc, &d1)) {
		for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
			if (!pit_expect_msb(0xff-i, &delta, &d2))
				break;

			delta -= tsc;

			/*
			 * Extrapolate the error and fail fast if the error will
			 * never be below 500 ppm.
			 */
			if (i == 1 &&
			    d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
				return 0;

			/*
			 * Iterate until the error is less than 500 ppm
			 */
			if (d1+d2 >= delta >> 11)
				continue;

			/*
			 * Check the PIT one more time to verify that
			 * all TSC reads were stable wrt the PIT.
			 *
			 * This also guarantees serialization of the
			 * last cycle read ('d2') in pit_expect_msb.
			 */
			if (!pit_verify_msb(0xfe - i))
				break;
			goto success;
		}
	}
	pr_info("Fast TSC calibration failed\n");
	return 0;

success:
	/*
	 * Ok, if we get here, then we've seen the
	 * MSB of the PIT decrement 'i' times, and the
	 * error has shrunk to less than 500 ppm.
	 *
	 * As a result, we can depend on there not being
	 * any odd delays anywhere, and the TSC reads are
	 * reliable (within the error).
	 *
	 * kHz = ticks / time-in-seconds / 1000;
	 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
	 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
	 */
	delta *= PIT_TICK_RATE;
	do_div(delta, i*256*1000);
	pr_info("Fast TSC calibration using PIT\n");
	return delta;
}
/*
 * native_calibrate_tsc
 * Determine TSC frequency via CPUID, else return 0.
 */
unsigned long native_calibrate_tsc(void)
{
	unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
	unsigned int crystal_khz;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (boot_cpu_data.cpuid_level < 0x15)
		return 0;

	eax_denominator = ebx_numerator = ecx_hz = edx = 0;

	/* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
	cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);

	if (ebx_numerator == 0 || eax_denominator == 0)
		return 0;

	crystal_khz = ecx_hz / 1000;

	if (crystal_khz == 0) {
		switch (boot_cpu_data.x86_model) {
		case INTEL_FAM6_SKYLAKE_MOBILE:
		case INTEL_FAM6_SKYLAKE_DESKTOP:
		case INTEL_FAM6_KABYLAKE_MOBILE:
		case INTEL_FAM6_KABYLAKE_DESKTOP:
			crystal_khz = 24000;	/* 24.0 MHz */
			break;
		case INTEL_FAM6_SKYLAKE_X:
		case INTEL_FAM6_ATOM_DENVERTON:
			crystal_khz = 25000;	/* 25.0 MHz */
			break;
		case INTEL_FAM6_ATOM_GOLDMONT:
			crystal_khz = 19200;	/* 19.2 MHz */
			break;
		}
	}

	/*
	 * TSC frequency determined by CPUID is a "hardware reported"
	 * frequency and is the most accurate one we have so far. This
	 * is considered a known frequency.
	 */
	setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);

	/*
	 * For Atom SoCs TSC is the only reliable clocksource.
	 * Mark TSC reliable so no watchdog on it.
	 */
	if (boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT)
		setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);

	return crystal_khz * ebx_numerator / eax_denominator;
}
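
/*
 * Worked example (illustrative): on a part reporting a 24 MHz crystal
 * with a 336:2 TSC/crystal ratio, this returns
 * 24000 * 336 / 2 = 4032000 kHz, i.e. a 4.032 GHz TSC.
 */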
static unsigned long cpu_khz_from_cpuid(void)
{
	unsigned int eax_base_mhz, ebx_max_mhz, ecx_bus_mhz, edx;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (boot_cpu_data.cpuid_level < 0x16)
		return 0;

	eax_base_mhz = ebx_max_mhz = ecx_bus_mhz = edx = 0;

	cpuid(0x16, &eax_base_mhz, &ebx_max_mhz, &ecx_bus_mhz, &edx);

	return eax_base_mhz * 1000;
}
/*
 * native_calibrate_cpu - calibrate the cpu on boot
 */
unsigned long native_calibrate_cpu(void)
{
	u64 tsc1, tsc2, delta, ref1, ref2;
	unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
	unsigned long flags, latch, ms, fast_calibrate;
	int hpet = is_hpet_enabled(), i, loopmin;

	fast_calibrate = cpu_khz_from_cpuid();
	if (fast_calibrate)
		return fast_calibrate;

	fast_calibrate = cpu_khz_from_msr();
	if (fast_calibrate)
		return fast_calibrate;

	local_irq_save(flags);
	fast_calibrate = quick_pit_calibrate();
	local_irq_restore(flags);
	if (fast_calibrate)
		return fast_calibrate;

	/*
	 * Run 5 calibration loops to get the lowest frequency value
	 * (the best estimate). We use two different calibration modes
	 * here:
	 *
	 * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
	 * load a timeout of 50ms. We read the time right after we
	 * started the timer and wait until the PIT count down reaches
	 * zero. In each wait loop iteration we read the TSC and check
	 * the delta to the previous read. We keep track of the min
	 * and max values of that delta. The delta is mostly defined
	 * by the IO time of the PIT access, so we can detect when an
	 * SMI/SMM disturbance happened between the two reads. If the
	 * maximum time is significantly larger than the minimum time,
	 * then we discard the result and have another try.
	 *
	 * 2) Reference counter. If available we use the HPET or the
	 * PMTIMER as a reference to check the sanity of that value.
	 * We use separate TSC readouts and check inside of the
	 * reference read for an SMI/SMM disturbance. We discard
	 * disturbed values here as well. We do that around the PIT
	 * calibration delay loop as we have to wait for a certain
	 * amount of time anyway.
	 */

	/* Preset PIT loop values */
	latch = CAL_LATCH;
	ms = CAL_MS;
	loopmin = CAL_PIT_LOOPS;

	for (i = 0; i < 3; i++) {
		unsigned long tsc_pit_khz;

		/*
		 * Read the start value and the reference count of
		 * hpet/pmtimer when available. Then do the PIT
		 * calibration, which will take at least 50ms, and
		 * read the end value.
		 */
		local_irq_save(flags);
		tsc1 = tsc_read_refs(&ref1, hpet);
		tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
		tsc2 = tsc_read_refs(&ref2, hpet);
		local_irq_restore(flags);

		/* Pick the lowest PIT TSC calibration so far */
		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);

		/* hpet or pmtimer available ? */
		if (ref1 == ref2)
			continue;

		/* Check, whether the sampling was disturbed by an SMI */
		if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
			continue;

		tsc2 = (tsc2 - tsc1) * 1000000LL;
		if (hpet)
			tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
		else
			tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);

		tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);

		/* Check the reference deviation */
		delta = ((u64) tsc_pit_min) * 100;
		do_div(delta, tsc_ref_min);

		/*
		 * If both calibration results are inside a 10% window
		 * then we can be sure, that the calibration
		 * succeeded. We break out of the loop right away. We
		 * use the reference value, as it is more precise.
		 */
		if (delta >= 90 && delta <= 110) {
			pr_info("PIT calibration matches %s. %d loops\n",
				hpet ? "HPET" : "PMTIMER", i + 1);
			return tsc_ref_min;
		}

		/*
		 * Check whether PIT failed more than once. This
		 * happens in virtualized environments. We need to
		 * give the virtual PC a slightly longer timeframe for
		 * the HPET/PMTIMER to make the result precise.
		 */
		if (i == 1 && tsc_pit_min == ULONG_MAX) {
			latch = CAL2_LATCH;
			ms = CAL2_MS;
			loopmin = CAL2_PIT_LOOPS;
		}
	}

	/*
	 * Now check the results.
	 */
	if (tsc_pit_min == ULONG_MAX) {
		/* PIT gave no useful value */
		pr_warn("Unable to calibrate against PIT\n");

		/* We don't have an alternative source, disable TSC */
		if (!hpet && !ref1 && !ref2) {
			pr_notice("No reference (HPET/PMTIMER) available\n");
			return 0;
		}

		/* The alternative source failed as well, disable TSC */
		if (tsc_ref_min == ULONG_MAX) {
			pr_warn("HPET/PMTIMER calibration failed\n");
			return 0;
		}

		/* Use the alternative source */
		pr_info("using %s reference calibration\n",
			hpet ? "HPET" : "PMTIMER");

		return tsc_ref_min;
	}

	/* We don't have an alternative source, use the PIT calibration value */
	if (!hpet && !ref1 && !ref2) {
		pr_info("Using PIT calibration value\n");
		return tsc_pit_min;
	}

	/* The alternative source failed, use the PIT calibration value */
	if (tsc_ref_min == ULONG_MAX) {
		pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
		return tsc_pit_min;
	}

	/*
	 * The calibration values differ too much. In doubt, we use
	 * the PIT value as we know that there are PMTIMERs around
	 * running at double speed. At least we let the user know:
	 */
	pr_warn("PIT calibration deviates from %s: %lu %lu\n",
		hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
	pr_info("Using PIT calibration value\n");
	return tsc_pit_min;
}
int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (!boot_cpu_has(X86_FEATURE_TSC))
		return -ENODEV;

	cpu_khz = x86_platform.calibrate_cpu();
	tsc_khz = x86_platform.calibrate_tsc();
	if (tsc_khz == 0)
		tsc_khz = cpu_khz;
	else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
		cpu_khz = tsc_khz;
	cpu_data(0).loops_per_jiffy = cpufreq_scale(cpu_data(0).loops_per_jiffy,
						    cpu_khz_old, cpu_khz);

	return 0;
#else
	return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);
static unsigned long long cyc2ns_suspend;

void tsc_save_sched_clock_state(void)
{
	if (!sched_clock_stable())
		return;

	cyc2ns_suspend = sched_clock();
}

/*
 * Even on processors with invariant TSC, TSC gets reset in some of the
 * ACPI system sleep states. And in some systems the BIOS seems to reinit
 * the TSC to an arbitrary value (still sync'd across cpu's) during resume
 * from such sleep states. To cope with this, recompute the cyc2ns_offset
 * for each cpu so that sched_clock() continues from the point where it was
 * left off during suspend.
 */
void tsc_restore_sched_clock_state(void)
{
	unsigned long long offset;
	unsigned long flags;
	int cpu;

	if (!sched_clock_stable())
		return;

	local_irq_save(flags);

	/*
	 * We're coming out of suspend, there's no concurrency yet; don't
	 * bother being nice about the RCU stuff, just write to both
	 * data fields.
	 */

	this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0);
	this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0);

	offset = cyc2ns_suspend - sched_clock();

	for_each_possible_cpu(cpu) {
		per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
		per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
	}

	local_irq_restore(flags);
}
#ifdef CONFIG_CPU_FREQ

/*
 * Frequency scaling support. Adjust the TSC based timer when the CPU
 * frequency changes.
 *
 * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
 * not that important because current Opteron setups do not support
 * scaling on SMP anyway.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */

static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj;

	lpj = &boot_cpu_data.loops_per_jiffy;
#ifdef CONFIG_SMP
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
		lpj = &cpu_data(freq->cpu).loops_per_jiffy;
#endif

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		tsc_khz_ref = tsc_khz;
	}
	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			mark_tsc_unstable("cpufreq changes");

		set_cyc2ns_scale(tsc_khz, freq->cpu);
	}

	return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call  = time_cpufreq_notifier
};

static int __init cpufreq_register_tsc_scaling(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC))
		return 0;
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;
	cpufreq_register_notifier(&time_cpufreq_notifier_block,
				  CPUFREQ_TRANSITION_NOTIFIER);
	return 0;
}

core_initcall(cpufreq_register_tsc_scaling);

#endif /* CONFIG_CPU_FREQ */
#define ART_CPUID_LEAF (0x15)
#define ART_MIN_DENOMINATOR (1)

/*
 * If ART is present detect the numerator:denominator to convert to TSC
 */
static void detect_art(void)
{
	unsigned int unused[2];

	if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF)
		return;

	/* Don't enable ART in a VM, non-stop TSC and TSC_ADJUST required */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR) ||
	    !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
	    !boot_cpu_has(X86_FEATURE_TSC_ADJUST))
		return;

	cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator,
	      &art_to_tsc_numerator, unused, unused+1);

	if (art_to_tsc_denominator < ART_MIN_DENOMINATOR)
		return;

	rdmsrl(MSR_IA32_TSC_ADJUST, art_to_tsc_offset);

	/* Make this sticky over multiple CPU init calls */
	setup_force_cpu_cap(X86_FEATURE_ART);
}
/* clocksource code */

static struct clocksource clocksource_tsc;

static void tsc_resume(struct clocksource *cs)
{
	tsc_verify_tsc_adjust(true);
}

/*
 * We used to compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp. This can be observed in a
 * very small window right after one CPU updated cycle_last under
 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
 * is smaller than the cycle_last reference value due to a TSC which
 * is slightly behind. This delta is nowhere else observable, but in
 * that case it results in a forward time jump in the range of hours
 * due to the unsigned delta calculation of the time keeping core
 * code, which is necessary to support wrapping clocksources like pm
 * timer.
 *
 * This sanity check is now done in the core timekeeping code, by
 * checking the result of read_tsc() - cycle_last for being negative.
 * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
 */
static u64 read_tsc(struct clocksource *cs)
{
	return (u64)rdtsc_ordered();
}

static void tsc_cs_mark_unstable(struct clocksource *cs)
{
	if (tsc_unstable)
		return;

	tsc_unstable = 1;
	clear_sched_clock_stable();
	disable_sched_clock_irqtime();
	pr_info("Marking TSC unstable due to clocksource watchdog\n");
}
/*
 * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
 */
static struct clocksource clocksource_tsc = {
	.name			= "tsc",
	.rating			= 300,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_MUST_VERIFY,
	.archdata		= { .vclock_mode = VCLOCK_TSC },
	.resume			= tsc_resume,
	.mark_unstable		= tsc_cs_mark_unstable,
};
void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		clear_sched_clock_stable();
		disable_sched_clock_irqtime();
		pr_info("Marking TSC unstable due to %s\n", reason);
		/* Change only the rating, when not registered */
		if (clocksource_tsc.mult)
			clocksource_mark_unstable(&clocksource_tsc);
		else {
			clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
			clocksource_tsc.rating = 0;
		}
	}
}

EXPORT_SYMBOL_GPL(mark_tsc_unstable);
static void __init check_system_tsc_reliable(void)
{
#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
	if (is_geode_lx()) {
		/* RTSC counts during suspend */
#define RTSC_SUSP 0x100
		unsigned long res_low, res_high;

		rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
		/* Geode_LX - the OLPC CPU has a very reliable TSC */
		if (res_low & RTSC_SUSP)
			tsc_clocksource_reliable = 1;
	}
#endif
	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
		tsc_clocksource_reliable = 1;
}
/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
int unsynchronized_tsc(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_unstable)
		return 1;

#ifdef CONFIG_SMP
	if (apic_is_clustered_box())
		return 1;
#endif

	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;

	if (tsc_clocksource_reliable)
		return 0;
	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			return 1;
	}

	return 0;
}
/*
 * Convert ART to TSC given numerator/denominator found in detect_art()
 */
struct system_counterval_t convert_art_to_tsc(u64 art)
{
	u64 tmp, res, rem;

	rem = do_div(art, art_to_tsc_denominator);

	res = art * art_to_tsc_numerator;
	tmp = rem * art_to_tsc_numerator;

	do_div(tmp, art_to_tsc_denominator);
	res += tmp + art_to_tsc_offset;

	return (struct system_counterval_t) {.cs = art_related_clocksource,
					     .cycles = res};
}
EXPORT_SYMBOL(convert_art_to_tsc);
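
/*
 * Illustrative sketch of the math above: splitting art into
 * art = q * denominator + rem gives
 *
 *	tsc = q * numerator + (rem * numerator) / denominator + offset
 *
 * which avoids overflowing the 64-bit multiply for large ART values.
 */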
static void tsc_refine_calibration_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
/**
 * tsc_refine_calibration_work - Further refine tsc freq calibration
 * @work - workqueue structure
 *
 * This function uses delayed work over a period of a
 * second to further refine the TSC freq value. Since this is
 * timer based, instead of loop based, we don't block the boot
 * process while this longer calibration is done.
 *
 * If there are any calibration anomalies (too many SMIs, etc),
 * or the refined calibration is off by more than 1% from the
 * fast early calibration, we throw out the new calibration and
 * use the early calibration.
 */
static void tsc_refine_calibration_work(struct work_struct *work)
{
	static u64 tsc_start = -1, ref_start;
	static int hpet;
	u64 tsc_stop, ref_stop, delta;
	unsigned long freq;

	/* Don't bother refining TSC on unstable systems */
	if (check_tsc_unstable())
		goto out;

	/*
	 * Since the work is started early in boot, we may be
	 * delayed the first time we expire. So set the workqueue
	 * again once we know timers are working.
	 */
	if (tsc_start == -1) {
		/*
		 * Only set hpet once, to avoid mixing hardware
		 * if the hpet becomes enabled later.
		 */
		hpet = is_hpet_enabled();
		schedule_delayed_work(&tsc_irqwork, HZ);
		tsc_start = tsc_read_refs(&ref_start, hpet);
		return;
	}

	tsc_stop = tsc_read_refs(&ref_stop, hpet);

	/* hpet or pmtimer available ? */
	if (ref_start == ref_stop)
		goto out;

	/* Check, whether the sampling was disturbed by an SMI */
	if (tsc_start == ULLONG_MAX || tsc_stop == ULLONG_MAX)
		goto out;

	delta = tsc_stop - tsc_start;
	delta *= 1000000LL;
	if (hpet)
		freq = calc_hpet_ref(delta, ref_start, ref_stop);
	else
		freq = calc_pmtimer_ref(delta, ref_start, ref_stop);

	/* Make sure we're within 1% */
	if (abs(tsc_khz - freq) > tsc_khz/100)
		goto out;

	tsc_khz = freq;
	pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n",
		(unsigned long)tsc_khz / 1000,
		(unsigned long)tsc_khz % 1000);

	/* Inform the TSC deadline clockevent devices about the recalibration */
	lapic_update_tsc_freq();

out:
	if (boot_cpu_has(X86_FEATURE_ART))
		art_related_clocksource = &clocksource_tsc;
	clocksource_register_khz(&clocksource_tsc, tsc_khz);
}
static int __init init_tsc_clocksource(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_disabled > 0 || !tsc_khz)
		return 0;

	if (tsc_clocksource_reliable)
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
	/* lower the rating if we already know it's unstable: */
	if (check_tsc_unstable()) {
		clocksource_tsc.rating = 0;
		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
	}

	if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
		clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;

	/*
	 * When TSC frequency is known (retrieved via MSR or CPUID), we skip
	 * the refined calibration and directly register it as a clocksource.
	 */
	if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
		clocksource_register_khz(&clocksource_tsc, tsc_khz);
		return 0;
	}

	schedule_delayed_work(&tsc_irqwork, 0);
	return 0;
}
/*
 * We use device_initcall here, to ensure we run after the hpet
 * is fully initialized, which may occur at fs_initcall time.
 */
device_initcall(init_tsc_clocksource);
void __init tsc_init(void)
{
	u64 lpj;
	int cpu;

	if (!boot_cpu_has(X86_FEATURE_TSC)) {
		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
		return;
	}

	cpu_khz = x86_platform.calibrate_cpu();
	tsc_khz = x86_platform.calibrate_tsc();

	/*
	 * Trust non-zero tsc_khz as authoritative,
	 * and use it to sanity check cpu_khz,
	 * which will be off if system timer is off.
	 */
	if (tsc_khz == 0)
		tsc_khz = cpu_khz;
	else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
		cpu_khz = tsc_khz;

	if (!tsc_khz) {
		mark_tsc_unstable("could not calculate TSC khz");
		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
		return;
	}

	pr_info("Detected %lu.%03lu MHz processor\n",
		(unsigned long)cpu_khz / 1000,
		(unsigned long)cpu_khz % 1000);

	/* Sanitize TSC ADJUST before cyc2ns gets initialized */
	tsc_store_and_check_tsc_adjust(true);

	/*
	 * Secondary CPUs do not run through tsc_init(), so set up
	 * all the scale factors for all CPUs, assuming the same
	 * speed as the bootup CPU. (cpufreq notifiers will fix this
	 * up if their speed diverges)
	 */
	for_each_possible_cpu(cpu) {
		cyc2ns_init(cpu);
		set_cyc2ns_scale(tsc_khz, cpu);
	}

	if (tsc_disabled > 0)
		return;

	/* now allow native_sched_clock() to use rdtsc */

	tsc_disabled = 0;
	static_branch_enable(&__use_tsc);

	if (!no_sched_irq_time)
		enable_sched_clock_irqtime();

	lpj = ((u64)tsc_khz * 1000);
	do_div(lpj, HZ);
	lpj_fine = lpj;

	use_tsc_delay();

	if (unsynchronized_tsc())
		mark_tsc_unstable("TSCs unsynchronized");

	check_system_tsc_reliable();

	detect_art();
}
#ifdef CONFIG_SMP
/*
 * If we have a constant TSC and are using the TSC for the delay loop,
 * we can skip clock calibration if another cpu in the same socket has already
 * been calibrated. This assumes that CONSTANT_TSC applies to all
 * cpus in the socket - this should be a safe assumption.
 */
unsigned long calibrate_delay_is_known(void)
{
	int sibling, cpu = smp_processor_id();
	struct cpumask *mask = topology_core_cpumask(cpu);

	if (!tsc_disabled && !cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC))
		return 0;

	if (!mask)
		return 0;

	sibling = cpumask_any_but(mask, cpu);
	if (sibling < nr_cpu_ids)
		return cpu_data(sibling).loops_per_jiffy;
	return 0;
}
#endif