#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/workqueue.h>
#include <linux/cpufreq.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/percpu.h>

#include <asm/delay.h>
#include <asm/tsc.h>
#include <asm/io.h>
#include <asm/timer.h>

#include "mach_timer.h"

/* native_sched_clock() is called before tsc_init(), so
   we must start with the TSC soft disabled to prevent
   erroneous rdtsc usage on !cpu_has_tsc processors */
static int tsc_disabled = -1;

/*
 * On some systems the TSC frequency does not
 * change with the cpu frequency. So we need
 * an extra value to store the TSC freq.
 */
unsigned int tsc_khz;
EXPORT_SYMBOL_GPL(tsc_khz);

#ifdef CONFIG_X86_TSC
static int __init tsc_setup(char *str)
{
        printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
               "cannot disable TSC completely.\n");
        mark_tsc_unstable("user disabled TSC");
        return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
static int __init tsc_setup(char *str)
{
        setup_clear_cpu_cap(X86_FEATURE_TSC);
        return 1;
}
#endif

__setup("notsc", tsc_setup);

/*
 * code to mark and check if the TSC is unstable
 * due to cpufreq or due to unsynced TSCs
 */
static int tsc_unstable;

int check_tsc_unstable(void)
{
        return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 *  basic equation:
 *              ns = cycles / (freq / ns_per_sec)
 *              ns = cycles * (ns_per_sec / freq)
 *              ns = cycles * (10^9 / (cpu_khz * 10^3))
 *              ns = cycles * (10^6 / cpu_khz)
 *
 *      Then we use scaling math (suggested by george@mvista.com) to get:
 *              ns = cycles * (10^6 * SC / cpu_khz) / SC
 *              ns = cycles * cyc2ns_scale / SC
 *
 *      And since SC is a constant power of two, we can convert the div
 *  into a shift.
 *
 *  We can use a khz divisor instead of mhz to keep better precision, since
 *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *                      -johnstul@us.ibm.com "math is hard, lets go shopping!"
 */
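/*
 * For reference, the matching conversion helper lives in <asm/timer.h>;
 * given the derivation above it reduces to roughly this sketch (SC being
 * 1 << CYC2NS_SCALE_FACTOR):
 *
 *      static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
 *      {
 *              cyc *= per_cpu(cyc2ns, smp_processor_id());
 *              return cyc >> CYC2NS_SCALE_FACTOR;
 *      }
 */
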
DEFINE_PER_CPU(unsigned long, cyc2ns);

static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
{
        unsigned long long tsc_now, ns_now;
        unsigned long flags, *scale;

        local_irq_save(flags);
        sched_clock_idle_sleep_event();

        scale = &per_cpu(cyc2ns, cpu);

        rdtscll(tsc_now);
        ns_now = __cycles_2_ns(tsc_now);

        if (cpu_khz)
                *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;

        /*
         * Start smoothly with the new frequency:
         */
        sched_clock_idle_wakeup_event(0);
        local_irq_restore(flags);
}

/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long native_sched_clock(void)
{
        unsigned long long this_offset;

        /*
         * Fall back to jiffies if there's no TSC available:
         * ( But note that we still use it if the TSC is marked
         *   unstable. We do this because unlike Time Of Day,
         *   the scheduler clock tolerates small errors and it's
         *   very important for it to be as fast as the platform
         *   can achieve it. )
         */
        if (unlikely(tsc_disabled))
                /* No locking but a rare wrong value is not a big deal: */
                return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);

        /* read the Time Stamp Counter: */
        rdtscll(this_offset);

        /* return the value in ns */
        return cycles_2_ns(this_offset);
}

/* We need to define a real function for sched_clock, to override the
   weak default version */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
        return paravirt_sched_clock();
}
#else
unsigned long long sched_clock(void)
        __attribute__((alias("native_sched_clock")));
#endif

unsigned long native_calculate_cpu_khz(void)
{
        unsigned long long start, end;
        unsigned long count;
        u64 delta64 = (u64)ULLONG_MAX;
        int i;
        unsigned long flags;

        local_irq_save(flags);

        /* run 3 times to ensure the cache is warm and to get an accurate reading */
        for (i = 0; i < 3; i++) {
                mach_prepare_counter();
                rdtscll(start);
                mach_countup(&count);
                rdtscll(end);

                /*
                 * Error: ECTCNEVERSET
                 * The CTC wasn't reliable: we got a hit on the very first read,
                 * or the CPU was so fast/slow that the quotient wouldn't fit in
                 * 32 bits..
                 */
                if (count <= 1)
                        continue;

                /* cpu freq too slow: */
                if ((end - start) <= CALIBRATE_TIME_MSEC)
                        continue;

                /*
                 * We want the minimum time of all runs in case one of them
                 * is inaccurate due to SMI or other delay
                 */
                delta64 = min(delta64, (end - start));
        }

        /* cpu freq too fast (or every run was bad): */
        if (delta64 > (1ULL<<32))
                goto err;

        delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
        do_div(delta64, CALIBRATE_TIME_MSEC);

        local_irq_restore(flags);
        return (unsigned long)delta64;

err:
        local_irq_restore(flags);
        return 0;
}

int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
        unsigned long cpu_khz_old = cpu_khz;

        if (cpu_has_tsc) {
                cpu_khz = calculate_cpu_khz();
                tsc_khz = cpu_khz;
                cpu_data(0).loops_per_jiffy =
                        cpufreq_scale(cpu_data(0).loops_per_jiffy,
                                      cpu_khz_old, cpu_khz);
                return 0;
        } else
                return -ENODEV;
#else
        return -ENODEV;
#endif
}
EXPORT_SYMBOL(recalibrate_cpu_khz);

#ifdef CONFIG_CPU_FREQ

/*
 * If the CPU frequency is scaled, TSC-based delays will need a different
 * loops_per_jiffy value to function properly.
 */
static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long cpu_khz_ref;

static int
time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;

        if (!ref_freq) {
                if (!freq->old) {
                        ref_freq = freq->new;
                        return 0;
                }
                ref_freq = freq->old;
                loops_per_jiffy_ref = cpu_data(freq->cpu).loops_per_jiffy;
                cpu_khz_ref = cpu_khz;
        }

        if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
            (val == CPUFREQ_RESUMECHANGE)) {
                if (!(freq->flags & CPUFREQ_CONST_LOOPS))
                        cpu_data(freq->cpu).loops_per_jiffy =
                                cpufreq_scale(loops_per_jiffy_ref,
                                              ref_freq, freq->new);

                if (cpu_khz) {
                        if (num_online_cpus() == 1)
                                cpu_khz = cpufreq_scale(cpu_khz_ref,
                                                        ref_freq, freq->new);
                        if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
                                tsc_khz = cpu_khz;
                                set_cyc2ns_scale(cpu_khz, freq->cpu);
                                /*
                                 * TSC based sched_clock turns
                                 * to junk w/ cpufreq
                                 */
                                mark_tsc_unstable("cpufreq changes");
                        }
                }
        }

        return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
        .notifier_call  = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
        return cpufreq_register_notifier(&time_cpufreq_notifier_block,
                                         CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(cpufreq_tsc);

#endif

/* clock source code */

static unsigned long current_tsc_khz;
static struct clocksource clocksource_tsc;

/*
 * We compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp issue. This can be observed in
 * a very small window right after one CPU updated cycle_last under
 * xtime lock and the other CPU reads a TSC value which is smaller
 * than the cycle_last reference value due to a TSC which is slightly
 * behind. This delta is nowhere else observable, but in that case it
 * results in a forward time jump in the range of hours due to the
 * unsigned delta calculation of the time keeping core code, which is
 * necessary to support wrapping clocksources like pm timer.
 */
static cycle_t read_tsc(void)
{
        cycle_t ret;

        rdtscll(ret);

        return ret >= clocksource_tsc.cycle_last ?
                ret : clocksource_tsc.cycle_last;
}

static struct clocksource clocksource_tsc = {
        .name                   = "tsc",
        .rating                 = 300,
        .read                   = read_tsc,
        .mask                   = CLOCKSOURCE_MASK(64),
        .mult                   = 0, /* to be set */
        .shift                  = 22,
        .flags                  = CLOCK_SOURCE_IS_CONTINUOUS |
                                  CLOCK_SOURCE_MUST_VERIFY,
};

void mark_tsc_unstable(char *reason)
{
        if (!tsc_unstable) {
                tsc_unstable = 1;
                printk("Marking TSC unstable due to: %s.\n", reason);
                /* Can be called before registration */
                if (clocksource_tsc.mult)
                        clocksource_change_rating(&clocksource_tsc, 0);
                else
                        clocksource_tsc.rating = 0;
        }
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
{
        printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
               d->ident);
        tsc_unstable = 1;
        return 0;
}

/* List of systems that have known TSC problems */
static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
        {
                .callback = dmi_mark_tsc_unstable,
                .ident = "IBM Thinkpad 380XD",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
                        DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
                },
        },
        {}
};

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
        if (!cpu_has_tsc || tsc_unstable)
                return 1;

        /* Anything with constant TSC should be synchronized */
        if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
                return 0;

        /*
         * Intel systems are normally all synchronized.
         * Exceptions must mark TSC as unstable:
         */
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
                /* assume multi socket systems are not synchronized: */
                if (num_possible_cpus() > 1)
                        tsc_unstable = 1;
        }
        return tsc_unstable;
}

/*
 * Geode_LX - the OLPC CPU possibly has a very reliable TSC
 */
#ifdef CONFIG_MGEODE_LX
/* RTSC counts during suspend */
#define RTSC_SUSP       0x100

static void __init check_geode_tsc_reliable(void)
{
        unsigned long res_low, res_high;

        rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
        if (res_low & RTSC_SUSP)
                clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
}
#else
static inline void check_geode_tsc_reliable(void) { }
#endif

void __init tsc_init(void)
{
        int cpu;

        if (!cpu_has_tsc || tsc_disabled > 0)
                return;

        cpu_khz = calculate_cpu_khz();
        tsc_khz = cpu_khz;

        if (!cpu_khz) {
                mark_tsc_unstable("could not calculate TSC khz");
                return;
        }

        /* now allow native_sched_clock() to use rdtsc */
        tsc_disabled = 0;

        printk("Detected %lu.%03lu MHz processor.\n",
               (unsigned long)cpu_khz / 1000,
               (unsigned long)cpu_khz % 1000);

        /*
         * Secondary CPUs do not run through tsc_init(), so set up
         * all the scale factors for all CPUs, assuming the same
         * speed as the bootup CPU. (cpufreq notifiers will fix this
         * up if their speed diverges)
         */
        for_each_possible_cpu(cpu)
                set_cyc2ns_scale(cpu_khz, cpu);

        use_tsc_delay();

        /* Check and install the TSC clocksource */
        dmi_check_system(bad_tsc_dmi_table);

        unsynchronized_tsc();
        check_geode_tsc_reliable();
        current_tsc_khz = tsc_khz;
        clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
                                                    clocksource_tsc.shift);
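        /*
         * (For example, with tsc_khz == 1000000, i.e. a 1 GHz TSC, and
         * shift == 22, clocksource_khz2mult() picks mult such that
         * ns = (cycles * mult) >> shift, i.e. mult = 10^6 * 2^22 / tsc_khz,
         * which is 4194304 here.)
         */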
        /* lower the rating if we already know it's unstable: */
        if (check_tsc_unstable()) {
                clocksource_tsc.rating = 0;
                clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
        }
        clocksource_register(&clocksource_tsc);
}