/* arch/x86/kernel/tsc_64.c */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/time.h>
#include <linux/acpi.h>
#include <linux/cpufreq.h>

#include <asm/timex.h>

static int notsc __initdata = 0;

unsigned int cpu_khz;		/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);
unsigned int tsc_khz;
EXPORT_SYMBOL(tsc_khz);

static unsigned int cyc2ns_scale __read_mostly;

void set_cyc2ns_scale(unsigned long khz)
{
	cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / khz;
}

static unsigned long long cycles_2_ns(unsigned long long cyc)
{
	return (cyc * cyc2ns_scale) >> NS_SCALE;
}
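
/*
 * Worked example of the fixed-point conversion above (illustrative only,
 * not part of the original source; assumes NS_SCALE == 10 as defined in
 * <asm/timex.h> and a 2 GHz TSC, i.e. khz == 2000000):
 *
 *	cyc2ns_scale     = (1000000 << 10) / 2000000 = 512
 *	cycles_2_ns(cyc) = (cyc * 512) >> 10 = cyc / 2
 *
 * which is the expected 0.5 ns per cycle at 2 GHz.
 */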

unsigned long long sched_clock(void)
{
	unsigned long a = 0;

	/*
	 * Could do CPU core sync here. Opteron can execute rdtsc
	 * speculatively, which means it is not completely exact and may
	 * not be monotonic between CPUs. But the errors should be too
	 * small to matter for scheduling purposes.
	 */

	rdtscll(a);
	return cycles_2_ns(a);
}

static int tsc_unstable;

inline int check_tsc_unstable(void)
{
	return tsc_unstable;
}
#ifdef CONFIG_CPU_FREQ

/*
 * Frequency scaling support. Adjust the TSC based timer when the CPU
 * frequency changes.
 *
 * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
 * not that important because current Opteron setups do not support
 * frequency scaling on SMP anyway.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */

static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj, dummy;

	if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
		return 0;

	lpj = &dummy;
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
#ifdef CONFIG_SMP
		lpj = &cpu_data[freq->cpu].loops_per_jiffy;
#else
		lpj = &boot_cpu_data.loops_per_jiffy;
#endif

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		tsc_khz_ref = tsc_khz;
	}
	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			mark_tsc_unstable("cpufreq changes");
	}

	set_cyc2ns_scale(tsc_khz);

	return 0;
}
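
/*
 * Illustrative example of the scaling above (not from the original source):
 * with ref_freq == 2600000 kHz and a transition to freq->new == 1300000 kHz,
 * cpufreq_scale() halves both reference values, so
 *
 *	*lpj    = loops_per_jiffy_ref * 1300000 / 2600000 = loops_per_jiffy_ref / 2
 *	tsc_khz = tsc_khz_ref * 1300000 / 2600000         = tsc_khz_ref / 2
 *
 * and the following set_cyc2ns_scale() call doubles the ns-per-cycle scale,
 * so sched_clock() keeps returning nanoseconds at the lower TSC rate.
 */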

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	cpufreq_register_notifier(&time_cpufreq_notifier_block,
				  CPUFREQ_TRANSITION_NOTIFIER);
	return 0;
}

core_initcall(cpufreq_tsc);

#endif

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
	if (tsc_unstable)
		return 1;

#ifdef CONFIG_SMP
	if (apic_is_clustered_box())
		return 1;
#endif
	/* Most Intel systems have synchronized TSCs except for
	   multi-node systems */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
#ifdef CONFIG_ACPI
		/* But TSC doesn't tick in C3 so don't use it there */
		if (acpi_gbl_FADT.header.length > 0 &&
		    acpi_gbl_FADT.C3latency < 1000)
			return 1;
#endif
		return 0;
	}

	/* Assume multi-socket systems are not synchronized */
	return num_present_cpus() > 1;
}

int __init notsc_setup(char *s)
{
	notsc = 1;
	return 1;
}

__setup("notsc", notsc_setup);
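
/*
 * Usage note (illustrative, not part of the original source): booting with
 * "notsc" on the kernel command line sets the flag above, and
 * init_tsc_clocksource() below then skips registering the TSC clocksource,
 * e.g.:
 *
 *	linux ... root=/dev/sda1 notsc
 */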

/* clock source code: */
static cycle_t read_tsc(void)
{
	cycle_t ret = (cycle_t)get_cycles_sync();
	return ret;
}

static cycle_t __vsyscall_fn vread_tsc(void)
{
	cycle_t ret = (cycle_t)get_cycles_sync();
	return ret;
}

static struct clocksource clocksource_tsc = {
	.name		= "tsc",
	.rating		= 300,
	.read		= read_tsc,
	.mask		= CLOCKSOURCE_MASK(64),
	.shift		= 22,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS |
			  CLOCK_SOURCE_MUST_VERIFY,
	.vread		= vread_tsc,
};
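
/*
 * How the clocksource converts cycles to nanoseconds (illustrative only;
 * assumes a 2 GHz TSC, i.e. tsc_khz == 2000000, and the shift of 22
 * chosen above):
 *
 *	mult = clocksource_khz2mult(2000000, 22)
 *	     ~= (1000000 << 22) / 2000000 = 2097152
 *	ns   = (cycles * mult) >> 22 = cycles / 2
 *
 * again 0.5 ns per cycle, matching the cyc2ns scale used by sched_clock().
 */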

void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		printk(KERN_WARNING "Marking TSC unstable due to %s\n", reason);
		/* Change only the rating, when not registered */
		if (clocksource_tsc.mult)
			clocksource_change_rating(&clocksource_tsc, 0);
		else
			clocksource_tsc.rating = 0;
	}
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);

void __init init_tsc_clocksource(void)
{
	if (!notsc) {
		clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
						clocksource_tsc.shift);
		if (check_tsc_unstable())
			clocksource_tsc.rating = 0;

		clocksource_register(&clocksource_tsc);
	}
}