/*  KVM paravirtual clock driver. A clocksource implementation
    Copyright (C) 2008 Glauber de Oliveira Costa, Red Hat Inc.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/

#include <linux/clocksource.h>
#include <linux/kvm_para.h>
#include <asm/pvclock.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/memblock.h>
#include <linux/sched.h>

#include <asm/x86_init.h>
#include <asm/reboot.h>

static int kvmclock = 1;
static int msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
static int msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
static cycle_t kvm_sched_clock_offset;

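/* "no-kvmclock" on the kernel command line disables the use of kvmclock. */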
static int parse_no_kvmclock(char *arg)
{
        kvmclock = 0;
        return 0;
}
early_param("no-kvmclock", parse_no_kvmclock);

/* The hypervisor will periodically update the time information stored here */
static struct pvclock_vsyscall_time_info *hv_clock;
static struct pvclock_wall_clock wall_clock;

/*
 * The wallclock is the time of day when we booted. Some time may have
 * elapsed since the hypervisor wrote the data, so we account for that
 * using the system time.
 */
static void kvm_get_wallclock(struct timespec *now)
{
        struct pvclock_vcpu_time_info *vcpu_time;
        int low, high;
        int cpu;

        low = (int)__pa_symbol(&wall_clock);
        high = ((u64)__pa_symbol(&wall_clock) >> 32);

        native_write_msr(msr_kvm_wall_clock, low, high);

        cpu = get_cpu();

        vcpu_time = &hv_clock[cpu].pvti;
        pvclock_read_wallclock(&wall_clock, vcpu_time, now);

        put_cpu();
}

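/* A KVM guest has no way to set the host's wall clock; always report failure. */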
static int kvm_set_wallclock(const struct timespec *now)
{
        return -1;
}

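/*
 * Read this vCPU's pvclock area. Preemption is disabled (notrace, since this
 * is also used from sched_clock) so the CPU cannot change between looking up
 * the per-cpu entry and reading it.
 */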
static cycle_t kvm_clock_read(void)
{
        struct pvclock_vcpu_time_info *src;
        cycle_t ret;
        int cpu;

        preempt_disable_notrace();
        cpu = smp_processor_id();
        src = &hv_clock[cpu].pvti;
        ret = pvclock_clocksource_read(src);
        preempt_enable_notrace();
        return ret;
}

static cycle_t kvm_clock_get_cycles(struct clocksource *cs)
{
        return kvm_clock_read();
}

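/*
 * sched_clock: the kvmclock value sampled at init time is remembered in
 * kvm_sched_clock_offset and subtracted from every later reading, so
 * sched_clock starts near zero. If the pvclock is not TSC-stable, fall back
 * to the raw kvm_clock_read.
 */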
static cycle_t kvm_sched_clock_read(void)
{
        return kvm_clock_read() - kvm_sched_clock_offset;
}

static inline void kvm_sched_clock_init(bool stable)
{
        if (!stable) {
                pv_time_ops.sched_clock = kvm_clock_read;
                return;
        }

        kvm_sched_clock_offset = kvm_clock_read();
        pv_time_ops.sched_clock = kvm_sched_clock_read;
        set_sched_clock_stable();

        printk(KERN_INFO "kvm-clock: using sched offset of %llu cycles\n",
               kvm_sched_clock_offset);

        BUILD_BUG_ON(sizeof(kvm_sched_clock_offset) >
                sizeof(((struct pvclock_vcpu_time_info *)NULL)->system_time));
}

/*
 * If we don't preset lpj here, there is a chance that the guest will
 * calibrate under heavy load - thus, getting a lower lpj - and execute
 * the delays themselves without load. This is wrong, because no delay
 * loop can finish beforehand. Any heuristic is subject to failure,
 * because ultimately a large pool of guests can be running and disturb
 * each other. So we preset lpj here.
 */
static unsigned long kvm_get_tsc_khz(void)
{
        struct pvclock_vcpu_time_info *src;
        int cpu;
        unsigned long tsc_khz;

        cpu = get_cpu();
        src = &hv_clock[cpu].pvti;
        tsc_khz = pvclock_tsc_khz(src);
        put_cpu();
        return tsc_khz;
}

static void kvm_get_preset_lpj(void)
{
        unsigned long khz;
        u64 lpj;

        khz = kvm_get_tsc_khz();

        lpj = ((u64)khz * 1000);
        do_div(lpj, HZ);
        preset_lpj = lpj;
}

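/*
 * The host sets PVCLOCK_GUEST_STOPPED in the per-vcpu flags when the guest
 * was stopped (paused). Clear the flag, poke the pvclock watchdogs so the
 * pause is not mistaken for a lockup, and tell the caller whether a pause
 * was observed.
 */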
bool kvm_check_and_clear_guest_paused(void)
{
        bool ret = false;
        struct pvclock_vcpu_time_info *src;
        int cpu = smp_processor_id();

        if (!hv_clock)
                return ret;

        src = &hv_clock[cpu].pvti;
        if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
                src->flags &= ~PVCLOCK_GUEST_STOPPED;
                pvclock_touch_watchdogs();
                ret = true;
        }

        return ret;
}

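/* The clocksource registered with the timekeeping core; reads go through kvm_clock_read(). */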
static struct clocksource kvm_clock = {
        .name = "kvm-clock",
        .read = kvm_clock_get_cycles,
        .rating = 400,
        .mask = CLOCKSOURCE_MASK(64),
        .flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

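/*
 * Tell the host where this CPU's pvclock area lives by writing its guest
 * physical address to the system-time MSR. Bit 0 of the low word is the
 * 'enable' bit, so the host starts updating the area right away.
 */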
int kvm_register_clock(char *txt)
{
        int cpu = smp_processor_id();
        int low, high, ret;
        struct pvclock_vcpu_time_info *src;

        if (!hv_clock)
                return 0;

        src = &hv_clock[cpu].pvti;
        low = (int)slow_virt_to_phys(src) | 1;
        high = ((u64)slow_virt_to_phys(src) >> 32);
        ret = native_write_msr_safe(msr_kvm_system_time, low, high);
        printk(KERN_INFO "kvm-clock: cpu %d, msr %x:%x, %s\n",
               cpu, high, low, txt);

        return ret;
}

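/*
 * Nothing needs to be saved across suspend; on resume the boot CPU simply
 * re-registers its pvclock area with the host.
 */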
static void kvm_save_sched_clock_state(void)
{
}

static void kvm_restore_sched_clock_state(void)
{
        kvm_register_clock("primary cpu clock, resume");
}

#ifdef CONFIG_X86_LOCAL_APIC
static void kvm_setup_secondary_clock(void)
{
        /*
         * Now that the first cpu already had this clocksource initialized,
         * we shouldn't fail.
         */
        WARN_ON(kvm_register_clock("secondary cpu clock"));
}
#endif

/*
 * After the clock is registered, the host will keep writing to the
 * registered memory location. If the guest happens to shut down, this
 * memory won't be valid. In cases like kexec, in which you install a new
 * kernel, this means a random memory location would keep being written to.
 * So before any kind of shutdown from our side, we unregister the clock by
 * writing anything that does not have the 'enable' bit set in the msr.
 */
#ifdef CONFIG_KEXEC_CORE
static void kvm_crash_shutdown(struct pt_regs *regs)
{
        native_write_msr(msr_kvm_system_time, 0, 0);
        kvm_disable_steal_time();
        native_machine_crash_shutdown(regs);
}
#endif

static void kvm_shutdown(void)
{
        native_write_msr(msr_kvm_system_time, 0, 0);
        kvm_disable_steal_time();
        native_machine_shutdown();
}

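/*
 * Probe for kvmclock support, allocate the per-CPU pvclock pages, register
 * the boot CPU's clock area with the host, and hook the platform time,
 * sched_clock and shutdown operations.
 */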
void __init kvmclock_init(void)
{
        struct pvclock_vcpu_time_info *vcpu_time;
        unsigned long mem;
        int size, cpu;
        u8 flags;

        size = PAGE_ALIGN(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);

        if (!kvm_para_available())
                return;

        if (kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE2)) {
                msr_kvm_system_time = MSR_KVM_SYSTEM_TIME_NEW;
                msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK_NEW;
        } else if (!(kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)))
                return;

        printk(KERN_INFO "kvm-clock: Using msrs %x and %x",
                msr_kvm_system_time, msr_kvm_wall_clock);

        mem = memblock_alloc(size, PAGE_SIZE);
        if (!mem)
                return;
        hv_clock = __va(mem);
        memset(hv_clock, 0, size);

        if (kvm_register_clock("primary cpu clock")) {
                hv_clock = NULL;
                memblock_free(mem, size);
                return;
        }

        if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
                pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);

        cpu = get_cpu();
        vcpu_time = &hv_clock[cpu].pvti;
        flags = pvclock_read_flags(vcpu_time);

        kvm_sched_clock_init(flags & PVCLOCK_TSC_STABLE_BIT);
        put_cpu();

        x86_platform.calibrate_tsc = kvm_get_tsc_khz;
        x86_platform.get_wallclock = kvm_get_wallclock;
        x86_platform.set_wallclock = kvm_set_wallclock;
#ifdef CONFIG_X86_LOCAL_APIC
        x86_cpuinit.early_percpu_clock_init = kvm_setup_secondary_clock;
#endif
        x86_platform.save_sched_clock_state = kvm_save_sched_clock_state;
        x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state;
        machine_ops.shutdown = kvm_shutdown;
#ifdef CONFIG_KEXEC_CORE
        machine_ops.crash_shutdown = kvm_crash_shutdown;
#endif
        kvm_get_preset_lpj();
        clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);
        pv_info.name = "KVM";
}

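/*
 * Expose the pvclock pages to the vDSO when the clock is TSC-stable, so that
 * clock reads can be satisfied in user space without a syscall.
 */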
int __init kvm_setup_vsyscall_timeinfo(void)
{
#ifdef CONFIG_X86_64
        int cpu;
        int ret;
        u8 flags;
        struct pvclock_vcpu_time_info *vcpu_time;
        unsigned int size;

        if (!hv_clock)
                return 0;

        size = PAGE_ALIGN(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);

        cpu = get_cpu();

        vcpu_time = &hv_clock[cpu].pvti;
        flags = pvclock_read_flags(vcpu_time);

        if (!(flags & PVCLOCK_TSC_STABLE_BIT)) {
                put_cpu();
                return 1;
        }

        if ((ret = pvclock_init_vsyscall(hv_clock, size))) {
                put_cpu();
                return ret;
        }

        put_cpu();

        kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
#endif
        return 0;
}