/*
 * Xen time implementation.
 *
 * This is implemented in terms of a clocksource driver which uses
 * the hypervisor clock as a nanosecond timebase, and a clockevent
 * driver which uses the hypervisor's timer mechanism.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/kernel_stat.h>
#include <linux/math64.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/pvclock_gtod.h>

#include <asm/pvclock.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/events.h>
#include <xen/features.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include "xen-ops.h"

/* Xen may fire a timer up to this many ns early */
#define TIMER_SLOP	100000
#define NS_PER_TICK	(1000000000LL / HZ)

/* runstate info updated by Xen */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);

/* snapshots of runstate info */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);

/* unused ns of stolen time */
static DEFINE_PER_CPU(u64, xen_residual_stolen);

/* return a consistent snapshot of a 64-bit time/counter value */
static u64 get64(const u64 *p)
{
	u64 ret;

	if (BITS_PER_LONG < 64) {
		u32 *p32 = (u32 *)p;
		u32 h, l;

		/*
		 * Read high then low, and then make sure high is
		 * still the same; this will only loop if low wraps
		 * and carries into high.
		 * XXX some clean way to make this endian-proof?
		 */
		do {
			h = p32[1];
			barrier();
			l = p32[0];
			barrier();
		} while (p32[1] != h);

		ret = (((u64)h) << 32) | l;
	} else
		ret = *p;

	return ret;
}

/*
 * Runstate accounting
 */
static void get_runstate_snapshot(struct vcpu_runstate_info *res)
{
	u64 state_time;
	struct vcpu_runstate_info *state;

	BUG_ON(preemptible());

	state = &__get_cpu_var(xen_runstate);

	/*
	 * The runstate info is always updated by the hypervisor on
	 * the current CPU, so there's no need to use anything
	 * stronger than a compiler barrier when fetching it.
	 */
	do {
		state_time = get64(&state->state_entry_time);
		barrier();
		*res = *state;
		barrier();
	} while (get64(&state->state_entry_time) != state_time);
}

/* return true when a vcpu could run but has no real cpu to run on */
bool xen_vcpu_stolen(int vcpu)
{
	return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
}

void xen_setup_runstate_info(int cpu)
{
	struct vcpu_register_runstate_memory_area area;

	area.addr.v = &per_cpu(xen_runstate, cpu);

	if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
			       cpu, &area))
		BUG();
}

static void do_stolen_accounting(void)
{
	struct vcpu_runstate_info state;
	struct vcpu_runstate_info *snap;
	s64 runnable, offline, stolen;
	cputime_t ticks;

	get_runstate_snapshot(&state);

	WARN_ON(state.state != RUNSTATE_running);

	snap = &__get_cpu_var(xen_runstate_snapshot);

	/* work out how much time the VCPU has not been runn*ing* */
	runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
	offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];

	*snap = state;

	/* Add the appropriate number of ticks of stolen time,
	   including any left-overs from last time. */
	stolen = runnable + offline + __this_cpu_read(xen_residual_stolen);

	if (stolen < 0)
		stolen = 0;

	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
	__this_cpu_write(xen_residual_stolen, stolen);
	account_steal_ticks(ticks);
}

/* Get the TSC speed from Xen */
static unsigned long xen_tsc_khz(void)
{
	struct pvclock_vcpu_time_info *info =
		&HYPERVISOR_shared_info->vcpu_info[0].time;

	return pvclock_tsc_khz(info);
}

cycle_t xen_clocksource_read(void)
{
	struct pvclock_vcpu_time_info *src;
	cycle_t ret;

	preempt_disable_notrace();
	src = &__get_cpu_var(xen_vcpu)->time;
	ret = pvclock_clocksource_read(src);
	preempt_enable_notrace();
	return ret;
}

static cycle_t xen_clocksource_get_cycles(struct clocksource *cs)
{
	return xen_clocksource_read();
}

static void xen_read_wallclock(struct timespec *ts)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	struct pvclock_wall_clock *wall_clock = &(s->wc);
	struct pvclock_vcpu_time_info *vcpu_time;

	vcpu_time = &get_cpu_var(xen_vcpu)->time;
	pvclock_read_wallclock(wall_clock, vcpu_time, ts);
	put_cpu_var(xen_vcpu);
}

static void xen_get_wallclock(struct timespec *now)
{
	xen_read_wallclock(now);
}

static int xen_set_wallclock(const struct timespec *now)
{
	return -1;
}

static int xen_pvclock_gtod_notify(struct notifier_block *nb,
				   unsigned long was_set, void *priv)
{
	/* Protected by the calling core code serialization */
	static struct timespec next_sync;

	struct xen_platform_op op;
	struct timespec now;

	now = __current_kernel_time();

	/*
	 * We only make the expensive hypercall when the clock was set
	 * or when the 11 minute RTC synchronization period has elapsed.
	 */
	if (!was_set && timespec_compare(&now, &next_sync) < 0)
		return NOTIFY_OK;

	op.cmd = XENPF_settime;
	op.u.settime.secs = now.tv_sec;
	op.u.settime.nsecs = now.tv_nsec;
	op.u.settime.system_time = xen_clocksource_read();

	(void)HYPERVISOR_dom0_op(&op);

	/*
	 * Move the next drift compensation time 11 minutes
	 * ahead. That's emulating the sync_cmos_clock() update for
	 * the hardware RTC.
	 */
	next_sync = now;
	next_sync.tv_sec += 11 * 60;

	return NOTIFY_OK;
}

static struct notifier_block xen_pvclock_gtod_notifier = {
	.notifier_call = xen_pvclock_gtod_notify,
};

static struct clocksource xen_clocksource __read_mostly = {
	.name = "xen",
	.rating = 400,
	.read = xen_clocksource_get_cycles,
	.mask = ~0,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
	Xen clockevent implementation

	Xen has two clockevent implementations:

	The old timer_op one works with all released versions of Xen prior
	to version 3.0.4.  This version of the hypervisor provides a
	single-shot timer with nanosecond resolution.  However, sharing the
	same event channel is a 100Hz tick which is delivered while the
	vcpu is running.  We don't care about or use this tick, but it will
	cause the core time code to think the timer fired too soon, and
	will end up resetting it each time.  It could be filtered, but
	doing so has complications when the ktime clocksource is not yet
	the xen clocksource (ie, at boot time).

	The new vcpu_op-based timer interface allows the tick timer period
	to be changed or turned off.  The tick timer is not useful as a
	periodic timer because events are only delivered to running vcpus.
	The one-shot timer can report when a timeout is in the past, so
	set_next_event is capable of returning -ETIME when appropriate.
	This interface is used when available.
*/

/*
	Get a hypervisor absolute time.  In theory we could maintain an
	offset between the kernel's time and the hypervisor's time, and
	apply that to a kernel's absolute timeout.  Unfortunately the
	hypervisor and kernel times can drift even if the kernel is using
	the Xen clocksource, because ntp can warp the kernel's clocksource.
*/
static s64 get_abs_timeout(unsigned long delta)
{
	return xen_clocksource_read() + delta;
}

static void xen_timerop_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		/* unsupported */
		WARN_ON(1);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_RESUME:
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		HYPERVISOR_set_timer_op(0);	/* cancel timeout */
		break;
	}
}

static int xen_timerop_set_next_event(unsigned long delta,
				      struct clock_event_device *evt)
{
	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);

	if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0)
		BUG();

	/* We may have missed the deadline, but there's no real way of
	   knowing for sure.  If the event was in the past, then we'll
	   get an immediate interrupt. */

	return 0;
}

static const struct clock_event_device xen_timerop_clockevent = {
	.name = "xen",
	.features = CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,

	.mult = 1,
	.shift = 0,
	.rating = 500,

	.set_mode = xen_timerop_set_mode,
	.set_next_event = xen_timerop_set_next_event,
};

static void xen_vcpuop_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	int cpu = smp_processor_id();

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		WARN_ON(1);	/* unsupported */
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) ||
		    HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static int xen_vcpuop_set_next_event(unsigned long delta,
				     struct clock_event_device *evt)
{
	int cpu = smp_processor_id();
	struct vcpu_set_singleshot_timer single;
	int ret;

	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);

	single.timeout_abs_ns = get_abs_timeout(delta);
	single.flags = VCPU_SSHOTTMR_future;

	ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);

	BUG_ON(ret != 0 && ret != -ETIME);

	return ret;
}

static const struct clock_event_device xen_vcpuop_clockevent = {
	.name = "xen",
	.features = CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,

	.mult = 1,
	.shift = 0,
	.rating = 500,

	.set_mode = xen_vcpuop_set_mode,
	.set_next_event = xen_vcpuop_set_next_event,
};

static const struct clock_event_device *xen_clockevent =
	&xen_timerop_clockevent;

struct xen_clock_event_device {
	struct clock_event_device evt;
	char *name;
};
static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };

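/* VIRQ_TIMER handler: run the clockevent handler and account stolen time. */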
static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &__get_cpu_var(xen_clock_events).evt;
	irqreturn_t ret;

	ret = IRQ_NONE;
	if (evt->event_handler) {
		evt->event_handler(evt);
		ret = IRQ_HANDLED;
	}

	do_stolen_accounting();

	return ret;
}

void xen_teardown_timer(int cpu)
{
	struct clock_event_device *evt;
	BUG_ON(cpu == 0);
	evt = &per_cpu(xen_clock_events, cpu).evt;

	if (evt->irq >= 0) {
		unbind_from_irqhandler(evt->irq, NULL);
		evt->irq = -1;
		kfree(per_cpu(xen_clock_events, cpu).name);
		per_cpu(xen_clock_events, cpu).name = NULL;
	}
}

void xen_setup_timer(int cpu)
{
	char *name;
	struct clock_event_device *evt;
	int irq;

	evt = &per_cpu(xen_clock_events, cpu).evt;
	WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
	if (evt->irq >= 0)
		xen_teardown_timer(cpu);

	printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);

	name = kasprintf(GFP_KERNEL, "timer%d", cpu);
	if (!name)
		name = "<timer kasprintf failed>";

	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
				      IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER|
				      IRQF_FORCE_RESUME|IRQF_EARLY_RESUME,
				      name, NULL);
	(void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);

	memcpy(evt, xen_clockevent, sizeof(*evt));

	evt->cpumask = cpumask_of(cpu);
	evt->irq = irq;
	per_cpu(xen_clock_events, cpu).name = name;
}


void xen_setup_cpu_clockevents(void)
{
	BUG_ON(preemptible());

	clockevents_register_device(&__get_cpu_var(xen_clock_events).evt);
}

void xen_timer_resume(void)
{
	int cpu;

	pvclock_resume();

	if (xen_clockevent != &xen_vcpuop_clockevent)
		return;

	for_each_online_cpu(cpu) {
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
	}
}

static const struct pv_time_ops xen_time_ops __initconst = {
	.sched_clock = xen_clocksource_read,
};

static void __init xen_time_init(void)
{
	int cpu = smp_processor_id();
	struct timespec tp;

	clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC);

	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {
		/* Successfully turned off 100Hz tick, so we have the
		   vcpuop-based timer interface */
		printk(KERN_DEBUG "Xen: using vcpuop timer interface\n");
		xen_clockevent = &xen_vcpuop_clockevent;
	}

	/* Set initial system time with full resolution */
	xen_read_wallclock(&tp);
	do_settimeofday(&tp);

	setup_force_cpu_cap(X86_FEATURE_TSC);

	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_setup_cpu_clockevents();

	if (xen_initial_domain())
		pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
}

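/* Install the PV time hooks: sched_clock, timer init, TSC calibration and wallclock. */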
void __init xen_init_time_ops(void)
{
	pv_time_ops = xen_time_ops;

	x86_init.timers.timer_init = xen_time_init;
	x86_init.timers.setup_percpu_clockev = x86_init_noop;
	x86_cpuinit.setup_percpu_clockev = x86_init_noop;

	x86_platform.calibrate_tsc = xen_tsc_khz;
	x86_platform.get_wallclock = xen_get_wallclock;
	/* Dom0 uses the native method to set the hardware RTC. */
	if (!xen_initial_domain())
		x86_platform.set_wallclock = xen_set_wallclock;
}

#ifdef CONFIG_XEN_PVHVM
static void xen_hvm_setup_cpu_clockevents(void)
{
	int cpu = smp_processor_id();
	xen_setup_runstate_info(cpu);
	/*
	 * xen_setup_timer(cpu) - snprintf is bad in atomic context. Hence
	 * doing it in xen_hvm_cpu_notify (which gets called by smp_init
	 * during early bootup and also during CPU hotplug events).
	 */
	xen_setup_cpu_clockevents();
}

void __init xen_hvm_init_time_ops(void)
{
	/* A vector callback is needed, otherwise we cannot receive
	 * interrupts on cpu > 0, and at this point we don't know how
	 * many cpus are available */
	if (!xen_have_vector_callback)
		return;
	if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
		printk(KERN_INFO "Xen doesn't support pvclock on HVM, "
		       "disable pv timer\n");
		return;
	}

	pv_time_ops = xen_time_ops;
	x86_init.timers.setup_percpu_clockev = xen_time_init;
	x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;

	x86_platform.calibrate_tsc = xen_tsc_khz;
	x86_platform.get_wallclock = xen_get_wallclock;
	x86_platform.set_wallclock = xen_set_wallclock;
}
#endif