/*
 * Xen time implementation.
 *
 * This is implemented in terms of a clocksource driver which uses
 * the hypervisor clock as a nanosecond timebase, and a clockevent
 * driver which uses the hypervisor's timer mechanism.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/kernel_stat.h>
#include <linux/math64.h>
#include <linux/gfp.h>

#include <asm/pvclock.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/events.h>
#include <xen/features.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include "xen-ops.h"

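/*
 * The pvclock "cycles" returned by xen_clocksource_read() are already
 * nanoseconds, so the clocksource below uses mult == 1 << XEN_SHIFT
 * with shift == XEN_SHIFT: the generic (cycles * mult) >> shift
 * conversion is then the identity.
 */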
#define XEN_SHIFT 22

/* Xen may fire a timer up to this many ns early */
#define TIMER_SLOP	100000
#define NS_PER_TICK	(1000000000LL / HZ)

/* runstate info updated by Xen */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);

/* snapshots of runstate info */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);

/* unused ns of stolen and blocked time */
static DEFINE_PER_CPU(u64, xen_residual_stolen);
static DEFINE_PER_CPU(u64, xen_residual_blocked);

/* return a consistent snapshot of a 64-bit time/counter value */
static u64 get64(const u64 *p)
{
	u64 ret;

	if (BITS_PER_LONG < 64) {
		u32 *p32 = (u32 *)p;
		u32 h, l;

		/*
		 * Read high then low, and then make sure high is
		 * still the same; this will only loop if low wraps
		 * and carries into high.
		 * XXX some clean way to make this endian-proof?
		 */
		do {
			h = p32[1];
			barrier();
			l = p32[0];
			barrier();
		} while (p32[1] != h);

		ret = (((u64)h) << 32) | l;
	} else
		ret = *p;

	return ret;
}

/*
 * Runstate accounting
 */
static void get_runstate_snapshot(struct vcpu_runstate_info *res)
{
	u64 state_time;
	struct vcpu_runstate_info *state;

	BUG_ON(preemptible());

	state = &__get_cpu_var(xen_runstate);

	/*
	 * The runstate info is always updated by the hypervisor on
	 * the current CPU, so there's no need to use anything
	 * stronger than a compiler barrier when fetching it.
	 */
	do {
		state_time = get64(&state->state_entry_time);
		barrier();
		*res = *state;
		barrier();
	} while (get64(&state->state_entry_time) != state_time);
}

/* return true when a vcpu could run but has no real cpu to run on */
bool xen_vcpu_stolen(int vcpu)
{
	return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
}

void xen_setup_runstate_info(int cpu)
{
	struct vcpu_register_runstate_memory_area area;

	area.addr.v = &per_cpu(xen_runstate, cpu);

	if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
			       cpu, &area))
		BUG();
}

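/*
 * Fold the nanoseconds this vcpu spent runnable/offline (stolen) and
 * blocked since the last snapshot into whole scheduler ticks, carrying
 * any sub-tick remainder over in the xen_residual_* counters.
 */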
static void do_stolen_accounting(void)
{
	struct vcpu_runstate_info state;
	struct vcpu_runstate_info *snap;
	s64 blocked, runnable, offline, stolen;
	cputime_t ticks;

	get_runstate_snapshot(&state);

	WARN_ON(state.state != RUNSTATE_running);

	snap = &__get_cpu_var(xen_runstate_snapshot);

	/* work out how much time the VCPU has not been runn*ing* */
	blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
	runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
	offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];

	*snap = state;

	/* Add the appropriate number of ticks of stolen time,
	   including any left-overs from last time. */
	stolen = runnable + offline + __get_cpu_var(xen_residual_stolen);

	if (stolen < 0)
		stolen = 0;

	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
	__get_cpu_var(xen_residual_stolen) = stolen;
	account_steal_ticks(ticks);

	/* Add the appropriate number of ticks of blocked time,
	   including any left-overs from last time. */
	blocked += __get_cpu_var(xen_residual_blocked);

	if (blocked < 0)
		blocked = 0;

	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
	__get_cpu_var(xen_residual_blocked) = blocked;
	account_idle_ticks(ticks);
}

/*
 * Xen sched_clock implementation.  Returns the number of unstolen
 * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED
 * states.
 */
static unsigned long long xen_sched_clock(void)
{
	struct vcpu_runstate_info state;
	cycle_t now;
	u64 ret;
	s64 offset;

	/*
	 * Ideally sched_clock should be called on a per-cpu basis
	 * anyway, so preempt should already be disabled, but that's
	 * not current practice at the moment.
	 */
	preempt_disable();

	now = xen_clocksource_read();

	get_runstate_snapshot(&state);

	WARN_ON(state.state != RUNSTATE_running);

	offset = now - state.state_entry_time;
	if (offset < 0)
		offset = 0;

	ret = state.time[RUNSTATE_blocked] +
		state.time[RUNSTATE_running] +
		offset;

	preempt_enable();

	return ret;
}

/* Get the TSC speed from Xen */
static unsigned long xen_tsc_khz(void)
{
	struct pvclock_vcpu_time_info *info =
		&HYPERVISOR_shared_info->vcpu_info[0].time;

	return pvclock_tsc_khz(info);
}

cycle_t xen_clocksource_read(void)
{
	struct pvclock_vcpu_time_info *src;
	cycle_t ret;

	src = &get_cpu_var(xen_vcpu)->time;
	ret = pvclock_clocksource_read(src);
	put_cpu_var(xen_vcpu);
	return ret;
}

static cycle_t xen_clocksource_get_cycles(struct clocksource *cs)
{
	return xen_clocksource_read();
}

static void xen_read_wallclock(struct timespec *ts)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	struct pvclock_wall_clock *wall_clock = &(s->wc);
	struct pvclock_vcpu_time_info *vcpu_time;

	vcpu_time = &get_cpu_var(xen_vcpu)->time;
	pvclock_read_wallclock(wall_clock, vcpu_time, ts);
	put_cpu_var(xen_vcpu);
}

static unsigned long xen_get_wallclock(void)
{
	struct timespec ts;

	xen_read_wallclock(&ts);
	return ts.tv_sec;
}

static int xen_set_wallclock(unsigned long now)
{
	/* do nothing for domU */
	return -1;
}

static struct clocksource xen_clocksource __read_mostly = {
	.name = "xen",
	.rating = 400,
	.read = xen_clocksource_get_cycles,
	.mask = ~0,
	.mult = 1<<XEN_SHIFT,		/* time directly in nanoseconds */
	.shift = XEN_SHIFT,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
	Xen clockevent implementation

	Xen has two clockevent implementations:

	The old timer_op one works with all released versions of Xen prior
	to version 3.0.4.  This version of the hypervisor provides a
	single-shot timer with nanosecond resolution.  However, it shares
	its event channel with a 100Hz tick which is delivered while the
	vcpu is running.  We don't care about or use this tick, but it will
	cause the core time code to think the timer fired too soon, and
	will end up resetting it each time.  It could be filtered, but
	doing so has complications when the ktime clocksource is not yet
	the xen clocksource (ie, at boot time).

	The new vcpu_op-based timer interface allows the tick timer period
	to be changed or turned off.  The tick timer is not useful as a
	periodic timer because events are only delivered to running vcpus.
	The one-shot timer can report when a timeout is in the past, so
	set_next_event is capable of returning -ETIME when appropriate.
	This interface is used when available.
*/

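/*
 * xen_time_init() below probes for the vcpu_op interface by trying
 * VCPUOP_stop_periodic_timer; if that succeeds, xen_clockevent is
 * switched from the timer_op to the vcpu_op clockevent.
 */
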
/*
	Get a hypervisor absolute time.  In theory we could maintain an
	offset between the kernel's time and the hypervisor's time, and
	apply that to a kernel's absolute timeout.  Unfortunately the
	hypervisor and kernel times can drift even if the kernel is using
	the Xen clocksource, because ntp can warp the kernel's clocksource.
*/
static s64 get_abs_timeout(unsigned long delta)
{
	return xen_clocksource_read() + delta;
}

static void xen_timerop_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		/* unsupported */
		WARN_ON(1);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_RESUME:
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		HYPERVISOR_set_timer_op(0);	/* cancel timeout */
		break;
	}
}

static int xen_timerop_set_next_event(unsigned long delta,
				      struct clock_event_device *evt)
{
	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);

	if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0)
		BUG();

	/* We may have missed the deadline, but there's no real way of
	   knowing for sure.  If the event was in the past, then we'll
	   get an immediate interrupt. */

	return 0;
}

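/*
 * With mult == 1 and shift == 0 the clockevents core passes timeout
 * deltas to set_next_event() unscaled, i.e. directly in nanoseconds;
 * min_delta_ns is set to TIMER_SLOP since Xen may fire up to that
 * many ns early.
 */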
static const struct clock_event_device xen_timerop_clockevent = {
	.name = "xen",
	.features = CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,

	.mult = 1,
	.shift = 0,
	.rating = 500,

	.set_mode = xen_timerop_set_mode,
	.set_next_event = xen_timerop_set_next_event,
};


static void xen_vcpuop_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	int cpu = smp_processor_id();

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		WARN_ON(1);	/* unsupported */
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) ||
		    HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

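/*
 * VCPU_SSHOTTMR_future asks the hypervisor to fail with -ETIME rather
 * than fire immediately if the absolute timeout is already in the
 * past; returning -ETIME lets the caller reprogram a later expiry.
 */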
static int xen_vcpuop_set_next_event(unsigned long delta,
				     struct clock_event_device *evt)
{
	int cpu = smp_processor_id();
	struct vcpu_set_singleshot_timer single;
	int ret;

	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);

	single.timeout_abs_ns = get_abs_timeout(delta);
	single.flags = VCPU_SSHOTTMR_future;

	ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);

	BUG_ON(ret != 0 && ret != -ETIME);

	return ret;
}

static const struct clock_event_device xen_vcpuop_clockevent = {
	.name = "xen",
	.features = CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,

	.mult = 1,
	.shift = 0,
	.rating = 500,

	.set_mode = xen_vcpuop_set_mode,
	.set_next_event = xen_vcpuop_set_next_event,
};

static const struct clock_event_device *xen_clockevent =
	&xen_timerop_clockevent;
static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events);

static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &__get_cpu_var(xen_clock_events);
	irqreturn_t ret;

	ret = IRQ_NONE;
	if (evt->event_handler) {
		evt->event_handler(evt);
		ret = IRQ_HANDLED;
	}

	do_stolen_accounting();

	return ret;
}

void xen_setup_timer(int cpu)
{
	const char *name;
	struct clock_event_device *evt;
	int irq;

	printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);

	name = kasprintf(GFP_KERNEL, "timer%d", cpu);
	if (!name)
		name = "<timer kasprintf failed>";

	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
				      IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER,
				      name, NULL);

	evt = &per_cpu(xen_clock_events, cpu);
	memcpy(evt, xen_clockevent, sizeof(*evt));

	evt->cpumask = cpumask_of(cpu);
	evt->irq = irq;
}

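/* CPU0's timer is set up at boot and never torn down, hence the BUG_ON. */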
void xen_teardown_timer(int cpu)
{
	struct clock_event_device *evt;
	BUG_ON(cpu == 0);
	evt = &per_cpu(xen_clock_events, cpu);
	unbind_from_irqhandler(evt->irq, NULL);
}

void xen_setup_cpu_clockevents(void)
{
	BUG_ON(preemptible());

	clockevents_register_device(&__get_cpu_var(xen_clock_events));
}

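/*
 * After save/restore the hypervisor may have re-armed the default
 * periodic timer, so if we are using the one-shot vcpu_op interface,
 * turn the periodic tick back off on every online vcpu.
 */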
void xen_timer_resume(void)
{
	int cpu;

	if (xen_clockevent != &xen_vcpuop_clockevent)
		return;

	for_each_online_cpu(cpu) {
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
	}
}

static const struct pv_time_ops xen_time_ops __initdata = {
	.sched_clock = xen_sched_clock,
};

static __init void xen_time_init(void)
{
	int cpu = smp_processor_id();
	struct timespec tp;

	clocksource_register(&xen_clocksource);

	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {
		/* Successfully turned off 100Hz tick, so we have the
		   vcpuop-based timer interface */
		printk(KERN_DEBUG "Xen: using vcpuop timer interface\n");
		xen_clockevent = &xen_vcpuop_clockevent;
	}

	/* Set initial system time with full resolution */
	xen_read_wallclock(&tp);
	do_settimeofday(&tp);

	setup_force_cpu_cap(X86_FEATURE_TSC);

	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_setup_cpu_clockevents();
}

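/* Hook the Xen time implementation into the pv_ops and x86 platform
   time hooks for PV domains. */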
__init void xen_init_time_ops(void)
{
	pv_time_ops = xen_time_ops;

	x86_init.timers.timer_init = xen_time_init;
	x86_init.timers.setup_percpu_clockev = x86_init_noop;
	x86_cpuinit.setup_percpu_clockev = x86_init_noop;

	x86_platform.calibrate_tsc = xen_tsc_khz;
	x86_platform.get_wallclock = xen_get_wallclock;
	x86_platform.set_wallclock = xen_set_wallclock;
}

static void xen_hvm_setup_cpu_clockevents(void)
{
	int cpu = smp_processor_id();
	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_setup_cpu_clockevents();
}

__init void xen_hvm_init_time_ops(void)
{
	/* vector callback is needed, otherwise we cannot receive
	 * interrupts on cpu > 0 */
	if (!xen_have_vector_callback && num_present_cpus() > 1)
		return;
	if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
		printk(KERN_INFO "Xen doesn't support pvclock on HVM, "
		       "disable pv timer\n");
		return;
	}

	pv_time_ops = xen_time_ops;
	x86_init.timers.setup_percpu_clockev = xen_time_init;
	x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;

	x86_platform.calibrate_tsc = xen_tsc_khz;
	x86_platform.get_wallclock = xen_get_wallclock;
	x86_platform.set_wallclock = xen_set_wallclock;
}