/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/version.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <asm/hyperv.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
/*
 * Preserve the ability to 'make deb-pkg' since PKG_ABI is provided
 * by the Ubuntu build rules.
 */
#ifndef PKG_ABI
#define PKG_ABI 0	/* assumed fallback so the file also builds outside the Ubuntu packaging */
#endif

/* The one and only */
struct hv_context hv_context = {
	.synic_initialized	= false,
	.hypercall_page		= NULL,
};
#define HV_TIMER_FREQUENCY (10 * 1000 * 1000) /* 100ns period */
#define HV_MAX_MAX_DELTA_TICKS 0xffffffff
#define HV_MIN_DELTA_TICKS 1
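
/*
 * Hyper-V keeps reference time and synthetic timer counts in 100ns
 * units, i.e. 1s / 100ns = 10,000,000 ticks per second, which is where
 * HV_TIMER_FREQUENCY comes from. With a maximum one-shot delta of
 * 0xffffffff ticks, the longest interval a clockevent can be
 * programmed for is roughly 0xffffffff / 10^7 ~= 429 seconds.
 */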
/*
 * query_hypervisor_info - Get version info of the windows hypervisor
 */
unsigned int host_info_eax;
unsigned int host_info_ebx;
unsigned int host_info_ecx;
unsigned int host_info_edx;

static int query_hypervisor_info(void)
{
	unsigned int eax;
	unsigned int ebx;
	unsigned int ecx;
	unsigned int edx;
	unsigned int max_leaf;
	unsigned int op;

	/*
	 * It's assumed that this is called after confirming that Viridian
	 * is present. Query id and revision.
	 */
	eax = 0;
	ebx = 0;
	ecx = 0;
	edx = 0;
	op = HVCPUID_VENDOR_MAXFUNCTION;
	cpuid(op, &eax, &ebx, &ecx, &edx);

	max_leaf = eax;

	if (max_leaf >= HVCPUID_VERSION) {
		eax = 0;
		ebx = 0;
		ecx = 0;
		edx = 0;
		op = HVCPUID_VERSION;
		cpuid(op, &eax, &ebx, &ecx, &edx);

		host_info_eax = eax;
		host_info_ebx = ebx;
		host_info_ecx = ecx;
		host_info_edx = edx;
	}
	return max_leaf;
}
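
/*
 * Hypercall calling convention, per the Hyper-V TLFS: the guest calls
 * through the hypercall page with a 64-bit control word (call code and
 * flags) plus the guest physical addresses of the input and output
 * buffers. On x86_64 control/input/output go in RCX/RDX/R8 and the
 * status comes back in RAX; on 32-bit x86 each 64-bit value is split
 * across a register pair (EDX:EAX control, EBX:ECX input, EDI:ESI
 * output), which is exactly what the asm constraints below encode.
 */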
/*
 * hv_do_hypercall- Invoke the specified hypercall
 */
u64 hv_do_hypercall(u64 control, void *input, void *output)
{
	u64 input_address = (input) ? virt_to_phys(input) : 0;
	u64 output_address = (output) ? virt_to_phys(output) : 0;
	void *hypercall_page = hv_context.hypercall_page;
#ifdef CONFIG_X86_64
	u64 hv_status = 0;

	if (!hypercall_page)
		return (u64)ULLONG_MAX;

	__asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
	__asm__ __volatile__("call *%3" : "=a" (hv_status) :
			     "c" (control), "d" (input_address),
			     "m" (hypercall_page));

	return hv_status;

#else

	u32 control_hi = control >> 32;
	u32 control_lo = control & 0xFFFFFFFF;
	u32 hv_status_hi = 1;
	u32 hv_status_lo = 1;
	u32 input_address_hi = input_address >> 32;
	u32 input_address_lo = input_address & 0xFFFFFFFF;
	u32 output_address_hi = output_address >> 32;
	u32 output_address_lo = output_address & 0xFFFFFFFF;

	if (!hypercall_page)
		return (u64)ULLONG_MAX;

	__asm__ __volatile__ ("call *%8" : "=d" (hv_status_hi),
			      "=a" (hv_status_lo) : "d" (control_hi),
			      "a" (control_lo), "b" (input_address_hi),
			      "c" (input_address_lo), "D" (output_address_hi),
			      "S" (output_address_lo), "m" (hypercall_page));

	return hv_status_lo | ((u64)hv_status_hi << 32);
#endif /* !x86_64 */
}
EXPORT_SYMBOL_GPL(hv_do_hypercall);
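
/*
 * Reading time from the TSC page is a lockless seqcount-style
 * protocol: the hypervisor bumps tsc_sequence whenever it updates
 * tsc_scale/tsc_offset, and a sequence of 0 means the page is invalid
 * and the guest must fall back to the (slower) TIME_REF_COUNT MSR.
 * The reference time itself is the high half of a 64x64->128 bit
 * multiply:
 *
 *	current_tick = ((cur_tsc * scale) >> 64) + offset
 *
 * which a single mulq computes below (the upper 64 bits of the
 * product land in RDX).
 */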
#ifdef CONFIG_X86_64
static u64 read_hv_clock_tsc(struct clocksource *arg)
{
	u64 current_tick;
	struct ms_hyperv_tsc_page *tsc_pg = hv_context.tsc_page;

	if (tsc_pg->tsc_sequence != 0) {
		/*
		 * Use the tsc page to compute the value.
		 */

		while (1) {
			u64 tmp;
			u32 sequence = tsc_pg->tsc_sequence;
			u64 cur_tsc;
			u64 scale = tsc_pg->tsc_scale;
			s64 offset = tsc_pg->tsc_offset;

			rdtscll(cur_tsc);
			/* current_tick = ((cur_tsc * scale) >> 64) + offset */
			asm("mulq %3"
				: "=d" (current_tick), "=a" (tmp)
				: "a" (cur_tsc), "r" (scale));

			current_tick += offset;
			if (tsc_pg->tsc_sequence == sequence)
				return current_tick;

			if (tsc_pg->tsc_sequence != 0)
				continue;
			/*
			 * Fallback using MSR method.
			 */
			break;
		}
	}
	rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
	return current_tick;
}

static struct clocksource hyperv_cs_tsc = {
		.name		= "hyperv_clocksource_tsc_page",
		.rating		= 425,
		.read		= read_hv_clock_tsc,
		.mask		= CLOCKSOURCE_MASK(64),
		.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
#endif
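
/*
 * The guest OS ID tells the hypervisor what is running in the guest.
 * generate_guest_id() (asm/mshyperv.h) packs a vendor/distro byte
 * (0x80 below, identifying a Canonical/open-source build),
 * LINUX_VERSION_CODE and a distro build number (PKG_ABI, supplied by
 * the Ubuntu build rules) into a single 64-bit value that hv_init()
 * writes to HV_X64_MSR_GUEST_OS_ID; the exact bit layout is defined
 * by generate_guest_id() itself.
 */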
/*
 * hv_init - Main initialization routine.
 *
 * This routine must be called before any other routines in here are called
 */
int hv_init(void)
{
	int max_leaf;
	union hv_x64_msr_hypercall_contents hypercall_msr;
	void *virtaddr = NULL;

	memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
	memset(hv_context.synic_message_page, 0,
	       sizeof(void *) * NR_CPUS);
	memset(hv_context.post_msg_page, 0,
	       sizeof(void *) * NR_CPUS);
	memset(hv_context.vp_index, 0,
	       sizeof(int) * NR_CPUS);
	memset(hv_context.event_dpc, 0,
	       sizeof(void *) * NR_CPUS);
	memset(hv_context.msg_dpc, 0,
	       sizeof(void *) * NR_CPUS);
	memset(hv_context.clk_evt, 0,
	       sizeof(void *) * NR_CPUS);

	max_leaf = query_hypervisor_info();

	/* Write our OS ID */
	hv_context.guestid = generate_guest_id(0x80 /* Canonical */,
					       LINUX_VERSION_CODE, PKG_ABI);
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, hv_context.guestid);

	/* See if the hypercall page is already set */
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);

	if (!virtaddr)
		goto cleanup;

	hypercall_msr.enable = 1;

	hypercall_msr.guest_physical_address = vmalloc_to_pfn(virtaddr);
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	/* Confirm that hypercall page did get setup. */
	hypercall_msr.as_uint64 = 0;
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	if (!hypercall_msr.enable)
		goto cleanup;

	hv_context.hypercall_page = virtaddr;

#ifdef CONFIG_X86_64
	if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
		union hv_x64_msr_hypercall_contents tsc_msr;
		void *va_tsc;

		va_tsc = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
		if (!va_tsc)
			goto cleanup;
		hv_context.tsc_page = va_tsc;

		rdmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);

		tsc_msr.enable = 1;
		tsc_msr.guest_physical_address = vmalloc_to_pfn(va_tsc);

		wrmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
		clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
	}
#endif
	return 0;

cleanup:
	if (virtaddr) {
		if (hypercall_msr.enable) {
			hypercall_msr.as_uint64 = 0;
			wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
		}

		vfree(virtaddr);
	}

	return -ENOTSUPP;
}
/*
 * hv_cleanup - Cleanup routine.
 *
 * This routine is called normally during driver unloading or exiting.
 */
void hv_cleanup(bool crash)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;

	/* Reset our OS id */
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);

	if (hv_context.hypercall_page) {
		hypercall_msr.as_uint64 = 0;
		wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
		if (!crash)
			vfree(hv_context.hypercall_page);
		hv_context.hypercall_page = NULL;
	}

#ifdef CONFIG_X86_64
	/*
	 * Cleanup the TSC page based CS.
	 */
	if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
		/*
		 * Crash can happen in an interrupt context and unregistering
		 * a clocksource is impossible and redundant in this case.
		 */
		if (!oops_in_progress) {
			clocksource_change_rating(&hyperv_cs_tsc, 10);
			clocksource_unregister(&hyperv_cs_tsc);
		}

		hypercall_msr.as_uint64 = 0;
		wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
		if (!crash)
			vfree(hv_context.tsc_page);
		hv_context.tsc_page = NULL;
	}
#endif
}
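
/*
 * hv_post_message() stages the payload in a preallocated per-cpu page
 * (post_msg_page) rather than handing the caller's buffer to the
 * hypervisor directly: the hypercall takes a guest physical address,
 * so the input must be physically contiguous and must not cross a
 * page boundary, which an arbitrary caller buffer cannot guarantee.
 * The get_cpu()/put_cpu() pair disables preemption so the per-cpu
 * page is not reused while the hypercall is in flight.
 */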
/*
 * hv_post_message - Post a message using the hypervisor message IPC.
 *
 * This involves a hypercall.
 */
int hv_post_message(union hv_connection_id connection_id,
		  enum hv_message_type message_type,
		  void *payload, size_t payload_size)
{
	struct hv_input_post_message *aligned_msg;
	u64 status;

	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
		return -EMSGSIZE;

	aligned_msg = (struct hv_input_post_message *)
			hv_context.post_msg_page[get_cpu()];

	aligned_msg->connectionid = connection_id;
	aligned_msg->reserved = 0;
	aligned_msg->message_type = message_type;
	aligned_msg->payload_size = payload_size;
	memcpy((void *)aligned_msg->payload, payload, payload_size);

	status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);

	put_cpu();
	return status & 0xFFFF;
}
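
/*
 * Minimal usage sketch, modelled on vmbus_post_msg() in connection.c
 * (the connection id and message type shown are illustrative):
 *
 *	union hv_connection_id conn_id;
 *	int ret;
 *
 *	conn_id.asu32 = 0;
 *	conn_id.u.id = VMBUS_MESSAGE_CONNECTION_ID;
 *	ret = hv_post_message(conn_id, 1, buffer, buflen);
 *
 * A non-zero return is the low word of the Hyper-V status code from
 * HVCALL_POST_MESSAGE; callers typically retry a few times since the
 * hypervisor may transiently run out of message slots.
 */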
static int hv_ce_set_next_event(unsigned long delta,
				struct clock_event_device *evt)
{
	u64 current_tick;

	WARN_ON(!clockevent_state_oneshot(evt));

	rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
	current_tick += delta;
	wrmsrl(HV_X64_MSR_STIMER0_COUNT, current_tick);
	return 0;
}
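
/*
 * Synthetic timer 0 is driven by an MSR pair: HV_X64_MSR_STIMER0_CONFIG
 * selects the mode and target SINT, while HV_X64_MSR_STIMER0_COUNT
 * holds the absolute expiration time in 100ns units, which is why
 * hv_ce_set_next_event() above adds the delta to the current
 * reference count. Zeroing both MSRs, as done below, disarms the
 * timer completely.
 */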
static int hv_ce_shutdown(struct clock_event_device *evt)
{
	wrmsrl(HV_X64_MSR_STIMER0_COUNT, 0);
	wrmsrl(HV_X64_MSR_STIMER0_CONFIG, 0);

	return 0;
}
static int hv_ce_set_oneshot(struct clock_event_device *evt)
{
	union hv_timer_config timer_cfg;

	timer_cfg.enable = 1;
	timer_cfg.auto_enable = 1;
	timer_cfg.sintx = VMBUS_MESSAGE_SINT;
	wrmsrl(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);

	return 0;
}
static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
{
	dev->name = "Hyper-V clockevent";
	dev->features = CLOCK_EVT_FEAT_ONESHOT;
	dev->cpumask = cpumask_of(cpu);
	dev->rating = 1000;
	/*
	 * Avoid setting dev->owner = THIS_MODULE deliberately as doing so will
	 * result in clockevents_config_and_register() taking additional
	 * references to the hv_vmbus module making it impossible to unload.
	 */

	dev->set_state_shutdown = hv_ce_shutdown;
	dev->set_state_oneshot = hv_ce_set_oneshot;
	dev->set_next_event = hv_ce_set_next_event;
}
int hv_synic_alloc(void)
{
	size_t size = sizeof(struct tasklet_struct);
	size_t ced_size = sizeof(struct clock_event_device);
	int cpu;

	hv_context.hv_numa_map = kzalloc(sizeof(struct cpumask) * nr_node_ids,
					 GFP_KERNEL);
	if (hv_context.hv_numa_map == NULL) {
		pr_err("Unable to allocate NUMA map\n");
		goto err;
	}

	for_each_present_cpu(cpu) {
		hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
		if (hv_context.event_dpc[cpu] == NULL) {
			pr_err("Unable to allocate event dpc\n");
			goto err;
		}
		tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);

		hv_context.msg_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
		if (hv_context.msg_dpc[cpu] == NULL) {
			pr_err("Unable to allocate message dpc\n");
			goto err;
		}
		tasklet_init(hv_context.msg_dpc[cpu], vmbus_on_msg_dpc, cpu);

		hv_context.clk_evt[cpu] = kzalloc(ced_size, GFP_ATOMIC);
		if (hv_context.clk_evt[cpu] == NULL) {
			pr_err("Unable to allocate clock event device\n");
			goto err;
		}

		hv_init_clockevent_device(hv_context.clk_evt[cpu], cpu);

		hv_context.synic_message_page[cpu] =
			(void *)get_zeroed_page(GFP_ATOMIC);

		if (hv_context.synic_message_page[cpu] == NULL) {
			pr_err("Unable to allocate SYNIC message page\n");
			goto err;
		}

		hv_context.synic_event_page[cpu] =
			(void *)get_zeroed_page(GFP_ATOMIC);

		if (hv_context.synic_event_page[cpu] == NULL) {
			pr_err("Unable to allocate SYNIC event page\n");
			goto err;
		}

		hv_context.post_msg_page[cpu] =
			(void *)get_zeroed_page(GFP_ATOMIC);

		if (hv_context.post_msg_page[cpu] == NULL) {
			pr_err("Unable to allocate post msg page\n");
			goto err;
		}
	}

	return 0;
err:
	return -ENOMEM;
}
static void hv_synic_free_cpu(int cpu)
{
	kfree(hv_context.event_dpc[cpu]);
	kfree(hv_context.msg_dpc[cpu]);
	kfree(hv_context.clk_evt[cpu]);
	if (hv_context.synic_event_page[cpu])
		free_page((unsigned long)hv_context.synic_event_page[cpu]);
	if (hv_context.synic_message_page[cpu])
		free_page((unsigned long)hv_context.synic_message_page[cpu]);
	if (hv_context.post_msg_page[cpu])
		free_page((unsigned long)hv_context.post_msg_page[cpu]);
}
void hv_synic_free(void)
{
	int cpu;

	kfree(hv_context.hv_numa_map);
	for_each_present_cpu(cpu)
		hv_synic_free_cpu(cpu);
}
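
/*
 * The SynIC (synthetic interrupt controller) is programmed entirely
 * through per-cpu MSRs: SIMP and SIEFP take an enable bit plus the
 * page frame number (physical address >> PAGE_SHIFT) of the message
 * and event pages, a SINTx MSR routes the VMBus synthetic interrupt
 * to HYPERVISOR_CALLBACK_VECTOR, and the enable bit in SCONTROL turns
 * the SynIC on. hv_synic_init() reads smp_processor_id() and is
 * invoked on every CPU (e.g. via on_each_cpu()) so that each
 * processor programs its own set.
 */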
/*
 * hv_synic_init - Initialize the Synthetic Interrupt Controller.
 *
 * If it is already initialized by another entity (i.e. x2v shim), we need to
 * retrieve the initialized message and event pages.  Otherwise, we create and
 * initialize the message and event pages.
 */
void hv_synic_init(void *arg)
{
	u64 version;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_sint shared_sint;
	union hv_synic_scontrol sctrl;
	u64 vp_index;

	int cpu = smp_processor_id();

	if (!hv_context.hypercall_page)
		return;

	/* Check the version */
	rdmsrl(HV_X64_MSR_SVERSION, version);

	/* Setup the Synic's message page */
	rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
	simp.simp_enabled = 1;
	simp.base_simp_gpa = virt_to_phys(hv_context.synic_message_page[cpu])
		>> PAGE_SHIFT;

	wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);

	/* Setup the Synic's event page */
	rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
	siefp.siefp_enabled = 1;
	siefp.base_siefp_gpa = virt_to_phys(hv_context.synic_event_page[cpu])
		>> PAGE_SHIFT;

	wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);

	/* Setup the shared SINT. */
	rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	shared_sint.as_uint64 = 0;
	shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
	shared_sint.masked = false;
	shared_sint.auto_eoi = true;

	wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	/* Enable the global synic bit */
	rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
	sctrl.enable = 1;

	wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);

	hv_context.synic_initialized = true;

	/*
	 * Setup the mapping between Hyper-V's notion
	 * of cpuid and Linux' notion of cpuid.
	 * This array will be indexed using Linux cpuid.
	 */
	rdmsrl(HV_X64_MSR_VP_INDEX, vp_index);
	hv_context.vp_index[cpu] = (u32)vp_index;

	INIT_LIST_HEAD(&hv_context.percpu_list[cpu]);

	/*
	 * Register the per-cpu clockevent source.
	 */
	if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
		clockevents_config_and_register(hv_context.clk_evt[cpu],
						HV_TIMER_FREQUENCY,
						HV_MIN_DELTA_TICKS,
						HV_MAX_MAX_DELTA_TICKS);
	return;
}
/*
 * hv_synic_clockevents_cleanup - Cleanup clockevent devices
 */
void hv_synic_clockevents_cleanup(void)
{
	int cpu;

	if (!(ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE))
		return;

	for_each_present_cpu(cpu)
		clockevents_unbind_device(hv_context.clk_evt[cpu], cpu);
}
/*
 * hv_synic_cleanup - Cleanup routine for hv_synic_init().
 */
void hv_synic_cleanup(void *arg)
{
	union hv_synic_sint shared_sint;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_scontrol sctrl;
	int cpu = smp_processor_id();

	if (!hv_context.synic_initialized)
		return;

	/* Turn off clockevent device */
	if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE) {
		clockevents_unbind_device(hv_context.clk_evt[cpu], cpu);
		hv_ce_shutdown(hv_context.clk_evt[cpu]);
	}

	rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	shared_sint.masked = 1;

	/* Need to correctly cleanup in the case of SMP!!! */
	/* Disable the interrupt */
	wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
	simp.simp_enabled = 0;
	simp.base_simp_gpa = 0;

	wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);

	rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
	siefp.siefp_enabled = 0;
	siefp.base_siefp_gpa = 0;

	wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);

	/* Disable the global synic bit */
	rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
	sctrl.enable = 0;
	wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
}