/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *
 */
0a46618d HJ |
22 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
23 | ||
a0086dc5 GKH |
24 | #include <linux/kernel.h> |
25 | #include <linux/mm.h> | |
5a0e3ad6 | 26 | #include <linux/slab.h> |
b7c947f0 | 27 | #include <linux/vmalloc.h> |
46a97191 | 28 | #include <linux/hyperv.h> |
83ba0c4f | 29 | #include <linux/version.h> |
db11f12a | 30 | #include <linux/interrupt.h> |
4061ed9e | 31 | #include <linux/clockchips.h> |
407dd164 | 32 | #include <asm/hyperv.h> |
4061ed9e | 33 | #include <asm/mshyperv.h> |
0f2a6619 | 34 | #include "hyperv_vmbus.h" |
3e7ee490 | 35 | |
/* The one and only instance of global Hyper-V driver state. */
struct hv_context hv_context = {
	.synic_initialized	= false,
	.hypercall_page		= NULL,	/* filled in by hv_init() */
};
41 | ||
/*
 * Parameters for the per-cpu clockevent device: the synthetic timer
 * counts in 100ns units (10 MHz reference frequency).
 */
#define HV_TIMER_FREQUENCY (10 * 1000 * 1000) /* 100ns period */
#define HV_MAX_MAX_DELTA_TICKS 0xffffffff
#define HV_MIN_DELTA_TICKS 1
45 | ||
3e189519 | 46 | /* |
d44890c8 | 47 | * query_hypervisor_info - Get version info of the windows hypervisor |
0831ad04 | 48 | */ |
/* Raw hypervisor version registers captured by query_hypervisor_info(). */
unsigned int host_info_eax;
unsigned int host_info_ebx;
unsigned int host_info_ecx;
unsigned int host_info_edx;
53 | ||
d44890c8 | 54 | static int query_hypervisor_info(void) |
0831ad04 GKH |
55 | { |
56 | unsigned int eax; | |
57 | unsigned int ebx; | |
58 | unsigned int ecx; | |
59 | unsigned int edx; | |
b8dfb264 | 60 | unsigned int max_leaf; |
0831ad04 | 61 | unsigned int op; |
3e7ee490 | 62 | |
0831ad04 GKH |
63 | /* |
64 | * Its assumed that this is called after confirming that Viridian | |
65 | * is present. Query id and revision. | |
66 | */ | |
67 | eax = 0; | |
68 | ebx = 0; | |
69 | ecx = 0; | |
70 | edx = 0; | |
f6feebe0 | 71 | op = HVCPUID_VENDOR_MAXFUNCTION; |
0831ad04 | 72 | cpuid(op, &eax, &ebx, &ecx, &edx); |
3e7ee490 | 73 | |
b8dfb264 | 74 | max_leaf = eax; |
0831ad04 | 75 | |
b8dfb264 | 76 | if (max_leaf >= HVCPUID_VERSION) { |
0831ad04 GKH |
77 | eax = 0; |
78 | ebx = 0; | |
79 | ecx = 0; | |
80 | edx = 0; | |
f6feebe0 | 81 | op = HVCPUID_VERSION; |
0831ad04 | 82 | cpuid(op, &eax, &ebx, &ecx, &edx); |
5fbebb2d S |
83 | host_info_eax = eax; |
84 | host_info_ebx = ebx; | |
85 | host_info_ecx = ecx; | |
86 | host_info_edx = edx; | |
0831ad04 | 87 | } |
b8dfb264 | 88 | return max_leaf; |
0831ad04 | 89 | } |
3e7ee490 | 90 | |
/*
 * hv_do_hypercall - Invoke the specified hypercall.
 *
 * @control: hypercall control code (call code + flags).
 * @input:   virtual address of the input block, or NULL.
 * @output:  virtual address of the output block, or NULL.
 *
 * Returns the hypervisor status, or ULLONG_MAX when the hypercall page
 * has not been set up.  The hypercall ABI takes guest-physical addresses,
 * hence the virt_to_phys() conversions.
 */
u64 hv_do_hypercall(u64 control, void *input, void *output)
{
	u64 input_address = (input) ? virt_to_phys(input) : 0;
	u64 output_address = (output) ? virt_to_phys(output) : 0;
	void *hypercall_page = hv_context.hypercall_page;
#ifdef CONFIG_X86_64
	u64 hv_status = 0;

	if (!hypercall_page)
		return (u64)ULLONG_MAX;

	/*
	 * 64-bit calling convention: control in RCX, input GPA in RDX,
	 * output GPA in R8; status comes back in RAX.
	 */
	__asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
	__asm__ __volatile__("call *%3" : "=a" (hv_status) :
			     "c" (control), "d" (input_address),
			     "m" (hypercall_page));

	return hv_status;

#else

	/* 32-bit convention splits each 64-bit value into hi/lo halves. */
	u32 control_hi = control >> 32;
	u32 control_lo = control & 0xFFFFFFFF;
	u32 hv_status_hi = 1;
	u32 hv_status_lo = 1;
	u32 input_address_hi = input_address >> 32;
	u32 input_address_lo = input_address & 0xFFFFFFFF;
	u32 output_address_hi = output_address >> 32;
	u32 output_address_lo = output_address & 0xFFFFFFFF;

	if (!hypercall_page)
		return (u64)ULLONG_MAX;

	__asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
			      "=a"(hv_status_lo) : "d" (control_hi),
			      "a" (control_lo), "b" (input_address_hi),
			      "c" (input_address_lo), "D"(output_address_hi),
			      "S"(output_address_lo), "m" (hypercall_page));

	/* Reassemble the 64-bit status from the EDX:EAX pair. */
	return hv_status_lo | ((u64)hv_status_hi << 32);
#endif /* !x86_64 */
}
EXPORT_SYMBOL_GPL(hv_do_hypercall);
3e7ee490 | 136 | |
#ifdef CONFIG_X86_64
/*
 * Read the current tick from the Hyper-V TSC reference page.  The page
 * is a seqlock-style structure: the hypervisor bumps tsc_sequence while
 * updating scale/offset, so we retry until a consistent snapshot is read.
 * Falls back to the (slower) HV_X64_MSR_TIME_REF_COUNT MSR when the page
 * is marked invalid (sequence == 0).
 */
static u64 read_hv_clock_tsc(struct clocksource *arg)
{
	u64 current_tick;
	struct ms_hyperv_tsc_page *tsc_pg = hv_context.tsc_page;

	if (tsc_pg->tsc_sequence != 0) {
		/*
		 * Use the tsc page to compute the value.
		 */

		while (1) {
			u64 tmp;
			u32 sequence = tsc_pg->tsc_sequence;
			u64 cur_tsc;
			u64 scale = tsc_pg->tsc_scale;
			s64 offset = tsc_pg->tsc_offset;

			rdtscll(cur_tsc);
			/* current_tick = ((cur_tsc *scale) >> 64) + offset */
			asm("mulq %3"
				: "=d" (current_tick), "=a" (tmp)
				: "a" (cur_tsc), "r" (scale));

			current_tick += offset;
			/* Sequence unchanged: the snapshot was consistent. */
			if (tsc_pg->tsc_sequence == sequence)
				return current_tick;

			/* Page still valid but updated mid-read: retry. */
			if (tsc_pg->tsc_sequence != 0)
				continue;
			/*
			 * Fallback using MSR method.
			 */
			break;
		}
	}
	rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
	return current_tick;
}

/* TSC-page clocksource; rating 425 makes it preferred over MSR reads. */
static struct clocksource hyperv_cs_tsc = {
	.name		= "hyperv_clocksource_tsc_page",
	.rating		= 425,
	.read		= read_hv_clock_tsc,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
#endif
185 | ||
186 | ||
/*
 * hv_init - Main initialization routine.
 *
 * This routine must be called before any other routines in here are called.
 * Returns 0 on success, -ENOTSUPP when the hypercall page is not enabled
 * or the TSC reference page cannot be allocated.
 */
int hv_init(void)
{
	int max_leaf;
	union hv_x64_msr_hypercall_contents hypercall_msr;

	/* Start from a clean slate: zero every per-cpu pointer/index table. */
	memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
	memset(hv_context.synic_message_page, 0,
	       sizeof(void *) * NR_CPUS);
	memset(hv_context.post_msg_page, 0,
	       sizeof(void *) * NR_CPUS);
	memset(hv_context.vp_index, 0,
	       sizeof(int) * NR_CPUS);
	memset(hv_context.event_dpc, 0,
	       sizeof(void *) * NR_CPUS);
	memset(hv_context.msg_dpc, 0,
	       sizeof(void *) * NR_CPUS);
	memset(hv_context.clk_evt, 0,
	       sizeof(void *) * NR_CPUS);

	/* Captures host version into host_info_*; return value unused here. */
	max_leaf = query_hypervisor_info();


	/* See if the hypercall page is already set */
	hypercall_msr.as_uint64 = 0;
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	/* The arch code must have enabled the hypercall page earlier. */
	if (!hypercall_msr.enable)
		return -ENOTSUPP;

	hv_context.hypercall_page = hv_hypercall_pg;

#ifdef CONFIG_X86_64
	if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
		union hv_x64_msr_hypercall_contents tsc_msr;
		void *va_tsc;

		/* Allocate the TSC reference page and hand its PFN to the
		 * hypervisor via the REFERENCE_TSC MSR. */
		va_tsc = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
		if (!va_tsc)
			goto cleanup;
		hv_context.tsc_page = va_tsc;

		rdmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);

		tsc_msr.enable = 1;
		tsc_msr.guest_physical_address = vmalloc_to_pfn(va_tsc);

		wrmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
		/* Reference counter runs at 10 MHz (100ns period). */
		clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
	}
#endif
	return 0;

cleanup:
	return -ENOTSUPP;
}
247 | ||
/*
 * hv_cleanup - Cleanup routine.
 *
 * This routine is called normally during driver unloading or exiting.
 * @crash: true when invoked from a crash/kexec path; in that case the TSC
 *         page is deliberately not freed (it may still be referenced).
 */
void hv_cleanup(bool crash)
{

#ifdef CONFIG_X86_64
	union hv_x64_msr_hypercall_contents hypercall_msr;
	/*
	 * Cleanup the TSC page based CS.
	 */
	if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
		/*
		 * Crash can happen in an interrupt context and unregistering
		 * a clocksource is impossible and redundant in this case.
		 */
		if (!oops_in_progress) {
			/* Demote the rating first so the kernel migrates off
			 * this clocksource before it disappears. */
			clocksource_change_rating(&hyperv_cs_tsc, 10);
			clocksource_unregister(&hyperv_cs_tsc);
		}

		/* Tell the hypervisor to stop using the page before freeing. */
		hypercall_msr.as_uint64 = 0;
		wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
		if (!crash) {
			vfree(hv_context.tsc_page);
			hv_context.tsc_page = NULL;
		}
	}
#endif
}
280 | ||
/*
 * hv_post_message - Post a message using the hypervisor message IPC.
 *
 * This involves a hypercall.  Returns -EMSGSIZE for oversized payloads,
 * otherwise the low 16 bits of the hypercall status (0 on success).
 */
int hv_post_message(union hv_connection_id connection_id,
		  enum hv_message_type message_type,
		  void *payload, size_t payload_size)
{

	struct hv_input_post_message *aligned_msg;
	u64 status;

	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
		return -EMSGSIZE;

	/*
	 * get_cpu() disables preemption so this CPU's pre-allocated,
	 * properly aligned post_msg_page cannot be reused underneath us;
	 * released by put_cpu() after the hypercall.
	 */
	aligned_msg = (struct hv_input_post_message *)
			hv_context.post_msg_page[get_cpu()];

	aligned_msg->connectionid = connection_id;
	aligned_msg->reserved = 0;
	aligned_msg->message_type = message_type;
	aligned_msg->payload_size = payload_size;
	memcpy((void *)aligned_msg->payload, payload, payload_size);

	status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);

	put_cpu();
	/* The low 16 bits of the hypercall result carry the HV status. */
	return status & 0xFFFF;
}
311 | ||
4061ed9e S |
312 | static int hv_ce_set_next_event(unsigned long delta, |
313 | struct clock_event_device *evt) | |
314 | { | |
a5a1d1c2 | 315 | u64 current_tick; |
4061ed9e | 316 | |
bc609cb4 | 317 | WARN_ON(!clockevent_state_oneshot(evt)); |
4061ed9e S |
318 | |
319 | rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick); | |
320 | current_tick += delta; | |
321 | wrmsrl(HV_X64_MSR_STIMER0_COUNT, current_tick); | |
322 | return 0; | |
323 | } | |
324 | ||
/* Stop STIMER0: clear the count first, then the configuration MSR. */
static int hv_ce_shutdown(struct clock_event_device *evt)
{
	wrmsrl(HV_X64_MSR_STIMER0_COUNT, 0);
	wrmsrl(HV_X64_MSR_STIMER0_CONFIG, 0);

	return 0;
}
332 | ||
333 | static int hv_ce_set_oneshot(struct clock_event_device *evt) | |
4061ed9e S |
334 | { |
335 | union hv_timer_config timer_cfg; | |
336 | ||
bc609cb4 VK |
337 | timer_cfg.enable = 1; |
338 | timer_cfg.auto_enable = 1; | |
339 | timer_cfg.sintx = VMBUS_MESSAGE_SINT; | |
340 | wrmsrl(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64); | |
341 | ||
342 | return 0; | |
4061ed9e S |
343 | } |
344 | ||
345 | static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu) | |
346 | { | |
347 | dev->name = "Hyper-V clockevent"; | |
348 | dev->features = CLOCK_EVT_FEAT_ONESHOT; | |
349 | dev->cpumask = cpumask_of(cpu); | |
350 | dev->rating = 1000; | |
e086748c VK |
351 | /* |
352 | * Avoid settint dev->owner = THIS_MODULE deliberately as doing so will | |
353 | * result in clockevents_config_and_register() taking additional | |
354 | * references to the hv_vmbus module making it impossible to unload. | |
355 | */ | |
4061ed9e | 356 | |
bc609cb4 VK |
357 | dev->set_state_shutdown = hv_ce_shutdown; |
358 | dev->set_state_oneshot = hv_ce_set_oneshot; | |
4061ed9e S |
359 | dev->set_next_event = hv_ce_set_next_event; |
360 | } | |
361 | ||
2608fb65 JW |
362 | |
363 | int hv_synic_alloc(void) | |
364 | { | |
365 | size_t size = sizeof(struct tasklet_struct); | |
4061ed9e | 366 | size_t ced_size = sizeof(struct clock_event_device); |
2608fb65 JW |
367 | int cpu; |
368 | ||
9f01ec53 S |
369 | hv_context.hv_numa_map = kzalloc(sizeof(struct cpumask) * nr_node_ids, |
370 | GFP_ATOMIC); | |
371 | if (hv_context.hv_numa_map == NULL) { | |
372 | pr_err("Unable to allocate NUMA map\n"); | |
373 | goto err; | |
374 | } | |
375 | ||
d74e2e80 | 376 | for_each_present_cpu(cpu) { |
2608fb65 JW |
377 | hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC); |
378 | if (hv_context.event_dpc[cpu] == NULL) { | |
379 | pr_err("Unable to allocate event dpc\n"); | |
380 | goto err; | |
381 | } | |
382 | tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu); | |
383 | ||
d81274aa S |
384 | hv_context.msg_dpc[cpu] = kmalloc(size, GFP_ATOMIC); |
385 | if (hv_context.msg_dpc[cpu] == NULL) { | |
386 | pr_err("Unable to allocate event dpc\n"); | |
387 | goto err; | |
388 | } | |
389 | tasklet_init(hv_context.msg_dpc[cpu], vmbus_on_msg_dpc, cpu); | |
390 | ||
4061ed9e S |
391 | hv_context.clk_evt[cpu] = kzalloc(ced_size, GFP_ATOMIC); |
392 | if (hv_context.clk_evt[cpu] == NULL) { | |
393 | pr_err("Unable to allocate clock event device\n"); | |
394 | goto err; | |
395 | } | |
9f01ec53 | 396 | |
4061ed9e S |
397 | hv_init_clockevent_device(hv_context.clk_evt[cpu], cpu); |
398 | ||
2608fb65 JW |
399 | hv_context.synic_message_page[cpu] = |
400 | (void *)get_zeroed_page(GFP_ATOMIC); | |
401 | ||
402 | if (hv_context.synic_message_page[cpu] == NULL) { | |
403 | pr_err("Unable to allocate SYNIC message page\n"); | |
404 | goto err; | |
405 | } | |
406 | ||
407 | hv_context.synic_event_page[cpu] = | |
408 | (void *)get_zeroed_page(GFP_ATOMIC); | |
409 | ||
410 | if (hv_context.synic_event_page[cpu] == NULL) { | |
411 | pr_err("Unable to allocate SYNIC event page\n"); | |
412 | goto err; | |
413 | } | |
b29ef354 S |
414 | |
415 | hv_context.post_msg_page[cpu] = | |
416 | (void *)get_zeroed_page(GFP_ATOMIC); | |
417 | ||
418 | if (hv_context.post_msg_page[cpu] == NULL) { | |
419 | pr_err("Unable to allocate post msg page\n"); | |
420 | goto err; | |
421 | } | |
9b1bf703 VK |
422 | |
423 | INIT_LIST_HEAD(&hv_context.percpu_list[cpu]); | |
2608fb65 JW |
424 | } |
425 | ||
426 | return 0; | |
427 | err: | |
428 | return -ENOMEM; | |
429 | } | |
430 | ||
/* Free every SynIC resource previously allocated for @cpu.  Safe to call
 * on partially allocated CPUs: kfree(NULL) is a no-op and the page
 * pointers are checked before free_page().
 */
static void hv_synic_free_cpu(int cpu)
{
	kfree(hv_context.event_dpc[cpu]);
	kfree(hv_context.msg_dpc[cpu]);
	kfree(hv_context.clk_evt[cpu]);
	if (hv_context.synic_event_page[cpu])
		free_page((unsigned long)hv_context.synic_event_page[cpu]);
	if (hv_context.synic_message_page[cpu])
		free_page((unsigned long)hv_context.synic_message_page[cpu]);
	if (hv_context.post_msg_page[cpu])
		free_page((unsigned long)hv_context.post_msg_page[cpu]);
}
443 | ||
/* Release the NUMA map and all per-cpu SynIC allocations made by
 * hv_synic_alloc().
 */
void hv_synic_free(void)
{
	int cpu;

	kfree(hv_context.hv_numa_map);
	for_each_present_cpu(cpu)
		hv_synic_free_cpu(cpu);
}
452 | ||
/*
 * hv_synic_init - Initialize the Synthetic Interrupt Controller.
 *
 * If it is already initialized by another entity (ie x2v shim), we need to
 * retrieve the initialized message and event pages.  Otherwise, we create
 * and initialize the message and event pages.  Runs on @cpu during CPU
 * bring-up; returns -EFAULT if the hypercall page is not set up.
 */
int hv_synic_init(unsigned int cpu)
{
	u64 version;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_sint shared_sint;
	union hv_synic_scontrol sctrl;
	u64 vp_index;

	if (!hv_context.hypercall_page)
		return -EFAULT;

	/* Check the version */
	rdmsrl(HV_X64_MSR_SVERSION, version);

	/* Setup the Synic's message page */
	rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
	simp.simp_enabled = 1;
	simp.base_simp_gpa = virt_to_phys(hv_context.synic_message_page[cpu])
		>> PAGE_SHIFT;

	wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);

	/* Setup the Synic's event page */
	rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
	siefp.siefp_enabled = 1;
	siefp.base_siefp_gpa = virt_to_phys(hv_context.synic_event_page[cpu])
		>> PAGE_SHIFT;

	wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);

	/* Setup the shared SINT: route it to the VMBus callback vector,
	 * unmasked, with auto-EOI. */
	rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	shared_sint.as_uint64 = 0;
	shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
	shared_sint.masked = false;
	shared_sint.auto_eoi = true;

	wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	/* Enable the global synic bit */
	rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
	sctrl.enable = 1;

	wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);

	hv_context.synic_initialized = true;

	/*
	 * Setup the mapping between Hyper-V's notion
	 * of cpuid and Linux' notion of cpuid.
	 * This array will be indexed using Linux cpuid.
	 */
	rdmsrl(HV_X64_MSR_VP_INDEX, vp_index);
	hv_context.vp_index[cpu] = (u32)vp_index;

	/*
	 * Register the per-cpu clockevent source.
	 */
	if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
		clockevents_config_and_register(hv_context.clk_evt[cpu],
						HV_TIMER_FREQUENCY,
						HV_MIN_DELTA_TICKS,
						HV_MAX_MAX_DELTA_TICKS);
	return 0;
}
527 | ||
e086748c VK |
528 | /* |
529 | * hv_synic_clockevents_cleanup - Cleanup clockevent devices | |
530 | */ | |
531 | void hv_synic_clockevents_cleanup(void) | |
532 | { | |
533 | int cpu; | |
534 | ||
535 | if (!(ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)) | |
536 | return; | |
537 | ||
6ffc4b85 | 538 | for_each_present_cpu(cpu) |
e086748c VK |
539 | clockevents_unbind_device(hv_context.clk_evt[cpu], cpu); |
540 | } | |
541 | ||
/*
 * hv_synic_cleanup - Cleanup routine for hv_synic_init().
 *
 * Runs when @cpu goes offline.  Returns -EBUSY (blocking the offline)
 * if any connected VMBus channel is still bound to this CPU, -EFAULT if
 * the SynIC was never initialized, 0 on success.
 */
int hv_synic_cleanup(unsigned int cpu)
{
	union hv_synic_sint shared_sint;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_scontrol sctrl;
	struct vmbus_channel *channel, *sc;
	bool channel_found = false;
	unsigned long flags;

	if (!hv_context.synic_initialized)
		return -EFAULT;

	/*
	 * Search for channels which are bound to the CPU we're about to
	 * cleanup. In case we find one and vmbus is still connected we need to
	 * fail, this will effectively prevent CPU offlining. There is no way
	 * we can re-bind channels to different CPUs for now.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);
	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (channel->target_cpu == cpu) {
			channel_found = true;
			break;
		}
		/* Sub-channels may be bound to this CPU as well. */
		spin_lock_irqsave(&channel->lock, flags);
		list_for_each_entry(sc, &channel->sc_list, sc_list) {
			if (sc->target_cpu == cpu) {
				channel_found = true;
				break;
			}
		}
		spin_unlock_irqrestore(&channel->lock, flags);
		if (channel_found)
			break;
	}
	mutex_unlock(&vmbus_connection.channel_mutex);

	if (channel_found && vmbus_connection.conn_state == CONNECTED)
		return -EBUSY;

	/* Turn off clockevent device */
	if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE) {
		clockevents_unbind_device(hv_context.clk_evt[cpu], cpu);
		hv_ce_shutdown(hv_context.clk_evt[cpu]);
	}

	rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	shared_sint.masked = 1;

	/* Need to correctly cleanup in the case of SMP!!! */
	/* Disable the interrupt */
	wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	/* Disable and unmap the message page. */
	rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
	simp.simp_enabled = 0;
	simp.base_simp_gpa = 0;

	wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);

	/* Disable and unmap the event flags page. */
	rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
	siefp.siefp_enabled = 0;
	siefp.base_siefp_gpa = 0;

	wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);

	/* Disable the global synic bit */
	rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
	sctrl.enable = 0;
	wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);

	return 0;
}