/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Amit Shah <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "x86.h"
#include "lapic.h"
#include "ioapic.h"
#include "hyperv.h"

#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/sched/cputime.h>

#include <asm/apicdef.h>
#include <trace/events/kvm.h>

#include "trace.h"

static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{
	return atomic64_read(&synic->sint[sint]);
}

static inline int synic_get_sint_vector(u64 sint_value)
{
	if (sint_value & HV_SYNIC_SINT_MASKED)
		return -1;
	return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
}

static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
				       int vector)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			return true;
	}
	return false;
}

static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
				      int vector)
{
	int i;
	u64 sint_value;

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		sint_value = synic_read_sint(synic, i);
		if (synic_get_sint_vector(sint_value) == vector &&
		    sint_value & HV_SYNIC_SINT_AUTO_EOI)
			return true;
	}
	return false;
}

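/*
 * Write one SINT register.  Vectors below 16 are rejected unless the
 * write comes from the host.  The connected and auto-EOI vector bitmaps
 * are recomputed, and a KVM_REQ_SCAN_IOAPIC request is raised so the
 * EOI exit bitmap picks up the new SynIC vectors.
 */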
static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
			  u64 data, bool host)
{
	int vector;

	vector = data & HV_SYNIC_SINT_VECTOR_MASK;
	if (vector < 16 && !host)
		return 1;
	/*
	 * Guest may configure multiple SINTs to use the same vector, so
	 * we maintain a bitmap of vectors handled by synic, and a
	 * bitmap of vectors with auto-eoi behavior.  The bitmaps are
	 * updated here, and atomically queried on fast paths.
	 */

	atomic64_set(&synic->sint[sint], data);

	if (synic_has_vector_connected(synic, vector))
		__set_bit(vector, synic->vec_bitmap);
	else
		__clear_bit(vector, synic->vec_bitmap);

	if (synic_has_vector_auto_eoi(synic, vector))
		__set_bit(vector, synic->auto_eoi_bitmap);
	else
		__clear_bit(vector, synic->auto_eoi_bitmap);

	/* Load SynIC vectors into EOI exit bitmap */
	kvm_make_request(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic));
	return 0;
}

static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vcpu_id)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_hv_synic *synic;

	if (vcpu_id >= atomic_read(&kvm->online_vcpus))
		return NULL;
	vcpu = kvm_get_vcpu(kvm, vcpu_id);
	if (!vcpu)
		return NULL;
	synic = vcpu_to_synic(vcpu);
	return (synic->active) ? synic : NULL;
}

static void synic_clear_sint_msg_pending(struct kvm_vcpu_hv_synic *synic,
					 u32 sint)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct page *page;
	gpa_t gpa;
	struct hv_message *msg;
	struct hv_message_page *msg_page;

	gpa = synic->msg_page & PAGE_MASK;
	page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
	if (is_error_page(page)) {
		vcpu_err(vcpu, "Hyper-V SynIC can't get msg page, gpa 0x%llx\n",
			 gpa);
		return;
	}
	msg_page = kmap_atomic(page);

	msg = &msg_page->sint_message[sint];
	msg->header.message_flags.msg_pending = 0;

	kunmap_atomic(msg_page);
	kvm_release_page_dirty(page);
	kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
}

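/*
 * Called when the guest signals completion of a SINT interrupt (APIC EOI
 * or a write to HV_X64_MSR_EOM): clear the message-pending flag in the
 * SIMP page, re-queue any synthetic timers that still have a message to
 * deliver on this SINT, and forward the ack to the GSI routed to it.
 */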
static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_stimer *stimer;
	int gsi, idx, stimers_pending;

	trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);

	if (synic->msg_page & HV_SYNIC_SIMP_ENABLE)
		synic_clear_sint_msg_pending(synic, sint);

	/* Try to deliver pending Hyper-V SynIC timer messages */
	stimers_pending = 0;
	for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
		stimer = &hv_vcpu->stimer[idx];
		if (stimer->msg_pending &&
		    (stimer->config & HV_STIMER_ENABLE) &&
		    HV_STIMER_SINT(stimer->config) == sint) {
			set_bit(stimer->index,
				hv_vcpu->stimer_pending_bitmap);
			stimers_pending++;
		}
	}
	if (stimers_pending)
		kvm_make_request(KVM_REQ_HV_STIMER, vcpu);

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = atomic_read(&synic->sint_to_gsi[sint]);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;

	hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
	hv_vcpu->exit.u.synic.msr = msr;
	hv_vcpu->exit.u.synic.control = synic->control;
	hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
	hv_vcpu->exit.u.synic.msg_page = synic->msg_page;

	kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}

static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
			 u32 msr, u64 data, bool host)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	int ret;

	if (!synic->active)
		return 1;

	trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);

	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		synic->control = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SVERSION:
		if (!host) {
			ret = 1;
			break;
		}
		synic->version = data;
		break;
	case HV_X64_MSR_SIEFP:
		if (data & HV_SYNIC_SIEFP_ENABLE)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->evt_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SIMP:
		if (data & HV_SYNIC_SIMP_ENABLE)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->msg_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_EOM: {
		int i;

		for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
			kvm_hv_notify_acked_sint(vcpu, i);
		break;
	}
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}

static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata)
{
	int ret;

	if (!synic->active)
		return 1;

	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		*pdata = synic->control;
		break;
	case HV_X64_MSR_SVERSION:
		*pdata = synic->version;
		break;
	case HV_X64_MSR_SIEFP:
		*pdata = synic->evt_page;
		break;
	case HV_X64_MSR_SIMP:
		*pdata = synic->msg_page;
		break;
	case HV_X64_MSR_EOM:
		*pdata = 0;
		break;
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		*pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}

static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct kvm_lapic_irq irq;
	int ret, vector;

	if (sint >= ARRAY_SIZE(synic->sint))
		return -EINVAL;

	vector = synic_get_sint_vector(synic_read_sint(synic, sint));
	if (vector < 0)
		return -ENOENT;

	memset(&irq, 0, sizeof(irq));
	irq.shorthand = APIC_DEST_SELF;
	irq.dest_mode = APIC_DEST_PHYSICAL;
	irq.delivery_mode = APIC_DM_FIXED;
	irq.vector = vector;
	irq.level = 1;

	ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
	trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
	return ret;
}

int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint)
{
	struct kvm_vcpu_hv_synic *synic;

	synic = synic_get(kvm, vcpu_id);
	if (!synic)
		return -EINVAL;

	return synic_set_irq(synic, sint);
}

void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
	int i;

	trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			kvm_hv_notify_acked_sint(vcpu, i);
}

static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vcpu_id, u32 sint, int gsi)
{
	struct kvm_vcpu_hv_synic *synic;

	synic = synic_get(kvm, vcpu_id);
	if (!synic)
		return -EINVAL;

	if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
		return -EINVAL;

	atomic_set(&synic->sint_to_gsi[sint], gsi);
	return 0;
}

void kvm_hv_irq_routing_update(struct kvm *kvm)
{
	struct kvm_irq_routing_table *irq_rt;
	struct kvm_kernel_irq_routing_entry *e;
	u32 gsi;

	irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
					lockdep_is_held(&kvm->irq_lock));

	for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
		hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
			if (e->type == KVM_IRQ_ROUTING_HV_SINT)
				kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
						    e->hv_sint.sint, gsi);
		}
	}
}

static void synic_init(struct kvm_vcpu_hv_synic *synic)
{
	int i;

	memset(synic, 0, sizeof(*synic));
	synic->version = HV_SYNIC_VERSION_1;
	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
		atomic_set(&synic->sint_to_gsi[i], -1);
	}
}

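/*
 * Return the partition reference time in 100 ns units, either derived
 * from the reference TSC page parameters or, when those are not valid,
 * from kvmclock.
 */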
static u64 get_time_ref_counter(struct kvm *kvm)
{
	struct kvm_hv *hv = &kvm->arch.hyperv;
	struct kvm_vcpu *vcpu;
	u64 tsc;

	/*
	 * If the guest has not set up the TSC page or the clock isn't
	 * stable, fall back to get_kvmclock_ns().
	 */
	if (!hv->tsc_ref.tsc_sequence)
		return div_u64(get_kvmclock_ns(kvm), 100);

	vcpu = kvm_get_vcpu(kvm, 0);
	tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
		+ hv->tsc_ref.tsc_offset;
}

static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
				bool vcpu_kick)
{
	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

	set_bit(stimer->index,
		vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
	kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
	if (vcpu_kick)
		kvm_vcpu_kick(vcpu);
}

static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

	trace_kvm_hv_stimer_cleanup(stimer_to_vcpu(stimer)->vcpu_id,
				    stimer->index);

	hrtimer_cancel(&stimer->timer);
	clear_bit(stimer->index,
		  vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
	stimer->msg_pending = false;
	stimer->exp_time = 0;
}

static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
{
	struct kvm_vcpu_hv_stimer *stimer;

	stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
	trace_kvm_hv_stimer_callback(stimer_to_vcpu(stimer)->vcpu_id,
				     stimer->index);
	stimer_mark_pending(stimer, true);

	return HRTIMER_NORESTART;
}

/*
 * stimer_start() assumptions:
 * a) stimer->count is not equal to 0
 * b) stimer->config has HV_STIMER_ENABLE flag
 */
static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
{
	u64 time_now;
	ktime_t ktime_now;

	time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm);
	ktime_now = ktime_get();

	if (stimer->config & HV_STIMER_PERIODIC) {
		if (stimer->exp_time) {
			if (time_now >= stimer->exp_time) {
				u64 remainder;

				div64_u64_rem(time_now - stimer->exp_time,
					      stimer->count, &remainder);
				stimer->exp_time =
					time_now + (stimer->count - remainder);
			}
		} else
			stimer->exp_time = time_now + stimer->count;

		trace_kvm_hv_stimer_start_periodic(
					stimer_to_vcpu(stimer)->vcpu_id,
					stimer->index,
					time_now, stimer->exp_time);

		hrtimer_start(&stimer->timer,
			      ktime_add_ns(ktime_now,
					   100 * (stimer->exp_time - time_now)),
			      HRTIMER_MODE_ABS);
		return 0;
	}
	stimer->exp_time = stimer->count;
	if (time_now >= stimer->count) {
		/*
		 * Expire timer according to Hypervisor Top-Level Functional
		 * specification v4 (15.3.1):
		 * "If a one shot is enabled and the specified count is in
		 * the past, it will expire immediately."
		 */
		stimer_mark_pending(stimer, false);
		return 0;
	}

	trace_kvm_hv_stimer_start_one_shot(stimer_to_vcpu(stimer)->vcpu_id,
					   stimer->index,
					   time_now, stimer->count);

	hrtimer_start(&stimer->timer,
		      ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
		      HRTIMER_MODE_ABS);
	return 0;
}

static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
			     bool host)
{
	trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id,
				       stimer->index, config, host);

	stimer_cleanup(stimer);
	if ((stimer->config & HV_STIMER_ENABLE) && HV_STIMER_SINT(config) == 0)
		config &= ~HV_STIMER_ENABLE;
	stimer->config = config;
	stimer_mark_pending(stimer, false);
	return 0;
}

static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
			    bool host)
{
	trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id,
				      stimer->index, count, host);

	stimer_cleanup(stimer);
	stimer->count = count;
	if (stimer->count == 0)
		stimer->config &= ~HV_STIMER_ENABLE;
	else if (stimer->config & HV_STIMER_AUTOENABLE)
		stimer->config |= HV_STIMER_ENABLE;
	stimer_mark_pending(stimer, false);
	return 0;
}

static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
{
	*pconfig = stimer->config;
	return 0;
}

static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
{
	*pcount = stimer->count;
	return 0;
}

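/*
 * Post a message into the SIMP slot for the given SINT.  If the slot is
 * still occupied, only set the message-pending flag and return -EAGAIN so
 * the caller retries once the guest signals EOM/EOI; otherwise copy the
 * payload in and inject the SINT interrupt.
 */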
static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
			     struct hv_message *src_msg)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct page *page;
	gpa_t gpa;
	struct hv_message *dst_msg;
	int r;
	struct hv_message_page *msg_page;

	if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
		return -ENOENT;

	gpa = synic->msg_page & PAGE_MASK;
	page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
	if (is_error_page(page))
		return -EFAULT;

	msg_page = kmap_atomic(page);
	dst_msg = &msg_page->sint_message[sint];
	if (sync_cmpxchg(&dst_msg->header.message_type, HVMSG_NONE,
			 src_msg->header.message_type) != HVMSG_NONE) {
		dst_msg->header.message_flags.msg_pending = 1;
		r = -EAGAIN;
	} else {
		memcpy(&dst_msg->u.payload, &src_msg->u.payload,
		       src_msg->header.payload_size);
		dst_msg->header.message_type = src_msg->header.message_type;
		dst_msg->header.payload_size = src_msg->header.payload_size;
		r = synic_set_irq(synic, sint);
		if (r >= 1)
			r = 0;
		else if (r == 0)
			r = -EFAULT;
	}
	kunmap_atomic(msg_page);
	kvm_release_page_dirty(page);
	kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
	return r;
}

static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
	struct hv_message *msg = &stimer->msg;
	struct hv_timer_message_payload *payload =
			(struct hv_timer_message_payload *)&msg->u.payload;

	payload->expiration_time = stimer->exp_time;
	payload->delivery_time = get_time_ref_counter(vcpu->kvm);
	return synic_deliver_msg(vcpu_to_synic(vcpu),
				 HV_STIMER_SINT(stimer->config), msg);
}

static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
{
	int r;

	stimer->msg_pending = true;
	r = stimer_send_msg(stimer);
	trace_kvm_hv_stimer_expiration(stimer_to_vcpu(stimer)->vcpu_id,
				       stimer->index, r);
	if (!r) {
		stimer->msg_pending = false;
		if (!(stimer->config & HV_STIMER_PERIODIC))
			stimer->config &= ~HV_STIMER_ENABLE;
	}
}

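/*
 * Handle KVM_REQ_HV_STIMER: for every timer flagged in stimer_pending_bitmap,
 * deliver an expiration message if one is due, then either re-arm the hrtimer
 * (timer still enabled with a non-zero count) or clean the timer up.
 */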
void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_stimer *stimer;
	u64 time_now, exp_time;
	int i;

	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
			stimer = &hv_vcpu->stimer[i];
			if (stimer->config & HV_STIMER_ENABLE) {
				exp_time = stimer->exp_time;

				if (exp_time) {
					time_now =
						get_time_ref_counter(vcpu->kvm);
					if (time_now >= exp_time)
						stimer_expiration(stimer);
				}

				if ((stimer->config & HV_STIMER_ENABLE) &&
				    stimer->count)
					stimer_start(stimer);
				else
					stimer_cleanup(stimer);
			}
		}
}

void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	int i;

	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		stimer_cleanup(&hv_vcpu->stimer[i]);
}

static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
{
	struct hv_message *msg = &stimer->msg;
	struct hv_timer_message_payload *payload =
			(struct hv_timer_message_payload *)&msg->u.payload;

	memset(&msg->header, 0, sizeof(msg->header));
	msg->header.message_type = HVMSG_TIMER_EXPIRED;
	msg->header.payload_size = sizeof(*payload);

	payload->timer_index = stimer->index;
	payload->expiration_time = 0;
	payload->delivery_time = 0;
}

static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
{
	memset(stimer, 0, sizeof(*stimer));
	stimer->index = timer_index;
	hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	stimer->timer.function = stimer_timer_callback;
	stimer_prepare_msg(stimer);
}

void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	int i;

	synic_init(&hv_vcpu->synic);

	bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		stimer_init(&hv_vcpu->stimer[i], i);
}

int kvm_hv_activate_synic(struct kvm_vcpu *vcpu)
{
	/*
	 * Hyper-V SynIC auto-EOI SINTs are not compatible with APICv,
	 * so deactivate APICv.
	 */
	kvm_vcpu_deactivate_apicv(vcpu);
	vcpu_to_synic(vcpu)->active = true;
	return 0;
}

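/*
 * Partition-wide MSRs live in struct kvm_hv (one copy per VM) rather than
 * per vcpu; kvm_hv_{set,get}_msr_common() serialize accesses to them with
 * hv_lock.
 */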
static bool kvm_hv_msr_partition_wide(u32 msr)
{
	bool r = false;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
	case HV_X64_MSR_REFERENCE_TSC:
	case HV_X64_MSR_TIME_REF_COUNT:
	case HV_X64_MSR_CRASH_CTL:
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_RESET:
		r = true;
		break;
	}

	return r;
}

static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
				     u32 index, u64 *pdata)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
		return -EINVAL;

	*pdata = hv->hv_crash_param[index];
	return 0;
}

static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	*pdata = hv->hv_crash_ctl;
	return 0;
}

static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (host)
		hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY;

	if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {

		vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
			   hv->hv_crash_param[0],
			   hv->hv_crash_param[1],
			   hv->hv_crash_param[2],
			   hv->hv_crash_param[3],
			   hv->hv_crash_param[4]);

		/* Send notification about crash to user space */
		kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
	}

	return 0;
}

static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
				     u32 index, u64 data)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
		return -EINVAL;

	hv->hv_crash_param[index] = data;
	return 0;
}

/*
 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
 * between them is possible:
 *
 * kvmclock formula:
 *    nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *
 * Hyper-V formula:
 *    nsec/100 = ticks * scale / 2^64 + offset
 *
 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
 * By dividing the kvmclock formula by 100 and equating what's left we get:
 *    ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *            scale / 2^64 =         tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *            scale        =         tsc_to_system_mul * 2^(32+tsc_shift) / 100
 *
 * Now expand the kvmclock formula and divide by 100:
 *    nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
 *           - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *    nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               + system_time / 100
 *
 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
 *    nsec/100 = ticks * scale / 2^64
 *               - tsc_timestamp * scale / 2^64
 *               + system_time / 100
 *
 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
 *    offset = system_time / 100 - tsc_timestamp * scale / 2^64
 *
 * These two equivalencies are implemented in this function.
 */
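/*
 * A purely illustrative example (numbers assumed, not taken from real
 * hardware): suppose kvmclock reports tsc_shift = 1 and
 * tsc_to_system_mul = 2^31, i.e. one tick equals one nanosecond (a 1 GHz
 * TSC).  The formula above gives scale = 2^31 * 2^33 / 100 = 2^64 / 100,
 * so the Hyper-V formula reduces to nsec/100 = ticks / 100, as expected
 * for ticks that are exactly 1 ns apart.
 */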
static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
					HV_REFERENCE_TSC_PAGE *tsc_ref)
{
	u64 max_mul;

	if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
		return false;

	/*
	 * check if scale would overflow, if so we use the time ref counter
	 *    tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
	 *    tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
	 *    tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
	 */
	max_mul = 100ull << (32 - hv_clock->tsc_shift);
	if (hv_clock->tsc_to_system_mul >= max_mul)
		return false;

	/*
	 * Otherwise compute the scale and offset according to the formulas
	 * derived above.
	 */
	tsc_ref->tsc_scale =
		mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
				hv_clock->tsc_to_system_mul,
				100);

	tsc_ref->tsc_offset = hv_clock->system_time;
	do_div(tsc_ref->tsc_offset, 100);
	tsc_ref->tsc_offset -=
		mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
	return true;
}

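/*
 * Publish the reference TSC page to the guest.  The update follows a
 * sequence protocol: first write a sequence of 0 (forcing the guest back
 * to the time reference count MSR), then the new scale and offset, then
 * a new non-zero sequence, with smp_wmb() between the steps so the guest
 * never observes a half-written page.
 */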
void kvm_hv_setup_tsc_page(struct kvm *kvm,
			   struct pvclock_vcpu_time_info *hv_clock)
{
	struct kvm_hv *hv = &kvm->arch.hyperv;
	u32 tsc_seq;
	u64 gfn;

	BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
	BUILD_BUG_ON(offsetof(HV_REFERENCE_TSC_PAGE, tsc_sequence) != 0);

	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
		return;

	mutex_lock(&kvm->arch.hyperv.hv_lock);
	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
		goto out_unlock;

	gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
	/*
	 * Because the TSC parameters only vary when there is a
	 * change in the master clock, do not bother with caching.
	 */
	if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
				    &tsc_seq, sizeof(tsc_seq))))
		goto out_unlock;

	/*
	 * While we're computing and writing the parameters, force the
	 * guest to use the time reference count MSR.
	 */
	hv->tsc_ref.tsc_sequence = 0;
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
		goto out_unlock;

	if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
		goto out_unlock;

	/* Ensure sequence is zero before writing the rest of the struct. */
	smp_wmb();
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
		goto out_unlock;

	/*
	 * Now switch to the TSC page mechanism by writing the sequence.
	 */
	tsc_seq++;
	if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
		tsc_seq = 1;

	/* Write the struct entirely before the non-zero sequence. */
	smp_wmb();

	hv->tsc_ref.tsc_sequence = tsc_seq;
	kvm_write_guest(kvm, gfn_to_gpa(gfn),
			&hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
out_unlock:
	mutex_unlock(&kvm->arch.hyperv.hv_lock);
}

static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
			     bool host)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = &kvm->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		hv->hv_guest_os_id = data;
		/* setting guest os id to zero disables hypercall page */
		if (!hv->hv_guest_os_id)
			hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
		break;
	case HV_X64_MSR_HYPERCALL: {
		u64 gfn;
		unsigned long addr;
		u8 instructions[4];

		/* if guest os id is not set hypercall should remain disabled */
		if (!hv->hv_guest_os_id)
			break;
		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
			hv->hv_hypercall = data;
			break;
		}
		gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
		addr = gfn_to_hva(kvm, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
		kvm_x86_ops->patch_hypercall(vcpu, instructions);
		((unsigned char *)instructions)[3] = 0xc3; /* ret */
		if (__copy_to_user((void __user *)addr, instructions, 4))
			return 1;
		hv->hv_hypercall = data;
		mark_page_dirty(kvm, gfn);
		break;
	}
	case HV_X64_MSR_REFERENCE_TSC:
		hv->hv_tsc_page = data;
		if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)
			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
		break;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_set_crash_data(vcpu,
						 msr - HV_X64_MSR_CRASH_P0,
						 data);
	case HV_X64_MSR_CRASH_CTL:
		return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
	case HV_X64_MSR_RESET:
		if (data == 1) {
			vcpu_debug(vcpu, "hyper-v reset requested\n");
			kvm_make_request(KVM_REQ_HV_RESET, vcpu);
		}
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
			    msr, data);
		return 1;
	}
	return 0;
}

/* Calculate cpu time spent by current task in 100ns units */
static u64 current_task_runtime_100ns(void)
{
	u64 utime, stime;

	task_cputime_adjusted(current, &utime, &stime);

	return div_u64(utime + stime, 100);
}

static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_APIC_ASSIST_PAGE: {
		u64 gfn;
		unsigned long addr;

		if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
			hv->hv_vapic = data;
			if (kvm_lapic_enable_pv_eoi(vcpu, 0))
				return 1;
			break;
		}
		gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
		addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
		if (__clear_user((void __user *)addr, PAGE_SIZE))
			return 1;
		hv->hv_vapic = data;
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
		if (kvm_lapic_enable_pv_eoi(vcpu,
					    gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
			return 1;
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
	case HV_X64_MSR_VP_RUNTIME:
		if (!host)
			return 1;
		hv->runtime_offset = data - current_task_runtime_100ns();
		break;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host);
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

		return stimer_set_config(vcpu_to_stimer(vcpu, timer_index),
					 data, host);
	}
	case HV_X64_MSR_STIMER0_COUNT:
	case HV_X64_MSR_STIMER1_COUNT:
	case HV_X64_MSR_STIMER2_COUNT:
	case HV_X64_MSR_STIMER3_COUNT: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

		return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
					data, host);
	}
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
			    msr, data);
		return 1;
	}

	return 0;
}

static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = &kvm->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		data = hv->hv_guest_os_id;
		break;
	case HV_X64_MSR_HYPERCALL:
		data = hv->hv_hypercall;
		break;
	case HV_X64_MSR_TIME_REF_COUNT:
		data = get_time_ref_counter(kvm);
		break;
	case HV_X64_MSR_REFERENCE_TSC:
		data = hv->hv_tsc_page;
		break;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_get_crash_data(vcpu,
						 msr - HV_X64_MSR_CRASH_P0,
						 pdata);
	case HV_X64_MSR_CRASH_CTL:
		return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
	case HV_X64_MSR_RESET:
		data = 0;
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}

	*pdata = data;
	return 0;
}

static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data = 0;
	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_VP_INDEX: {
		int r;
		struct kvm_vcpu *v;

		kvm_for_each_vcpu(r, v, vcpu->kvm) {
			if (v == vcpu) {
				data = r;
				break;
			}
		}
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
	case HV_X64_MSR_APIC_ASSIST_PAGE:
		data = hv->hv_vapic;
		break;
	case HV_X64_MSR_VP_RUNTIME:
		data = current_task_runtime_100ns() + hv->runtime_offset;
		break;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata);
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

		return stimer_get_config(vcpu_to_stimer(vcpu, timer_index),
					 pdata);
	}
	case HV_X64_MSR_STIMER0_COUNT:
	case HV_X64_MSR_STIMER1_COUNT:
	case HV_X64_MSR_STIMER2_COUNT:
	case HV_X64_MSR_STIMER3_COUNT: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

		return stimer_get_count(vcpu_to_stimer(vcpu, timer_index),
					pdata);
	}
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}

int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
		r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
		mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
		return r;
	} else
		return kvm_hv_set_msr(vcpu, msr, data, host);
}

int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
		r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
		mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
		return r;
	} else
		return kvm_hv_get_msr(vcpu, msr, pdata);
}

bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
	return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE;
}

static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
	bool longmode;

	longmode = is_64_bit_mode(vcpu);
	if (longmode)
		kvm_register_write(vcpu, VCPU_REGS_RAX, result);
	else {
		kvm_register_write(vcpu, VCPU_REGS_RDX, result >> 32);
		kvm_register_write(vcpu, VCPU_REGS_RAX, result & 0xffffffff);
	}
}

static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result);
	return 1;
}

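/*
 * Top-level hypercall dispatcher.  The input value is taken from RCX in
 * 64-bit mode and from EDX:EAX otherwise.  Returns 1 to resume the guest;
 * HVCALL_POST_MESSAGE and HVCALL_SIGNAL_EVENT return 0 and exit to
 * userspace, which completes the call via complete_userspace_io.
 */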
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
	u64 param, ingpa, outgpa, ret;
	uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
	bool fast, longmode;

	/*
	 * A hypercall generates #UD from non-zero CPL or real mode,
	 * per the Hyper-V spec.
	 */
	if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	longmode = is_64_bit_mode(vcpu);

	if (!longmode) {
		param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
		ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
		outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
			 (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
	}
#ifdef CONFIG_X86_64
	else {
		param = kvm_register_read(vcpu, VCPU_REGS_RCX);
		ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
		outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
	}
#endif

	code = param & 0xffff;
	fast = (param >> 16) & 0x1;
	rep_cnt = (param >> 32) & 0xfff;
	rep_idx = (param >> 48) & 0xfff;

	trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

	/* Hypercall continuation is not supported yet */
	if (rep_cnt || rep_idx) {
		res = HV_STATUS_INVALID_HYPERCALL_CODE;
		goto set_result;
	}

	switch (code) {
	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
		kvm_vcpu_on_spin(vcpu);
		break;
	case HVCALL_POST_MESSAGE:
	case HVCALL_SIGNAL_EVENT:
		/* don't bother userspace if it has no way to handle it */
		if (!vcpu_to_synic(vcpu)->active) {
			res = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;
		}
		vcpu->run->exit_reason = KVM_EXIT_HYPERV;
		vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
		vcpu->run->hyperv.u.hcall.input = param;
		vcpu->run->hyperv.u.hcall.params[0] = ingpa;
		vcpu->run->hyperv.u.hcall.params[1] = outgpa;
		vcpu->arch.complete_userspace_io =
				kvm_hv_hypercall_complete_userspace;
		return 0;
	default:
		res = HV_STATUS_INVALID_HYPERCALL_CODE;
		break;
	}

set_result:
	ret = res | (((u64)rep_done & 0xfff) << 32);
	kvm_hv_hypercall_set_result(vcpu, ret);
	return 1;
}