Commit | Line | Data |
---|---|---|
20c8ccb1 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
e83d5887 AS |
2 | /* |
3 | * KVM Microsoft Hyper-V emulation | |
4 | * | |
5 | * derived from arch/x86/kvm/x86.c | |
6 | * | |
7 | * Copyright (C) 2006 Qumranet, Inc. | |
8 | * Copyright (C) 2008 Qumranet, Inc. | |
9 | * Copyright IBM Corporation, 2008 | |
10 | * Copyright 2010 Red Hat, Inc. and/or its affiliates. | |
11 | * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com> | |
12 | * | |
13 | * Authors: | |
14 | * Avi Kivity <avi@qumranet.com> | |
15 | * Yaniv Kamay <yaniv@qumranet.com> | |
16 | * Amit Shah <amit.shah@qumranet.com> | |
17 | * Ben-Ami Yassour <benami@il.ibm.com> | |
18 | * Andrey Smetanin <asmetanin@virtuozzo.com> | |
e83d5887 AS |
19 | */ |
20 | ||
21 | #include "x86.h" | |
22 | #include "lapic.h" | |
5c919412 | 23 | #include "ioapic.h" |
f97f5a56 | 24 | #include "cpuid.h" |
e83d5887 AS |
25 | #include "hyperv.h" |
26 | ||
b2d8b167 | 27 | #include <linux/cpu.h> |
e83d5887 | 28 | #include <linux/kvm_host.h> |
765eaa0f | 29 | #include <linux/highmem.h> |
32ef5517 | 30 | #include <linux/sched/cputime.h> |
faeb7833 | 31 | #include <linux/eventfd.h> |
32ef5517 | 32 | |
5c919412 | 33 | #include <asm/apicdef.h> |
e83d5887 AS |
34 | #include <trace/events/kvm.h> |
35 | ||
36 | #include "trace.h" | |
59508b30 | 37 | #include "irq.h" |
e83d5887 | 38 | |
f21dd494 VK |
39 | #define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64) |
40 | ||
8644f771 VK |
41 | static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer, |
42 | bool vcpu_kick); | |
43 | ||
5c919412 AS |
44 | static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint) |
45 | { | |
46 | return atomic64_read(&synic->sint[sint]); | |
47 | } | |
48 | ||
49 | static inline int synic_get_sint_vector(u64 sint_value) | |
50 | { | |
51 | if (sint_value & HV_SYNIC_SINT_MASKED) | |
52 | return -1; | |
53 | return sint_value & HV_SYNIC_SINT_VECTOR_MASK; | |
54 | } | |
55 | ||
56 | static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic, | |
57 | int vector) | |
58 | { | |
59 | int i; | |
60 | ||
61 | for (i = 0; i < ARRAY_SIZE(synic->sint); i++) { | |
62 | if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector) | |
63 | return true; | |
64 | } | |
65 | return false; | |
66 | } | |
67 | ||
68 | static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic, | |
69 | int vector) | |
70 | { | |
71 | int i; | |
72 | u64 sint_value; | |
73 | ||
74 | for (i = 0; i < ARRAY_SIZE(synic->sint); i++) { | |
75 | sint_value = synic_read_sint(synic, i); | |
76 | if (synic_get_sint_vector(sint_value) == vector && | |
77 | sint_value & HV_SYNIC_SINT_AUTO_EOI) | |
78 | return true; | |
79 | } | |
80 | return false; | |
81 | } | |
82 | ||
98f65ad4 VK |
83 | static void synic_update_vector(struct kvm_vcpu_hv_synic *synic, |
84 | int vector) | |
85 | { | |
87a8d795 VK |
86 | if (vector < HV_SYNIC_FIRST_VALID_VECTOR) |
87 | return; | |
88 | ||
98f65ad4 VK |
89 | if (synic_has_vector_connected(synic, vector)) |
90 | __set_bit(vector, synic->vec_bitmap); | |
91 | else | |
92 | __clear_bit(vector, synic->vec_bitmap); | |
93 | ||
94 | if (synic_has_vector_auto_eoi(synic, vector)) | |
95 | __set_bit(vector, synic->auto_eoi_bitmap); | |
96 | else | |
97 | __clear_bit(vector, synic->auto_eoi_bitmap); | |
98 | } | |
99 | ||
7be58a64 AS |
100 | static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint, |
101 | u64 data, bool host) | |
5c919412 | 102 | { |
98f65ad4 | 103 | int vector, old_vector; |
915e6f78 | 104 | bool masked; |
5c919412 AS |
105 | |
106 | vector = data & HV_SYNIC_SINT_VECTOR_MASK; | |
915e6f78 VK |
107 | masked = data & HV_SYNIC_SINT_MASKED; |
108 | ||
109 | /* | |
110 | * Valid vectors are 16-255; however, nested Hyper-V attempts to write |
111 | * the default value 0x10000 on boot, and this should not #GP. We also |
112 | * need to allow zero-initializing the register from the host. |
113 | */ | |
114 | if (vector < HV_SYNIC_FIRST_VALID_VECTOR && !host && !masked) | |
5c919412 AS |
115 | return 1; |
116 | /* | |
117 | * Guest may configure multiple SINTs to use the same vector, so | |
118 | * we maintain a bitmap of vectors handled by synic, and a | |
119 | * bitmap of vectors with auto-eoi behavior. The bitmaps are | |
120 | * updated here, and atomically queried on fast paths. | |
121 | */ | |
98f65ad4 | 122 | old_vector = synic_read_sint(synic, sint) & HV_SYNIC_SINT_VECTOR_MASK; |
5c919412 AS |
123 | |
124 | atomic64_set(&synic->sint[sint], data); | |
125 | ||
98f65ad4 | 126 | synic_update_vector(synic, old_vector); |
5c919412 | 127 | |
98f65ad4 | 128 | synic_update_vector(synic, vector); |
5c919412 AS |
129 | |
130 | /* Load SynIC vectors into EOI exit bitmap */ | |
131 | kvm_make_request(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic)); | |
132 | return 0; | |
133 | } | |
134 | ||
d3457c87 RK |
135 | static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx) |
136 | { | |
137 | struct kvm_vcpu *vcpu = NULL; | |
138 | int i; | |
139 | ||
9170200e VK |
140 | if (vpidx >= KVM_MAX_VCPUS) |
141 | return NULL; | |
142 | ||
143 | vcpu = kvm_get_vcpu(kvm, vpidx); | |
d3457c87 RK |
144 | if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx) |
145 | return vcpu; | |
146 | kvm_for_each_vcpu(i, vcpu, kvm) | |
147 | if (vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx) | |
148 | return vcpu; | |
149 | return NULL; | |
150 | } | |
151 | ||
152 | static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx) | |
5c919412 AS |
153 | { |
154 | struct kvm_vcpu *vcpu; | |
155 | struct kvm_vcpu_hv_synic *synic; | |
156 | ||
d3457c87 | 157 | vcpu = get_vcpu_by_vpidx(kvm, vpidx); |
5c919412 AS |
158 | if (!vcpu) |
159 | return NULL; | |
160 | synic = vcpu_to_synic(vcpu); | |
161 | return (synic->active) ? synic : NULL; | |
162 | } | |
163 | ||
164 | static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint) | |
165 | { | |
166 | struct kvm *kvm = vcpu->kvm; | |
765eaa0f | 167 | struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu); |
1f4b34f8 AS |
168 | struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu); |
169 | struct kvm_vcpu_hv_stimer *stimer; | |
08a800ac | 170 | int gsi, idx; |
5c919412 | 171 | |
18659a9c | 172 | trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint); |
5c919412 | 173 | |
1f4b34f8 | 174 | /* Try to deliver pending Hyper-V SynIC timer messages */ |
1f4b34f8 AS |
175 | for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) { |
176 | stimer = &hv_vcpu->stimer[idx]; | |
6a058a1e | 177 | if (stimer->msg_pending && stimer->config.enable && |
8644f771 | 178 | !stimer->config.direct_mode && |
08a800ac VK |
179 | stimer->config.sintx == sint) |
180 | stimer_mark_pending(stimer, false); | |
1f4b34f8 | 181 | } |
1f4b34f8 | 182 | |
5c919412 | 183 | idx = srcu_read_lock(&kvm->irq_srcu); |
1f4b34f8 | 184 | gsi = atomic_read(&synic->sint_to_gsi[sint]); |
5c919412 AS |
185 | if (gsi != -1) |
186 | kvm_notify_acked_gsi(kvm, gsi); | |
187 | srcu_read_unlock(&kvm->irq_srcu, idx); | |
188 | } | |
189 | ||
db397571 AS |
190 | static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr) |
191 | { | |
192 | struct kvm_vcpu *vcpu = synic_to_vcpu(synic); | |
193 | struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; | |
194 | ||
195 | hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC; | |
196 | hv_vcpu->exit.u.synic.msr = msr; | |
197 | hv_vcpu->exit.u.synic.control = synic->control; | |
198 | hv_vcpu->exit.u.synic.evt_page = synic->evt_page; | |
199 | hv_vcpu->exit.u.synic.msg_page = synic->msg_page; | |
200 | ||
201 | kvm_make_request(KVM_REQ_HV_EXIT, vcpu); | |
202 | } | |
203 | ||
5c919412 AS |
204 | static int synic_set_msr(struct kvm_vcpu_hv_synic *synic, |
205 | u32 msr, u64 data, bool host) | |
206 | { | |
207 | struct kvm_vcpu *vcpu = synic_to_vcpu(synic); | |
208 | int ret; | |
209 | ||
44883f01 | 210 | if (!synic->active && !host) |
5c919412 AS |
211 | return 1; |
212 | ||
18659a9c AS |
213 | trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host); |
214 | ||
5c919412 AS |
215 | ret = 0; |
216 | switch (msr) { | |
217 | case HV_X64_MSR_SCONTROL: | |
218 | synic->control = data; | |
db397571 AS |
219 | if (!host) |
220 | synic_exit(synic, msr); | |
5c919412 AS |
221 | break; |
222 | case HV_X64_MSR_SVERSION: | |
223 | if (!host) { | |
224 | ret = 1; | |
225 | break; | |
226 | } | |
227 | synic->version = data; | |
228 | break; | |
229 | case HV_X64_MSR_SIEFP: | |
efc479e6 RK |
230 | if ((data & HV_SYNIC_SIEFP_ENABLE) && !host && |
231 | !synic->dont_zero_synic_pages) | |
5c919412 AS |
232 | if (kvm_clear_guest(vcpu->kvm, |
233 | data & PAGE_MASK, PAGE_SIZE)) { | |
234 | ret = 1; | |
235 | break; | |
236 | } | |
237 | synic->evt_page = data; | |
db397571 AS |
238 | if (!host) |
239 | synic_exit(synic, msr); | |
5c919412 AS |
240 | break; |
241 | case HV_X64_MSR_SIMP: | |
efc479e6 RK |
242 | if ((data & HV_SYNIC_SIMP_ENABLE) && !host && |
243 | !synic->dont_zero_synic_pages) | |
5c919412 AS |
244 | if (kvm_clear_guest(vcpu->kvm, |
245 | data & PAGE_MASK, PAGE_SIZE)) { | |
246 | ret = 1; | |
247 | break; | |
248 | } | |
249 | synic->msg_page = data; | |
db397571 AS |
250 | if (!host) |
251 | synic_exit(synic, msr); | |
5c919412 AS |
252 | break; |
253 | case HV_X64_MSR_EOM: { | |
254 | int i; | |
255 | ||
256 | for (i = 0; i < ARRAY_SIZE(synic->sint); i++) | |
257 | kvm_hv_notify_acked_sint(vcpu, i); | |
258 | break; | |
259 | } | |
260 | case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15: | |
7be58a64 | 261 | ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host); |
5c919412 AS |
262 | break; |
263 | default: | |
264 | ret = 1; | |
265 | break; | |
266 | } | |
267 | return ret; | |
268 | } | |
269 | ||
f97f5a56 JD |
270 | static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu) |
271 | { | |
272 | struct kvm_cpuid_entry2 *entry; | |
273 | ||
274 | entry = kvm_find_cpuid_entry(vcpu, | |
275 | HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES, | |
276 | 0); | |
277 | if (!entry) | |
278 | return false; | |
279 | ||
280 | return entry->eax & HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING; | |
281 | } | |
282 | ||
283 | static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu) | |
284 | { | |
285 | struct kvm *kvm = vcpu->kvm; | |
286 | struct kvm_hv *hv = &kvm->arch.hyperv; | |
287 | ||
288 | if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL) | |
289 | hv->hv_syndbg.control.status = | |
290 | vcpu->run->hyperv.u.syndbg.status; | |
291 | return 1; | |
292 | } | |
293 | ||
294 | static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr) | |
295 | { | |
296 | struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu); | |
297 | struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; | |
298 | ||
299 | hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG; | |
300 | hv_vcpu->exit.u.syndbg.msr = msr; | |
301 | hv_vcpu->exit.u.syndbg.control = syndbg->control.control; | |
302 | hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page; | |
303 | hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page; | |
304 | hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page; | |
305 | vcpu->arch.complete_userspace_io = | |
306 | kvm_hv_syndbg_complete_userspace; | |
307 | ||
308 | kvm_make_request(KVM_REQ_HV_EXIT, vcpu); | |
309 | } | |
310 | ||
311 | static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) | |
312 | { | |
313 | struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu); | |
314 | ||
315 | if (!kvm_hv_is_syndbg_enabled(vcpu) && !host) | |
316 | return 1; | |
317 | ||
318 | trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id, | |
319 | vcpu_to_hv_vcpu(vcpu)->vp_index, msr, data); | |
320 | switch (msr) { | |
321 | case HV_X64_MSR_SYNDBG_CONTROL: | |
322 | syndbg->control.control = data; | |
323 | if (!host) | |
324 | syndbg_exit(vcpu, msr); | |
325 | break; | |
326 | case HV_X64_MSR_SYNDBG_STATUS: | |
327 | syndbg->control.status = data; | |
328 | break; | |
329 | case HV_X64_MSR_SYNDBG_SEND_BUFFER: | |
330 | syndbg->control.send_page = data; | |
331 | break; | |
332 | case HV_X64_MSR_SYNDBG_RECV_BUFFER: | |
333 | syndbg->control.recv_page = data; | |
334 | break; | |
335 | case HV_X64_MSR_SYNDBG_PENDING_BUFFER: | |
336 | syndbg->control.pending_page = data; | |
337 | if (!host) | |
338 | syndbg_exit(vcpu, msr); | |
339 | break; | |
340 | case HV_X64_MSR_SYNDBG_OPTIONS: | |
341 | syndbg->options = data; | |
342 | break; | |
343 | default: | |
344 | break; | |
345 | } | |
346 | ||
347 | return 0; | |
348 | } | |
349 | ||
350 | static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) | |
351 | { | |
352 | struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu); | |
353 | ||
354 | if (!kvm_hv_is_syndbg_enabled(vcpu) && !host) | |
355 | return 1; | |
356 | ||
357 | switch (msr) { | |
358 | case HV_X64_MSR_SYNDBG_CONTROL: | |
359 | *pdata = syndbg->control.control; | |
360 | break; | |
361 | case HV_X64_MSR_SYNDBG_STATUS: | |
362 | *pdata = syndbg->control.status; | |
363 | break; | |
364 | case HV_X64_MSR_SYNDBG_SEND_BUFFER: | |
365 | *pdata = syndbg->control.send_page; | |
366 | break; | |
367 | case HV_X64_MSR_SYNDBG_RECV_BUFFER: | |
368 | *pdata = syndbg->control.recv_page; | |
369 | break; | |
370 | case HV_X64_MSR_SYNDBG_PENDING_BUFFER: | |
371 | *pdata = syndbg->control.pending_page; | |
372 | break; | |
373 | case HV_X64_MSR_SYNDBG_OPTIONS: | |
374 | *pdata = syndbg->options; | |
375 | break; | |
376 | default: | |
377 | break; | |
378 | } | |
379 | ||
380 | trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, | |
381 | vcpu_to_hv_vcpu(vcpu)->vp_index, msr, | |
382 | *pdata); | |
383 | ||
384 | return 0; | |
385 | } | |
386 | ||
44883f01 PB |
387 | static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata, |
388 | bool host) | |
5c919412 AS |
389 | { |
390 | int ret; | |
391 | ||
44883f01 | 392 | if (!synic->active && !host) |
5c919412 AS |
393 | return 1; |
394 | ||
395 | ret = 0; | |
396 | switch (msr) { | |
397 | case HV_X64_MSR_SCONTROL: | |
398 | *pdata = synic->control; | |
399 | break; | |
400 | case HV_X64_MSR_SVERSION: | |
401 | *pdata = synic->version; | |
402 | break; | |
403 | case HV_X64_MSR_SIEFP: | |
404 | *pdata = synic->evt_page; | |
405 | break; | |
406 | case HV_X64_MSR_SIMP: | |
407 | *pdata = synic->msg_page; | |
408 | break; | |
409 | case HV_X64_MSR_EOM: | |
410 | *pdata = 0; | |
411 | break; | |
412 | case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15: | |
413 | *pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]); | |
414 | break; | |
415 | default: | |
416 | ret = 1; | |
417 | break; | |
418 | } | |
419 | return ret; | |
420 | } | |
421 | ||
ecd8a8c2 | 422 | static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint) |
5c919412 AS |
423 | { |
424 | struct kvm_vcpu *vcpu = synic_to_vcpu(synic); | |
425 | struct kvm_lapic_irq irq; | |
426 | int ret, vector; | |
427 | ||
428 | if (sint >= ARRAY_SIZE(synic->sint)) | |
429 | return -EINVAL; | |
430 | ||
431 | vector = synic_get_sint_vector(synic_read_sint(synic, sint)); | |
432 | if (vector < 0) | |
433 | return -ENOENT; | |
434 | ||
435 | memset(&irq, 0, sizeof(irq)); | |
f98a3efb | 436 | irq.shorthand = APIC_DEST_SELF; |
5c919412 AS |
437 | irq.dest_mode = APIC_DEST_PHYSICAL; |
438 | irq.delivery_mode = APIC_DM_FIXED; | |
439 | irq.vector = vector; | |
440 | irq.level = 1; | |
441 | ||
f98a3efb | 442 | ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL); |
18659a9c | 443 | trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret); |
5c919412 AS |
444 | return ret; |
445 | } | |
446 | ||
d3457c87 | 447 | int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint) |
5c919412 AS |
448 | { |
449 | struct kvm_vcpu_hv_synic *synic; | |
450 | ||
d3457c87 | 451 | synic = synic_get(kvm, vpidx); |
5c919412 AS |
452 | if (!synic) |
453 | return -EINVAL; | |
454 | ||
455 | return synic_set_irq(synic, sint); | |
456 | } | |
457 | ||
458 | void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector) | |
459 | { | |
460 | struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu); | |
461 | int i; | |
462 | ||
18659a9c | 463 | trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector); |
5c919412 AS |
464 | |
465 | for (i = 0; i < ARRAY_SIZE(synic->sint); i++) | |
466 | if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector) | |
467 | kvm_hv_notify_acked_sint(vcpu, i); | |
468 | } | |
469 | ||
d3457c87 | 470 | static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi) |
5c919412 AS |
471 | { |
472 | struct kvm_vcpu_hv_synic *synic; | |
473 | ||
d3457c87 | 474 | synic = synic_get(kvm, vpidx); |
5c919412 AS |
475 | if (!synic) |
476 | return -EINVAL; | |
477 | ||
478 | if (sint >= ARRAY_SIZE(synic->sint_to_gsi)) | |
479 | return -EINVAL; | |
480 | ||
481 | atomic_set(&synic->sint_to_gsi[sint], gsi); | |
482 | return 0; | |
483 | } | |
484 | ||
485 | void kvm_hv_irq_routing_update(struct kvm *kvm) | |
486 | { | |
487 | struct kvm_irq_routing_table *irq_rt; | |
488 | struct kvm_kernel_irq_routing_entry *e; | |
489 | u32 gsi; | |
490 | ||
491 | irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu, | |
492 | lockdep_is_held(&kvm->irq_lock)); | |
493 | ||
494 | for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) { | |
495 | hlist_for_each_entry(e, &irq_rt->map[gsi], link) { | |
496 | if (e->type == KVM_IRQ_ROUTING_HV_SINT) | |
497 | kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu, | |
498 | e->hv_sint.sint, gsi); | |
499 | } | |
500 | } | |
501 | } | |
502 | ||
503 | static void synic_init(struct kvm_vcpu_hv_synic *synic) | |
504 | { | |
505 | int i; | |
506 | ||
507 | memset(synic, 0, sizeof(*synic)); | |
508 | synic->version = HV_SYNIC_VERSION_1; | |
509 | for (i = 0; i < ARRAY_SIZE(synic->sint); i++) { | |
510 | atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED); | |
511 | atomic_set(&synic->sint_to_gsi[i], -1); | |
512 | } | |
513 | } | |
514 | ||
93bf4172 AS |
515 | static u64 get_time_ref_counter(struct kvm *kvm) |
516 | { | |
095cf55d PB |
517 | struct kvm_hv *hv = &kvm->arch.hyperv; |
518 | struct kvm_vcpu *vcpu; | |
519 | u64 tsc; | |
520 | ||
521 | /* | |
522 | * If the guest has not set up the TSC page, or the clock isn't |
523 | * stable, fall back to get_kvmclock_ns(). |
524 | */ | |
525 | if (!hv->tsc_ref.tsc_sequence) | |
526 | return div_u64(get_kvmclock_ns(kvm), 100); | |
527 | ||
528 | vcpu = kvm_get_vcpu(kvm, 0); | |
529 | tsc = kvm_read_l1_tsc(vcpu, rdtsc()); | |
530 | return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64) | |
531 | + hv->tsc_ref.tsc_offset; | |
93bf4172 AS |
532 | } |
533 | ||
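The Hyper-V time reference counter above runs in 100 ns units (a 10 MHz reference), which is why the kvmclock fallback divides nanoseconds by 100. A minimal standalone sketch of that unit conversion, with made-up numbers and plain userspace C rather than kernel code:

/* Hypothetical example: nanoseconds -> Hyper-V 100ns reference units. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t kvmclock_ns = 1000000000ull;	/* one second of guest time */
	uint64_t time_ref = kvmclock_ns / 100;	/* 10,000,000 reference ticks */

	printf("time ref counter: %llu\n", (unsigned long long)time_ref);
	return 0;
}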
f3b138c5 | 534 | static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer, |
1f4b34f8 AS |
535 | bool vcpu_kick) |
536 | { | |
537 | struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); | |
538 | ||
539 | set_bit(stimer->index, | |
540 | vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap); | |
541 | kvm_make_request(KVM_REQ_HV_STIMER, vcpu); | |
542 | if (vcpu_kick) | |
543 | kvm_vcpu_kick(vcpu); | |
544 | } | |
545 | ||
1f4b34f8 AS |
546 | static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer) |
547 | { | |
548 | struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); | |
549 | ||
ac3e5fca AS |
550 | trace_kvm_hv_stimer_cleanup(stimer_to_vcpu(stimer)->vcpu_id, |
551 | stimer->index); | |
552 | ||
019b9781 | 553 | hrtimer_cancel(&stimer->timer); |
1f4b34f8 AS |
554 | clear_bit(stimer->index, |
555 | vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap); | |
556 | stimer->msg_pending = false; | |
f808495d | 557 | stimer->exp_time = 0; |
1f4b34f8 AS |
558 | } |
559 | ||
560 | static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer) | |
561 | { | |
562 | struct kvm_vcpu_hv_stimer *stimer; | |
563 | ||
564 | stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer); | |
ac3e5fca AS |
565 | trace_kvm_hv_stimer_callback(stimer_to_vcpu(stimer)->vcpu_id, |
566 | stimer->index); | |
f3b138c5 | 567 | stimer_mark_pending(stimer, true); |
1f4b34f8 AS |
568 | |
569 | return HRTIMER_NORESTART; | |
570 | } | |
571 | ||
f808495d AS |
572 | /* |
573 | * stimer_start() assumptions: | |
574 | * a) stimer->count is not equal to 0 | |
575 | * b) stimer->config has HV_STIMER_ENABLE flag | |
576 | */ | |
1f4b34f8 AS |
577 | static int stimer_start(struct kvm_vcpu_hv_stimer *stimer) |
578 | { | |
579 | u64 time_now; | |
580 | ktime_t ktime_now; | |
581 | ||
582 | time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm); | |
583 | ktime_now = ktime_get(); | |
584 | ||
6a058a1e | 585 | if (stimer->config.periodic) { |
f808495d AS |
586 | if (stimer->exp_time) { |
587 | if (time_now >= stimer->exp_time) { | |
588 | u64 remainder; | |
589 | ||
590 | div64_u64_rem(time_now - stimer->exp_time, | |
591 | stimer->count, &remainder); | |
592 | stimer->exp_time = | |
593 | time_now + (stimer->count - remainder); | |
594 | } | |
595 | } else | |
596 | stimer->exp_time = time_now + stimer->count; | |
1f4b34f8 | 597 | |
ac3e5fca AS |
598 | trace_kvm_hv_stimer_start_periodic( |
599 | stimer_to_vcpu(stimer)->vcpu_id, | |
600 | stimer->index, | |
601 | time_now, stimer->exp_time); | |
602 | ||
1f4b34f8 | 603 | hrtimer_start(&stimer->timer, |
f808495d AS |
604 | ktime_add_ns(ktime_now, |
605 | 100 * (stimer->exp_time - time_now)), | |
1f4b34f8 AS |
606 | HRTIMER_MODE_ABS); |
607 | return 0; | |
608 | } | |
609 | stimer->exp_time = stimer->count; | |
610 | if (time_now >= stimer->count) { | |
611 | /* | |
612 | * Expire timer according to Hypervisor Top-Level Functional | |
613 | * specification v4 (15.3.1): |
614 | * "If a one shot is enabled and the specified count is in | |
615 | * the past, it will expire immediately." | |
616 | */ | |
f3b138c5 | 617 | stimer_mark_pending(stimer, false); |
1f4b34f8 AS |
618 | return 0; |
619 | } | |
620 | ||
ac3e5fca AS |
621 | trace_kvm_hv_stimer_start_one_shot(stimer_to_vcpu(stimer)->vcpu_id, |
622 | stimer->index, | |
623 | time_now, stimer->count); | |
624 | ||
1f4b34f8 AS |
625 | hrtimer_start(&stimer->timer, |
626 | ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)), | |
627 | HRTIMER_MODE_ABS); | |
628 | return 0; | |
629 | } | |
630 | ||
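When a periodic timer has missed one or more expirations, the branch above realigns exp_time onto the period grid using div64_u64_rem(). A small standalone check of that arithmetic with hypothetical values (none of these numbers come from the source):

/* Hypothetical catch-up for a periodic timer with a 1 ms period (10000 * 100ns). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t count = 10000;		/* period, in 100ns units */
	uint64_t exp_time = 50000;	/* last programmed expiration */
	uint64_t time_now = 73000;	/* 2.3 periods late */
	uint64_t remainder = (time_now - exp_time) % count;	/* 3000 */

	exp_time = time_now + (count - remainder);
	printf("next expiration: %llu\n", (unsigned long long)exp_time);	/* 80000 */
	return 0;
}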
631 | static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config, | |
632 | bool host) | |
633 | { | |
8644f771 VK |
634 | union hv_stimer_config new_config = {.as_uint64 = config}, |
635 | old_config = {.as_uint64 = stimer->config.as_uint64}; | |
dbcf3f96 VK |
636 | struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); |
637 | struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu); | |
638 | ||
639 | if (!synic->active && !host) | |
640 | return 1; | |
6a058a1e | 641 | |
ac3e5fca AS |
642 | trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id, |
643 | stimer->index, config, host); | |
644 | ||
f3b138c5 | 645 | stimer_cleanup(stimer); |
8644f771 VK |
646 | if (old_config.enable && |
647 | !new_config.direct_mode && new_config.sintx == 0) | |
6a058a1e VK |
648 | new_config.enable = 0; |
649 | stimer->config.as_uint64 = new_config.as_uint64; | |
8644f771 | 650 | |
013cc6eb VK |
651 | if (stimer->config.enable) |
652 | stimer_mark_pending(stimer, false); | |
653 | ||
1f4b34f8 AS |
654 | return 0; |
655 | } | |
656 | ||
657 | static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count, | |
658 | bool host) | |
659 | { | |
dbcf3f96 VK |
660 | struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); |
661 | struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu); | |
662 | ||
663 | if (!synic->active && !host) | |
664 | return 1; | |
665 | ||
ac3e5fca AS |
666 | trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id, |
667 | stimer->index, count, host); | |
668 | ||
1f4b34f8 | 669 | stimer_cleanup(stimer); |
f3b138c5 | 670 | stimer->count = count; |
1f4b34f8 | 671 | if (stimer->count == 0) |
6a058a1e VK |
672 | stimer->config.enable = 0; |
673 | else if (stimer->config.auto_enable) | |
674 | stimer->config.enable = 1; | |
013cc6eb VK |
675 | |
676 | if (stimer->config.enable) | |
677 | stimer_mark_pending(stimer, false); | |
678 | ||
1f4b34f8 AS |
679 | return 0; |
680 | } | |
681 | ||
682 | static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig) | |
683 | { | |
6a058a1e | 684 | *pconfig = stimer->config.as_uint64; |
1f4b34f8 AS |
685 | return 0; |
686 | } | |
687 | ||
688 | static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount) | |
689 | { | |
690 | *pcount = stimer->count; | |
691 | return 0; | |
692 | } | |
693 | ||
694 | static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint, | |
7deec5e0 | 695 | struct hv_message *src_msg, bool no_retry) |
1f4b34f8 AS |
696 | { |
697 | struct kvm_vcpu *vcpu = synic_to_vcpu(synic); | |
3a0e7731 RK |
698 | int msg_off = offsetof(struct hv_message_page, sint_message[sint]); |
699 | gfn_t msg_page_gfn; | |
700 | struct hv_message_header hv_hdr; | |
1f4b34f8 | 701 | int r; |
1f4b34f8 AS |
702 | |
703 | if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE)) | |
704 | return -ENOENT; | |
705 | ||
3a0e7731 | 706 | msg_page_gfn = synic->msg_page >> PAGE_SHIFT; |
1f4b34f8 | 707 | |
3a0e7731 RK |
708 | /* |
709 | * Strictly following the spec-mandated ordering would assume setting | |
710 | * .msg_pending before checking .message_type. However, this function | |
711 | * is only called in vcpu context so the entire update is atomic from | |
712 | * guest POV and thus the exact order here doesn't matter. | |
713 | */ | |
714 | r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, &hv_hdr.message_type, | |
715 | msg_off + offsetof(struct hv_message, | |
716 | header.message_type), | |
717 | sizeof(hv_hdr.message_type)); | |
718 | if (r < 0) | |
719 | return r; | |
720 | ||
721 | if (hv_hdr.message_type != HVMSG_NONE) { | |
7deec5e0 RK |
722 | if (no_retry) |
723 | return 0; | |
724 | ||
3a0e7731 RK |
725 | hv_hdr.message_flags.msg_pending = 1; |
726 | r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, | |
727 | &hv_hdr.message_flags, | |
728 | msg_off + | |
729 | offsetof(struct hv_message, | |
730 | header.message_flags), | |
731 | sizeof(hv_hdr.message_flags)); | |
732 | if (r < 0) | |
733 | return r; | |
734 | return -EAGAIN; | |
1f4b34f8 | 735 | } |
3a0e7731 RK |
736 | |
737 | r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, src_msg, msg_off, | |
738 | sizeof(src_msg->header) + | |
739 | src_msg->header.payload_size); | |
740 | if (r < 0) | |
741 | return r; | |
742 | ||
743 | r = synic_set_irq(synic, sint); | |
744 | if (r < 0) | |
745 | return r; | |
746 | if (r == 0) | |
747 | return -EFAULT; | |
748 | return 0; | |
1f4b34f8 AS |
749 | } |
750 | ||
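synic_deliver_msg() above writes into the per-SINT slot of the guest's SIMP message page, so msg_off lands on fixed 256-byte boundaries. The sketch below uses simplified stand-in structure definitions that mirror the 256-byte TLFS message layout (they are not the kernel's headers) to show those offsets:

/* Simplified, illustrative stand-ins for the Hyper-V message structures. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct hv_message_header {
	uint32_t message_type;
	uint8_t  payload_size;
	uint8_t  message_flags;
	uint8_t  reserved[2];
	uint64_t sender;
};

struct hv_message {
	struct hv_message_header header;	/* 16 bytes */
	uint64_t payload[30];			/* 240 bytes of payload */
};

struct hv_message_page {
	struct hv_message sint_message[16];	/* one slot per SINT */
};

int main(void)
{
	printf("sizeof(struct hv_message) = %zu\n", sizeof(struct hv_message));
	printf("SINT0/1/2 offsets: %zu %zu %zu\n",
	       offsetof(struct hv_message_page, sint_message[0]),
	       offsetof(struct hv_message_page, sint_message[1]),
	       offsetof(struct hv_message_page, sint_message[2]));
	return 0;	/* prints 256, then 0 256 512 */
}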
0cdeabb1 | 751 | static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer) |
1f4b34f8 AS |
752 | { |
753 | struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); | |
754 | struct hv_message *msg = &stimer->msg; | |
755 | struct hv_timer_message_payload *payload = | |
756 | (struct hv_timer_message_payload *)&msg->u.payload; | |
1f4b34f8 | 757 | |
7deec5e0 RK |
758 | /* |
759 | * To avoid piling up periodic ticks, don't retry message | |
760 | * delivery for them (within "lazy" lost ticks policy). | |
761 | */ | |
6a058a1e | 762 | bool no_retry = stimer->config.periodic; |
7deec5e0 | 763 | |
1f4b34f8 AS |
764 | payload->expiration_time = stimer->exp_time; |
765 | payload->delivery_time = get_time_ref_counter(vcpu->kvm); | |
0cdeabb1 | 766 | return synic_deliver_msg(vcpu_to_synic(vcpu), |
6a058a1e | 767 | stimer->config.sintx, msg, |
7deec5e0 | 768 | no_retry); |
1f4b34f8 AS |
769 | } |
770 | ||
8644f771 VK |
771 | static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer) |
772 | { | |
773 | struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); | |
774 | struct kvm_lapic_irq irq = { | |
775 | .delivery_mode = APIC_DM_FIXED, | |
776 | .vector = stimer->config.apic_vector | |
777 | }; | |
778 | ||
a073d7e3 WL |
779 | if (lapic_in_kernel(vcpu)) |
780 | return !kvm_apic_set_irq(vcpu, &irq, NULL); | |
781 | return 0; | |
8644f771 VK |
782 | } |
783 | ||
1f4b34f8 AS |
784 | static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer) |
785 | { | |
8644f771 | 786 | int r, direct = stimer->config.direct_mode; |
ac3e5fca | 787 | |
0cdeabb1 | 788 | stimer->msg_pending = true; |
8644f771 VK |
789 | if (!direct) |
790 | r = stimer_send_msg(stimer); | |
791 | else | |
792 | r = stimer_notify_direct(stimer); | |
ac3e5fca | 793 | trace_kvm_hv_stimer_expiration(stimer_to_vcpu(stimer)->vcpu_id, |
8644f771 | 794 | stimer->index, direct, r); |
ac3e5fca | 795 | if (!r) { |
0cdeabb1 | 796 | stimer->msg_pending = false; |
6a058a1e VK |
797 | if (!(stimer->config.periodic)) |
798 | stimer->config.enable = 0; | |
0cdeabb1 | 799 | } |
1f4b34f8 AS |
800 | } |
801 | ||
802 | void kvm_hv_process_stimers(struct kvm_vcpu *vcpu) | |
803 | { | |
804 | struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu); | |
805 | struct kvm_vcpu_hv_stimer *stimer; | |
f3b138c5 | 806 | u64 time_now, exp_time; |
1f4b34f8 AS |
807 | int i; |
808 | ||
809 | for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) | |
810 | if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) { | |
811 | stimer = &hv_vcpu->stimer[i]; | |
6a058a1e | 812 | if (stimer->config.enable) { |
f3b138c5 AS |
813 | exp_time = stimer->exp_time; |
814 | ||
815 | if (exp_time) { | |
816 | time_now = | |
817 | get_time_ref_counter(vcpu->kvm); | |
818 | if (time_now >= exp_time) | |
819 | stimer_expiration(stimer); | |
820 | } | |
0cdeabb1 | 821 | |
6a058a1e | 822 | if ((stimer->config.enable) && |
f1ff89ec RK |
823 | stimer->count) { |
824 | if (!stimer->msg_pending) | |
825 | stimer_start(stimer); | |
826 | } else | |
0cdeabb1 | 827 | stimer_cleanup(stimer); |
1f4b34f8 AS |
828 | } |
829 | } | |
830 | } | |
831 | ||
832 | void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu) | |
833 | { | |
834 | struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu); | |
835 | int i; | |
836 | ||
837 | for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) | |
838 | stimer_cleanup(&hv_vcpu->stimer[i]); | |
839 | } | |
840 | ||
72bbf935 LP |
841 | bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu) |
842 | { | |
843 | if (!(vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) | |
844 | return false; | |
845 | return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED; | |
846 | } | |
847 | EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled); | |
848 | ||
849 | bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu, | |
850 | struct hv_vp_assist_page *assist_page) | |
851 | { | |
852 | if (!kvm_hv_assist_page_enabled(vcpu)) | |
853 | return false; | |
854 | return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, | |
855 | assist_page, sizeof(*assist_page)); | |
856 | } | |
857 | EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page); | |
858 | ||
1f4b34f8 AS |
859 | static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer) |
860 | { | |
861 | struct hv_message *msg = &stimer->msg; | |
862 | struct hv_timer_message_payload *payload = | |
863 | (struct hv_timer_message_payload *)&msg->u.payload; | |
864 | ||
865 | memset(&msg->header, 0, sizeof(msg->header)); | |
866 | msg->header.message_type = HVMSG_TIMER_EXPIRED; | |
867 | msg->header.payload_size = sizeof(*payload); | |
868 | ||
869 | payload->timer_index = stimer->index; | |
870 | payload->expiration_time = 0; | |
871 | payload->delivery_time = 0; | |
872 | } | |
873 | ||
874 | static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index) | |
875 | { | |
876 | memset(stimer, 0, sizeof(*stimer)); | |
877 | stimer->index = timer_index; | |
878 | hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | |
879 | stimer->timer.function = stimer_timer_callback; | |
880 | stimer_prepare_msg(stimer); | |
881 | } | |
882 | ||
5c919412 AS |
883 | void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu) |
884 | { | |
1f4b34f8 AS |
885 | struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu); |
886 | int i; | |
887 | ||
888 | synic_init(&hv_vcpu->synic); | |
889 | ||
890 | bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT); | |
891 | for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) | |
892 | stimer_init(&hv_vcpu->stimer[i], i); | |
5c919412 AS |
893 | } |
894 | ||
d3457c87 RK |
895 | void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu) |
896 | { | |
897 | struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu); | |
898 | ||
899 | hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu); | |
900 | } | |
901 | ||
efc479e6 | 902 | int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages) |
5c919412 | 903 | { |
efc479e6 RK |
904 | struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu); |
905 | ||
5c919412 AS |
906 | /* |
907 | * Hyper-V SynIC auto-EOI SINTs are |
f4fdc0a2 SS |
908 | * not compatible with APICv, so request |
909 | * to deactivate APICv permanently. |
5c919412 | 910 | */ |
f4fdc0a2 | 911 | kvm_request_apicv_update(vcpu->kvm, false, APICV_INHIBIT_REASON_HYPERV); |
efc479e6 RK |
912 | synic->active = true; |
913 | synic->dont_zero_synic_pages = dont_zero_synic_pages; | |
99b48ecc | 914 | synic->control = HV_SYNIC_CONTROL_ENABLE; |
5c919412 AS |
915 | return 0; |
916 | } | |
917 | ||
e83d5887 AS |
918 | static bool kvm_hv_msr_partition_wide(u32 msr) |
919 | { | |
920 | bool r = false; | |
921 | ||
922 | switch (msr) { | |
923 | case HV_X64_MSR_GUEST_OS_ID: | |
924 | case HV_X64_MSR_HYPERCALL: | |
925 | case HV_X64_MSR_REFERENCE_TSC: | |
926 | case HV_X64_MSR_TIME_REF_COUNT: | |
e7d9513b AS |
927 | case HV_X64_MSR_CRASH_CTL: |
928 | case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: | |
e516cebb | 929 | case HV_X64_MSR_RESET: |
a2e164e7 VK |
930 | case HV_X64_MSR_REENLIGHTENMENT_CONTROL: |
931 | case HV_X64_MSR_TSC_EMULATION_CONTROL: | |
932 | case HV_X64_MSR_TSC_EMULATION_STATUS: | |
f97f5a56 JD |
933 | case HV_X64_MSR_SYNDBG_OPTIONS: |
934 | case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER: | |
e83d5887 AS |
935 | r = true; |
936 | break; | |
937 | } | |
938 | ||
939 | return r; | |
940 | } | |
941 | ||
e7d9513b AS |
942 | static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu, |
943 | u32 index, u64 *pdata) | |
944 | { | |
945 | struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; | |
86187937 | 946 | size_t size = ARRAY_SIZE(hv->hv_crash_param); |
e7d9513b | 947 | |
86187937 | 948 | if (WARN_ON_ONCE(index >= size)) |
e7d9513b AS |
949 | return -EINVAL; |
950 | ||
86187937 | 951 | *pdata = hv->hv_crash_param[array_index_nospec(index, size)]; |
e7d9513b AS |
952 | return 0; |
953 | } | |
954 | ||
955 | static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata) | |
956 | { | |
957 | struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; | |
958 | ||
959 | *pdata = hv->hv_crash_ctl; | |
960 | return 0; | |
961 | } | |
962 | ||
963 | static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host) | |
964 | { | |
965 | struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; | |
966 | ||
967 | if (host) | |
a4987def | 968 | hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY; |
e7d9513b | 969 | |
a4987def | 970 | if (!host && (data & HV_CRASH_CTL_CRASH_NOTIFY)) { |
e7d9513b AS |
971 | |
972 | vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n", | |
973 | hv->hv_crash_param[0], | |
974 | hv->hv_crash_param[1], | |
975 | hv->hv_crash_param[2], | |
976 | hv->hv_crash_param[3], | |
977 | hv->hv_crash_param[4]); | |
978 | ||
979 | /* Send notification about crash to user space */ | |
980 | kvm_make_request(KVM_REQ_HV_CRASH, vcpu); | |
981 | } | |
982 | ||
983 | return 0; | |
984 | } | |
985 | ||
986 | static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu, | |
987 | u32 index, u64 data) | |
988 | { | |
989 | struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; | |
86187937 | 990 | size_t size = ARRAY_SIZE(hv->hv_crash_param); |
e7d9513b | 991 | |
86187937 | 992 | if (WARN_ON_ONCE(index >= size)) |
e7d9513b AS |
993 | return -EINVAL; |
994 | ||
86187937 | 995 | hv->hv_crash_param[array_index_nospec(index, size)] = data; |
e7d9513b AS |
996 | return 0; |
997 | } | |
998 | ||
095cf55d PB |
999 | /* |
1000 | * The kvmclock and Hyper-V TSC page use similar formulas, and converting | |
1001 | * between them is possible: | |
1002 | * | |
1003 | * kvmclock formula: | |
1004 | * nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32) | |
1005 | * + system_time | |
1006 | * | |
1007 | * Hyper-V formula: | |
1008 | * nsec/100 = ticks * scale / 2^64 + offset | |
1009 | * | |
1010 | * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula. | |
1011 | * By dividing the kvmclock formula by 100 and equating what's left we get: | |
1012 | * ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100 | |
1013 | * scale / 2^64 = tsc_to_system_mul * 2^(tsc_shift-32) / 100 | |
1014 | * scale = tsc_to_system_mul * 2^(32+tsc_shift) / 100 | |
1015 | * | |
1016 | * Now expand the kvmclock formula and divide by 100: | |
1017 | * nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32) | |
1018 | * - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) | |
1019 | * + system_time | |
1020 | * nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100 | |
1021 | * - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100 | |
1022 | * + system_time / 100 | |
1023 | * | |
1024 | * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64: | |
1025 | * nsec/100 = ticks * scale / 2^64 | |
1026 | * - tsc_timestamp * scale / 2^64 | |
1027 | * + system_time / 100 | |
1028 | * | |
1029 | * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out: | |
1030 | * offset = system_time / 100 - tsc_timestamp * scale / 2^64 | |
1031 | * | |
1032 | * These two equivalencies are implemented in this function. | |
1033 | */ | |
1034 | static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock, | |
7357b1df | 1035 | struct ms_hyperv_tsc_page *tsc_ref) |
095cf55d PB |
1036 | { |
1037 | u64 max_mul; | |
1038 | ||
1039 | if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT)) | |
1040 | return false; | |
1041 | ||
1042 | /* | |
1043 | * Check if scale would overflow; if so, we use the time ref counter: |
1044 | * tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64 | |
1045 | * tsc_to_system_mul / 100 >= 2^(32-tsc_shift) | |
1046 | * tsc_to_system_mul >= 100 * 2^(32-tsc_shift) | |
1047 | */ | |
1048 | max_mul = 100ull << (32 - hv_clock->tsc_shift); | |
1049 | if (hv_clock->tsc_to_system_mul >= max_mul) | |
1050 | return false; | |
1051 | ||
1052 | /* | |
1053 | * Otherwise compute the scale and offset according to the formulas | |
1054 | * derived above. | |
1055 | */ | |
1056 | tsc_ref->tsc_scale = | |
1057 | mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift), | |
1058 | hv_clock->tsc_to_system_mul, | |
1059 | 100); | |
1060 | ||
1061 | tsc_ref->tsc_offset = hv_clock->system_time; | |
1062 | do_div(tsc_ref->tsc_offset, 100); | |
1063 | tsc_ref->tsc_offset -= | |
1064 | mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64); | |
1065 | return true; | |
1066 | } | |
1067 | ||
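As a quick numeric sanity check of the scale formula implemented above, consider a hypothetical 3 GHz TSC: kvmclock would describe it with tsc_shift = 0 and tsc_to_system_mul ≈ 2^32 / 3, and one second of ticks should then read back as about 10,000,000 units of 100 ns. A standalone userspace sketch of that check (illustrative values only, not part of the file):

/* Illustrative check of: scale = tsc_to_system_mul * 2^(32 + tsc_shift) / 100 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t tsc_to_system_mul = 1431655765u;	/* ~2^32 / 3: 1/3 ns per tick */
	int tsc_shift = 0;

	__uint128_t scale = ((__uint128_t)tsc_to_system_mul << (32 + tsc_shift)) / 100;

	uint64_t ticks = 3000000000ull;			/* one second at 3 GHz */
	uint64_t ref = (uint64_t)(((__uint128_t)ticks * scale) >> 64);

	printf("100ns units after 1s: %llu (expect ~10000000)\n",
	       (unsigned long long)ref);
	return 0;
}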
1068 | void kvm_hv_setup_tsc_page(struct kvm *kvm, | |
1069 | struct pvclock_vcpu_time_info *hv_clock) | |
1070 | { | |
1071 | struct kvm_hv *hv = &kvm->arch.hyperv; | |
1072 | u32 tsc_seq; | |
1073 | u64 gfn; | |
1074 | ||
1075 | BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence)); | |
7357b1df | 1076 | BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0); |
095cf55d PB |
1077 | |
1078 | if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)) | |
1079 | return; | |
1080 | ||
3f5ad8be PB |
1081 | mutex_lock(&kvm->arch.hyperv.hv_lock); |
1082 | if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)) | |
1083 | goto out_unlock; | |
1084 | ||
095cf55d PB |
1085 | gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT; |
1086 | /* | |
1087 | * Because the TSC parameters only vary when there is a | |
1088 | * change in the master clock, do not bother with caching. | |
1089 | */ | |
1090 | if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn), | |
1091 | &tsc_seq, sizeof(tsc_seq)))) | |
3f5ad8be | 1092 | goto out_unlock; |
095cf55d PB |
1093 | |
1094 | /* | |
1095 | * While we're computing and writing the parameters, force the | |
1096 | * guest to use the time reference count MSR. | |
1097 | */ | |
1098 | hv->tsc_ref.tsc_sequence = 0; | |
1099 | if (kvm_write_guest(kvm, gfn_to_gpa(gfn), | |
1100 | &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence))) | |
3f5ad8be | 1101 | goto out_unlock; |
095cf55d PB |
1102 | |
1103 | if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref)) | |
3f5ad8be | 1104 | goto out_unlock; |
095cf55d PB |
1105 | |
1106 | /* Ensure sequence is zero before writing the rest of the struct. */ | |
1107 | smp_wmb(); | |
1108 | if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref))) | |
3f5ad8be | 1109 | goto out_unlock; |
095cf55d PB |
1110 | |
1111 | /* | |
1112 | * Now switch to the TSC page mechanism by writing the sequence. | |
1113 | */ | |
1114 | tsc_seq++; | |
1115 | if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0) | |
1116 | tsc_seq = 1; | |
1117 | ||
1118 | /* Write the struct entirely before the non-zero sequence. */ | |
1119 | smp_wmb(); | |
1120 | ||
1121 | hv->tsc_ref.tsc_sequence = tsc_seq; | |
1122 | kvm_write_guest(kvm, gfn_to_gpa(gfn), | |
1123 | &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)); | |
3f5ad8be PB |
1124 | out_unlock: |
1125 | mutex_unlock(&kvm->arch.hyperv.hv_lock); | |
095cf55d PB |
1126 | } |
1127 | ||
e7d9513b AS |
1128 | static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data, |
1129 | bool host) | |
e83d5887 AS |
1130 | { |
1131 | struct kvm *kvm = vcpu->kvm; | |
1132 | struct kvm_hv *hv = &kvm->arch.hyperv; | |
1133 | ||
1134 | switch (msr) { | |
1135 | case HV_X64_MSR_GUEST_OS_ID: | |
1136 | hv->hv_guest_os_id = data; | |
1137 | /* Setting the guest OS ID to zero disables the hypercall page. */ |
1138 | if (!hv->hv_guest_os_id) | |
1139 | hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE; | |
1140 | break; | |
1141 | case HV_X64_MSR_HYPERCALL: { | |
1142 | u64 gfn; | |
1143 | unsigned long addr; | |
1144 | u8 instructions[4]; | |
1145 | ||
1146 | /* If the guest OS ID is not set, the hypercall should remain disabled. */ |
1147 | if (!hv->hv_guest_os_id) | |
1148 | break; | |
1149 | if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) { | |
1150 | hv->hv_hypercall = data; | |
1151 | break; | |
1152 | } | |
1153 | gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT; | |
1154 | addr = gfn_to_hva(kvm, gfn); | |
1155 | if (kvm_is_error_hva(addr)) | |
1156 | return 1; | |
afaf0b2f | 1157 | kvm_x86_ops.patch_hypercall(vcpu, instructions); |
e83d5887 AS |
1158 | ((unsigned char *)instructions)[3] = 0xc3; /* ret */ |
1159 | if (__copy_to_user((void __user *)addr, instructions, 4)) | |
1160 | return 1; | |
1161 | hv->hv_hypercall = data; | |
1162 | mark_page_dirty(kvm, gfn); | |
1163 | break; | |
1164 | } | |
095cf55d | 1165 | case HV_X64_MSR_REFERENCE_TSC: |
e83d5887 | 1166 | hv->hv_tsc_page = data; |
095cf55d PB |
1167 | if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) |
1168 | kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); | |
e83d5887 | 1169 | break; |
e7d9513b AS |
1170 | case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: |
1171 | return kvm_hv_msr_set_crash_data(vcpu, | |
1172 | msr - HV_X64_MSR_CRASH_P0, | |
1173 | data); | |
1174 | case HV_X64_MSR_CRASH_CTL: | |
1175 | return kvm_hv_msr_set_crash_ctl(vcpu, data, host); | |
e516cebb AS |
1176 | case HV_X64_MSR_RESET: |
1177 | if (data == 1) { | |
1178 | vcpu_debug(vcpu, "hyper-v reset requested\n"); | |
1179 | kvm_make_request(KVM_REQ_HV_RESET, vcpu); | |
1180 | } | |
1181 | break; | |
a2e164e7 VK |
1182 | case HV_X64_MSR_REENLIGHTENMENT_CONTROL: |
1183 | hv->hv_reenlightenment_control = data; | |
1184 | break; | |
1185 | case HV_X64_MSR_TSC_EMULATION_CONTROL: | |
1186 | hv->hv_tsc_emulation_control = data; | |
1187 | break; | |
1188 | case HV_X64_MSR_TSC_EMULATION_STATUS: | |
1189 | hv->hv_tsc_emulation_status = data; | |
1190 | break; | |
44883f01 PB |
1191 | case HV_X64_MSR_TIME_REF_COUNT: |
1192 | /* read-only, but still ignore it if host-initiated */ | |
1193 | if (!host) | |
1194 | return 1; | |
1195 | break; | |
f97f5a56 JD |
1196 | case HV_X64_MSR_SYNDBG_OPTIONS: |
1197 | case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER: | |
1198 | return syndbg_set_msr(vcpu, msr, data, host); | |
e83d5887 | 1199 | default: |
2f9f5cdd | 1200 | vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n", |
e83d5887 AS |
1201 | msr, data); |
1202 | return 1; | |
1203 | } | |
1204 | return 0; | |
1205 | } | |
1206 | ||
9eec50b8 AS |
1207 | /* Calculate cpu time spent by current task in 100ns units */ |
1208 | static u64 current_task_runtime_100ns(void) | |
1209 | { | |
5613fda9 | 1210 | u64 utime, stime; |
9eec50b8 AS |
1211 | |
1212 | task_cputime_adjusted(current, &utime, &stime); | |
5613fda9 FW |
1213 | |
1214 | return div_u64(utime + stime, 100); | |
9eec50b8 AS |
1215 | } |
1216 | ||
1217 | static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) | |
e83d5887 | 1218 | { |
1779a39f | 1219 | struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; |
e83d5887 AS |
1220 | |
1221 | switch (msr) { | |
87ee613d VK |
1222 | case HV_X64_MSR_VP_INDEX: { |
1223 | struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; | |
1224 | int vcpu_idx = kvm_vcpu_get_idx(vcpu); | |
1225 | u32 new_vp_index = (u32)data; | |
1226 | ||
1227 | if (!host || new_vp_index >= KVM_MAX_VCPUS) | |
d3457c87 | 1228 | return 1; |
87ee613d VK |
1229 | |
1230 | if (new_vp_index == hv_vcpu->vp_index) | |
1231 | return 0; | |
1232 | ||
1233 | /* | |
1234 | * The VP index is initialized to vcpu_index by | |
1235 | * kvm_hv_vcpu_postcreate so they initially match. Now the | |
1236 | * VP index is changing, adjust num_mismatched_vp_indexes if | |
1237 | * it now matches or no longer matches vcpu_idx. | |
1238 | */ | |
1239 | if (hv_vcpu->vp_index == vcpu_idx) | |
1240 | atomic_inc(&hv->num_mismatched_vp_indexes); | |
1241 | else if (new_vp_index == vcpu_idx) | |
1242 | atomic_dec(&hv->num_mismatched_vp_indexes); | |
1243 | ||
1244 | hv_vcpu->vp_index = new_vp_index; | |
d3457c87 | 1245 | break; |
87ee613d | 1246 | } |
d4abc577 | 1247 | case HV_X64_MSR_VP_ASSIST_PAGE: { |
e83d5887 AS |
1248 | u64 gfn; |
1249 | unsigned long addr; | |
1250 | ||
d4abc577 | 1251 | if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) { |
1779a39f | 1252 | hv_vcpu->hv_vapic = data; |
72bbf935 | 1253 | if (kvm_lapic_enable_pv_eoi(vcpu, 0, 0)) |
e83d5887 AS |
1254 | return 1; |
1255 | break; | |
1256 | } | |
d4abc577 | 1257 | gfn = data >> HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT; |
e83d5887 AS |
1258 | addr = kvm_vcpu_gfn_to_hva(vcpu, gfn); |
1259 | if (kvm_is_error_hva(addr)) | |
1260 | return 1; | |
12e0c618 VK |
1261 | |
1262 | /* | |
67b0ae43 | 1263 | * Clear apic_assist portion of struct hv_vp_assist_page |
12e0c618 VK |
1264 | * only; there can be valuable data in the rest which needs |
1265 | * to be preserved, e.g. on migration. |
1266 | */ | |
9eb41c52 | 1267 | if (__put_user(0, (u32 __user *)addr)) |
e83d5887 | 1268 | return 1; |
1779a39f | 1269 | hv_vcpu->hv_vapic = data; |
e83d5887 AS |
1270 | kvm_vcpu_mark_page_dirty(vcpu, gfn); |
1271 | if (kvm_lapic_enable_pv_eoi(vcpu, | |
72bbf935 LP |
1272 | gfn_to_gpa(gfn) | KVM_MSR_ENABLED, |
1273 | sizeof(struct hv_vp_assist_page))) | |
e83d5887 AS |
1274 | return 1; |
1275 | break; | |
1276 | } | |
1277 | case HV_X64_MSR_EOI: | |
1278 | return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data); | |
1279 | case HV_X64_MSR_ICR: | |
1280 | return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data); | |
1281 | case HV_X64_MSR_TPR: | |
1282 | return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data); | |
9eec50b8 AS |
1283 | case HV_X64_MSR_VP_RUNTIME: |
1284 | if (!host) | |
1285 | return 1; | |
1779a39f | 1286 | hv_vcpu->runtime_offset = data - current_task_runtime_100ns(); |
9eec50b8 | 1287 | break; |
5c919412 AS |
1288 | case HV_X64_MSR_SCONTROL: |
1289 | case HV_X64_MSR_SVERSION: | |
1290 | case HV_X64_MSR_SIEFP: | |
1291 | case HV_X64_MSR_SIMP: | |
1292 | case HV_X64_MSR_EOM: | |
1293 | case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15: | |
1294 | return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host); | |
1f4b34f8 AS |
1295 | case HV_X64_MSR_STIMER0_CONFIG: |
1296 | case HV_X64_MSR_STIMER1_CONFIG: | |
1297 | case HV_X64_MSR_STIMER2_CONFIG: | |
1298 | case HV_X64_MSR_STIMER3_CONFIG: { | |
1299 | int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2; | |
1300 | ||
1301 | return stimer_set_config(vcpu_to_stimer(vcpu, timer_index), | |
1302 | data, host); | |
1303 | } | |
1304 | case HV_X64_MSR_STIMER0_COUNT: | |
1305 | case HV_X64_MSR_STIMER1_COUNT: | |
1306 | case HV_X64_MSR_STIMER2_COUNT: | |
1307 | case HV_X64_MSR_STIMER3_COUNT: { | |
1308 | int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2; | |
1309 | ||
1310 | return stimer_set_count(vcpu_to_stimer(vcpu, timer_index), | |
1311 | data, host); | |
1312 | } | |
44883f01 PB |
1313 | case HV_X64_MSR_TSC_FREQUENCY: |
1314 | case HV_X64_MSR_APIC_FREQUENCY: | |
1315 | /* read-only, but still ignore it if host-initiated */ | |
1316 | if (!host) | |
1317 | return 1; | |
1318 | break; | |
e83d5887 | 1319 | default: |
2f9f5cdd | 1320 | vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n", |
e83d5887 AS |
1321 | msr, data); |
1322 | return 1; | |
1323 | } | |
1324 | ||
1325 | return 0; | |
1326 | } | |
1327 | ||
f97f5a56 JD |
1328 | static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, |
1329 | bool host) | |
e83d5887 AS |
1330 | { |
1331 | u64 data = 0; | |
1332 | struct kvm *kvm = vcpu->kvm; | |
1333 | struct kvm_hv *hv = &kvm->arch.hyperv; | |
1334 | ||
1335 | switch (msr) { | |
1336 | case HV_X64_MSR_GUEST_OS_ID: | |
1337 | data = hv->hv_guest_os_id; | |
1338 | break; | |
1339 | case HV_X64_MSR_HYPERCALL: | |
1340 | data = hv->hv_hypercall; | |
1341 | break; | |
93bf4172 AS |
1342 | case HV_X64_MSR_TIME_REF_COUNT: |
1343 | data = get_time_ref_counter(kvm); | |
e83d5887 | 1344 | break; |
e83d5887 AS |
1345 | case HV_X64_MSR_REFERENCE_TSC: |
1346 | data = hv->hv_tsc_page; | |
1347 | break; | |
e7d9513b AS |
1348 | case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: |
1349 | return kvm_hv_msr_get_crash_data(vcpu, | |
1350 | msr - HV_X64_MSR_CRASH_P0, | |
1351 | pdata); | |
1352 | case HV_X64_MSR_CRASH_CTL: | |
1353 | return kvm_hv_msr_get_crash_ctl(vcpu, pdata); | |
e516cebb AS |
1354 | case HV_X64_MSR_RESET: |
1355 | data = 0; | |
1356 | break; | |
a2e164e7 VK |
1357 | case HV_X64_MSR_REENLIGHTENMENT_CONTROL: |
1358 | data = hv->hv_reenlightenment_control; | |
1359 | break; | |
1360 | case HV_X64_MSR_TSC_EMULATION_CONTROL: | |
1361 | data = hv->hv_tsc_emulation_control; | |
1362 | break; | |
1363 | case HV_X64_MSR_TSC_EMULATION_STATUS: | |
1364 | data = hv->hv_tsc_emulation_status; | |
1365 | break; | |
f97f5a56 JD |
1366 | case HV_X64_MSR_SYNDBG_OPTIONS: |
1367 | case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER: | |
1368 | return syndbg_get_msr(vcpu, msr, pdata, host); | |
e83d5887 AS |
1369 | default: |
1370 | vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); | |
1371 | return 1; | |
1372 | } | |
1373 | ||
1374 | *pdata = data; | |
1375 | return 0; | |
1376 | } | |
1377 | ||
44883f01 PB |
1378 | static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, |
1379 | bool host) | |
e83d5887 AS |
1380 | { |
1381 | u64 data = 0; | |
1779a39f | 1382 | struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; |
e83d5887 AS |
1383 | |
1384 | switch (msr) { | |
d3457c87 | 1385 | case HV_X64_MSR_VP_INDEX: |
1779a39f | 1386 | data = hv_vcpu->vp_index; |
e83d5887 | 1387 | break; |
e83d5887 AS |
1388 | case HV_X64_MSR_EOI: |
1389 | return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata); | |
1390 | case HV_X64_MSR_ICR: | |
1391 | return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata); | |
1392 | case HV_X64_MSR_TPR: | |
1393 | return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata); | |
d4abc577 | 1394 | case HV_X64_MSR_VP_ASSIST_PAGE: |
1779a39f | 1395 | data = hv_vcpu->hv_vapic; |
e83d5887 | 1396 | break; |
9eec50b8 | 1397 | case HV_X64_MSR_VP_RUNTIME: |
1779a39f | 1398 | data = current_task_runtime_100ns() + hv_vcpu->runtime_offset; |
9eec50b8 | 1399 | break; |
5c919412 AS |
1400 | case HV_X64_MSR_SCONTROL: |
1401 | case HV_X64_MSR_SVERSION: | |
1402 | case HV_X64_MSR_SIEFP: | |
1403 | case HV_X64_MSR_SIMP: | |
1404 | case HV_X64_MSR_EOM: | |
1405 | case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15: | |
44883f01 | 1406 | return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata, host); |
1f4b34f8 AS |
1407 | case HV_X64_MSR_STIMER0_CONFIG: |
1408 | case HV_X64_MSR_STIMER1_CONFIG: | |
1409 | case HV_X64_MSR_STIMER2_CONFIG: | |
1410 | case HV_X64_MSR_STIMER3_CONFIG: { | |
1411 | int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2; | |
1412 | ||
1413 | return stimer_get_config(vcpu_to_stimer(vcpu, timer_index), | |
1414 | pdata); | |
1415 | } | |
1416 | case HV_X64_MSR_STIMER0_COUNT: | |
1417 | case HV_X64_MSR_STIMER1_COUNT: | |
1418 | case HV_X64_MSR_STIMER2_COUNT: | |
1419 | case HV_X64_MSR_STIMER3_COUNT: { | |
1420 | int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2; | |
1421 | ||
1422 | return stimer_get_count(vcpu_to_stimer(vcpu, timer_index), | |
1423 | pdata); | |
1424 | } | |
72c139ba LP |
1425 | case HV_X64_MSR_TSC_FREQUENCY: |
1426 | data = (u64)vcpu->arch.virtual_tsc_khz * 1000; | |
1427 | break; | |
1428 | case HV_X64_MSR_APIC_FREQUENCY: | |
1429 | data = APIC_BUS_FREQUENCY; | |
1430 | break; | |
e83d5887 AS |
1431 | default: |
1432 | vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); | |
1433 | return 1; | |
1434 | } | |
1435 | *pdata = data; | |
1436 | return 0; | |
1437 | } | |
1438 | ||
e7d9513b | 1439 | int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) |
e83d5887 AS |
1440 | { |
1441 | if (kvm_hv_msr_partition_wide(msr)) { | |
1442 | int r; | |
1443 | ||
3f5ad8be | 1444 | mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock); |
e7d9513b | 1445 | r = kvm_hv_set_msr_pw(vcpu, msr, data, host); |
3f5ad8be | 1446 | mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock); |
e83d5887 AS |
1447 | return r; |
1448 | } else | |
9eec50b8 | 1449 | return kvm_hv_set_msr(vcpu, msr, data, host); |
e83d5887 AS |
1450 | } |
1451 | ||
44883f01 | 1452 | int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) |
e83d5887 AS |
1453 | { |
1454 | if (kvm_hv_msr_partition_wide(msr)) { | |
1455 | int r; | |
1456 | ||
3f5ad8be | 1457 | mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock); |
f97f5a56 | 1458 | r = kvm_hv_get_msr_pw(vcpu, msr, pdata, host); |
3f5ad8be | 1459 | mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock); |
e83d5887 AS |
1460 | return r; |
1461 | } else | |
44883f01 | 1462 | return kvm_hv_get_msr(vcpu, msr, pdata, host); |
e83d5887 AS |
1463 | } |
1464 | ||
f21dd494 VK |
1465 | static __always_inline unsigned long *sparse_set_to_vcpu_mask( |
1466 | struct kvm *kvm, u64 *sparse_banks, u64 valid_bank_mask, | |
1467 | u64 *vp_bitmap, unsigned long *vcpu_bitmap) | |
c7012676 | 1468 | { |
f21dd494 VK |
1469 | struct kvm_hv *hv = &kvm->arch.hyperv; |
1470 | struct kvm_vcpu *vcpu; | |
1471 | int i, bank, sbank = 0; | |
c7012676 | 1472 | |
f21dd494 VK |
1473 | memset(vp_bitmap, 0, |
1474 | KVM_HV_MAX_SPARSE_VCPU_SET_BITS * sizeof(*vp_bitmap)); | |
1475 | for_each_set_bit(bank, (unsigned long *)&valid_bank_mask, | |
1476 | KVM_HV_MAX_SPARSE_VCPU_SET_BITS) | |
1477 | vp_bitmap[bank] = sparse_banks[sbank++]; | |
c7012676 | 1478 | |
f21dd494 VK |
1479 | if (likely(!atomic_read(&hv->num_mismatched_vp_indexes))) { |
1480 | /* for all vcpus vp_index == vcpu_idx */ | |
1481 | return (unsigned long *)vp_bitmap; | |
1482 | } | |
2cefc5fe | 1483 | |
f21dd494 VK |
1484 | bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS); |
1485 | kvm_for_each_vcpu(i, vcpu, kvm) { | |
1486 | if (test_bit(vcpu_to_hv_vcpu(vcpu)->vp_index, | |
1487 | (unsigned long *)vp_bitmap)) | |
1488 | __set_bit(i, vcpu_bitmap); | |
1489 | } | |
1490 | return vcpu_bitmap; | |
c7012676 VK |
1491 | } |
1492 | ||
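The sparse VP set consumed above packs the guest's vCPU selection as a 64-bit valid_bank_mask plus one 64-bit bank per set mask bit, each bank covering 64 VP indexes. A small standalone expansion of a hypothetical set (banks 0 and 2 present; values invented for illustration):

/* Hypothetical sparse set: expands to VP_INDEX 0, 2 and 128. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t valid_bank_mask = (1ull << 0) | (1ull << 2);
	uint64_t sparse_banks[] = {
		0x5ull,		/* bank 0: VPs 0 and 2          */
		0x1ull,		/* bank 2: VP 2 * 64 + 0 = 128  */
	};
	int sbank = 0;

	for (int bank = 0; bank < 64; bank++) {
		if (!(valid_bank_mask & (1ull << bank)))
			continue;
		uint64_t bits = sparse_banks[sbank++];
		for (int bit = 0; bit < 64; bit++)
			if (bits & (1ull << bit))
				printf("VP_INDEX %d\n", bank * 64 + bit);
	}
	return 0;
}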
e2f11f42 | 1493 | static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa, |
c7012676 | 1494 | u16 rep_cnt, bool ex) |
e2f11f42 VK |
1495 | { |
1496 | struct kvm *kvm = current_vcpu->kvm; | |
2cefc5fe | 1497 | struct kvm_vcpu_hv *hv_vcpu = ¤t_vcpu->arch.hyperv; |
c7012676 | 1498 | struct hv_tlb_flush_ex flush_ex; |
e2f11f42 | 1499 | struct hv_tlb_flush flush; |
f21dd494 VK |
1500 | u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS]; |
1501 | DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS); | |
1502 | unsigned long *vcpu_mask; | |
2cefc5fe | 1503 | u64 valid_bank_mask; |
c7012676 | 1504 | u64 sparse_banks[64]; |
f21dd494 | 1505 | int sparse_banks_len; |
c7012676 | 1506 | bool all_cpus; |
e2f11f42 | 1507 | |
c7012676 VK |
1508 | if (!ex) { |
1509 | if (unlikely(kvm_read_guest(kvm, ingpa, &flush, sizeof(flush)))) | |
1510 | return HV_STATUS_INVALID_HYPERCALL_INPUT; | |
e2f11f42 | 1511 | |
c7012676 VK |
1512 | trace_kvm_hv_flush_tlb(flush.processor_mask, |
1513 | flush.address_space, flush.flags); | |
1514 | ||
2cefc5fe | 1515 | valid_bank_mask = BIT_ULL(0); |
c7012676 | 1516 | sparse_banks[0] = flush.processor_mask; |
da66761c VK |
1517 | |
1518 | /* | |
1519 | * Work around possible WS2012 bug: it sends hypercalls | |
1520 | * with processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear, | |
1521 | * while also expecting us to flush something and crashing if | |
1522 | * we don't. Let's treat processor_mask == 0 the same as | |
1523 | * HV_FLUSH_ALL_PROCESSORS. | |
1524 | */ | |
1525 | all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) || | |
1526 | flush.processor_mask == 0; | |
c7012676 VK |
1527 | } else { |
1528 | if (unlikely(kvm_read_guest(kvm, ingpa, &flush_ex, | |
1529 | sizeof(flush_ex)))) | |
1530 | return HV_STATUS_INVALID_HYPERCALL_INPUT; | |
1531 | ||
1532 | trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask, | |
1533 | flush_ex.hv_vp_set.format, | |
1534 | flush_ex.address_space, | |
1535 | flush_ex.flags); | |
1536 | ||
1537 | valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask; | |
1538 | all_cpus = flush_ex.hv_vp_set.format != | |
1539 | HV_GENERIC_SET_SPARSE_4K; | |
1540 | ||
0b0a31ba VK |
1541 | sparse_banks_len = |
1542 | bitmap_weight((unsigned long *)&valid_bank_mask, 64) * | |
c7012676 VK |
1543 | sizeof(sparse_banks[0]); |
1544 | ||
1545 | if (!sparse_banks_len && !all_cpus) | |
1546 | goto ret_success; | |
1547 | ||
1548 | if (!all_cpus && | |
1549 | kvm_read_guest(kvm, | |
1550 | ingpa + offsetof(struct hv_tlb_flush_ex, | |
1551 | hv_vp_set.bank_contents), | |
1552 | sparse_banks, | |
1553 | sparse_banks_len)) | |
1554 | return HV_STATUS_INVALID_HYPERCALL_INPUT; | |
1555 | } | |
e2f11f42 | 1556 | |
e6b6c483 | 1557 | cpumask_clear(&hv_vcpu->tlb_flush); |
e2f11f42 | 1558 | |
f21dd494 VK |
1559 | vcpu_mask = all_cpus ? NULL : |
1560 | sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, | |
1561 | vp_bitmap, vcpu_bitmap); | |
e2f11f42 | 1562 | |
2cefc5fe | 1563 | /* |
f21dd494 VK |
1564 | * vcpu->arch.cr3 may not be up-to-date for running vCPUs, so we can't |
1565 | * analyze it here; flush the TLB regardless of the specified address space. | |
2cefc5fe | 1566 | */ |
0baedd79 | 1567 | kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, |
54163a34 | 1568 | NULL, vcpu_mask, &hv_vcpu->tlb_flush); |
e2f11f42 | 1569 | |
c7012676 | 1570 | ret_success: |
e2f11f42 VK |
1571 | /* We always do a full TLB flush, so set rep_done = rep_cnt. */ |
1572 | return (u64)HV_STATUS_SUCCESS | | |
1573 | ((u64)rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET); | |
1574 | } | |
1575 | ||
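/*
 * Deliver a fixed-delivery-mode interrupt with the given vector to every
 * vCPU set in @vcpu_bitmap, or to all vCPUs when @vcpu_bitmap is NULL.
 */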
f21dd494 VK |
1576 | static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector, |
1577 | unsigned long *vcpu_bitmap) | |
1578 | { | |
1579 | struct kvm_lapic_irq irq = { | |
1580 | .delivery_mode = APIC_DM_FIXED, | |
1581 | .vector = vector | |
1582 | }; | |
1583 | struct kvm_vcpu *vcpu; | |
1584 | int i; | |
1585 | ||
1586 | kvm_for_each_vcpu(i, vcpu, kvm) { | |
1587 | if (vcpu_bitmap && !test_bit(i, vcpu_bitmap)) | |
1588 | continue; | |
1589 | ||
1590 | /* We fail only when the APIC is disabled */ | |
1591 | kvm_apic_set_irq(vcpu, &irq, NULL); | |
1592 | } | |
1593 | } | |
1594 | ||
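/*
 * Handler for HVCALL_SEND_IPI{,_EX}: decode the target VP set (from the
 * fast-call registers or from guest memory), validate the vector and
 * inject the IPI into the matching vCPUs.
 */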
214ff83d VK |
1595 | static u64 kvm_hv_send_ipi(struct kvm_vcpu *current_vcpu, u64 ingpa, u64 outgpa, |
1596 | bool ex, bool fast) | |
1597 | { | |
1598 | struct kvm *kvm = current_vcpu->kvm; | |
214ff83d VK |
1599 | struct hv_send_ipi_ex send_ipi_ex; |
1600 | struct hv_send_ipi send_ipi; | |
f21dd494 VK |
1601 | u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS]; |
1602 | DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS); | |
1603 | unsigned long *vcpu_mask; | |
214ff83d VK |
1604 | unsigned long valid_bank_mask; |
1605 | u64 sparse_banks[64]; | |
f21dd494 VK |
1606 | int sparse_banks_len; |
1607 | u32 vector; | |
214ff83d VK |
1608 | bool all_cpus; |
1609 | ||
1610 | if (!ex) { | |
1611 | if (!fast) { | |
1612 | if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi, | |
1613 | sizeof(send_ipi)))) | |
1614 | return HV_STATUS_INVALID_HYPERCALL_INPUT; | |
1615 | sparse_banks[0] = send_ipi.cpu_mask; | |
f21dd494 | 1616 | vector = send_ipi.vector; |
214ff83d VK |
1617 | } else { |
1618 | /* 'reserved' part of hv_send_ipi should be 0 */ | |
1619 | if (unlikely(ingpa >> 32 != 0)) | |
1620 | return HV_STATUS_INVALID_HYPERCALL_INPUT; | |
1621 | sparse_banks[0] = outgpa; | |
f21dd494 | 1622 | vector = (u32)ingpa; |
214ff83d VK |
1623 | } |
1624 | all_cpus = false; | |
1625 | valid_bank_mask = BIT_ULL(0); | |
1626 | ||
f21dd494 | 1627 | trace_kvm_hv_send_ipi(vector, sparse_banks[0]); |
214ff83d VK |
1628 | } else { |
1629 | if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi_ex, | |
1630 | sizeof(send_ipi_ex)))) | |
1631 | return HV_STATUS_INVALID_HYPERCALL_INPUT; | |
1632 | ||
1633 | trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector, | |
1634 | send_ipi_ex.vp_set.format, | |
1635 | send_ipi_ex.vp_set.valid_bank_mask); | |
1636 | ||
f21dd494 | 1637 | vector = send_ipi_ex.vector; |
214ff83d VK |
1638 | valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask; |
1639 | sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) * | |
1640 | sizeof(sparse_banks[0]); | |
1641 | ||
1642 | all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL; | |
1643 | ||
1644 | if (!sparse_banks_len) | |
1645 | goto ret_success; | |
1646 | ||
1647 | if (!all_cpus && | |
1648 | kvm_read_guest(kvm, | |
1649 | ingpa + offsetof(struct hv_send_ipi_ex, | |
1650 | vp_set.bank_contents), | |
1651 | sparse_banks, | |
1652 | sparse_banks_len)) | |
1653 | return HV_STATUS_INVALID_HYPERCALL_INPUT; | |
1654 | } | |
1655 | ||
f21dd494 | 1656 | if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR)) |
214ff83d VK |
1657 | return HV_STATUS_INVALID_HYPERCALL_INPUT; |
1658 | ||
f21dd494 VK |
1659 | vcpu_mask = all_cpus ? NULL : |
1660 | sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, | |
1661 | vp_bitmap, vcpu_bitmap); | |
214ff83d | 1662 | |
f21dd494 | 1663 | kvm_send_ipi_to_many(kvm, vector, vcpu_mask); |
214ff83d VK |
1664 | |
1665 | ret_success: | |
1666 | return HV_STATUS_SUCCESS; | |
1667 | } | |
1668 | ||
e83d5887 AS |
1669 | bool kvm_hv_hypercall_enabled(struct kvm *kvm) |
1670 | { | |
45c38973 | 1671 | return READ_ONCE(kvm->arch.hyperv.hv_guest_os_id) != 0; |
e83d5887 AS |
1672 | } |
1673 | ||
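/*
 * Return the hypercall status to the guest: in RAX for 64-bit callers,
 * split across EDX:EAX for 32-bit callers.
 */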
83326e43 AS |
1674 | static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result) |
1675 | { | |
1676 | bool longmode; | |
1677 | ||
1678 | longmode = is_64_bit_mode(vcpu); | |
1679 | if (longmode) | |
de3cd117 | 1680 | kvm_rax_write(vcpu, result); |
83326e43 | 1681 | else { |
de3cd117 SC |
1682 | kvm_rdx_write(vcpu, result >> 32); |
1683 | kvm_rax_write(vcpu, result & 0xffffffff); | |
83326e43 AS |
1684 | } |
1685 | } | |
1686 | ||
696ca779 | 1687 | static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result) |
83326e43 | 1688 | { |
696ca779 RK |
1689 | kvm_hv_hypercall_set_result(vcpu, result); |
1690 | ++vcpu->stat.hypercalls; | |
6356ee0c | 1691 | return kvm_skip_emulated_instruction(vcpu); |
83326e43 AS |
1692 | } |
1693 | ||
696ca779 RK |
1694 | static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu) |
1695 | { | |
1696 | return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result); | |
1697 | } | |
1698 | ||
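/*
 * Handler for HVCALL_SIGNAL_EVENT: fetch the connection id (taken directly
 * from the parameter for fast calls, read from guest memory otherwise),
 * look up the eventfd userspace registered for it and signal it.
 */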
faeb7833 RK |
1699 | static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param) |
1700 | { | |
1701 | struct eventfd_ctx *eventfd; | |
1702 | ||
1703 | if (unlikely(!fast)) { | |
1704 | int ret; | |
1705 | gpa_t gpa = param; | |
1706 | ||
1707 | if ((gpa & (__alignof__(param) - 1)) || | |
1708 | offset_in_page(gpa) + sizeof(param) > PAGE_SIZE) | |
1709 | return HV_STATUS_INVALID_ALIGNMENT; | |
1710 | ||
1711 | ret = kvm_vcpu_read_guest(vcpu, gpa, ¶m, sizeof(param)); | |
1712 | if (ret < 0) | |
1713 | return HV_STATUS_INVALID_ALIGNMENT; | |
1714 | } | |
1715 | ||
1716 | /* | |
1717 | * Per spec, bits 32-47 contain the extra "flag number". However, we | |
1718 | * have no use for it, and in all known use cases it is zero, so just | |
1719 | * report lookup failure if it isn't. | |
1720 | */ | |
1721 | if (param & 0xffff00000000ULL) | |
1722 | return HV_STATUS_INVALID_PORT_ID; | |
1723 | /* remaining bits are reserved-zero */ | |
1724 | if (param & ~KVM_HYPERV_CONN_ID_MASK) | |
1725 | return HV_STATUS_INVALID_HYPERCALL_INPUT; | |
1726 | ||
452a68d0 PB |
1727 | /* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */ |
1728 | rcu_read_lock(); | |
faeb7833 | 1729 | eventfd = idr_find(&vcpu->kvm->arch.hyperv.conn_to_evt, param); |
452a68d0 | 1730 | rcu_read_unlock(); |
faeb7833 RK |
1731 | if (!eventfd) |
1732 | return HV_STATUS_INVALID_PORT_ID; | |
1733 | ||
1734 | eventfd_signal(eventfd, 1); | |
1735 | return HV_STATUS_SUCCESS; | |
1736 | } | |
1737 | ||
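/*
 * Top-level hypercall dispatcher.  The input value and the in/out GPAs come
 * from RCX/RDX/R8 in 64-bit mode and from EDX:EAX/EBX:ECX/EDI:ESI otherwise;
 * calls that need userspace assistance exit with KVM_EXIT_HYPERV.
 */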
e83d5887 AS |
1738 | int kvm_hv_hypercall(struct kvm_vcpu *vcpu) |
1739 | { | |
d32ef547 DC |
1740 | u64 param, ingpa, outgpa, ret = HV_STATUS_SUCCESS; |
1741 | uint16_t code, rep_idx, rep_cnt; | |
f4e4805e | 1742 | bool fast, rep; |
e83d5887 AS |
1743 | |
1744 | /* | |
1745 | * a hypercall generates UD from non-zero cpl and real mode | |
1746 | * per the HYPER-V spec | |
1747 | */ | |
afaf0b2f | 1748 | if (kvm_x86_ops.get_cpl(vcpu) != 0 || !is_protmode(vcpu)) { |
e83d5887 | 1749 | kvm_queue_exception(vcpu, UD_VECTOR); |
0d9c055e | 1750 | return 1; |
e83d5887 AS |
1751 | } |
1752 | ||
f4e4805e AB |
1753 | #ifdef CONFIG_X86_64 |
1754 | if (is_64_bit_mode(vcpu)) { | |
1755 | param = kvm_rcx_read(vcpu); | |
1756 | ingpa = kvm_rdx_read(vcpu); | |
1757 | outgpa = kvm_r8_read(vcpu); | |
1758 | } else | |
1759 | #endif | |
1760 | { | |
de3cd117 SC |
1761 | param = ((u64)kvm_rdx_read(vcpu) << 32) | |
1762 | (kvm_rax_read(vcpu) & 0xffffffff); | |
1763 | ingpa = ((u64)kvm_rbx_read(vcpu) << 32) | | |
1764 | (kvm_rcx_read(vcpu) & 0xffffffff); | |
1765 | outgpa = ((u64)kvm_rdi_read(vcpu) << 32) | | |
1766 | (kvm_rsi_read(vcpu) & 0xffffffff); | |
e83d5887 | 1767 | } |
e83d5887 AS |
1768 | |
1769 | code = param & 0xffff; | |
142c95da VK |
1770 | fast = !!(param & HV_HYPERCALL_FAST_BIT); |
1771 | rep_cnt = (param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff; | |
1772 | rep_idx = (param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff; | |
56b9ae78 | 1773 | rep = !!(rep_cnt || rep_idx); |
e83d5887 AS |
1774 | |
1775 | trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa); | |
1776 | ||
1777 | switch (code) { | |
8ed6d767 | 1778 | case HVCALL_NOTIFY_LONG_SPIN_WAIT: |
56b9ae78 VK |
1779 | if (unlikely(rep)) { |
1780 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; | |
1781 | break; | |
1782 | } | |
de63ad4c | 1783 | kvm_vcpu_on_spin(vcpu, true); |
e83d5887 | 1784 | break; |
83326e43 | 1785 | case HVCALL_SIGNAL_EVENT: |
56b9ae78 VK |
1786 | if (unlikely(rep)) { |
1787 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; | |
1788 | break; | |
1789 | } | |
d32ef547 DC |
1790 | ret = kvm_hvcall_signal_event(vcpu, fast, ingpa); |
1791 | if (ret != HV_STATUS_INVALID_PORT_ID) | |
faeb7833 | 1792 | break; |
df561f66 | 1793 | fallthrough; /* maybe userspace knows this conn_id */ |
faeb7833 | 1794 | case HVCALL_POST_MESSAGE: |
a2b5c3c0 | 1795 | /* don't bother userspace if it has no way to handle it */ |
56b9ae78 VK |
1796 | if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) { |
1797 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; | |
a2b5c3c0 PB |
1798 | break; |
1799 | } | |
83326e43 AS |
1800 | vcpu->run->exit_reason = KVM_EXIT_HYPERV; |
1801 | vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL; | |
1802 | vcpu->run->hyperv.u.hcall.input = param; | |
1803 | vcpu->run->hyperv.u.hcall.params[0] = ingpa; | |
1804 | vcpu->run->hyperv.u.hcall.params[1] = outgpa; | |
1805 | vcpu->arch.complete_userspace_io = | |
1806 | kvm_hv_hypercall_complete_userspace; | |
1807 | return 0; | |
e2f11f42 VK |
1808 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST: |
1809 | if (unlikely(fast || !rep_cnt || rep_idx)) { | |
1810 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; | |
1811 | break; | |
1812 | } | |
c7012676 | 1813 | ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false); |
e2f11f42 VK |
1814 | break; |
1815 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE: | |
1816 | if (unlikely(fast || rep)) { | |
1817 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; | |
1818 | break; | |
1819 | } | |
c7012676 VK |
1820 | ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false); |
1821 | break; | |
1822 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX: | |
1823 | if (unlikely(fast || !rep_cnt || rep_idx)) { | |
1824 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; | |
1825 | break; | |
1826 | } | |
1827 | ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true); | |
1828 | break; | |
1829 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX: | |
1830 | if (unlikely(fast || rep)) { | |
1831 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; | |
1832 | break; | |
1833 | } | |
1834 | ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true); | |
e2f11f42 | 1835 | break; |
214ff83d VK |
1836 | case HVCALL_SEND_IPI: |
1837 | if (unlikely(rep)) { | |
1838 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; | |
1839 | break; | |
1840 | } | |
1841 | ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, false, fast); | |
1842 | break; | |
1843 | case HVCALL_SEND_IPI_EX: | |
1844 | if (unlikely(fast || rep)) { | |
1845 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; | |
1846 | break; | |
1847 | } | |
1848 | ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, true, false); | |
1849 | break; | |
b187038b JD |
1850 | case HVCALL_POST_DEBUG_DATA: |
1851 | case HVCALL_RETRIEVE_DEBUG_DATA: | |
1852 | if (unlikely(fast)) { | |
1853 | ret = HV_STATUS_INVALID_PARAMETER; | |
1854 | break; | |
1855 | } | |
1856 | fallthrough; | |
1857 | case HVCALL_RESET_DEBUG_SESSION: { | |
1858 | struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu); | |
1859 | ||
1860 | if (!kvm_hv_is_syndbg_enabled(vcpu)) { | |
1861 | ret = HV_STATUS_INVALID_HYPERCALL_CODE; | |
1862 | break; | |
1863 | } | |
1864 | ||
1865 | if (!(syndbg->options & HV_X64_SYNDBG_OPTION_USE_HCALLS)) { | |
1866 | ret = HV_STATUS_OPERATION_DENIED; | |
1867 | break; | |
1868 | } | |
1869 | vcpu->run->exit_reason = KVM_EXIT_HYPERV; | |
1870 | vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL; | |
1871 | vcpu->run->hyperv.u.hcall.input = param; | |
1872 | vcpu->run->hyperv.u.hcall.params[0] = ingpa; | |
1873 | vcpu->run->hyperv.u.hcall.params[1] = outgpa; | |
1874 | vcpu->arch.complete_userspace_io = | |
1875 | kvm_hv_hypercall_complete_userspace; | |
1876 | return 0; | |
1877 | } | |
e83d5887 | 1878 | default: |
d32ef547 | 1879 | ret = HV_STATUS_INVALID_HYPERCALL_CODE; |
e83d5887 AS |
1880 | break; |
1881 | } | |
1882 | ||
696ca779 | 1883 | return kvm_hv_hypercall_complete(vcpu, ret); |
e83d5887 | 1884 | } |
cbc0236a RK |
1885 | |
1886 | void kvm_hv_init_vm(struct kvm *kvm) | |
1887 | { | |
1888 | mutex_init(&kvm->arch.hyperv.hv_lock); | |
faeb7833 | 1889 | idr_init(&kvm->arch.hyperv.conn_to_evt); |
cbc0236a RK |
1890 | } |
1891 | ||
1892 | void kvm_hv_destroy_vm(struct kvm *kvm) | |
1893 | { | |
faeb7833 RK |
1894 | struct eventfd_ctx *eventfd; |
1895 | int i; | |
1896 | ||
1897 | idr_for_each_entry(&kvm->arch.hyperv.conn_to_evt, eventfd, i) | |
1898 | eventfd_ctx_put(eventfd); | |
1899 | idr_destroy(&kvm->arch.hyperv.conn_to_evt); | |
1900 | } | |
1901 | ||
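/*
 * Associate an eventfd with a Hyper-V connection id.  Returns -EEXIST
 * (remapped from idr_alloc()'s -ENOSPC) when the connection id is already
 * in use.
 */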
1902 | static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd) | |
1903 | { | |
1904 | struct kvm_hv *hv = &kvm->arch.hyperv; | |
1905 | struct eventfd_ctx *eventfd; | |
1906 | int ret; | |
1907 | ||
1908 | eventfd = eventfd_ctx_fdget(fd); | |
1909 | if (IS_ERR(eventfd)) | |
1910 | return PTR_ERR(eventfd); | |
1911 | ||
1912 | mutex_lock(&hv->hv_lock); | |
1913 | ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1, | |
254272ce | 1914 | GFP_KERNEL_ACCOUNT); |
faeb7833 RK |
1915 | mutex_unlock(&hv->hv_lock); |
1916 | ||
1917 | if (ret >= 0) | |
1918 | return 0; | |
1919 | ||
1920 | if (ret == -ENOSPC) | |
1921 | ret = -EEXIST; | |
1922 | eventfd_ctx_put(eventfd); | |
1923 | return ret; | |
1924 | } | |
1925 | ||
1926 | static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id) | |
1927 | { | |
1928 | struct kvm_hv *hv = &kvm->arch.hyperv; | |
1929 | struct eventfd_ctx *eventfd; | |
1930 | ||
1931 | mutex_lock(&hv->hv_lock); | |
1932 | eventfd = idr_remove(&hv->conn_to_evt, conn_id); | |
1933 | mutex_unlock(&hv->hv_lock); | |
1934 | ||
1935 | if (!eventfd) | |
1936 | return -ENOENT; | |
1937 | ||
1938 | synchronize_srcu(&kvm->srcu); | |
1939 | eventfd_ctx_put(eventfd); | |
1940 | return 0; | |
1941 | } | |
1942 | ||
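/*
 * KVM_HYPERV_EVENTFD ioctl: (de)assign an eventfd for a guest-visible
 * connection id.  A minimal userspace sketch (variable names are
 * illustrative assumptions, not part of this file):
 *
 *	struct kvm_hyperv_eventfd hvevfd = {
 *		.conn_id = 1,
 *		.fd      = eventfd(0, EFD_CLOEXEC),
 *	};
 *	ioctl(vm_fd, KVM_HYPERV_EVENTFD, &hvevfd);
 */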
1943 | int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args) | |
1944 | { | |
1945 | if ((args->flags & ~KVM_HYPERV_EVENTFD_DEASSIGN) || | |
1946 | (args->conn_id & ~KVM_HYPERV_CONN_ID_MASK)) | |
1947 | return -EINVAL; | |
1948 | ||
1949 | if (args->flags == KVM_HYPERV_EVENTFD_DEASSIGN) | |
1950 | return kvm_hv_eventfd_deassign(kvm, args->conn_id); | |
1951 | return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd); | |
cbc0236a | 1952 | } |
2bc39970 | 1953 | |
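/*
 * Fill the synthetic Hyper-V CPUID leaves reported to userspace via
 * KVM_GET_SUPPORTED_HV_CPUID.  @vcpu may be NULL for a system-wide query,
 * in which case vCPU-dependent features (e.g. direct-mode stimers, which
 * need an in-kernel LAPIC) are reported as available.
 */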
c21d54f0 VK |
1954 | int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, |
1955 | struct kvm_cpuid_entry2 __user *entries) | |
2bc39970 | 1956 | { |
ea152987 | 1957 | uint16_t evmcs_ver = 0; |
2bc39970 VK |
1958 | struct kvm_cpuid_entry2 cpuid_entries[] = { |
1959 | { .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS }, | |
1960 | { .function = HYPERV_CPUID_INTERFACE }, | |
1961 | { .function = HYPERV_CPUID_VERSION }, | |
1962 | { .function = HYPERV_CPUID_FEATURES }, | |
1963 | { .function = HYPERV_CPUID_ENLIGHTMENT_INFO }, | |
1964 | { .function = HYPERV_CPUID_IMPLEMENT_LIMITS }, | |
f97f5a56 JD |
1965 | { .function = HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS }, |
1966 | { .function = HYPERV_CPUID_SYNDBG_INTERFACE }, | |
1967 | { .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES }, | |
2bc39970 VK |
1968 | { .function = HYPERV_CPUID_NESTED_FEATURES }, |
1969 | }; | |
1970 | int i, nent = ARRAY_SIZE(cpuid_entries); | |
1971 | ||
33b22172 PB |
1972 | if (kvm_x86_ops.nested_ops->get_evmcs_version) |
1973 | evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu); | |
ea152987 | 1974 | |
2bc39970 VK |
1975 | /* Skip NESTED_FEATURES if eVMCS is not supported */ |
1976 | if (!evmcs_ver) | |
1977 | --nent; | |
1978 | ||
1979 | if (cpuid->nent < nent) | |
1980 | return -E2BIG; | |
1981 | ||
1982 | if (cpuid->nent > nent) | |
1983 | cpuid->nent = nent; | |
1984 | ||
1985 | for (i = 0; i < nent; i++) { | |
1986 | struct kvm_cpuid_entry2 *ent = &cpuid_entries[i]; | |
1987 | u32 signature[3]; | |
1988 | ||
1989 | switch (ent->function) { | |
1990 | case HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS: | |
1991 | memcpy(signature, "Linux KVM Hv", 12); | |
1992 | ||
f97f5a56 | 1993 | ent->eax = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES; |
2bc39970 VK |
1994 | ent->ebx = signature[0]; |
1995 | ent->ecx = signature[1]; | |
1996 | ent->edx = signature[2]; | |
1997 | break; | |
1998 | ||
1999 | case HYPERV_CPUID_INTERFACE: | |
2000 | memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12); | |
2001 | ent->eax = signature[0]; | |
2002 | break; | |
2003 | ||
2004 | case HYPERV_CPUID_VERSION: | |
2005 | /* | |
2006 | * We implement some Hyper-V 2016 functions, so let's use | |
2007 | * this version. | |
2008 | */ | |
2009 | ent->eax = 0x00003839; | |
2010 | ent->ebx = 0x000A0000; | |
2011 | break; | |
2012 | ||
2013 | case HYPERV_CPUID_FEATURES: | |
dfc53baa | 2014 | ent->eax |= HV_MSR_VP_RUNTIME_AVAILABLE; |
2bc39970 | 2015 | ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE; |
dfc53baa | 2016 | ent->eax |= HV_MSR_SYNIC_AVAILABLE; |
2bc39970 | 2017 | ent->eax |= HV_MSR_SYNTIMER_AVAILABLE; |
dfc53baa JS |
2018 | ent->eax |= HV_MSR_APIC_ACCESS_AVAILABLE; |
2019 | ent->eax |= HV_MSR_HYPERCALL_AVAILABLE; | |
2020 | ent->eax |= HV_MSR_VP_INDEX_AVAILABLE; | |
2021 | ent->eax |= HV_MSR_RESET_AVAILABLE; | |
2bc39970 | 2022 | ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE; |
dfc53baa JS |
2023 | ent->eax |= HV_ACCESS_FREQUENCY_MSRS; |
2024 | ent->eax |= HV_ACCESS_REENLIGHTENMENT; | |
2bc39970 | 2025 | |
dfc53baa JS |
2026 | ent->ebx |= HV_POST_MESSAGES; |
2027 | ent->ebx |= HV_SIGNAL_EVENTS; | |
2bc39970 VK |
2028 | |
2029 | ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE; | |
2030 | ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE; | |
a073d7e3 | 2031 | |
039aeb9d | 2032 | ent->ebx |= HV_DEBUGGING; |
f97f5a56 JD |
2033 | ent->edx |= HV_X64_GUEST_DEBUGGING_AVAILABLE; |
2034 | ent->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE; | |
2035 | ||
a073d7e3 WL |
2036 | /* |
2037 | * Direct Synthetic timers only make sense with an in-kernel |
2038 | * LAPIC. |
2039 | */ | |
c21d54f0 | 2040 | if (!vcpu || lapic_in_kernel(vcpu)) |
a073d7e3 | 2041 | ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE; |
2bc39970 VK |
2042 | |
2043 | break; | |
2044 | ||
2045 | case HYPERV_CPUID_ENLIGHTMENT_INFO: | |
2046 | ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED; | |
2047 | ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED; | |
2bc39970 VK |
2048 | ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED; |
2049 | ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED; | |
2050 | ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED; | |
f1adceaf VK |
2051 | if (evmcs_ver) |
2052 | ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED; | |
b2d8b167 VK |
2053 | if (!cpu_smt_possible()) |
2054 | ent->eax |= HV_X64_NO_NONARCH_CORESHARING; | |
2bc39970 VK |
2055 | /* |
2056 | * Default number of spinlock retry attempts, matches | |
2057 | * Hyper-V 2016. | |
2058 | */ | |
2059 | ent->ebx = 0x00000FFF; | |
2060 | ||
2061 | break; | |
2062 | ||
2063 | case HYPERV_CPUID_IMPLEMENT_LIMITS: | |
2064 | /* Maximum number of virtual processors */ | |
2065 | ent->eax = KVM_MAX_VCPUS; | |
2066 | /* | |
2067 | * Maximum number of logical processors, matches | |
2068 | * Hyper-V 2016. | |
2069 | */ | |
2070 | ent->ebx = 64; | |
2071 | ||
2072 | break; | |
2073 | ||
2074 | case HYPERV_CPUID_NESTED_FEATURES: | |
2075 | ent->eax = evmcs_ver; | |
2076 | ||
2077 | break; | |
2078 | ||
f97f5a56 JD |
2079 | case HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS: |
2080 | memcpy(signature, "Linux KVM Hv", 12); | |
2081 | ||
2082 | ent->eax = 0; | |
2083 | ent->ebx = signature[0]; | |
2084 | ent->ecx = signature[1]; | |
2085 | ent->edx = signature[2]; | |
2086 | break; | |
2087 | ||
2088 | case HYPERV_CPUID_SYNDBG_INTERFACE: | |
2089 | memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12); | |
2090 | ent->eax = signature[0]; | |
2091 | break; | |
2092 | ||
2093 | case HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES: | |
2094 | ent->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING; | |
2095 | break; | |
2096 | ||
2bc39970 VK |
2097 | default: |
2098 | break; | |
2099 | } | |
2100 | } | |
2101 | ||
2102 | if (copy_to_user(entries, cpuid_entries, | |
2103 | nent * sizeof(struct kvm_cpuid_entry2))) | |
2104 | return -EFAULT; | |
2105 | ||
2106 | return 0; | |
2107 | } |