/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
	kvmclock_vsyscall = 0;
	return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
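
/*
 * Note: all three knobs above are early_params, so each paravirt feature
 * can be disabled from the guest kernel command line, e.g. booting with
 * "no-steal-acc" turns off steal-time accounting.
 */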

static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

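/*
 * Async page fault bookkeeping: when the host has to page in guest memory,
 * it can report "page not present" together with a token rather than
 * stalling the whole vCPU. The faulting task parks itself in one of the
 * hash buckets below until the host reports "page ready" for that token
 * (see kvm_async_pf_task_wait()/kvm_async_pf_task_wake()).
 */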
struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

/*
 * @interrupt_kernel: Is this called from a routine which interrupts the kernel
 *		      (other than user space)?
 */
void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DECLARE_SWAITQUEUE(wait);

	rcu_irq_enter();

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		kfree(e);
		raw_spin_unlock(&b->lock);

		rcu_irq_exit();
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) ||
		   (IS_ENABLED(CONFIG_PREEMPT_COUNT)
		    ? preempt_count() > 1 || rcu_preempt_depth()
		    : interrupt_kernel);
	init_swait_queue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	raw_spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		rcu_irq_exit();

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			native_safe_halt();
			local_irq_disable();
		}

		rcu_irq_enter();
	}
	if (!n.halted)
		finish_swait(&n.wq, &wait);

	rcu_irq_exit();
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (swq_has_sleeper(&n->wq))
		swake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}

void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add a dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while other cpu
			 * handles async PF.
			 */
			raw_spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_swait_queue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	raw_spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
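
/*
 * Putting it together: a PAGE_NOT_PRESENT fault parks the task in
 * kvm_async_pf_task_wait(); when the host has paged the memory back in,
 * it raises PAGE_READY with the same token and kvm_async_pf_task_wake()
 * wakes the sleeper. If the wake-up arrives first, a dummy node is left
 * in the bucket so that the subsequent wait returns immediately;
 * token == ~0 broadcasts a wake-up to every sleeper on this cpu.
 */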

u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		reason = __this_cpu_read(apf_reason.reason);
		__this_cpu_write(apf_reason.reason, 0);
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);

dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	enum ctx_state prev_state;

	switch (kvm_read_and_reset_pf_reason()) {
	default:
		do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		prev_state = exception_enter();
		kvm_async_pf_task_wait((u32)read_cr2(), !user_mode(regs));
		exception_exit(prev_state);
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}
NOKPROBE_SYMBOL(do_async_page_fault);

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

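/*
 * MSR_KVM_STEAL_TIME expects the guest-physical address of a 64-byte
 * aligned struct kvm_steal_time, with bit 0 (KVM_MSR_ENABLED) set to
 * turn the reporting on; the hypervisor then updates the struct with
 * the time this vCPU spent scheduled out.
 */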
static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/*
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}

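/*
 * MSR_KVM_ASYNC_PF_EN (written below) takes the guest-physical address of
 * the per-cpu apf_reason area; bit 0 (KVM_ASYNC_PF_ENABLED) enables the
 * feature, bit 1 (KVM_ASYNC_PF_SEND_ALWAYS) asks for async PF even in
 * kernel mode, and bit 2 (KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT) is only
 * understood by newer hypervisors, hence the wrmsr_safe() probe with a
 * plain wrmsrl() fallback.
 */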
static void kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		pa |= KVM_ASYNC_PF_ENABLED;

		/* Async page fault support for L1 hypervisor is optional */
		if (wrmsr_safe(MSR_KVM_ASYNC_PF_EN,
			(pa | KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT) & 0xffffffff, pa >> 32) < 0)
			wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
		__this_cpu_write(apf_reason.enabled, 1);
		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
	 * New kernel can re-enable when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

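/*
 * The steal-time record is versioned like a seqcount: the hypervisor
 * increments ->version before and after updating the struct, so an odd
 * value means an update is in flight and a changed value means we raced
 * with one; retry in either case.
 */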
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		virt_rmb();
		steal = src->steal;
		virt_rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{
	early_set_memory_decrypted((unsigned long) ptr, size);
}

/*
 * Iterate through all possible CPUs and map the memory regions pointed
 * to by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
 *
 * Note: we iterate through all possible CPUs to ensure that CPUs
 * hotplugged will have their per-cpu variable already mapped as
 * decrypted.
 */
static void __init sev_map_percpu_data(void)
{
	int cpu;

	if (!sev_active())
		return;

	for_each_possible_cpu(cpu) {
		__set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
		__set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
		__set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
	}
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
	/*
	 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
	 * shares the guest physical address with the hypervisor.
	 */
	sev_map_percpu_data();

	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static void kvm_guest_cpu_offline(void)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int kvm_cpu_online(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_init();
	local_irq_enable();
	return 0;
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_offline();
	local_irq_enable();
	return 0;
}
#endif

static void __init kvm_apf_trap_init(void)
{
	update_intr_gate(X86_TRAP_PF, async_page_fault);
}

static void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvmclock_vsyscall)
		kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
		pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
	sev_map_percpu_data();
	kvm_guest_cpu_init();
#endif

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}

static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

const __initconst struct hypervisor_x86 x86_hyper_kvm = {
	.name			= "KVM",
	.detect			= kvm_detect,
	.type			= X86_HYPER_KVM,
	.init.guest_late_init	= kvm_guest_init,
	.init.x2apic_available	= kvm_para_available,
};

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>

static void kvm_wait(u8 *ptr, u8 val)
{
	unsigned long flags;

	if (in_nmi())
		return;

	local_irq_save(flags);

	if (READ_ONCE(*ptr) != val)
		goto out;

	/*
	 * Halt until it's our turn and we are kicked. Note that we do a safe
	 * halt in the irq-enabled case to avoid hanging if the lock info is
	 * overwritten in the irq spinlock slowpath and no spurious interrupt
	 * occurs to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	local_irq_restore(flags);
}

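/*
 * The host sets steal_time.preempted for a vCPU it has scheduled out, so
 * code such as the paravirt spinlock slowpath can avoid busy-waiting on
 * a lock holder that is not currently running.
 */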
#ifdef CONFIG_X86_32
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!src->preempted;
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimized version for x86-64 that avoids saving and restoring
 * eight 64-bit registers to/from the stack.
 */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq	__per_cpu_offset(,%rdi,8), %rax;"
"cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne	%al;"
"ret;"
".popsection");

#endif

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	if (!kvm_para_available())
		return;
	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = kvm_wait;
	pv_lock_ops.kick = kvm_kick_cpu;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		pv_lock_ops.vcpu_is_preempted =
			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
	}
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */