/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
	kvmclock_vsyscall = 0;
	return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);

static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

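/*
 * Async page fault rendezvous: the host tags each "page not present"
 * notification with a token and later signals "page ready" with the
 * same token.  Sleeping tasks are tracked in a small hash table keyed
 * by that token, so the wakeup can find the matching waiter (or leave
 * a dummy entry behind if the wakeup arrives first).
 */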
#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

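/*
 * Called from the async #PF handler, with interrupts disabled, when the
 * host reports the faulting page has been swapped out.  Sleeps until
 * kvm_async_pf_task_wake() unhashes the node; contexts that must not
 * schedule (the idle task, or nested preempt-disabled sections) halt
 * instead and are woken by a rescheduling IPI.
 */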
void kvm_async_pf_task_wait(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DECLARE_SWAITQUEUE(wait);

	rcu_irq_enter();

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* a dummy entry exists -> the wakeup was delivered ahead of the PF */
		hlist_del(&e->link);
		kfree(e);
		raw_spin_unlock(&b->lock);

		rcu_irq_exit();
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) || preempt_count() > 1;
	init_swait_queue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	raw_spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		rcu_irq_exit();

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			native_safe_halt();
			local_irq_disable();
		}

		rcu_irq_enter();
	}
	if (!n.halted)
		finish_swait(&n.wq, &wait);

	rcu_irq_exit();
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

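/*
 * Wake one waiter: a halted waiter is kicked with a rescheduling IPI,
 * a sleeping one through its swait queue.  Unhashing the node is what
 * terminates the wait loop in kvm_async_pf_task_wait().
 */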
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (swait_active(&n->wq))
		swake_up(&n->wq);
}

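/*
 * Wake every waiter parked on the current CPU; used for the ~0 "wake
 * all" broadcast token and when a CPU is taken offline.
 */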
static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}

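/*
 * Handle a "page ready" notification.  If no waiter is found, the
 * wakeup raced ahead of the fault; leave a dummy entry behind so that
 * kvm_async_pf_task_wait() returns immediately when it runs.
 */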
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * The async PF was not yet handled.
		 * Add a dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while another cpu
			 * handles the async PF.
			 */
			raw_spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_swait_queue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	raw_spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

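/*
 * Fetch and clear the fault reason the host stored in this CPU's
 * shared apf_reason slot; returns 0 for an ordinary page fault.
 */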
u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		reason = __this_cpu_read(apf_reason.reason);
		__this_cpu_write(apf_reason.reason, 0);
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);

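/*
 * Page fault entry point when async PF is enabled: dispatch on the
 * reason left by the host, falling through to the normal page fault
 * path when the fault was not host-induced.
 */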
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	enum ctx_state prev_state;

	switch (kvm_read_and_reset_pf_reason()) {
	default:
		trace_do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		prev_state = exception_enter();
		kvm_async_pf_task_wait((u32)read_cr2());
		exception_exit(prev_state);
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}
NOKPROBE_SYMBOL(do_async_page_fault);

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/*
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}

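/*
 * Per-CPU setup: hand the host the physical addresses of this CPU's
 * shared async PF, PV EOI and steal time areas by writing the
 * corresponding custom MSRs.
 */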
static void kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		pa |= KVM_ASYNC_PF_ENABLED;

		/* Async page fault support for L1 hypervisor is optional */
		if (wrmsr_safe(MSR_KVM_ASYNC_PF_EN,
			(pa | KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT) & 0xffffffff, pa >> 32) < 0)
			wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
		__this_cpu_write(apf_reason.enabled, 1);
		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into the old kernel's
	 * memory.  The new kernel can re-enable it when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

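/*
 * Accumulated steal time is read from a record the host updates in
 * place.  The host bumps ->version before and after each update, so an
 * odd or changed version means the read raced with an update and must
 * be retried, seqcount-style.
 */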
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		virt_rmb();
		steal = src->steal;
		virt_rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static void kvm_guest_cpu_offline(void)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int kvm_cpu_online(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_init();
	local_irq_enable();
	return 0;
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_offline();
	local_irq_enable();
	return 0;
}
#endif

static void __init kvm_apf_trap_init(void)
{
	set_intr_gate(14, async_page_fault);
}

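/*
 * Main guest-side setup, run once at boot: install paravirt hooks for
 * each feature the host advertises and wire up reboot and CPU hotplug
 * callbacks so the per-CPU MSR registrations stay consistent.
 */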
void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvmclock_vsyscall)
		kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
		pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
	kvm_guest_cpu_init();
#endif

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}

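/*
 * Hypervisor detection: scan the CPUID leaves reserved for hypervisors
 * for the "KVMKVMKVM" signature.  The base leaf is probed once and
 * cached; a base of 0 means "not running on KVM".
 */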
static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

const struct hypervisor_x86 x86_hyper_kvm __refconst = {
	.name			= "KVM",
	.detect			= kvm_detect,
	.x2apic_available	= kvm_para_available,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>

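/*
 * PV qspinlock wait primitive: halt the vCPU until the lock byte at
 * @ptr changes from @val or another CPU kicks us via KVM_HC_KICK_CPU.
 * NMI context cannot halt, so it bails out and lets the caller retry.
 */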
static void kvm_wait(u8 *ptr, u8 val)
{
	unsigned long flags;

	if (in_nmi())
		return;

	local_irq_save(flags);

	if (READ_ONCE(*ptr) != val)
		goto out;

	/*
	 * Halt until it's our turn and we are kicked. Note that we do a
	 * safe halt for the irq-enabled case: if the lock info were
	 * overwritten in the irq spinlock slowpath and no spurious
	 * interrupt arrived to save us, a plain halt would hang.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	local_irq_restore(flags);
}

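/*
 * vcpu_is_preempted() backend: the host sets ->preempted in the shared
 * steal_time record while the vCPU is scheduled out, letting lock
 * waiters avoid spinning on a preempted lock holder.
 */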
#ifdef CONFIG_X86_32
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!src->preempted;
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimized version for x86-64 that avoids saving and restoring
 * eight 64-bit registers to/from the stack.
 */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq	__per_cpu_offset(,%rdi,8), %rax;"
"cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne	%al;"
"ret;"
".popsection");

#endif

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	if (!kvm_para_available())
		return;
	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = kvm_wait;
	pv_lock_ops.kick = kvm_kick_cpu;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		pv_lock_ops.vcpu_is_preempted =
			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
	}
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */