/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#include "coalesced_mmio.h"
#include "async_pf.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/*
 * Ordering of locks:
 *
 *		kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_RAW_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count = 0;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);

static bool largepages_enabled = true;

static struct page *hwpoison_page;
static pfn_t hwpoison_pfn;

struct page *fault_page;
pfn_t fault_pfn;

inline int kvm_is_mmio_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn)) {
		int reserved;
		struct page *tail = pfn_to_page(pfn);
		struct page *head = compound_trans_head(tail);
		reserved = PageReserved(head);
		if (head != tail) {
			/*
			 * "head" is not a dangling pointer
			 * (compound_trans_head takes care of that)
			 * but the hugepage may have been split
			 * from under us (and we may not hold a
			 * reference count on the head page so it can
			 * be reused before we run PageReferenced), so
			 * we have to check PageTail before returning
			 * what we just read.
			 */
			smp_rmb();
			if (PageTail(tail))
				return reserved;
		}
		return PageReserved(tail);
	}

	return true;
}

/*
 * Switches to the specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
		/* The thread running this VCPU changed. */
		struct pid *oldpid = vcpu->pid;
		struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
		rcu_assign_pointer(vcpu->pid, newpid);
		synchronize_rcu();
		put_pid(oldpid);
	}
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

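/*
 * Raise @req on every vcpu and IPI the ones currently executing guest
 * code so they notice it promptly.  Returns true if an IPI was sent to
 * at least one other CPU.
 */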
static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	me = get_cpu();
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_make_request(req, vcpu);
		cpu = vcpu->cpu;

		/* Set ->requests bit before we read ->mode */
		smp_mb();

		if (cpus != NULL && cpu != -1 && cpu != me &&
		    kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	put_cpu();
	free_cpumask_var(cpus);
	return called;
}

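/*
 * Snapshot tlbs_dirty before forcing the flush; the cmpxchg at the end
 * only clears the counter if it is still unchanged, so sptes dirtied
 * concurrently will still trigger a later flush.
 */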
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int dirty_count = kvm->tlbs_dirty;

	smp_mb();
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	vcpu->pid = NULL;
	init_waitqueue_head(&vcpu->wq);
	kvm_async_pf_vcpu_init(vcpu);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	put_pid(vcpu->pid);
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush, idx;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns. So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed. If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	/* we have to flush the TLB before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
	need_tlb_flush |= kvm->tlbs_dirty;
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	/* we have to flush the TLB before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, but both values are read by the kvm
	 * page fault under the mmu_lock spinlock, so we don't need to
	 * add an smp_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	if (young)
		kvm_flush_remote_tlbs(kvm);

	return young;
}

static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	young = kvm_test_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

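/*
 * Called when the host address space is being torn down: drop all
 * shadow page table mappings before the mm goes away for good.
 */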
static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_flush_shadow(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.test_young		= kvm_mmu_notifier_test_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static struct kvm *kvm_create_vm(void)
{
	int r, i;
	struct kvm *kvm = kvm_arch_alloc_vm();

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	r = kvm_arch_init_vm(kvm);
	if (r)
		goto out_err_nodisable;

	r = hardware_enable_all();
	if (r)
		goto out_err_nodisable;

#ifdef CONFIG_HAVE_KVM_IRQCHIP
	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	r = -ENOMEM;
	kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
	if (!kvm->memslots)
		goto out_err_nosrcu;
	if (init_srcu_struct(&kvm->srcu))
		goto out_err_nosrcu;
	for (i = 0; i < KVM_NR_BUSES; i++) {
		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
					GFP_KERNEL);
		if (!kvm->buses[i])
			goto out_err;
	}

	spin_lock_init(&kvm->mmu_lock);
	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);

	r = kvm_init_mmu_notifier(kvm);
	if (r)
		goto out_err;

	raw_spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	raw_spin_unlock(&kvm_lock);

	return kvm;

out_err:
	cleanup_srcu_struct(&kvm->srcu);
out_err_nosrcu:
	hardware_disable_all();
out_err_nodisable:
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm->buses[i]);
	kfree(kvm->memslots);
	kvm_arch_free_vm(kvm);
	return ERR_PTR(r);
}

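/*
 * Free both halves of the double-sized dirty bitmap; the vfree/kfree
 * choice mirrors the allocation in kvm_create_dirty_bitmap() below.
 */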
static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)
		return;

	if (2 * kvm_dirty_bitmap_bytes(memslot) > PAGE_SIZE)
		vfree(memslot->dirty_bitmap_head);
	else
		kfree(memslot->dirty_bitmap_head);

	memslot->dirty_bitmap = NULL;
	memslot->dirty_bitmap_head = NULL;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	int i;

	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		kvm_destroy_dirty_bitmap(free);

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
			vfree(free->lpage_info[i]);
			free->lpage_info[i] = NULL;
		}
	}

	free->npages = 0;
	free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;
	struct kvm_memslots *slots = kvm->memslots;

	for (i = 0; i < slots->nmemslots; ++i)
		kvm_free_physmem_slot(&slots->memslots[i], NULL);

	kfree(kvm->memslots);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_arch_sync_events(kvm);
	raw_spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	raw_spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++)
		kvm_io_bus_destroy(kvm->buses[i]);
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	kvm_free_physmem(kvm);
	cleanup_srcu_struct(&kvm->srcu);
	kvm_arch_free_vm(kvm);
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

#ifndef CONFIG_S390
/*
 * Allocation size is twice as large as the actual dirty bitmap size.
 * This makes it possible to do double buffering: see x86's
 * kvm_vm_ioctl_get_dirty_log().
 */
static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);

	if (dirty_bytes > PAGE_SIZE)
		memslot->dirty_bitmap = vzalloc(dirty_bytes);
	else
		memslot->dirty_bitmap = kzalloc(dirty_bytes, GFP_KERNEL);

	if (!memslot->dirty_bitmap)
		return -ENOMEM;

	memslot->dirty_bitmap_head = memslot->dirty_bitmap;
	return 0;
}
#endif /* !CONFIG_S390 */

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;
	struct kvm_memslots *slots, *old_memslots;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	/* We can read the guest memory with __xxx_user() later on. */
	if (user_alloc &&
	    ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
	     !access_ok(VERIFY_WRITE,
			(void __user *)(unsigned long)mem->userspace_addr,
			mem->memory_size)))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	r = -EINVAL;
	if (npages > KVM_MEM_MAX_NR_PAGES)
		goto out;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.id = mem->slot;
	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots->memslots[i];

		if (s == memslot || !s->npages)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
#ifndef CONFIG_S390
	if (npages && !new.rmap) {
		new.rmap = vzalloc(npages * sizeof(*new.rmap));

		if (!new.rmap)
			goto out_free;

		new.user_alloc = user_alloc;
		new.userspace_addr = mem->userspace_addr;
	}
	if (!npages)
		goto skip_lpage;

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		unsigned long ugfn;
		unsigned long j;
		int lpages;
		int level = i + 2;

		/* Avoid unused variable warning if no large pages */
		(void)level;

		if (new.lpage_info[i])
			continue;

		lpages = 1 + ((base_gfn + npages - 1)
			     >> KVM_HPAGE_GFN_SHIFT(level));
		lpages -= base_gfn >> KVM_HPAGE_GFN_SHIFT(level);

		new.lpage_info[i] = vzalloc(lpages * sizeof(*new.lpage_info[i]));

		if (!new.lpage_info[i])
			goto out_free;

		if (base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
			new.lpage_info[i][0].write_count = 1;
		if ((base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
			new.lpage_info[i][lpages - 1].write_count = 1;
		ugfn = new.userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
		 * other, or if explicitly asked to, disable large page
		 * support for this slot
		 */
		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
		    !largepages_enabled)
			for (j = 0; j < lpages; ++j)
				new.lpage_info[i][j].write_count = 1;
	}

skip_lpage:

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		if (kvm_create_dirty_bitmap(&new) < 0)
			goto out_free;
		/* destroy any largepage mappings for dirty tracking */
	}
#else  /* not defined CONFIG_S390 */
	new.user_alloc = user_alloc;
	if (user_alloc)
		new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */

	if (!npages) {
		r = -ENOMEM;
		slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
		if (!slots)
			goto out_free;
		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
		if (mem->slot >= slots->nmemslots)
			slots->nmemslots = mem->slot + 1;
		slots->generation++;
		slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;

		old_memslots = kvm->memslots;
		rcu_assign_pointer(kvm->memslots, slots);
		synchronize_srcu_expedited(&kvm->srcu);
		/* From this point no new shadow pages pointing to a deleted
		 * memslot will be created.
		 *
		 * validation of sp->gfn happens in:
		 * 	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
		 * 	- kvm_is_visible_gfn (mmu_check_roots)
		 */
		kvm_arch_flush_shadow(kvm);
		kfree(old_memslots);
	}

	r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
	if (r)
		goto out_free;

	/* map the pages in iommu page table */
	if (npages) {
		r = kvm_iommu_map_pages(kvm, &new);
		if (r)
			goto out_free;
	}

	r = -ENOMEM;
	slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
	if (!slots)
		goto out_free;
	memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
	if (mem->slot >= slots->nmemslots)
		slots->nmemslots = mem->slot + 1;
	slots->generation++;

	/* actual memory is freed via old in kvm_free_physmem_slot below */
	if (!npages) {
		new.rmap = NULL;
		new.dirty_bitmap = NULL;
		for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i)
			new.lpage_info[i] = NULL;
	}

	slots->memslots[mem->slot] = new;
	old_memslots = kvm->memslots;
	rcu_assign_pointer(kvm->memslots, slots);
	synchronize_srcu_expedited(&kvm->srcu);

	kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);

	kvm_free_physmem_slot(&old, &new);
	kfree(old_memslots);

	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	mutex_lock(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	mutex_unlock(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	unsigned long n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

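/*
 * Lookup failures are reported through distinguished sentinel pages and
 * pfns: bad_page for a gfn with no memslot, hwpoison_page for poisoned
 * host memory and fault_page for a failed host translation.
 */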
int is_error_page(struct page *page)
{
	return page == bad_page || page == hwpoison_page || page == fault_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn || pfn == hwpoison_pfn || pfn == fault_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

int is_hwpoison_pfn(pfn_t pfn)
{
	return pfn == hwpoison_pfn;
}
EXPORT_SYMBOL_GPL(is_hwpoison_pfn);

int is_fault_pfn(pfn_t pfn)
{
	return pfn == fault_pfn;
}
EXPORT_SYMBOL_GPL(is_fault_pfn);

int is_noslot_pfn(pfn_t pfn)
{
	return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_noslot_pfn);

int is_invalid_pfn(pfn_t pfn)
{
	return pfn == hwpoison_pfn || pfn == fault_pfn;
}
EXPORT_SYMBOL_GPL(is_invalid_pfn);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots,
						gfn_t gfn)
{
	int i;

	for (i = 0; i < slots->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_memslot);

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = kvm_memslots(kvm);

	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];

		if (memslot->flags & KVM_MEMSLOT_INVALID)
			continue;

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr, size;

	size = PAGE_SIZE;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return PAGE_SIZE;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	size = vma_kernel_pagesize(vma);

out:
	up_read(&current->mm->mmap_sem);

	return size;
}

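/*
 * Translate a gfn within @slot to its userspace virtual address; when
 * @nr_pages is non-NULL, also report how many pages remain in the slot
 * starting at @gfn.
 */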
static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				     gfn_t *nr_pages)
{
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return bad_hva();

	if (nr_pages)
		*nr_pages = slot->npages - (gfn - slot->base_gfn);

	return gfn_to_hva_memslot(slot, gfn);
}

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

static pfn_t get_fault_pfn(void)
{
	get_page(fault_page);
	return fault_pfn;
}

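/*
 * Non-blocking get_user_pages() variant: FOLL_NOWAIT makes it bail out
 * instead of sleeping on I/O, so the caller can retry asynchronously.
 */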
int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
			 unsigned long start, int write, struct page **page)
{
	int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;

	if (write)
		flags |= FOLL_WRITE;

	return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL);
}

static inline int check_user_page_hwpoison(unsigned long addr)
{
	int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;

	rc = __get_user_pages(current, current->mm, addr, 1,
			      flags, NULL, NULL, NULL);
	return rc == -EHWPOISON;
}

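/*
 * Translate a host virtual address to a pfn.  The fast path is a single
 * __get_user_pages_fast() attempt; the slow path (skipped if @atomic)
 * falls back to regular get_user_pages, checks for hwpoison and walks
 * the VMA so that VM_PFNMAP (MMIO) mappings without struct pages still
 * resolve.  If only an async fault could make progress, *@async is set
 * and fault_pfn is returned.
 */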
static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
			bool *async, bool write_fault, bool *writable)
{
	struct page *page[1];
	int npages = 0;
	pfn_t pfn;

	/* we can do it either atomically or asynchronously, not both */
	BUG_ON(atomic && async);

	BUG_ON(!write_fault && !writable);

	if (writable)
		*writable = true;

	if (atomic || async)
		npages = __get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1) && !atomic) {
		might_sleep();

		if (writable)
			*writable = write_fault;

		if (async) {
			down_read(&current->mm->mmap_sem);
			npages = get_user_page_nowait(current, current->mm,
						      addr, write_fault, page);
			up_read(&current->mm->mmap_sem);
		} else
			npages = get_user_pages_fast(addr, 1, write_fault,
						     page);

		/* map read fault as writable if possible */
		if (unlikely(!write_fault) && npages == 1) {
			struct page *wpage[1];

			npages = __get_user_pages_fast(addr, 1, 1, wpage);
			if (npages == 1) {
				*writable = true;
				put_page(page[0]);
				page[0] = wpage[0];
			}
			npages = 1;
		}
	}

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		if (atomic)
			return get_fault_pfn();

		down_read(&current->mm->mmap_sem);
		if (npages == -EHWPOISON ||
			(!async && check_user_page_hwpoison(addr))) {
			up_read(&current->mm->mmap_sem);
			get_page(hwpoison_page);
			return page_to_pfn(hwpoison_page);
		}

		vma = find_vma_intersection(current->mm, addr, addr+1);

		if (vma == NULL)
			pfn = get_fault_pfn();
		else if ((vma->vm_flags & VM_PFNMAP)) {
			pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
				vma->vm_pgoff;
			BUG_ON(!kvm_is_mmio_pfn(pfn));
		} else {
			if (async && (vma->vm_flags & VM_WRITE))
				*async = true;
			pfn = get_fault_pfn();
		}
		up_read(&current->mm->mmap_sem);
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}

pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr)
{
	return hva_to_pfn(kvm, addr, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(hva_to_pfn_atomic);

static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
			  bool write_fault, bool *writable)
{
	unsigned long addr;

	if (async)
		*async = false;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	return hva_to_pfn(kvm, addr, atomic, async, write_fault, writable);
}

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);

pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
		       bool write_fault, bool *writable)
{
	return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_async);

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable)
{
	return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);

pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
			 struct kvm_memory_slot *slot, gfn_t gfn)
{
	unsigned long addr = gfn_to_hva_memslot(slot, gfn);
	return hva_to_pfn(kvm, addr, false, NULL, true, NULL);
}

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
			    int nr_pages)
{
	unsigned long addr;
	gfn_t entry;

	addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry);
	if (kvm_is_error_hva(addr))
		return -1;

	if (entry < nr_pages)
		return 0;

	return __get_user_pages_fast(addr, nr_pages, 1, pages);
}
EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (!kvm_is_mmio_pfn(pfn))
		return pfn_to_page(pfn);

	WARN_ON(kvm_is_mmio_pfn(pfn));

	get_page(bad_page);
	return bad_page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = __copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = __copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

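/*
 * A gfn_to_hva_cache memoizes one gpa->hva translation together with
 * the memslot generation it was computed under; the cached accessors
 * below revalidate against slots->generation before every use.
 */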
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int offset = offset_in_page(gpa);
	gfn_t gfn = gpa >> PAGE_SHIFT;

	ghc->gpa = gpa;
	ghc->generation = slots->generation;
	ghc->memslot = __gfn_to_memslot(slots, gfn);
	ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
	if (!kvm_is_error_hva(ghc->hva))
		ghc->hva += offset;
	else
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);

int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int r;

	if (slots->generation != ghc->generation)
		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	r = __copy_to_user((void __user *)ghc->hva, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_cached);

int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int r;

	if (slots->generation != ghc->generation)
		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	r = __copy_from_user(data, (void __user *)ghc->hva, len);
	if (r)
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_cached);

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, (const void *) empty_zero_page,
				    offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
			     gfn_t gfn)
{
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		__set_bit_le(rel_gfn, memslot->dirty_bitmap);
	}
}

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	memslot = gfn_to_memslot(kvm, gfn);
	mark_page_dirty_in_slot(kvm, memslot, gfn);
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_arch_vcpu_runnable(vcpu)) {
			kvm_make_request(KVM_REQ_UNHALT, vcpu);
			break;
		}
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (signal_pending(current))
			break;

		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

void kvm_vcpu_on_spin(struct kvm_vcpu *me)
{
	struct kvm *kvm = me->kvm;
	struct kvm_vcpu *vcpu;
	int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
	int yielded = 0;
	int pass;
	int i;

	/*
	 * We boost the priority of a VCPU that is runnable but not
	 * currently running, because it got preempted by something
	 * else and called schedule in __vcpu_run.  Hopefully that
	 * VCPU is holding the lock that we need and will release it.
	 * We approximate round-robin by starting at the last boosted VCPU.
	 */
	for (pass = 0; pass < 2 && !yielded; pass++) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			struct task_struct *task = NULL;
			struct pid *pid;
			if (!pass && i < last_boosted_vcpu) {
				i = last_boosted_vcpu;
				continue;
			} else if (pass && i > last_boosted_vcpu)
				break;
			if (vcpu == me)
				continue;
			if (waitqueue_active(&vcpu->wq))
				continue;
			rcu_read_lock();
			pid = rcu_dereference(vcpu->pid);
			if (pid)
				task = get_pid_task(vcpu->pid, PIDTYPE_PID);
			rcu_read_unlock();
			if (!task)
				continue;
			if (task->flags & PF_VCPU) {
				put_task_struct(task);
				continue;
			}
			if (yield_to(task, 1)) {
				put_task_struct(task);
				kvm->last_boosted_vcpu = i;
				yielded = 1;
				break;
			}
			put_task_struct(task);
		}
	}
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);

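/*
 * Back the vcpu file's mmap: page 0 is the kvm_run structure, with the
 * x86 PIO page and the coalesced MMIO ring at their fixed offsets when
 * they are configured.
 */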
e4a533a4 1582static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9a2bb7f4
AK
1583{
1584 struct kvm_vcpu *vcpu = vma->vm_file->private_data;
9a2bb7f4
AK
1585 struct page *page;
1586
e4a533a4 1587 if (vmf->pgoff == 0)
039576c0 1588 page = virt_to_page(vcpu->run);
09566765 1589#ifdef CONFIG_X86
e4a533a4 1590 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
ad312c7c 1591 page = virt_to_page(vcpu->arch.pio_data);
5f94c174
LV
1592#endif
1593#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1594 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
1595 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
09566765 1596#endif
039576c0 1597 else
e4a533a4 1598 return VM_FAULT_SIGBUS;
9a2bb7f4 1599 get_page(page);
e4a533a4 1600 vmf->page = page;
1601 return 0;
9a2bb7f4
AK
1602}
1603
f0f37e2f 1604static const struct vm_operations_struct kvm_vcpu_vm_ops = {
e4a533a4 1605 .fault = kvm_vcpu_fault,
9a2bb7f4
AK
1606};
1607
1608static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
1609{
1610 vma->vm_ops = &kvm_vcpu_vm_ops;
1611 return 0;
1612}
1613
bccf2150
AK
1614static int kvm_vcpu_release(struct inode *inode, struct file *filp)
1615{
1616 struct kvm_vcpu *vcpu = filp->private_data;
1617
66c0b394 1618 kvm_put_kvm(vcpu->kvm);
bccf2150
AK
1619 return 0;
1620}
1621
3d3aab1b 1622static struct file_operations kvm_vcpu_fops = {
bccf2150
AK
1623 .release = kvm_vcpu_release,
1624 .unlocked_ioctl = kvm_vcpu_ioctl,
1dda606c
AG
1625#ifdef CONFIG_COMPAT
1626 .compat_ioctl = kvm_vcpu_compat_ioctl,
1627#endif
9a2bb7f4 1628 .mmap = kvm_vcpu_mmap,
6038f373 1629 .llseek = noop_llseek,
bccf2150
AK
1630};
1631
1632/*
1633 * Allocates an inode for the vcpu.
1634 */
1635static int create_vcpu_fd(struct kvm_vcpu *vcpu)
1636{
628ff7c1 1637 return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR);
bccf2150
AK
1638}
1639
c5ea7660
AK
1640/*
1641 * Creates some virtual cpus. Good luck creating more than one.
1642 */
73880c80 1643static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
c5ea7660
AK
1644{
1645 int r;
988a2cae 1646 struct kvm_vcpu *vcpu, *v;
c5ea7660 1647
73880c80 1648 vcpu = kvm_arch_vcpu_create(kvm, id);
fb3f0f51
RR
1649 if (IS_ERR(vcpu))
1650 return PTR_ERR(vcpu);
c5ea7660 1651
15ad7146
AK
1652 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
1653
26e5215f
AK
1654 r = kvm_arch_vcpu_setup(vcpu);
1655 if (r)
d780592b 1656 goto vcpu_destroy;
26e5215f 1657
11ec2804 1658 mutex_lock(&kvm->lock);
73880c80
GN
1659 if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
1660 r = -EINVAL;
d780592b 1661 goto unlock_vcpu_destroy;
fb3f0f51 1662 }
73880c80 1663
988a2cae
GN
1664 kvm_for_each_vcpu(r, v, kvm)
1665 if (v->vcpu_id == id) {
73880c80 1666 r = -EEXIST;
d780592b 1667 goto unlock_vcpu_destroy;
73880c80
GN
1668 }
1669
1670 BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
c5ea7660 1671
fb3f0f51 1672 /* Now it's all set up, let userspace reach it */
66c0b394 1673 kvm_get_kvm(kvm);
bccf2150 1674 r = create_vcpu_fd(vcpu);
73880c80
GN
1675 if (r < 0) {
1676 kvm_put_kvm(kvm);
d780592b 1677 goto unlock_vcpu_destroy;
73880c80
GN
1678 }
1679
1680 kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
1681 smp_wmb();
1682 atomic_inc(&kvm->online_vcpus);
1683
1684#ifdef CONFIG_KVM_APIC_ARCHITECTURE
1685 if (kvm->bsp_vcpu_id == id)
1686 kvm->bsp_vcpu = vcpu;
1687#endif
1688 mutex_unlock(&kvm->lock);
fb3f0f51 1689 return r;
39c3b86e 1690
d780592b 1691unlock_vcpu_destroy:
7d8fece6 1692 mutex_unlock(&kvm->lock);
d780592b 1693vcpu_destroy:
d40ccc62 1694 kvm_arch_vcpu_destroy(vcpu);
c5ea7660
AK
1695 return r;
1696}
1697
1961d276
AK
1698static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
1699{
1700 if (sigset) {
1701 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
1702 vcpu->sigset_active = 1;
1703 vcpu->sigset = *sigset;
1704 } else
1705 vcpu->sigset_active = 0;
1706 return 0;
1707}
1708
bccf2150
AK
1709static long kvm_vcpu_ioctl(struct file *filp,
1710 unsigned int ioctl, unsigned long arg)
6aa8b732 1711{
bccf2150 1712 struct kvm_vcpu *vcpu = filp->private_data;
2f366987 1713 void __user *argp = (void __user *)arg;
313a3dc7 1714 int r;
fa3795a7
DH
1715 struct kvm_fpu *fpu = NULL;
1716 struct kvm_sregs *kvm_sregs = NULL;
6aa8b732 1717
6d4e4c4f
AK
1718 if (vcpu->kvm->mm != current->mm)
1719 return -EIO;
2122ff5e
AK
1720
1721#if defined(CONFIG_S390) || defined(CONFIG_PPC)
1722 /*
1723 * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
1724 * so vcpu_load() would break it.
1725 */
1726 if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT)
1727 return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
1728#endif
1729
1730
1731 vcpu_load(vcpu);
6aa8b732 1732 switch (ioctl) {
9a2bb7f4 1733 case KVM_RUN:
f0fe5108
AK
1734 r = -EINVAL;
1735 if (arg)
1736 goto out;
b6c7a5dc 1737 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
64be5007 1738 trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
6aa8b732 1739 break;
6aa8b732 1740 case KVM_GET_REGS: {
3e4bb3ac 1741 struct kvm_regs *kvm_regs;
6aa8b732 1742
3e4bb3ac
XZ
1743 r = -ENOMEM;
1744 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
1745 if (!kvm_regs)
6aa8b732 1746 goto out;
3e4bb3ac
XZ
1747 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
1748 if (r)
1749 goto out_free1;
6aa8b732 1750 r = -EFAULT;
3e4bb3ac
XZ
1751 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
1752 goto out_free1;
6aa8b732 1753 r = 0;
3e4bb3ac
XZ
1754out_free1:
1755 kfree(kvm_regs);
6aa8b732
AK
1756 break;
1757 }
1758 case KVM_SET_REGS: {
3e4bb3ac 1759 struct kvm_regs *kvm_regs;
6aa8b732 1760
3e4bb3ac
XZ
1761 r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		if (r)
			goto out_free2;
		r = 0;
out_free2:
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	vcpu_put(vcpu);
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}

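/*
 * Compat (32-bit userspace on a 64-bit kernel) entry point for vcpu
 * ioctls.  Only KVM_SET_SIGNAL_MASK needs translation, since
 * compat_sigset_t differs in layout from the native sigset_t; every
 * other ioctl is layout-compatible and falls through to
 * kvm_vcpu_ioctl().
 */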
#ifdef CONFIG_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *filp,
				  unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = compat_ptr(arg);
	int r;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;

	switch (ioctl) {
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		compat_sigset_t csigset;
		sigset_t sigset;

		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof csigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&csigset, sigmask_arg->sigset,
					   sizeof csigset))
				goto out;
			sigset_from_compat(&sigset, &csigset);
			r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
		} else
			/* A NULL argp clears the mask; do not convert the
			   uninitialized csigset in that case. */
			r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
		break;
	}
	default:
		r = kvm_vcpu_ioctl(filp, ioctl, arg);
	}

out:
	return r;
}
#endif

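/*
 * Dispatcher for ioctls issued on a VM fd.  The mm check rejects any
 * caller that is not the process which created the VM, so a VM fd
 * passed to another process cannot be used to manipulate a foreign
 * address space.
 */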
static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
	case KVM_IRQFD: {
		struct kvm_irqfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
		break;
	}
	case KVM_IOEVENTFD: {
		struct kvm_ioeventfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_ioeventfd(kvm, &data);
		break;
	}
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_SET_BOOT_CPU_ID:
		r = 0;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) != 0)
			r = -EBUSY;
		else
			kvm->bsp_vcpu_id = arg;
		mutex_unlock(&kvm->lock);
		break;
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
		if (r == -ENOTTY)
			r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
	}
out:
	return r;
}

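/*
 * KVM_GET_DIRTY_LOG carries a user pointer inside its argument
 * structure, so the compat path must rebuild the native
 * struct kvm_dirty_log from the 32-bit layout before forwarding it.
 */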
#ifdef CONFIG_COMPAT
struct compat_kvm_dirty_log {
	__u32 slot;
	__u32 padding1;
	union {
		compat_uptr_t dirty_bitmap; /* one bit per page */
		__u64 padding2;
	};
};

static long kvm_vm_compat_ioctl(struct file *filp,
				unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_GET_DIRTY_LOG: {
		struct compat_kvm_dirty_log compat_log;
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&compat_log, (void __user *)arg,
				   sizeof(compat_log)))
			goto out;
		log.slot = compat_log.slot;
		log.padding1 = compat_log.padding1;
		log.padding2 = compat_log.padding2;
		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);

		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_vm_ioctl(filp, ioctl, arg);
	}

out:
	return r;
}
#endif

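/*
 * Fault handler for mmap()ed VM fds: the file offset is interpreted as
 * a guest frame number, resolved to a host virtual address, and the
 * backing page is pinned into the vma.
 */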
static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	gfn_t gfn = vmf->pgoff;
	struct kvm *kvm = vma->vm_file->private_data;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return VM_FAULT_SIGBUS;

	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
				NULL);
	if (unlikely(npages != 1))
		return VM_FAULT_SIGBUS;

	vmf->page = page[0];
	return 0;
}

static const struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = kvm_vm_compat_ioctl,
#endif
	.mmap = kvm_vm_mmap,
	.llseek = noop_llseek,
};

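/*
 * Backs KVM_CREATE_VM: creates the VM object and wraps it in an
 * anonymous inode fd.  On any failure after kvm_create_vm() succeeds,
 * the reference is dropped again with kvm_put_kvm().
 */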
static int kvm_dev_ioctl_create_vm(void)
{
	int r;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	r = kvm_coalesced_mmio_init(kvm);
	if (r < 0) {
		kvm_put_kvm(kvm);
		return r;
	}
#endif
	r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
	if (r < 0)
		kvm_put_kvm(kvm);

	return r;
}

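/*
 * Capabilities that exist on every architecture are answered here;
 * everything else is delegated to kvm_dev_ioctl_check_extension() in
 * arch code.
 */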
static long kvm_dev_ioctl_check_extension_generic(long arg)
{
	switch (arg) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_CAP_SET_BOOT_CPU_ID:
#endif
	case KVM_CAP_INTERNAL_ERROR_DATA:
		return 1;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	case KVM_CAP_IRQ_ROUTING:
		return KVM_MAX_IRQ_ROUTES;
#endif
	default:
		break;
	}
	return kvm_dev_ioctl_check_extension(arg);
}

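/*
 * Dispatcher for ioctls on /dev/kvm itself.  A typical (hypothetical)
 * userspace sequence against this interface would be:
 *
 *	int kvm = open("/dev/kvm", O_RDWR);
 *	if (ioctl(kvm, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
 *		exit(1);
 *	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
 *
 * Anything not handled generically here is forwarded to
 * kvm_arch_dev_ioctl().
 */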
static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension_generic(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = -EOPNOTSUPP;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl = kvm_dev_ioctl,
	.llseek = noop_llseek,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

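/*
 * Per-cpu enable/disable of the hardware virtualization extensions
 * (e.g. VMXON on Intel).  cpus_hardware_enabled records which cpus are
 * currently enabled so the operations stay idempotent; the _nolock
 * variants are for callers that already hold kvm_lock or run in
 * contexts where it cannot be taken (reboot, suspend).
 */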
static void hardware_enable_nolock(void *junk)
{
	int cpu = raw_smp_processor_id();
	int r;

	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;

	cpumask_set_cpu(cpu, cpus_hardware_enabled);

	r = kvm_arch_hardware_enable(NULL);

	if (r) {
		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
		atomic_inc(&hardware_enable_failed);
		printk(KERN_INFO "kvm: enabling virtualization on "
				 "CPU%d failed\n", cpu);
	}
}

static void hardware_enable(void *junk)
{
	raw_spin_lock(&kvm_lock);
	hardware_enable_nolock(junk);
	raw_spin_unlock(&kvm_lock);
}

static void hardware_disable_nolock(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable(NULL);
}

static void hardware_disable(void *junk)
{
	raw_spin_lock(&kvm_lock);
	hardware_disable_nolock(junk);
	raw_spin_unlock(&kvm_lock);
}

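/*
 * kvm_usage_count tracks the number of live VMs: the extensions are
 * enabled on all cpus when the first VM is created and disabled again
 * when the last one goes away, so they are not held while KVM is idle.
 */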
static void hardware_disable_all_nolock(void)
{
	BUG_ON(!kvm_usage_count);

	kvm_usage_count--;
	if (!kvm_usage_count)
		on_each_cpu(hardware_disable_nolock, NULL, 1);
}

static void hardware_disable_all(void)
{
	raw_spin_lock(&kvm_lock);
	hardware_disable_all_nolock();
	raw_spin_unlock(&kvm_lock);
}

static int hardware_enable_all(void)
{
	int r = 0;

	raw_spin_lock(&kvm_lock);

	kvm_usage_count++;
	if (kvm_usage_count == 1) {
		atomic_set(&hardware_enable_failed, 0);
		on_each_cpu(hardware_enable_nolock, NULL, 1);

		if (atomic_read(&hardware_enable_failed)) {
			hardware_disable_all_nolock();
			r = -EBUSY;
		}
	}

	raw_spin_unlock(&kvm_lock);

	return r;
}

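/*
 * CPU hotplug callback: cpus that come online while VMs exist must
 * have virtualization enabled before they can run vcpus, and dying
 * cpus must disable it.  With no VM around (!kvm_usage_count) there is
 * nothing to do.
 */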
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	if (!kvm_usage_count)
		return NOTIFY_OK;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_STARTING:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		hardware_enable(NULL);
		break;
	}
	return NOTIFY_OK;
}

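/*
 * kvm_spurious_fault() is reached from the fixup path of a faulting
 * virtualization instruction when we are *not* rebooting; during
 * reboot such faults are expected and silently ignored.
 */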
asmlinkage void kvm_spurious_fault(void)
{
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_spurious_fault);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if the cpu
	 * is in vmx root mode.
	 *
	 * And Intel TXT requires VMX to be off on all cpus when the
	 * system shuts down.
	 */
	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable_nolock, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
	kfree(bus);
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val)
{
	int i;
	struct kvm_io_bus *bus;

	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val)
{
	int i;
	struct kvm_io_bus *bus;

	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

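/*
 * Device (un)registration uses a copy-update scheme: a new bus array
 * is built, published with rcu_assign_pointer(), and the old one is
 * only freed after synchronize_srcu_expedited() guarantees that no
 * reader (kvm_io_bus_write/read above, under kvm->srcu) can still see
 * it.  That is why the readers take no lock beyond the SRCU read side.
 */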
/* Caller must hold slots_lock. */
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			    struct kvm_io_device *dev)
{
	struct kvm_io_bus *new_bus, *bus;

	bus = kvm->buses[bus_idx];
	if (bus->dev_count > NR_IOBUS_DEVS-1)
		return -ENOSPC;

	new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;
	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
	new_bus->devs[new_bus->dev_count++] = dev;
	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);

	return 0;
}

/* Caller must hold slots_lock. */
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev)
{
	int i, r;
	struct kvm_io_bus *new_bus, *bus;

	new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;

	bus = kvm->buses[bus_idx];
	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));

	r = -ENOENT;
	for (i = 0; i < new_bus->dev_count; i++)
		if (new_bus->devs[i] == dev) {
			r = 0;
			new_bus->devs[i] = new_bus->devs[--new_bus->dev_count];
			break;
		}

	if (r) {
		kfree(new_bus);
		return r;
	}

	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);
	return r;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
};

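/*
 * debugfs statistics: each entry in debugfs_entries names an offset
 * into struct kvm or struct kvm_vcpu, and a read aggregates that
 * counter across all VMs (and, for vcpu stats, all vcpus) on vm_list.
 */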
static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	raw_spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	raw_spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	raw_spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);

	raw_spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static const struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}

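/*
 * Syscore hooks: on suspend the extensions are turned off on the one
 * cpu still running at that point; resume re-enables them if any VM
 * exists.
 */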
static int kvm_suspend(void)
{
	if (kvm_usage_count)
		hardware_disable_nolock(NULL);
	return 0;
}

static void kvm_resume(void)
{
	if (kvm_usage_count) {
		WARN_ON(raw_spin_is_locked(&kvm_lock));
		hardware_enable_nolock(NULL);
	}
}

static struct syscore_ops kvm_syscore_ops = {
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

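/*
 * Preempt notifiers let a vcpu's architecture state follow its task:
 * kvm_sched_in/kvm_sched_out are invoked by the scheduler whenever the
 * vcpu thread is switched in or out.
 */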
struct page *bad_page;
pfn_t bad_pfn;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}

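/*
 * Module-wide initialization, called from the arch module's init
 * routine with the size/alignment of its vcpu structure.  The error
 * path unwinds in strict reverse order of the setup steps below.
 */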
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	     struct module *module)
{
	int r;
	int cpu;

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	hwpoison_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (hwpoison_page == NULL) {
		r = -ENOMEM;
		goto out_free_0;
	}

	hwpoison_pfn = page_to_pfn(hwpoison_page);

	fault_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (fault_page == NULL) {
		r = -ENOMEM;
		goto out_free_0;
	}

	fault_pfn = page_to_pfn(fault_page);

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	if (!vcpu_align)
		vcpu_align = __alignof__(struct kvm_vcpu);
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_3;
	}

	r = kvm_async_pf_init();
	if (r)
		goto out_free;

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_unreg;
	}

	register_syscore_ops(&kvm_syscore_ops);

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_init_debug();

	return 0;

out_unreg:
	kvm_async_pf_deinit();
out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	if (fault_page)
		__free_page(fault_page);
	if (hwpoison_page)
		__free_page(hwpoison_page);
	__free_page(bad_page);
out:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

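/*
 * Tear-down mirror of kvm_init(); called from the arch module's exit
 * routine.
 */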
void kvm_exit(void)
{
	kvm_exit_debug();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	kvm_async_pf_deinit();
	unregister_syscore_ops(&kvm_syscore_ops);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable_nolock, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	free_cpumask_var(cpus_hardware_enabled);
	/* Free the special pages allocated in kvm_init(). */
	__free_page(fault_page);
	__free_page(hwpoison_page);
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);