/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm-generic/bitops/le.h>

#include "coalesced_mmio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/*
 * Ordering of locks:
 *
 *		kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count = 0;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

static bool kvm_rebooting;

static bool largepages_enabled = true;

inline int kvm_is_mmio_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn)) {
		struct page *page = compound_head(pfn_to_page(pfn));
		return PageReserved(page);
	}

	return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

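/*
 * Illustrative sketch, not part of the original file: callers bracket
 * per-vcpu work with vcpu_load()/vcpu_put(), as the vcpu ioctl handlers
 * do.  The helper name example_vcpu_op() is hypothetical.
 *
 *	static int example_vcpu_op(struct kvm_vcpu *vcpu)
 *	{
 *		int r;
 *
 *		vcpu_load(vcpu);	// vcpu state loaded on this cpu
 *		r = kvm_arch_vcpu_runnable(vcpu);
 *		vcpu_put(vcpu);		// state saved, vcpu->mutex released
 *		return r;
 *	}
 */
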
static void ack_flush(void *_completed)
{
}

static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	raw_spin_lock(&kvm->requests_lock);
	me = smp_processor_id();
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(req, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpus != NULL && cpu != -1 && cpu != me)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	raw_spin_unlock(&kvm->requests_lock);
	free_cpumask_var(cpus);
	return called;
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

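/*
 * Sketch (assumption, not from this file): arch code consumes a request
 * raised by make_all_cpus_request() on the next vcpu entry, roughly:
 *
 *	if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 *		kvm_x86_ops->tlb_flush(vcpu);	// arch-specific flush
 *
 * The IPI to ack_flush() carries no payload; it only guarantees that the
 * remote cpu has left guest mode before the request counts as delivered.
 */
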
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush, idx;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns. So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed. If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, but both values are read by the kvm
	 * page fault under the mmu_lock spinlock so we don't need to
	 * add an smp_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	if (young)
		kvm_flush_remote_tlbs(kvm);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	kvm_arch_flush_shadow(kvm);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

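/*
 * Sketch (assumption, modelled on the arch page-fault code of this era,
 * not part of this file): the fault path pairs with mmu_notifier_seq and
 * mmu_notifier_count roughly as follows:
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);		// may sleep
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(vcpu, mmu_seq))	// seq changed or count != 0
 *		goto retry;			// page may have been freed
 *	// ... establish the spte ...
 *	spin_unlock(&kvm->mmu_lock);
 */
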
static struct kvm *kvm_create_vm(void)
{
	int r = 0, i;
	struct kvm *kvm = kvm_arch_create_vm();

	if (IS_ERR(kvm))
		goto out;

	r = hardware_enable_all();
	if (r)
		goto out_err_nodisable;

#ifdef CONFIG_HAVE_KVM_IRQCHIP
	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	r = -ENOMEM;
	kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
	if (!kvm->memslots)
		goto out_err;
	if (init_srcu_struct(&kvm->srcu))
		goto out_err;
	for (i = 0; i < KVM_NR_BUSES; i++) {
		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
					GFP_KERNEL);
		if (!kvm->buses[i]) {
			cleanup_srcu_struct(&kvm->srcu);
			goto out_err;
		}
	}

	r = kvm_init_mmu_notifier(kvm);
	if (r) {
		cleanup_srcu_struct(&kvm->srcu);
		goto out_err;
	}

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);
	raw_spin_lock_init(&kvm->requests_lock);
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	kvm_coalesced_mmio_init(kvm);
#endif
out:
	return kvm;

out_err:
	hardware_disable_all();
out_err_nodisable:
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm->buses[i]);
	kfree(kvm->memslots);
	kfree(kvm);
	return ERR_PTR(r);
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	int i;

	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
			vfree(free->lpage_info[i]);
			free->lpage_info[i] = NULL;
		}
	}

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;
	struct kvm_memslots *slots = kvm->memslots;

	for (i = 0; i < slots->nmemslots; ++i)
		kvm_free_physmem_slot(&slots->memslots[i], NULL);

	kfree(kvm->memslots);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++)
		kvm_io_bus_destroy(kvm->buses[i]);
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	hardware_disable_all();
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r, flush_shadow = 0;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;
	struct kvm_memslots *slots, *old_memslots;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots->memslots[i];

		if (s == memslot || !s->npages)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
#ifndef CONFIG_S390
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		new.userspace_addr = mem->userspace_addr;
	}
	if (!npages)
		goto skip_lpage;

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		unsigned long ugfn;
		unsigned long j;
		int lpages;
		int level = i + 2;

		/* Avoid unused variable warning if no large pages */
		(void)level;

		if (new.lpage_info[i])
			continue;

		lpages = 1 + (base_gfn + npages - 1) /
			     KVM_PAGES_PER_HPAGE(level);
		lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);

		new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));

		if (!new.lpage_info[i])
			goto out_free;

		memset(new.lpage_info[i], 0,
		       lpages * sizeof(*new.lpage_info[i]));

		if (base_gfn % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][0].write_count = 1;
		if ((base_gfn + npages) % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][lpages - 1].write_count = 1;
		ugfn = new.userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
		 * other, or if explicitly asked to, disable large page
		 * support for this slot
		 */
		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
		    !largepages_enabled)
			for (j = 0; j < lpages; ++j)
				new.lpage_info[i][j].write_count = 1;
	}

skip_lpage:

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
		/* destroy any largepage mappings for dirty tracking */
		if (old.npages)
			flush_shadow = 1;
	}
#else  /* not defined CONFIG_S390 */
	new.user_alloc = user_alloc;
	if (user_alloc)
		new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */

	if (!npages) {
		r = -ENOMEM;
		slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
		if (!slots)
			goto out_free;
		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
		if (mem->slot >= slots->nmemslots)
			slots->nmemslots = mem->slot + 1;
		slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;

		old_memslots = kvm->memslots;
		rcu_assign_pointer(kvm->memslots, slots);
		synchronize_srcu_expedited(&kvm->srcu);
		/* From this point no new shadow pages pointing to a deleted
		 * memslot will be created.
		 *
		 * validation of sp->gfn happens in:
		 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
		 *	- kvm_is_visible_gfn (mmu_check_roots)
		 */
		kvm_arch_flush_shadow(kvm);
		kfree(old_memslots);
	}

	r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
	if (r)
		goto out_free;

#ifdef CONFIG_DMAR
	/* map the pages in iommu page table */
	if (npages) {
		r = kvm_iommu_map_pages(kvm, &new);
		if (r)
			goto out_free;
	}
#endif

	r = -ENOMEM;
	slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
	if (!slots)
		goto out_free;
	memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
	if (mem->slot >= slots->nmemslots)
		slots->nmemslots = mem->slot + 1;

	/* actual memory is freed via old in kvm_free_physmem_slot below */
	if (!npages) {
		new.rmap = NULL;
		new.dirty_bitmap = NULL;
		for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i)
			new.lpage_info[i] = NULL;
	}

	slots->memslots[mem->slot] = new;
	old_memslots = kvm->memslots;
	rcu_assign_pointer(kvm->memslots, slots);
	synchronize_srcu_expedited(&kvm->srcu);

	kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);

	kvm_free_physmem_slot(&old, &new);
	kfree(old_memslots);

	if (flush_shadow)
		kvm_arch_flush_shadow(kvm);

	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	mutex_lock(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	mutex_unlock(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

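/*
 * Userspace-side sketch (assumption, not part of this file): the ioctl
 * path above is driven from a VMM roughly like this, with vm_fd obtained
 * from KVM_CREATE_VM and "mem" an anonymous mmap backing guest RAM:
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot            = 0,
 *		.flags           = 0,	// or KVM_MEM_LOG_DIRTY_PAGES
 *		.guest_phys_addr = 0,
 *		.memory_size     = size,
 *		.userspace_addr  = (unsigned long)mem,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 */
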
int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	unsigned long n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = rcu_dereference(kvm->memslots);

	for (i = 0; i < slots->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return gfn_to_memslot_unaliased(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = rcu_dereference(kvm->memslots);

	gfn = unalias_gfn_instantiation(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];

		if (memslot->flags & KVM_MEMSLOT_INVALID)
			continue;

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr, size;

	size = PAGE_SIZE;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return PAGE_SIZE;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	size = vma_kernel_pagesize(vma);

out:
	up_read(&current->mm->mmap_sem);

	return size;
}

int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
	struct kvm_memory_slot *memslot = NULL;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < slots->nmemslots; ++i) {
		memslot = &slots->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			break;
	}

	return memslot - slots->memslots;
}

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn_instantiation(kvm, gfn);
	slot = gfn_to_memslot_unaliased(kvm, gfn);
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
{
	struct page *page[1];
	int npages;
	pfn_t pfn;

	might_sleep();

	npages = get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, addr);

		if (vma == NULL || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_PFNMAP)) {
			up_read(&current->mm->mmap_sem);
			get_page(bad_page);
			return page_to_pfn(bad_page);
		}

		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		up_read(&current->mm->mmap_sem);
		BUG_ON(!kvm_is_mmio_pfn(pfn));
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	return hva_to_pfn(kvm, addr);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}

pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
			 struct kvm_memory_slot *slot, gfn_t gfn)
{
	unsigned long addr = gfn_to_hva_memslot(slot, gfn);
	return hva_to_pfn(kvm, addr);
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (!kvm_is_mmio_pfn(pfn))
		return pfn_to_page(pfn);

	WARN_ON(kvm_is_mmio_pfn(pfn));

	get_page(bad_page);
	return bad_page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);

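/*
 * Sketch (assumption, not from this file): typical use of gfn_to_page()
 * pairs it with the release helpers below so the page refcount stays
 * balanced:
 *
 *	struct page *page = gfn_to_page(kvm, gfn);
 *	if (!is_error_page(page)) {
 *		void *va = kmap_atomic(page, KM_USER0);
 *		// ... touch guest memory ...
 *		kunmap_atomic(va, KM_USER0);
 *		kvm_release_page_dirty(page);	// or _clean if read-only
 *	}
 */
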
void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

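/*
 * Sketch (assumption, not from this file): kvm_read_guest() copies an
 * arbitrary, possibly page-crossing range of guest physical memory, e.g.
 * to fetch a guest descriptor into a host buffer:
 *
 *	struct some_guest_desc desc;	// hypothetical structure
 *
 *	if (kvm_read_guest(kvm, gpa, &desc, sizeof(desc)))
 *		return -EFAULT;		// gpa not backed by a memslot
 */
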
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = gfn_to_memslot_unaliased(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;
		unsigned long *p = memslot->dirty_bitmap +
				   rel_gfn / BITS_PER_LONG;
		int offset = rel_gfn % BITS_PER_LONG;

		/* avoid RMW */
		if (!generic_test_le_bit(offset, p))
			generic___set_le_bit(offset, p);
	}
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_arch_vcpu_runnable(vcpu)) {
			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
			break;
		}
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (signal_pending(current))
			break;

		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
{
	ktime_t expires;
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

	/* Sleep for 100 us, and hope lock-holder got scheduled */
	expires = ktime_add_ns(ktime_get(), 100000UL);
	schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);

	finish_wait(&vcpu->wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR);
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
	int r;
	struct kvm_vcpu *vcpu, *v;

	vcpu = kvm_arch_vcpu_create(kvm, id);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
		r = -EINVAL;
		goto vcpu_destroy;
	}

	kvm_for_each_vcpu(r, v, kvm)
		if (v->vcpu_id == id) {
			r = -EEXIST;
			goto vcpu_destroy;
		}

	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0) {
		kvm_put_kvm(kvm);
		goto vcpu_destroy;
	}

	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
	smp_wmb();
	atomic_inc(&kvm->online_vcpus);

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	if (kvm->bsp_vcpu_id == id)
		kvm->bsp_vcpu = vcpu;
#endif
	mutex_unlock(&kvm->lock);
	return r;

vcpu_destroy:
	mutex_unlock(&kvm->lock);
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		if (r)
			goto out_free2;
		r = 0;
out_free2:
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		/* pass p (not &sigset) so a NULL argp clears the mask */
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}

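/*
 * Userspace-side sketch (assumption, not part of this file): the vcpu fd
 * returned by KVM_CREATE_VCPU is typically driven in a run loop, with the
 * kvm_run structure mapped via the mmap handler above:
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	for (;;) {
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *		switch (run->exit_reason) {
 *		case KVM_EXIT_IO:   ... break;	// emulate port I/O
 *		case KVM_EXIT_MMIO: ... break;	// emulate MMIO
 *		}
 *	}
 *
 * where mmap_size comes from ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0).
 */
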
static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = -ENXIO;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = -ENXIO;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
	case KVM_IRQFD: {
		struct kvm_irqfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
		break;
	}
	case KVM_IOEVENTFD: {
		struct kvm_ioeventfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_ioeventfd(kvm, &data);
		break;
	}
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_SET_BOOT_CPU_ID:
		r = 0;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) != 0)
			r = -EBUSY;
		else
			kvm->bsp_vcpu_id = arg;
		mutex_unlock(&kvm->lock);
		break;
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
		if (r == -ENOTTY)
			r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
	}
out:
	return r;
}

#ifdef CONFIG_COMPAT
struct compat_kvm_dirty_log {
	__u32 slot;
	__u32 padding1;
	union {
		compat_uptr_t dirty_bitmap; /* one bit per page */
		__u64 padding2;
	};
};

static long kvm_vm_compat_ioctl(struct file *filp,
				unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_GET_DIRTY_LOG: {
		struct compat_kvm_dirty_log compat_log;
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&compat_log, (void __user *)arg,
				   sizeof(compat_log)))
			goto out;
		log.slot	 = compat_log.slot;
		log.padding1	 = compat_log.padding1;
		log.padding2	 = compat_log.padding2;
		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);

		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_vm_ioctl(filp, ioctl, arg);
	}

out:
	return r;
}
#endif

static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	gfn_t gfn = vmf->pgoff;
	struct kvm *kvm = vma->vm_file->private_data;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return VM_FAULT_SIGBUS;

	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
				NULL);
	if (unlikely(npages != 1))
		return VM_FAULT_SIGBUS;

	vmf->page = page[0];
	return 0;
}

static const struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = kvm_vm_compat_ioctl,
#endif
	.mmap           = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
	if (fd < 0)
		kvm_put_kvm(kvm);

	return fd;
}

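/*
 * Userspace-side sketch (assumption, not part of this file): a VM file
 * descriptor is obtained through the /dev/kvm character device registered
 * below:
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int vm_fd  = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *	// vm_fd now accepts the kvm_vm_ioctl() commands above
 */
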
static long kvm_dev_ioctl_check_extension_generic(long arg)
{
	switch (arg) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_CAP_SET_BOOT_CPU_ID:
#endif
	case KVM_CAP_INTERNAL_ERROR_DATA:
		return 1;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	case KVM_CAP_IRQ_ROUTING:
		return KVM_MAX_IRQ_ROUTES;
#endif
	default:
		break;
	}
	return kvm_dev_ioctl_check_extension(arg);
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension_generic(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = -EOPNOTSUPP;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();
	int r;

	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;

	cpumask_set_cpu(cpu, cpus_hardware_enabled);

	r = kvm_arch_hardware_enable(NULL);

	if (r) {
		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
		atomic_inc(&hardware_enable_failed);
		printk(KERN_INFO "kvm: enabling virtualization on "
				 "CPU%d failed\n", cpu);
	}
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable(NULL);
}

static void hardware_disable_all_nolock(void)
{
	BUG_ON(!kvm_usage_count);

	kvm_usage_count--;
	if (!kvm_usage_count)
		on_each_cpu(hardware_disable, NULL, 1);
}

static void hardware_disable_all(void)
{
	spin_lock(&kvm_lock);
	hardware_disable_all_nolock();
	spin_unlock(&kvm_lock);
}

static int hardware_enable_all(void)
{
	int r = 0;

	spin_lock(&kvm_lock);

	kvm_usage_count++;
	if (kvm_usage_count == 1) {
		atomic_set(&hardware_enable_failed, 0);
		on_each_cpu(hardware_enable, NULL, 1);

		if (atomic_read(&hardware_enable_failed)) {
			hardware_disable_all_nolock();
			r = -EBUSY;
		}
	}

	spin_unlock(&kvm_lock);

	return r;
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	if (!kvm_usage_count)
		return NOTIFY_OK;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}

4ecac3fd
AK
1941
1942asmlinkage void kvm_handle_fault_on_reboot(void)
1943{
1944 if (kvm_rebooting)
1945 /* spin while reset goes on */
1946 while (true)
1947 ;
1948 /* Fault while not rebooting. We want the trace. */
1949 BUG();
1950}
1951EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * And Intel TXT requires VMX to be off on all CPUs when the
	 * system shuts down.
	 */
	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
	kfree(bus);
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val)
{
	int i;
	struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]);

	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val)
{
	int i;
	struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]);

	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}
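
/*
 * Illustrative sketch (an assumption; emulate_mmio_write() is a
 * hypothetical name and the X86EMUL_* return values are borrowed from
 * the x86 emulator): arch MMIO emulation dispatches through these
 * helpers, falling back to a userspace exit when no in-kernel device
 * claims the address.
 */
#if 0
static int emulate_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa,
			      const void *data, int bytes)
{
	if (!kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, gpa, bytes, data))
		return X86EMUL_CONTINUE;	/* an in-kernel device took it */
	/* ... otherwise set up a KVM_EXIT_MMIO exit to userspace ... */
	return X86EMUL_UNHANDLEABLE;
}
#endif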

/*
 * Buses are published via RCU: updaters build a copy, swap the bus
 * pointer, wait for SRCU readers of the old copy to drain, then free it.
 */

/* Caller must hold slots_lock. */
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			    struct kvm_io_device *dev)
{
	struct kvm_io_bus *new_bus, *bus;

	bus = kvm->buses[bus_idx];
	if (bus->dev_count > NR_IOBUS_DEVS - 1)
		return -ENOSPC;

	new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;
	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
	new_bus->devs[new_bus->dev_count++] = dev;
	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);

	return 0;
}

/* Caller must hold slots_lock. */
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev)
{
	int i, r;
	struct kvm_io_bus *new_bus, *bus;

	new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
	if (!new_bus)
		return -ENOMEM;

	bus = kvm->buses[bus_idx];
	memcpy(new_bus, bus, sizeof(struct kvm_io_bus));

	r = -ENOENT;
	for (i = 0; i < new_bus->dev_count; i++)
		if (new_bus->devs[i] == dev) {
			r = 0;
			new_bus->devs[i] = new_bus->devs[--new_bus->dev_count];
			break;
		}

	if (r) {
		kfree(new_bus);
		return r;
	}

	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);
	return r;
}
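
/*
 * Illustrative sketch (an assumption, modelled on the coalesced-MMIO
 * device in this tree; example_device_init() is a hypothetical name):
 * a device registers itself on one of the buses with slots_lock held,
 * as the comments above require.
 */
#if 0
static int example_device_init(struct kvm *kvm, struct kvm_io_device *dev)
{
	int ret;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}
#endif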

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

/* Sum one u32 counter, located at @_offset into struct kvm, over all VMs. */
static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

/* As above, but for a counter in struct kvm_vcpu, summed over all vcpus. */
static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);

	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static const struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};
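
/*
 * Illustrative sketch (an assumption, modelled on the per-arch stats
 * tables): debugfs_entries is supplied by arch code as an array of
 * {name, offset, kind} triples terminated by a NULL name; the stat
 * names shown are examples from x86.
 */
#if 0
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed",    offsetof(struct kvm_vcpu, stat.pf_fixed),
	  KVM_STAT_VCPU },
	{ "mmu_flooded", offsetof(struct kvm, stat.mmu_flooded),
	  KVM_STAT_VM },
	{ NULL }
};
#endif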

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	if (kvm_usage_count)
		hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	if (kvm_usage_count)
		hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;
pfn_t bad_pfn;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}
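
/*
 * Illustrative sketch (an assumption; example_vcpu_attach() is a
 * hypothetical name): each vcpu hooks these callbacks into the
 * scheduler with the generic preempt-notifier API, so arch state is
 * saved and restored whenever the vcpu thread is switched out and in.
 * kvm_preempt_ops itself is filled in by kvm_init() below.
 */
#if 0
void example_vcpu_attach(struct kvm_vcpu *vcpu)
{
	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
	preempt_notifier_register(&vcpu->preempt_notifier);
}
#endif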

int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module)
{
	int r;
	int cpu;

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_init_debug();

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);
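
/*
 * Illustrative sketch (an assumption, modelled on the x86 VMX module of
 * this era): an arch backend passes its ops table, vcpu size and module
 * to kvm_init() from module_init(), and undoes it with kvm_exit().
 */
#if 0
static int __init vmx_init(void)
{
	return kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
}

static void __exit vmx_exit(void)
{
	kvm_exit();
}

module_init(vmx_init);
module_exit(vmx_exit);
#endif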

void kvm_exit(void)
{
	tracepoint_synchronize_unregister();
	kvm_exit_debug();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	free_cpumask_var(cpus_hardware_enabled);
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);