/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
                           unsigned long arg);

bool kvm_rebooting;

static inline int valid_vcpu(int n)
{
        return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
        int cpu;

        mutex_lock(&vcpu->mutex);
        cpu = get_cpu();
        preempt_notifier_register(&vcpu->preempt_notifier);
        kvm_arch_vcpu_load(vcpu, cpu);
        put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        kvm_arch_vcpu_put(vcpu);
        preempt_notifier_unregister(&vcpu->preempt_notifier);
        preempt_enable();
        mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

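/*
 * Ask every vcpu to flush its TLB: set KVM_REQ_TLB_FLUSH on each vcpu and
 * IPI the CPUs currently running a vcpu.  ack_flush() is an empty callback;
 * waiting for it ensures the remote CPU has taken the IPI and will notice
 * the request before re-entering the guest.
 */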
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
        int i, cpu;
        cpumask_t cpus;
        struct kvm_vcpu *vcpu;

        cpus_clear(cpus);
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                vcpu = kvm->vcpus[i];
                if (!vcpu)
                        continue;
                if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
                        continue;
                cpu = vcpu->cpu;
                if (cpu != -1 && cpu != raw_smp_processor_id())
                        cpu_set(cpu, cpus);
        }
        if (cpus_empty(cpus))
                return;
        ++kvm->stat.remote_tlb_flush;
        smp_call_function_mask(cpus, ack_flush, NULL, 1);
}

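/*
 * Same pattern as kvm_flush_remote_tlbs(), but requests an MMU reload
 * (KVM_REQ_MMU_RELOAD) from every vcpu instead of a TLB flush.
 */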
void kvm_reload_remote_mmus(struct kvm *kvm)
{
        int i, cpu;
        cpumask_t cpus;
        struct kvm_vcpu *vcpu;

        cpus_clear(cpus);
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                vcpu = kvm->vcpus[i];
                if (!vcpu)
                        continue;
                if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
                        continue;
                cpu = vcpu->cpu;
                if (cpu != -1 && cpu != raw_smp_processor_id())
                        cpu_set(cpu, cpus);
        }
        if (cpus_empty(cpus))
                return;
        smp_call_function_mask(cpus, ack_flush, NULL, 1);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
        struct page *page;
        int r;

        mutex_init(&vcpu->mutex);
        vcpu->cpu = -1;
        vcpu->kvm = kvm;
        vcpu->vcpu_id = id;
        init_waitqueue_head(&vcpu->wq);

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
                r = -ENOMEM;
                goto fail;
        }
        vcpu->run = page_address(page);

        r = kvm_arch_vcpu_init(vcpu);
        if (r < 0)
                goto fail_free_run;
        return 0;

fail_free_run:
        free_page((unsigned long)vcpu->run);
fail:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_uninit(vcpu);
        free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

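/*
 * Create a VM: let the arch code allocate struct kvm, pin the creator's mm,
 * initialize the I/O buses and locks, and add the VM to the global vm_list.
 */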
static struct kvm *kvm_create_vm(void)
{
        struct kvm *kvm = kvm_arch_create_vm();

        if (IS_ERR(kvm))
                goto out;

        kvm->mm = current->mm;
        atomic_inc(&kvm->mm->mm_count);
        spin_lock_init(&kvm->mmu_lock);
        kvm_io_bus_init(&kvm->pio_bus);
        mutex_init(&kvm->lock);
        kvm_io_bus_init(&kvm->mmio_bus);
        init_rwsem(&kvm->slots_lock);
        atomic_set(&kvm->users_count, 1);
        spin_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
        spin_unlock(&kvm_lock);
out:
        return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
                                  struct kvm_memory_slot *dont)
{
        if (!dont || free->rmap != dont->rmap)
                vfree(free->rmap);

        if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
                vfree(free->dirty_bitmap);

        if (!dont || free->lpage_info != dont->lpage_info)
                vfree(free->lpage_info);

        free->npages = 0;
        free->dirty_bitmap = NULL;
        free->rmap = NULL;
        free->lpage_info = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i)
                kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
        struct mm_struct *mm = kvm->mm;

        spin_lock(&kvm_lock);
        list_del(&kvm->vm_list);
        spin_unlock(&kvm_lock);
        kvm_io_bus_destroy(&kvm->pio_bus);
        kvm_io_bus_destroy(&kvm->mmio_bus);
        kvm_arch_destroy_vm(kvm);
        mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
        atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
        if (atomic_dec_and_test(&kvm->users_count))
                kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
        struct kvm *kvm = filp->private_data;

        kvm_put_kvm(kvm);
        return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
                            int user_alloc)
{
        int r;
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long i;
        struct kvm_memory_slot *memslot;
        struct kvm_memory_slot old, new;

        r = -EINVAL;
        /* General sanity checks */
        if (mem->memory_size & (PAGE_SIZE - 1))
                goto out;
        if (mem->guest_phys_addr & (PAGE_SIZE - 1))
                goto out;
        if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
                goto out;
        if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
                goto out;

        memslot = &kvm->memslots[mem->slot];
        base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
        npages = mem->memory_size >> PAGE_SHIFT;

        if (!npages)
                mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

        new = old = *memslot;

        new.base_gfn = base_gfn;
        new.npages = npages;
        new.flags = mem->flags;

        /* Disallow changing a memory slot's size. */
        r = -EINVAL;
        if (npages && old.npages && npages != old.npages)
                goto out_free;

        /* Check for overlaps */
        r = -EEXIST;
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *s = &kvm->memslots[i];

                if (s == memslot)
                        continue;
                if (!((base_gfn + npages <= s->base_gfn) ||
                      (base_gfn >= s->base_gfn + s->npages)))
                        goto out_free;
        }

        /* Free page dirty bitmap if unneeded */
        if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
                new.dirty_bitmap = NULL;

        r = -ENOMEM;

        /* Allocate if a slot is being created */
        if (npages && !new.rmap) {
                new.rmap = vmalloc(npages * sizeof(struct page *));

                if (!new.rmap)
                        goto out_free;

                memset(new.rmap, 0, npages * sizeof(*new.rmap));

                new.user_alloc = user_alloc;
                new.userspace_addr = mem->userspace_addr;
        }
        if (npages && !new.lpage_info) {
                int largepages = npages / KVM_PAGES_PER_HPAGE;
                if (npages % KVM_PAGES_PER_HPAGE)
                        largepages++;
                if (base_gfn % KVM_PAGES_PER_HPAGE)
                        largepages++;

                new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));

                if (!new.lpage_info)
                        goto out_free;

                memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));

                if (base_gfn % KVM_PAGES_PER_HPAGE)
                        new.lpage_info[0].write_count = 1;
                if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
                        new.lpage_info[largepages-1].write_count = 1;
        }

        /* Allocate page dirty bitmap if needed */
        if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
                unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

                new.dirty_bitmap = vmalloc(dirty_bytes);
                if (!new.dirty_bitmap)
                        goto out_free;
                memset(new.dirty_bitmap, 0, dirty_bytes);
        }

        if (mem->slot >= kvm->nmemslots)
                kvm->nmemslots = mem->slot + 1;

        *memslot = new;

        r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
        if (r) {
                *memslot = old;
                goto out_free;
        }

        kvm_free_physmem_slot(&old, &new);
        return 0;

out_free:
        kvm_free_physmem_slot(&new, &old);
out:
        return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem,
                          int user_alloc)
{
        int r;

        down_write(&kvm->slots_lock);
        r = __kvm_set_memory_region(kvm, mem, user_alloc);
        up_write(&kvm->slots_lock);
        return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        if (mem->slot >= KVM_MEMORY_SLOTS)
                return -EINVAL;
        return kvm_set_memory_region(kvm, mem, user_alloc);
}

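/*
 * Copy a memory slot's dirty bitmap out to userspace and report through
 * *is_dirty whether any page in the slot has been dirtied.
 */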
int kvm_get_dirty_log(struct kvm *kvm,
                      struct kvm_dirty_log *log, int *is_dirty)
{
        struct kvm_memory_slot *memslot;
        int r, i;
        int n;
        unsigned long any = 0;

        r = -EINVAL;
        if (log->slot >= KVM_MEMORY_SLOTS)
                goto out;

        memslot = &kvm->memslots[log->slot];
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

        for (i = 0; !any && i < n/sizeof(long); ++i)
                any = memslot->dirty_bitmap[i];

        r = -EFAULT;
        if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
                goto out;

        if (any)
                *is_dirty = 1;

        r = 0;
out:
        return r;
}

int is_error_page(struct page *page)
{
        return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
        return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
        return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
        return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i) {
                struct kvm_memory_slot *memslot = &kvm->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return memslot;
        }
        return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
        gfn = unalias_gfn(kvm, gfn);
        return __gfn_to_memslot(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
        int i;

        gfn = unalias_gfn(kvm, gfn);
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *memslot = &kvm->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

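/*
 * Translate a guest frame number into the host userspace virtual address
 * that backs it, based on the owning slot's userspace_addr; returns
 * bad_hva() when the gfn is not covered by any memslot.
 */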
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot;

        gfn = unalias_gfn(kvm, gfn);
        slot = __gfn_to_memslot(kvm, gfn);
        if (!slot)
                return bad_hva();
        return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

/*
 * Requires current->mm->mmap_sem to be held
 */
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
        struct page *page[1];
        unsigned long addr;
        int npages;
        pfn_t pfn;

        might_sleep();

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr)) {
                get_page(bad_page);
                return page_to_pfn(bad_page);
        }

        npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
                                NULL);

        if (unlikely(npages != 1)) {
                struct vm_area_struct *vma;

                vma = find_vma(current->mm, addr);
                if (vma == NULL || addr < vma->vm_start ||
                    !(vma->vm_flags & VM_PFNMAP)) {
                        get_page(bad_page);
                        return page_to_pfn(bad_page);
                }

                pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
                BUG_ON(pfn_valid(pfn));
        } else
                pfn = page_to_pfn(page[0]);

        return pfn;
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
        pfn_t pfn;

        pfn = gfn_to_pfn(kvm, gfn);
        if (pfn_valid(pfn))
                return pfn_to_page(pfn);

        WARN_ON(!pfn_valid(pfn));

        get_page(bad_page);
        return bad_page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
        kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
        if (pfn_valid(pfn))
                put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
        kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
        kvm_set_pfn_dirty(pfn);
        kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
        kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
        if (pfn_valid(pfn)) {
                struct page *page = pfn_to_page(pfn);
                if (!PageReserved(page))
                        SetPageDirty(page);
        }
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
        if (pfn_valid(pfn))
                mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
        if (pfn_valid(pfn))
                get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

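/*
 * Helper for the guest read/write/clear loops below: how many of the
 * remaining 'len' bytes fit in the current page, starting at 'offset'.
 */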
static int next_segment(unsigned long len, int offset)
{
        if (len > PAGE_SIZE - offset)
                return PAGE_SIZE - offset;
        else
                return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len)
{
        int r;
        unsigned long addr;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        r = copy_from_user(data, (void __user *)addr + offset, len);
        if (r)
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                data += seg;
                ++gfn;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len)
{
        int r;
        unsigned long addr;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int offset = offset_in_page(gpa);

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        pagefault_disable();
        r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
        pagefault_enable();
        if (r)
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len)
{
        int r;
        unsigned long addr;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        r = copy_to_user((void __user *)addr + offset, data, len);
        if (r)
                return -EFAULT;
        mark_page_dirty(kvm, gfn);
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                data += seg;
                ++gfn;
        }
        return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
        return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                ++gfn;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

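/*
 * Set the dirty bit for a gfn in its memslot's dirty bitmap, if dirty
 * logging is enabled for that slot.
 */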
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *memslot;

        gfn = unalias_gfn(kvm, gfn);
        memslot = __gfn_to_memslot(kvm, gfn);
        if (memslot && memslot->dirty_bitmap) {
                unsigned long rel_gfn = gfn - memslot->base_gfn;

                /* avoid RMW */
                if (!test_bit(rel_gfn, memslot->dirty_bitmap))
                        set_bit(rel_gfn, memslot->dirty_bitmap);
        }
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
        DEFINE_WAIT(wait);

        for (;;) {
                prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

                if (kvm_cpu_has_interrupt(vcpu))
                        break;
                if (kvm_cpu_has_pending_timer(vcpu))
                        break;
                if (kvm_arch_vcpu_runnable(vcpu))
                        break;
                if (signal_pending(current))
                        break;

                vcpu_put(vcpu);
                schedule();
                vcpu_load(vcpu);
        }

        finish_wait(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
        if (!need_resched())
                return;
        cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

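/*
 * Fault handler for the vcpu fd's mmap(): page 0 maps the shared
 * struct kvm_run; on x86 the following page maps the PIO data area.
 */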
static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct kvm_vcpu *vcpu = vma->vm_file->private_data;
        struct page *page;

        if (vmf->pgoff == 0)
                page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
        else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
                page = virt_to_page(vcpu->arch.pio_data);
#endif
        else
                return VM_FAULT_SIGBUS;
        get_page(page);
        vmf->page = page;
        return 0;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
        .fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_vcpu_vm_ops;
        return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
        struct kvm_vcpu *vcpu = filp->private_data;

        kvm_put_kvm(vcpu->kvm);
        return 0;
}

static const struct file_operations kvm_vcpu_fops = {
        .release        = kvm_vcpu_release,
        .unlocked_ioctl = kvm_vcpu_ioctl,
        .compat_ioctl   = kvm_vcpu_ioctl,
        .mmap           = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
        int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu);
        if (fd < 0)
                kvm_put_kvm(vcpu->kvm);
        return fd;
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
        int r;
        struct kvm_vcpu *vcpu;

        if (!valid_vcpu(n))
                return -EINVAL;

        vcpu = kvm_arch_vcpu_create(kvm, n);
        if (IS_ERR(vcpu))
                return PTR_ERR(vcpu);

        preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

        r = kvm_arch_vcpu_setup(vcpu);
        if (r)
                goto vcpu_destroy;

        mutex_lock(&kvm->lock);
        if (kvm->vcpus[n]) {
                r = -EEXIST;
                mutex_unlock(&kvm->lock);
                goto vcpu_destroy;
        }
        kvm->vcpus[n] = vcpu;
        mutex_unlock(&kvm->lock);

        /* Now it's all set up, let userspace reach it */
        kvm_get_kvm(kvm);
        r = create_vcpu_fd(vcpu);
        if (r < 0)
                goto unlink;
        return r;

unlink:
        mutex_lock(&kvm->lock);
        kvm->vcpus[n] = NULL;
        mutex_unlock(&kvm->lock);
vcpu_destroy:
        kvm_arch_vcpu_destroy(vcpu);
        return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
        if (sigset) {
                sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
                vcpu->sigset_active = 1;
                vcpu->sigset = *sigset;
        } else
                vcpu->sigset_active = 0;
        return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
                           unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        if (vcpu->kvm->mm != current->mm)
                return -EIO;
        switch (ioctl) {
        case KVM_RUN:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
                break;
        case KVM_GET_REGS: {
                struct kvm_regs *kvm_regs;

                r = -ENOMEM;
                kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
                if (!kvm_regs)
                        goto out;
                r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
                if (r)
                        goto out_free1;
                r = -EFAULT;
                if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
                        goto out_free1;
                r = 0;
out_free1:
                kfree(kvm_regs);
                break;
        }
        case KVM_SET_REGS: {
                struct kvm_regs *kvm_regs;

                r = -ENOMEM;
                kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
                if (!kvm_regs)
                        goto out;
                r = -EFAULT;
                if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
                        goto out_free2;
                r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
                if (r)
                        goto out_free2;
                r = 0;
out_free2:
                kfree(kvm_regs);
                break;
        }
        case KVM_GET_SREGS: {
                struct kvm_sregs kvm_sregs;

                memset(&kvm_sregs, 0, sizeof kvm_sregs);
                r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_SREGS: {
                struct kvm_sregs kvm_sregs;

                r = -EFAULT;
                if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_GET_MP_STATE: {
                struct kvm_mp_state mp_state;

                r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &mp_state, sizeof mp_state))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_MP_STATE: {
                struct kvm_mp_state mp_state;

                r = -EFAULT;
                if (copy_from_user(&mp_state, argp, sizeof mp_state))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_TRANSLATE: {
                struct kvm_translation tr;

                r = -EFAULT;
                if (copy_from_user(&tr, argp, sizeof tr))
                        goto out;
                r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &tr, sizeof tr))
                        goto out;
                r = 0;
                break;
        }
        case KVM_DEBUG_GUEST: {
                struct kvm_debug_guest dbg;

                r = -EFAULT;
                if (copy_from_user(&dbg, argp, sizeof dbg))
                        goto out;
                r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_SIGNAL_MASK: {
                struct kvm_signal_mask __user *sigmask_arg = argp;
                struct kvm_signal_mask kvm_sigmask;
                sigset_t sigset, *p;

                p = NULL;
                if (argp) {
                        r = -EFAULT;
                        if (copy_from_user(&kvm_sigmask, argp,
                                           sizeof kvm_sigmask))
                                goto out;
                        r = -EINVAL;
                        if (kvm_sigmask.len != sizeof sigset)
                                goto out;
                        r = -EFAULT;
                        if (copy_from_user(&sigset, sigmask_arg->sigset,
                                           sizeof sigset))
                                goto out;
                        p = &sigset;
                }
                r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
                break;
        }
        case KVM_GET_FPU: {
                struct kvm_fpu fpu;

                memset(&fpu, 0, sizeof fpu);
                r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &fpu, sizeof fpu))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_FPU: {
                struct kvm_fpu fpu;

                r = -EFAULT;
                if (copy_from_user(&fpu, argp, sizeof fpu))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        default:
                r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
        }
out:
        return r;
}

static long kvm_vm_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        if (kvm->mm != current->mm)
                return -EIO;
        switch (ioctl) {
        case KVM_CREATE_VCPU:
                r = kvm_vm_ioctl_create_vcpu(kvm, arg);
                if (r < 0)
                        goto out;
                break;
        case KVM_SET_USER_MEMORY_REGION: {
                struct kvm_userspace_memory_region kvm_userspace_mem;

                r = -EFAULT;
                if (copy_from_user(&kvm_userspace_mem, argp,
                                   sizeof kvm_userspace_mem))
                        goto out;

                r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
                if (r)
                        goto out;
                break;
        }
        case KVM_GET_DIRTY_LOG: {
                struct kvm_dirty_log log;

                r = -EFAULT;
                if (copy_from_user(&log, argp, sizeof log))
                        goto out;
                r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
                if (r)
                        goto out;
                break;
        }
        default:
                r = kvm_arch_vm_ioctl(filp, ioctl, arg);
        }
out:
        return r;
}

static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct kvm *kvm = vma->vm_file->private_data;
        struct page *page;

        if (!kvm_is_visible_gfn(kvm, vmf->pgoff))
                return VM_FAULT_SIGBUS;
        page = gfn_to_page(kvm, vmf->pgoff);
        if (is_error_page(page)) {
                kvm_release_page_clean(page);
                return VM_FAULT_SIGBUS;
        }
        vmf->page = page;
        return 0;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
        .fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_vm_vm_ops;
        return 0;
}

static const struct file_operations kvm_vm_fops = {
        .release        = kvm_vm_release,
        .unlocked_ioctl = kvm_vm_ioctl,
        .compat_ioctl   = kvm_vm_ioctl,
        .mmap           = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
        int fd;
        struct kvm *kvm;

        kvm = kvm_create_vm();
        if (IS_ERR(kvm))
                return PTR_ERR(kvm);
        fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm);
        if (fd < 0)
                kvm_put_kvm(kvm);

        return fd;
}

static long kvm_dev_ioctl(struct file *filp,
                          unsigned int ioctl, unsigned long arg)
{
        long r = -EINVAL;

        switch (ioctl) {
        case KVM_GET_API_VERSION:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = KVM_API_VERSION;
                break;
        case KVM_CREATE_VM:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = kvm_dev_ioctl_create_vm();
                break;
        case KVM_CHECK_EXTENSION:
                r = kvm_dev_ioctl_check_extension(arg);
                break;
        case KVM_GET_VCPU_MMAP_SIZE:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
                r += PAGE_SIZE;    /* pio data page */
#endif
                break;
        case KVM_TRACE_ENABLE:
        case KVM_TRACE_PAUSE:
        case KVM_TRACE_DISABLE:
                r = kvm_trace_ioctl(ioctl, arg);
                break;
        default:
                return kvm_arch_dev_ioctl(filp, ioctl, arg);
        }
out:
        return r;
}

static struct file_operations kvm_chardev_ops = {
        .unlocked_ioctl = kvm_dev_ioctl,
        .compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
        KVM_MINOR,
        "kvm",
        &kvm_chardev_ops,
};

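/*
 * Enable/disable the virtualization extensions on the current CPU, using
 * cpus_hardware_enabled to make each transition happen only once per CPU.
 * Called via on_each_cpu()/IPIs and from the CPU hotplug, reboot and
 * suspend/resume paths below.
 */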
static void hardware_enable(void *junk)
{
        int cpu = raw_smp_processor_id();

        if (cpu_isset(cpu, cpus_hardware_enabled))
                return;
        cpu_set(cpu, cpus_hardware_enabled);
        kvm_arch_hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
        int cpu = raw_smp_processor_id();

        if (!cpu_isset(cpu, cpus_hardware_enabled))
                return;
        cpu_clear(cpu, cpus_hardware_enabled);
        kvm_arch_hardware_disable(NULL);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
                           void *v)
{
        int cpu = (long)v;

        val &= ~CPU_TASKS_FROZEN;
        switch (val) {
        case CPU_DYING:
                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
                       cpu);
                hardware_disable(NULL);
                break;
        case CPU_UP_CANCELED:
                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
                       cpu);
                smp_call_function_single(cpu, hardware_disable, NULL, 1);
                break;
        case CPU_ONLINE:
                printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
                       cpu);
                smp_call_function_single(cpu, hardware_enable, NULL, 1);
                break;
        }
        return NOTIFY_OK;
}

asmlinkage void kvm_handle_fault_on_reboot(void)
{
        if (kvm_rebooting)
                /* spin while reset goes on */
                while (true)
                        ;
        /* Fault while not rebooting.  We want the trace. */
        BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
                      void *v)
{
        if (val == SYS_RESTART) {
                /*
                 * Some (well, at least mine) BIOSes hang on reboot if
                 * in vmx root mode.
                 */
                printk(KERN_INFO "kvm: exiting hardware virtualization\n");
                kvm_rebooting = true;
                on_each_cpu(hardware_disable, NULL, 1);
        }
        return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
        .notifier_call = kvm_reboot,
        .priority = 0,
};

void kvm_io_bus_init(struct kvm_io_bus *bus)
{
        memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
        int i;

        for (i = 0; i < bus->dev_count; i++) {
                struct kvm_io_device *pos = bus->devs[i];

                kvm_iodevice_destructor(pos);
        }
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
{
        int i;

        for (i = 0; i < bus->dev_count; i++) {
                struct kvm_io_device *pos = bus->devs[i];

                if (pos->in_range(pos, addr))
                        return pos;
        }

        return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
        BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

        bus->devs[bus->dev_count++] = dev;
}

static struct notifier_block kvm_cpu_notifier = {
        .notifier_call = kvm_cpu_hotplug,
        .priority = 20, /* must be > scheduler priority */
};

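/*
 * debugfs accessors: sum one statistics counter (identified by its offset
 * into struct kvm or struct kvm_vcpu) over all VMs, or over all vcpus of
 * all VMs, while holding kvm_lock.
 */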
static int vm_stat_get(void *_offset, u64 *val)
{
        unsigned offset = (long)_offset;
        struct kvm *kvm;

        *val = 0;
        spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                *val += *(u32 *)((void *)kvm + offset);
        spin_unlock(&kvm_lock);
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
        unsigned offset = (long)_offset;
        struct kvm *kvm;
        struct kvm_vcpu *vcpu;
        int i;

        *val = 0;
        spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                        vcpu = kvm->vcpus[i];
                        if (vcpu)
                                *val += *(u32 *)((void *)vcpu + offset);
                }
        spin_unlock(&kvm_lock);
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static struct file_operations *stat_fops[] = {
        [KVM_STAT_VCPU] = &vcpu_stat_fops,
        [KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
        struct kvm_stats_debugfs_item *p;

        kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
        for (p = debugfs_entries; p->name; ++p)
                p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
                                                (void *)(long)p->offset,
                                                stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
        struct kvm_stats_debugfs_item *p;

        for (p = debugfs_entries; p->name; ++p)
                debugfs_remove(p->dentry);
        debugfs_remove(kvm_debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
        hardware_disable(NULL);
        return 0;
}

static int kvm_resume(struct sys_device *dev)
{
        hardware_enable(NULL);
        return 0;
}

static struct sysdev_class kvm_sysdev_class = {
        .name = "kvm",
        .suspend = kvm_suspend,
        .resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
        .id = 0,
        .cls = &kvm_sysdev_class,
};

struct page *bad_page;
pfn_t bad_pfn;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
        return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

        kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
                          struct task_struct *next)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

        kvm_arch_vcpu_put(vcpu);
}

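/*
 * Common initialization, called from the arch module's init routine: sets
 * up debugfs stats, the arch and hardware layers, CPU hotplug/reboot/sysdev
 * hooks, the vcpu kmem cache, the /dev/kvm misc device and the preempt
 * notifier callbacks.  kvm_exit() tears all of this down again.
 */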
int kvm_init(void *opaque, unsigned int vcpu_size,
             struct module *module)
{
        int r;
        int cpu;

        kvm_init_debug();

        r = kvm_arch_init(opaque);
        if (r)
                goto out_fail;

        bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

        if (bad_page == NULL) {
                r = -ENOMEM;
                goto out;
        }

        bad_pfn = page_to_pfn(bad_page);

        r = kvm_arch_hardware_setup();
        if (r < 0)
                goto out_free_0;

        for_each_online_cpu(cpu) {
                smp_call_function_single(cpu,
                                kvm_arch_check_processor_compat,
                                &r, 1);
                if (r < 0)
                        goto out_free_1;
        }

        on_each_cpu(hardware_enable, NULL, 1);
        r = register_cpu_notifier(&kvm_cpu_notifier);
        if (r)
                goto out_free_2;
        register_reboot_notifier(&kvm_reboot_notifier);

        r = sysdev_class_register(&kvm_sysdev_class);
        if (r)
                goto out_free_3;

        r = sysdev_register(&kvm_sysdev);
        if (r)
                goto out_free_4;

        /* A kmem cache lets us meet the alignment requirements of fx_save. */
        kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
                                           __alignof__(struct kvm_vcpu),
                                           0, NULL);
        if (!kvm_vcpu_cache) {
                r = -ENOMEM;
                goto out_free_5;
        }

        kvm_chardev_ops.owner = module;

        r = misc_register(&kvm_dev);
        if (r) {
                printk(KERN_ERR "kvm: misc device register failed\n");
                goto out_free;
        }

        kvm_preempt_ops.sched_in = kvm_sched_in;
        kvm_preempt_ops.sched_out = kvm_sched_out;

        return 0;

out_free:
        kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
        sysdev_unregister(&kvm_sysdev);
out_free_4:
        sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
        on_each_cpu(hardware_disable, NULL, 1);
out_free_1:
        kvm_arch_hardware_unsetup();
out_free_0:
        __free_page(bad_page);
out:
        kvm_arch_exit();
        kvm_exit_debug();
out_fail:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
        kvm_trace_cleanup();
        misc_deregister(&kvm_dev);
        kmem_cache_destroy(kvm_vcpu_cache);
        sysdev_unregister(&kvm_sysdev);
        sysdev_class_unregister(&kvm_sysdev_class);
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
        on_each_cpu(hardware_disable, NULL, 1);
        kvm_arch_hardware_unsetup();
        kvm_arch_exit();
        kvm_exit_debug();
        __free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);