/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "kvm.h"
#include "iodev.h"

#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

static struct dentry *debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

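/*
 * Ask every vcpu to flush its TLB before the next guest entry by setting
 * KVM_REQ_TLB_FLUSH, then send an empty IPI (ack_flush) to the physical CPUs
 * currently running a vcpu so the request is noticed promptly.
 */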
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int i, cpu;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != raw_smp_processor_id())
			cpu_set(cpu, cpus);
	}
	if (cpus_empty(cpus))
		return;
	++kvm->stat.remote_tlb_flush;
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
}

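/*
 * Set up the architecture-independent vcpu state: mutex, wait queue and the
 * shared kvm_run page, then let the arch-specific code finish initialization.
 */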
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

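/*
 * Create a new VM: the arch code allocates struct kvm, then the generic code
 * pins the creating process's mm, initializes the PIO/MMIO buses and the VM
 * lock, and links the VM onto the global vm_list.
 */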
static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kvm_arch_create_vm();

	if (IS_ERR(kvm))
		goto out;

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	kvm_io_bus_init(&kvm->pio_bus);
	mutex_init(&kvm->lock);
	kvm_io_bus_init(&kvm->mmio_bus);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
out:
	return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	struct mm_struct *mm = kvm->mm;

	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
	kvm_arch_destroy_vm(kvm);
	mmdrop(mm);
}

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_destroy_vm(kvm);
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->lock.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		new.userspace_addr = mem->userspace_addr;
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}

	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;

	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
	if (r) {
		*memslot = old;
		goto out_free;
	}

	kvm_free_physmem_slot(&old, &new);
	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;

}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	mutex_lock(&kvm->lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	mutex_unlock(&kvm->lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct
				   kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

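/*
 * Copy the dirty bitmap of the memory slot named in @log to userspace and
 * report (via *is_dirty) whether any page in the slot is dirty.
 */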
int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return __gfn_to_memslot(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

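/*
 * Translate a guest frame number into the userspace virtual address that
 * backs it, or bad_hva() if the gfn is not covered by any memory slot.
 */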
static unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = __gfn_to_memslot(kvm, gfn);
	if (!slot)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}

/*
 * Requires current->mm->mmap_sem to be held
 */
static struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;

	might_sleep();

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return bad_page;
	}

	npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
				NULL);

	if (npages != 1) {
		get_page(bad_page);
		return bad_page;
	}

	return page[0];
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct page *page;

	down_read(&current->mm->mmap_sem);
	page = __gfn_to_page(kvm, gfn);
	up_read(&current->mm->mmap_sem);

	return page;
}

EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_page_dirty(struct page *page)
{
	if (!PageReserved(page))
		SetPageDirty(page);
	put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

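/*
 * Copy @len bytes of guest memory, starting @offset bytes into frame @gfn,
 * into @data through the slot's userspace mapping.  The kvm_read_guest() and
 * kvm_write_guest() helpers below split larger requests at page boundaries.
 */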
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

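/*
 * Mark @gfn dirty in its slot's dirty bitmap, if dirty logging is enabled
 * for that slot.
 */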
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = __gfn_to_memslot(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
			set_bit(rel_gfn, memslot->dirty_bitmap);
	}
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&vcpu->wq, &wait);

	/*
	 * We will block until either an interrupt or a signal wakes us up
	 */
	while (!kvm_cpu_has_interrupt(vcpu)
	       && !signal_pending(current)
	       && !kvm_arch_vcpu_runnable(vcpu)) {
		set_current_state(TASK_INTERRUPTIBLE);
		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

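/*
 * nopage handler for the vcpu mmap: page 0 of the mapping is the kvm_run
 * structure and page KVM_PIO_PAGE_OFFSET is the PIO data page.
 */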
static struct page *kvm_vcpu_nopage(struct vm_area_struct *vma,
				    unsigned long address,
				    int *type)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	unsigned long pgoff;
	struct page *page;

	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (pgoff == 0)
		page = virt_to_page(vcpu->run);
	else if (pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->pio_data);
	else
		return NOPAGE_SIGBUS;
	get_page(page);
	if (type != NULL)
		*type = VM_FAULT_MINOR;

	return page;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
	.nopage = kvm_vcpu_nopage,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	fput(vcpu->kvm->filp);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	int fd, r;
	struct inode *inode;
	struct file *file;

	r = anon_inode_getfd(&fd, &inode, &file,
			     "kvm-vcpu", &kvm_vcpu_fops, vcpu);
	if (r)
		return r;
	atomic_inc(&vcpu->kvm->filp->f_count);
	return fd;
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	int r;
	struct kvm_vcpu *vcpu;

	if (!valid_vcpu(n))
		return -EINVAL;

	vcpu = kvm_arch_vcpu_create(kvm, n);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		goto vcpu_destroy;

	mutex_lock(&kvm->lock);
	if (kvm->vcpus[n]) {
		r = -EEXIST;
		mutex_unlock(&kvm->lock);
		goto vcpu_destroy;
	}
	kvm->vcpus[n] = vcpu;
	mutex_unlock(&kvm->lock);

	/* Now it's all set up, let userspace reach it */
	r = create_vcpu_fd(vcpu);
	if (r < 0)
		goto unlink;
	return r;

unlink:
	mutex_lock(&kvm->lock);
	kvm->vcpus[n] = NULL;
	mutex_unlock(&kvm->lock);
vcpu_destroy:
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}

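/*
 * Install (or, for a NULL @sigset, clear) the signal mask that is applied
 * while this vcpu runs; SIGKILL and SIGSTOP can never be masked.
 */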
static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs kvm_regs;

		memset(&kvm_regs, 0, sizeof kvm_regs);
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs kvm_regs;

		r = -EFAULT;
		if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SREGS: {
		struct kvm_sregs kvm_sregs;

		memset(&kvm_sregs, 0, sizeof kvm_sregs);
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_DEBUG_GUEST: {
		struct kvm_debug_guest dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		/* pass NULL, not the uninitialized local sigset, when clearing */
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		struct kvm_fpu fpu;

		memset(&fpu, 0, sizeof fpu);
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &fpu, sizeof fpu))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		struct kvm_fpu fpu;

		r = -EFAULT;
		if (copy_from_user(&fpu, argp, sizeof fpu))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address,
				  int *type)
{
	struct kvm *kvm = vma->vm_file->private_data;
	unsigned long pgoff;
	struct page *page;

	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (!kvm_is_visible_gfn(kvm, pgoff))
		return NOPAGE_SIGBUS;
	/* current->mm->mmap_sem is already held so call lockless version */
	page = __gfn_to_page(kvm, pgoff);
	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		return NOPAGE_SIGBUS;
	}
	if (type != NULL)
		*type = VM_FAULT_MINOR;

	return page;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
	.nopage = kvm_vm_nopage,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl   = kvm_vm_ioctl,
	.mmap           = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd, r;
	struct inode *inode;
	struct file *file;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
	if (r) {
		kvm_destroy_vm(kvm);
		return r;
	}

	kvm->filp = file;

	return fd;
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension((long)argp);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = 2 * PAGE_SIZE;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

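/*
 * Enable or disable the virtualization extensions on the calling CPU.  The
 * cpus_hardware_enabled mask ensures each CPU is toggled at most once.
 */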
static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_set(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_clear(cpu, cpus_hardware_enabled);
	decache_vcpus_on_cpu(cpu);
	kvm_arch_hardware_disable(NULL);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
		break;
	}
	return NOTIFY_OK;
}

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		on_each_cpu(hardware_disable, NULL, 0, 1);
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

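/*
 * A kvm_io_bus is a small, linearly searched array of in-kernel I/O device
 * emulations (one bus each for PIO and MMIO), looked up by guest address.
 */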
void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		if (pos->in_range(pos, addr))
			return pos;
	}

	return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

	bus->devs[bus->dev_count++] = dev;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

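/*
 * debugfs statistics: each entry sums one u32 counter, identified by its byte
 * offset inside struct kvm or struct kvm_vcpu, across all VMs on vm_list.
 */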
static u64 vm_stat_get(void *_offset)
{
	unsigned offset = (long)_offset;
	u64 total = 0;
	struct kvm *kvm;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		total += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return total;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static u64 vcpu_stat_get(void *_offset)
{
	unsigned offset = (long)_offset;
	u64 total = 0;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (vcpu)
				total += *(u32 *)((void *)vcpu + offset);
		}
	spin_unlock(&kvm_lock);
	return total;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;

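/*
 * Preempt notifier hooks: save guest state when the vcpu thread is scheduled
 * out and reload it on the (possibly different) CPU it is scheduled back onto.
 */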
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}

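/*
 * Set up everything the generic kvm module needs: debugfs, arch init,
 * hardware enable on all online CPUs, hotplug/reboot/sysdev hooks, the vcpu
 * slab cache and the /dev/kvm misc device.
 */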
int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module)
{
	int r;
	int cpu;

	kvm_init_debug();

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 0, 1);
		if (r < 0)
			goto out_free_1;
	}

	on_each_cpu(hardware_enable, NULL, 0, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
	on_each_cpu(hardware_disable, NULL, 0, 1);
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
	kvm_exit_debug();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 0, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	kvm_exit_debug();
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);