/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
#endif

#ifdef KVM_CAP_DEVICE_ASSIGNMENT
#include <linux/pci.h>
#include <linux/interrupt.h>
#include "irq.h"
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/*
 * Ordering of locks:
 *
 *		kvm->slots_lock --> kvm->lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

static bool kvm_rebooting;

static bool largepages_enabled = true;

#ifdef KVM_CAP_DEVICE_ASSIGNMENT
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
						      int assigned_dev_id)
{
	struct list_head *ptr;
	struct kvm_assigned_dev_kernel *match;

	list_for_each(ptr, head) {
		match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
		if (match->assigned_dev_id == assigned_dev_id)
			return match;
	}
	return NULL;
}

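/*
 * Note: kvm_find_assigned_dev() does no locking of its own; every caller
 * in this file looks the device up while holding the kvm->lock mutex,
 * which also serializes additions and removals on assigned_dev_head.
 */
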
static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
				    *assigned_dev, int irq)
{
	int i, index;
	struct msix_entry *host_msix_entries;

	host_msix_entries = assigned_dev->host_msix_entries;

	index = -1;
	for (i = 0; i < assigned_dev->entries_nr; i++)
		if (irq == host_msix_entries[i].vector) {
			index = i;
			break;
		}
	if (index < 0) {
		printk(KERN_WARNING "Failed to find correlated MSI-X entry!\n");
		return 0;
	}

	return index;
}

static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
{
	struct kvm_assigned_dev_kernel *assigned_dev;
	struct kvm *kvm;
	int i;

	assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
				    interrupt_work);
	kvm = assigned_dev->kvm;

	mutex_lock(&kvm->irq_lock);
	spin_lock_irq(&assigned_dev->assigned_dev_lock);
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		struct kvm_guest_msix_entry *guest_entries =
			assigned_dev->guest_msix_entries;
		for (i = 0; i < assigned_dev->entries_nr; i++) {
			if (!(guest_entries[i].flags &
					KVM_ASSIGNED_MSIX_PENDING))
				continue;
			guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING;
			kvm_set_irq(assigned_dev->kvm,
				    assigned_dev->irq_source_id,
				    guest_entries[i].vector, 1);
		}
	} else
		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
			    assigned_dev->guest_irq, 1);

	spin_unlock_irq(&assigned_dev->assigned_dev_lock);
	mutex_unlock(&assigned_dev->kvm->irq_lock);
}

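/*
 * The hard-irq handler below cannot inject the interrupt into the guest
 * directly, because kvm_set_irq() must run under kvm->irq_lock, a mutex.
 * It therefore only records the event (and, for MSI-X, which entry is
 * pending) and schedules interrupt_work, so that the handler above runs
 * in process context where sleeping locks may be taken.
 */
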
static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
{
	unsigned long flags;
	struct kvm_assigned_dev_kernel *assigned_dev =
		(struct kvm_assigned_dev_kernel *) dev_id;

	spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags);
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		int index = find_index_from_host_irq(assigned_dev, irq);
		if (index < 0)
			goto out;
		assigned_dev->guest_msix_entries[index].flags |=
			KVM_ASSIGNED_MSIX_PENDING;
	}

	schedule_work(&assigned_dev->interrupt_work);

	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
		disable_irq_nosync(irq);
		assigned_dev->host_irq_disabled = true;
	}

out:
	spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags);
	return IRQ_HANDLED;
}

/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_assigned_dev_kernel *dev;
	unsigned long flags;

	if (kian->gsi == -1)
		return;

	dev = container_of(kian, struct kvm_assigned_dev_kernel,
			   ack_notifier);

	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);

	/* The guest irq may be shared so this ack may be
	 * from another device.
	 */
	spin_lock_irqsave(&dev->assigned_dev_lock, flags);
	if (dev->host_irq_disabled) {
		enable_irq(dev->host_irq);
		dev->host_irq_disabled = false;
	}
	spin_unlock_irqrestore(&dev->assigned_dev_lock, flags);
}

static void deassign_guest_irq(struct kvm *kvm,
			       struct kvm_assigned_dev_kernel *assigned_dev)
{
	kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
	assigned_dev->ack_notifier.gsi = -1;

	if (assigned_dev->irq_source_id != -1)
		kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
	assigned_dev->irq_source_id = -1;
	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
}

/* This function implicitly holds the kvm->lock mutex due to cancel_work_sync() */
static void deassign_host_irq(struct kvm *kvm,
			      struct kvm_assigned_dev_kernel *assigned_dev)
{
	/*
	 * In kvm_free_device_irq, cancel_work_sync() returns true if:
	 * 1. the work was scheduled and then cancelled, or
	 * 2. the work callback has been executed.
	 *
	 * The first case guarantees that the irq is disabled and no more
	 * events will arrive.  In the second case the irq may have been
	 * re-enabled (e.g. for MSI), so we disable it here to prevent
	 * further events.
	 *
	 * Note that this may result in a nested disable if the interrupt
	 * type is INTx, but that is fine since we are about to free it.
	 *
	 * If this function runs as part of VM destruction, ensure the kvm
	 * state is still valid at this point, since we may also have to
	 * wait for interrupt_work to complete.
	 */
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		int i;
		for (i = 0; i < assigned_dev->entries_nr; i++)
			disable_irq_nosync(assigned_dev->
					   host_msix_entries[i].vector);

		cancel_work_sync(&assigned_dev->interrupt_work);

		for (i = 0; i < assigned_dev->entries_nr; i++)
			free_irq(assigned_dev->host_msix_entries[i].vector,
				 (void *)assigned_dev);

		assigned_dev->entries_nr = 0;
		kfree(assigned_dev->host_msix_entries);
		kfree(assigned_dev->guest_msix_entries);
		pci_disable_msix(assigned_dev->dev);
	} else {
		/* Deal with MSI and INTx */
		disable_irq_nosync(assigned_dev->host_irq);
		cancel_work_sync(&assigned_dev->interrupt_work);

		free_irq(assigned_dev->host_irq, (void *)assigned_dev);

		if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
			pci_disable_msi(assigned_dev->dev);
	}

	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
}

static int kvm_deassign_irq(struct kvm *kvm,
			    struct kvm_assigned_dev_kernel *assigned_dev,
			    unsigned long irq_requested_type)
{
	unsigned long guest_irq_type, host_irq_type;

	if (!irqchip_in_kernel(kvm))
		return -EINVAL;
	/* no irq assignment to deassign */
	if (!assigned_dev->irq_requested_type)
		return -ENXIO;

	host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
	guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;

	if (host_irq_type)
		deassign_host_irq(kvm, assigned_dev);
	if (guest_irq_type)
		deassign_guest_irq(kvm, assigned_dev);

	return 0;
}

static void kvm_free_assigned_irq(struct kvm *kvm,
				  struct kvm_assigned_dev_kernel *assigned_dev)
{
	kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
}

static void kvm_free_assigned_device(struct kvm *kvm,
				     struct kvm_assigned_dev_kernel
				     *assigned_dev)
{
	kvm_free_assigned_irq(kvm, assigned_dev);

	pci_reset_function(assigned_dev->dev);

	pci_release_regions(assigned_dev->dev);
	pci_disable_device(assigned_dev->dev);
	pci_dev_put(assigned_dev->dev);

	list_del(&assigned_dev->list);
	kfree(assigned_dev);
}

void kvm_free_all_assigned_devices(struct kvm *kvm)
{
	struct list_head *ptr, *ptr2;
	struct kvm_assigned_dev_kernel *assigned_dev;

	list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
		assigned_dev = list_entry(ptr,
					  struct kvm_assigned_dev_kernel,
					  list);

		kvm_free_assigned_device(kvm, assigned_dev);
	}
}

static int assigned_device_enable_host_intx(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev)
{
	dev->host_irq = dev->dev->irq;
	/* Even though this is PCI, we don't want to use shared
	 * interrupts. Sharing host devices with guest-assigned devices
	 * on the same interrupt line is not a happy situation: there
	 * are going to be long delays in accepting, acking, etc.
	 */
	if (request_irq(dev->host_irq, kvm_assigned_dev_intr,
			0, "kvm_assigned_intx_device", (void *)dev))
		return -EIO;
	return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_host_msi(struct kvm *kvm,
					   struct kvm_assigned_dev_kernel *dev)
{
	int r;

	if (!dev->dev->msi_enabled) {
		r = pci_enable_msi(dev->dev);
		if (r)
			return r;
	}

	dev->host_irq = dev->dev->irq;
	if (request_irq(dev->host_irq, kvm_assigned_dev_intr, 0,
			"kvm_assigned_msi_device", (void *)dev)) {
		pci_disable_msi(dev->dev);
		return -EIO;
	}

	return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_host_msix(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev)
{
	int i, r = -EINVAL;

	/* host_msix_entries and guest_msix_entries should have been
	 * initialized */
	if (dev->entries_nr == 0)
		return r;

	r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
	if (r)
		return r;

	for (i = 0; i < dev->entries_nr; i++) {
		r = request_irq(dev->host_msix_entries[i].vector,
				kvm_assigned_dev_intr, 0,
				"kvm_assigned_msix_device",
				(void *)dev);
		/* FIXME: free requested_irq's on failure */
		if (r)
			return r;
	}

	return 0;
}

#endif

static int assigned_device_enable_guest_intx(struct kvm *kvm,
				struct kvm_assigned_dev_kernel *dev,
				struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = irq->guest_irq;
	return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_guest_msi(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *dev,
			struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	dev->host_irq_disabled = false;
	return 0;
}
#endif
#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_guest_msix(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *dev,
			struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	dev->host_irq_disabled = false;
	return 0;
}
#endif

static int assign_host_irq(struct kvm *kvm,
			   struct kvm_assigned_dev_kernel *dev,
			   __u32 host_irq_type)
{
	int r = -EEXIST;

	if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
		return r;

	switch (host_irq_type) {
	case KVM_DEV_IRQ_HOST_INTX:
		r = assigned_device_enable_host_intx(kvm, dev);
		break;
#ifdef __KVM_HAVE_MSI
	case KVM_DEV_IRQ_HOST_MSI:
		r = assigned_device_enable_host_msi(kvm, dev);
		break;
#endif
#ifdef __KVM_HAVE_MSIX
	case KVM_DEV_IRQ_HOST_MSIX:
		r = assigned_device_enable_host_msix(kvm, dev);
		break;
#endif
	default:
		r = -EINVAL;
	}

	if (!r)
		dev->irq_requested_type |= host_irq_type;

	return r;
}

static int assign_guest_irq(struct kvm *kvm,
			    struct kvm_assigned_dev_kernel *dev,
			    struct kvm_assigned_irq *irq,
			    unsigned long guest_irq_type)
{
	int id;
	int r = -EEXIST;

	if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK)
		return r;

	id = kvm_request_irq_source_id(kvm);
	if (id < 0)
		return id;

	dev->irq_source_id = id;

	switch (guest_irq_type) {
	case KVM_DEV_IRQ_GUEST_INTX:
		r = assigned_device_enable_guest_intx(kvm, dev, irq);
		break;
#ifdef __KVM_HAVE_MSI
	case KVM_DEV_IRQ_GUEST_MSI:
		r = assigned_device_enable_guest_msi(kvm, dev, irq);
		break;
#endif
#ifdef __KVM_HAVE_MSIX
	case KVM_DEV_IRQ_GUEST_MSIX:
		r = assigned_device_enable_guest_msix(kvm, dev, irq);
		break;
#endif
	default:
		r = -EINVAL;
	}

	if (!r) {
		dev->irq_requested_type |= guest_irq_type;
		kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
	} else
		kvm_free_irq_source_id(kvm, dev->irq_source_id);

	return r;
}

/* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
				   struct kvm_assigned_irq *assigned_irq)
{
	int r = -EINVAL;
	struct kvm_assigned_dev_kernel *match;
	unsigned long host_irq_type, guest_irq_type;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	if (!irqchip_in_kernel(kvm))
		return r;

	mutex_lock(&kvm->lock);
	r = -ENODEV;
	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match)
		goto out;

	host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK);
	guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK);

	r = -EINVAL;
	/* can only assign one type at a time */
	if (hweight_long(host_irq_type) > 1)
		goto out;
	if (hweight_long(guest_irq_type) > 1)
		goto out;
	if (host_irq_type == 0 && guest_irq_type == 0)
		goto out;

	r = 0;
	if (host_irq_type)
		r = assign_host_irq(kvm, match, host_irq_type);
	if (r)
		goto out;

	if (guest_irq_type)
		r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type);
out:
	mutex_unlock(&kvm->lock);
	return r;
}

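/*
 * A caller typically requests one host type and one guest type in a
 * single KVM_ASSIGN_DEV_IRQ call, e.g. (sketch, hypothetical values):
 *
 *	struct kvm_assigned_irq irq = {
 *		.assigned_dev_id = dev_id,
 *		.guest_irq	 = guest_gsi,
 *		.flags		 = KVM_DEV_IRQ_HOST_MSI | KVM_DEV_IRQ_GUEST_MSI,
 *	};
 *
 * The hweight_long() checks above reject more than one bit set in
 * either the host mask or the guest mask.
 */
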
static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
					 struct kvm_assigned_irq
					 *assigned_irq)
{
	int r = -ENODEV;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match)
		goto out;

	r = kvm_deassign_irq(kvm, match, assigned_irq->flags);
out:
	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
				      struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;
	struct pci_dev *dev;

	down_read(&kvm->slots_lock);
	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (match) {
		/* device already assigned */
		r = -EEXIST;
		goto out;
	}

	match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
	if (match == NULL) {
		printk(KERN_INFO "%s: Couldn't allocate memory\n",
		       __func__);
		r = -ENOMEM;
		goto out;
	}
	dev = pci_get_bus_and_slot(assigned_dev->busnr,
				   assigned_dev->devfn);
	if (!dev) {
		printk(KERN_INFO "%s: host device not found\n", __func__);
		r = -EINVAL;
		goto out_free;
	}
	if (pci_enable_device(dev)) {
		printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
		r = -EBUSY;
		goto out_put;
	}
	r = pci_request_regions(dev, "kvm_assigned_device");
	if (r) {
		printk(KERN_INFO "%s: Could not get access to device regions\n",
		       __func__);
		goto out_disable;
	}

	pci_reset_function(dev);

	match->assigned_dev_id = assigned_dev->assigned_dev_id;
	match->host_busnr = assigned_dev->busnr;
	match->host_devfn = assigned_dev->devfn;
	match->flags = assigned_dev->flags;
	match->dev = dev;
	spin_lock_init(&match->assigned_dev_lock);
	match->irq_source_id = -1;
	match->kvm = kvm;
	match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
	INIT_WORK(&match->interrupt_work,
		  kvm_assigned_dev_interrupt_work_handler);

	list_add(&match->list, &kvm->arch.assigned_dev_head);

	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
		if (!kvm->arch.iommu_domain) {
			r = kvm_iommu_map_guest(kvm);
			if (r)
				goto out_list_del;
		}
		r = kvm_assign_device(kvm, match);
		if (r)
			goto out_list_del;
	}

out:
	mutex_unlock(&kvm->lock);
	up_read(&kvm->slots_lock);
	return r;
out_list_del:
	list_del(&match->list);
	pci_release_regions(dev);
out_disable:
	pci_disable_device(dev);
out_put:
	pci_dev_put(dev);
out_free:
	kfree(match);
	mutex_unlock(&kvm->lock);
	up_read(&kvm->slots_lock);
	return r;
}
#endif

#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
		struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (!match) {
		printk(KERN_INFO "%s: device hasn't been assigned before, "
		       "so cannot be deassigned\n", __func__);
		r = -EINVAL;
		goto out;
	}

	if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
		kvm_deassign_device(kvm, match);

	kvm_free_assigned_device(kvm, match);

out:
	mutex_unlock(&kvm->lock);
	return r;
}
#endif

inline int kvm_is_mmio_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn)) {
		struct page *page = compound_head(pfn_to_page(pfn));
		return PageReserved(page);
	}

	return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

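/*
 * Typical usage (sketch): callers bracket any access to vcpu state with
 * a load/put pair, so the state is resident on the current cpu and the
 * vcpu->mutex excludes concurrent ioctls:
 *
 *	vcpu_load(vcpu);
 *	... operate on or run the vcpu ...
 *	vcpu_put(vcpu);
 */
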
static void ack_flush(void *_completed)
{
}

static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	spin_lock(&kvm->requests_lock);
	me = smp_processor_id();
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(req, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpus != NULL && cpu != -1 && cpu != me)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	spin_unlock(&kvm->requests_lock);
	free_cpumask_var(cpus);
	return called;
}

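/*
 * ack_flush() is intentionally empty: the point of the IPIs sent by
 * smp_call_function_many() above is the interrupt itself, which kicks
 * the target cpus out of guest mode so they notice the request bit set
 * on their vcpu; passing wait=1 makes the call synchronous, so the
 * request is guaranteed to have been observed before we return.
 */
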
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns. So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed. If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	kvm_set_spte_hva(kvm, address, pte);
	spin_unlock(&kvm->mmu_lock);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0;

	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, but both values are read by the kvm
	 * page fault under the mmu_lock spinlock, so we don't need to
	 * add an smp_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young;

	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	if (young)
		kvm_flush_remote_tlbs(kvm);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	kvm_arch_flush_shadow(kvm);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

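/*
 * Consumer side of the seq/count protocol above (sketch): the arch page
 * fault path snapshots mmu_notifier_seq before resolving a gfn, then,
 * under mmu_lock and before installing the spte, re-checks that the
 * sequence is unchanged and that mmu_notifier_count is zero; otherwise
 * it retries the fault.  That re-check is what makes the updates in the
 * notifier callbacks above sufficient for correctness.
 */
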
static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct page *page;
#endif

	if (IS_ERR(kvm))
		goto out;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
#endif

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		kfree(kvm);
		return ERR_PTR(-ENOMEM);
	}
	kvm->coalesced_mmio_ring =
			(struct kvm_coalesced_mmio_ring *)page_address(page);
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	{
		int err;
		kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
		err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
		if (err) {
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
			put_page(page);
#endif
			kfree(kvm);
			return ERR_PTR(err);
		}
	}
#endif

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);
	spin_lock_init(&kvm->requests_lock);
	kvm_io_bus_init(&kvm->pio_bus);
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	kvm_io_bus_init(&kvm->mmio_bus);
	init_rwsem(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	kvm_coalesced_mmio_init(kvm);
#endif
out:
	return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	int i;

	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
			vfree(free->lpage_info[i]);
			free->lpage_info[i] = NULL;
		}
	}

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	struct mm_struct *mm = kvm->mm;

	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	if (kvm->coalesced_mmio_ring != NULL)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
#endif
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);


static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

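/*
 * Reference counting (sketch): kvm_create_vm() starts users_count at 1
 * for the VM fd itself; each vcpu fd takes an extra reference via
 * kvm_get_kvm() in kvm_vm_ioctl_create_vcpu(), and kvm_destroy_vm()
 * runs only when the last kvm_put_kvm() drops the count to zero.
 */
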
/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot || !s->npages)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
#ifndef CONFIG_S390
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		/*
		 * hva_to_rmmap() serializes with the mmu_lock and to be
		 * safe it has to ignore memslots with !user_alloc &&
		 * !userspace_addr.
		 */
		if (user_alloc)
			new.userspace_addr = mem->userspace_addr;
		else
			new.userspace_addr = 0;
	}
	if (!npages)
		goto skip_lpage;

	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
		unsigned long ugfn;
		unsigned long j;
		int lpages;
		int level = i + 2;

		/* Avoid unused variable warning if no large pages */
		(void)level;

		if (new.lpage_info[i])
			continue;

		lpages = 1 + (base_gfn + npages - 1) /
			     KVM_PAGES_PER_HPAGE(level);
		lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);

		new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));

		if (!new.lpage_info[i])
			goto out_free;

		memset(new.lpage_info[i], 0,
		       lpages * sizeof(*new.lpage_info[i]));

		if (base_gfn % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][0].write_count = 1;
		if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE(level))
			new.lpage_info[i][lpages - 1].write_count = 1;
		ugfn = new.userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
		 * other, or if explicitly asked to, disable large page
		 * support for this slot
		 */
		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
		    !largepages_enabled)
			for (j = 0; j < lpages; ++j)
				new.lpage_info[i][j].write_count = 1;
	}

skip_lpage:

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
		if (old.npages)
			kvm_arch_flush_shadow(kvm);
	}
#else  /* not defined CONFIG_S390 */
	new.user_alloc = user_alloc;
	if (user_alloc)
		new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */

	if (!npages)
		kvm_arch_flush_shadow(kvm);

	spin_lock(&kvm->mmu_lock);
	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;
	spin_unlock(&kvm->mmu_lock);

	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
	if (r) {
		spin_lock(&kvm->mmu_lock);
		*memslot = old;
		spin_unlock(&kvm->mmu_lock);
		goto out_free;
	}

	kvm_free_physmem_slot(&old, npages ? &new : NULL);
	/* Slot deletion case: we have to update the current slot */
	spin_lock(&kvm->mmu_lock);
	if (!npages)
		*memslot = old;
	spin_unlock(&kvm->mmu_lock);
#ifdef CONFIG_DMAR
	/* map the pages in iommu page table */
	r = kvm_iommu_map_pages(kvm, base_gfn, npages);
	if (r)
		goto out;
#endif
	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;

}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

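/*
 * Userspace drives this via the KVM_SET_USER_MEMORY_REGION ioctl; a
 * minimal caller looks roughly like (sketch, hypothetical values):
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot		 = 0,
 *		.flags		 = KVM_MEM_LOG_DIRTY_PAGES,
 *		.guest_phys_addr = 0,
 *		.memory_size	 = size,	// page aligned
 *		.userspace_addr	 = (__u64)hva,	// page aligned
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 *
 * Passing memory_size == 0 deletes the slot (the npages == 0 path above).
 */
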
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	down_write(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	up_write(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct
				   kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

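/*
 * Note: kvm_disable_largepages() only affects slots registered
 * afterwards; __kvm_set_memory_region() consults largepages_enabled
 * when filling in lpage_info and, when it is false, sets write_count
 * for every large page so that large mappings are never created.
 */
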
int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return gfn_to_memslot_unaliased(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = gfn_to_memslot_unaliased(kvm, gfn);
	if (!slot)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

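/*
 * The translation helpers above chain as gfn -> memslot -> hva; the pfn
 * is then obtained from the hva by gfn_to_pfn() below, which pins the
 * page with get_user_pages_fast(), or computes the pfn directly for
 * VM_PFNMAP areas that have no struct page.
 */
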
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	pfn_t pfn;

	might_sleep();

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	npages = get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, addr);

		if (vma == NULL || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_PFNMAP)) {
			up_read(&current->mm->mmap_sem);
			get_page(bad_page);
			return page_to_pfn(bad_page);
		}

		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		up_read(&current->mm->mmap_sem);
		BUG_ON(!kvm_is_mmio_pfn(pfn));
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}

EXPORT_SYMBOL_GPL(gfn_to_pfn);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (!kvm_is_mmio_pfn(pfn))
		return pfn_to_page(pfn);

	WARN_ON(kvm_is_mmio_pfn(pfn));

	get_page(bad_page);
	return bad_page;
}

EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

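/*
 * Example (sketch, hypothetical struct): reading a guest-physical
 * structure that may straddle a page boundary is a single call; the
 * next_segment() loop above splits it into per-page copies:
 *
 *	struct hypothetical_desc desc;
 *	if (kvm_read_guest(kvm, gpa, &desc, sizeof(desc)) < 0)
 *		return -EFAULT;
 */
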
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = gfn_to_memslot_unaliased(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
			set_bit(rel_gfn, memslot->dirty_bitmap);
	}
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_arch_vcpu_runnable(vcpu)) {
			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
			break;
		}
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (signal_pending(current))
			break;

		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
}

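/*
 * The wait loop above exits for one of three reasons: the vcpu became
 * runnable (KVM_REQ_UNHALT is then set so the arch code knows to leave
 * the halted state), a timer interrupt is pending, or a signal is
 * pending for the current task.
 */
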
void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

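/*
 * Layout of the vcpu mmap area served by kvm_vcpu_fault() above:
 * page 0 is the kvm_run structure; on x86, KVM_PIO_PAGE_OFFSET maps the
 * pio_data page; with coalesced MMIO enabled,
 * KVM_COALESCED_MMIO_PAGE_OFFSET maps the coalesced mmio ring.
 */
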
static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
	int r;
	struct kvm_vcpu *vcpu, *v;

	vcpu = kvm_arch_vcpu_create(kvm, id);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
		r = -EINVAL;
		goto vcpu_destroy;
	}

	kvm_for_each_vcpu(r, v, kvm)
		if (v->vcpu_id == id) {
			r = -EEXIST;
			goto vcpu_destroy;
		}

	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0) {
		kvm_put_kvm(kvm);
		goto vcpu_destroy;
	}

	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
	smp_wmb();
	atomic_inc(&kvm->online_vcpus);

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	if (kvm->bsp_vcpu_id == id)
		kvm->bsp_vcpu = vcpu;
#endif
	mutex_unlock(&kvm->lock);
	return r;

vcpu_destroy:
	mutex_unlock(&kvm->lock);
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

#ifdef __KVM_HAVE_MSIX
static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
				    struct kvm_assigned_msix_nr *entry_nr)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *adev;

	mutex_lock(&kvm->lock);

	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      entry_nr->assigned_dev_id);
	if (!adev) {
		r = -EINVAL;
		goto msix_nr_out;
	}

	if (adev->entries_nr == 0) {
		adev->entries_nr = entry_nr->entry_nr;
		if (adev->entries_nr == 0 ||
		    adev->entries_nr >= KVM_MAX_MSIX_PER_DEV) {
			r = -EINVAL;
			goto msix_nr_out;
		}

		adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) *
						entry_nr->entry_nr,
						GFP_KERNEL);
		if (!adev->host_msix_entries) {
			r = -ENOMEM;
			goto msix_nr_out;
		}
		adev->guest_msix_entries = kzalloc(
				sizeof(struct kvm_guest_msix_entry) *
				entry_nr->entry_nr, GFP_KERNEL);
		if (!adev->guest_msix_entries) {
			kfree(adev->host_msix_entries);
			r = -ENOMEM;
			goto msix_nr_out;
		}
	} else /* Not allowed to set the MSI-X entry count twice */
		r = -EINVAL;
msix_nr_out:
	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
				       struct kvm_assigned_msix_entry *entry)
{
	int r = 0, i;
	struct kvm_assigned_dev_kernel *adev;

	mutex_lock(&kvm->lock);

	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      entry->assigned_dev_id);

	if (!adev) {
		r = -EINVAL;
		goto msix_entry_out;
	}

	for (i = 0; i < adev->entries_nr; i++)
		if (adev->guest_msix_entries[i].vector == 0 ||
		    adev->guest_msix_entries[i].entry == entry->entry) {
			adev->guest_msix_entries[i].entry = entry->entry;
			adev->guest_msix_entries[i].vector = entry->gsi;
			adev->host_msix_entries[i].entry = entry->entry;
			break;
		}
	if (i == adev->entries_nr) {
		r = -ENOSPC;
		goto msix_entry_out;
	}

msix_entry_out:
	mutex_unlock(&kvm->lock);

	return r;
}
#endif

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		if (r)
			goto out_free2;
		r = 0;
out_free2:
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		/* pass p, not &sigset: a NULL argp must clear the mask */
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}

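/*
 * Usage sketch (hypothetical): the register ioctls above are plain
 * copy-in/copy-out transports around the arch callbacks, so a typical
 * read-modify-write from userspace is just:
 *
 *	struct kvm_regs regs;
 *	ioctl(vcpu_fd, KVM_GET_REGS, &regs);
 *	regs.rip = entry_point;		(x86-64 field shown)
 *	ioctl(vcpu_fd, KVM_SET_REGS, &regs);
 */
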
static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
#ifdef KVM_CAP_DEVICE_ASSIGNMENT
	case KVM_ASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_IRQ: {
		r = -EOPNOTSUPP;
		break;
	}
#ifdef KVM_CAP_ASSIGN_DEV_IRQ
	case KVM_ASSIGN_DEV_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
	case KVM_DEASSIGN_DEV_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
#endif
#endif
#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
	case KVM_DEASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
#endif
#ifdef KVM_CAP_IRQ_ROUTING
	case KVM_SET_GSI_ROUTING: {
		struct kvm_irq_routing routing;
		struct kvm_irq_routing __user *urouting;
		struct kvm_irq_routing_entry *entries;

		r = -EFAULT;
		if (copy_from_user(&routing, argp, sizeof(routing)))
			goto out;
		r = -EINVAL;
		if (routing.nr >= KVM_MAX_IRQ_ROUTES)
			goto out;
		if (routing.flags)
			goto out;
		r = -ENOMEM;
		entries = vmalloc(routing.nr * sizeof(*entries));
		if (!entries)
			goto out;
		r = -EFAULT;
		urouting = argp;
		if (copy_from_user(entries, urouting->entries,
				   routing.nr * sizeof(*entries)))
			goto out_free_irq_routing;
		r = kvm_set_irq_routing(kvm, entries, routing.nr,
					routing.flags);
out_free_irq_routing:
		vfree(entries);
		break;
	}
#endif /* KVM_CAP_IRQ_ROUTING */
#ifdef __KVM_HAVE_MSIX
	case KVM_ASSIGN_SET_MSIX_NR: {
		struct kvm_assigned_msix_nr entry_nr;
		r = -EFAULT;
		if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
			goto out;
		r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_SET_MSIX_ENTRY: {
		struct kvm_assigned_msix_entry entry;
		r = -EFAULT;
		if (copy_from_user(&entry, argp, sizeof entry))
			goto out;
		r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
		if (r)
			goto out;
		break;
	}
#endif
	case KVM_IRQFD: {
		struct kvm_irqfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
		break;
	}
	case KVM_IOEVENTFD: {
		struct kvm_ioeventfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_ioeventfd(kvm, &data);
		break;
	}
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_SET_BOOT_CPU_ID:
		r = 0;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) != 0)
			r = -EBUSY;
		else
			kvm->bsp_vcpu_id = arg;
		mutex_unlock(&kvm->lock);
		break;
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

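/*
 * Usage sketch (hypothetical): KVM_SET_USER_MEMORY_REGION backs a slot
 * of guest physical address space with ordinary anonymous memory from
 * the caller's address space; error handling is elided.
 *
 *	void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	struct kvm_userspace_memory_region region = {
 *		.slot = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size = size,
 *		.userspace_addr = (unsigned long)mem,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 */
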
static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	gfn_t gfn = vmf->pgoff;
	struct kvm *kvm = vma->vm_file->private_data;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return VM_FAULT_SIGBUS;

	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
				NULL);
	if (unlikely(npages != 1))
		return VM_FAULT_SIGBUS;

	vmf->page = page[0];
	return 0;
}

static const struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

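/*
 * Usage sketch (hypothetical): since the fault handler resolves
 * vmf->pgoff as a gfn, mmap()ing the VM fd gives userspace a direct
 * window onto guest physical memory, one page per guest frame:
 *
 *	long psz = sysconf(_SC_PAGESIZE);
 *	void *p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, vm_fd, gfn * psz);
 */
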
static struct file_operations kvm_vm_fops = {
	.release = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl = kvm_vm_ioctl,
	.mmap = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
	if (fd < 0)
		kvm_put_kvm(kvm);

	return fd;
}

static long kvm_dev_ioctl_check_extension_generic(long arg)
{
	switch (arg) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_CAP_SET_BOOT_CPU_ID:
#endif
		return 1;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	case KVM_CAP_IRQ_ROUTING:
		return KVM_MAX_IRQ_ROUTES;
#endif
	default:
		break;
	}
	return kvm_dev_ioctl_check_extension(arg);
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension_generic(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = -EOPNOTSUPP;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

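/*
 * Usage sketch (hypothetical): userspace sizes the per-vcpu shared
 * area with KVM_GET_VCPU_MMAP_SIZE (issued on the /dev/kvm fd) and
 * then mmap()s the vcpu fd to reach struct kvm_run and, on x86, the
 * pio data page behind it:
 *
 *	long sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 */
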
static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_set_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable(NULL);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}

asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting)
		/* spin while reset goes on */
		while (true)
			;
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * And Intel TXT requires VMX to be off on all CPUs when the
	 * system shuts down.
	 */
	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr,
		     int len, const void *val)
{
	int i;
	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len, void *val)
{
	int i;
	for (i = 0; i < bus->dev_count; i++)
		if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
			return 0;
	return -EOPNOTSUPP;
}

int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
			    struct kvm_io_device *dev)
{
	int ret;

	down_write(&kvm->slots_lock);
	ret = __kvm_io_bus_register_dev(bus, dev);
	up_write(&kvm->slots_lock);

	return ret;
}

/* An unlocked version. Caller must have write lock on slots_lock. */
int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			      struct kvm_io_device *dev)
{
	if (bus->dev_count > NR_IOBUS_DEVS-1)
		return -ENOSPC;

	bus->devs[bus->dev_count++] = dev;

	return 0;
}

void kvm_io_bus_unregister_dev(struct kvm *kvm,
			       struct kvm_io_bus *bus,
			       struct kvm_io_device *dev)
{
	down_write(&kvm->slots_lock);
	__kvm_io_bus_unregister_dev(bus, dev);
	up_write(&kvm->slots_lock);
}

/* An unlocked version. Caller must have write lock on slots_lock. */
void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
				 struct kvm_io_device *dev)
{
	int i;

	for (i = 0; i < bus->dev_count; i++)
		if (bus->devs[i] == dev) {
			bus->devs[i] = bus->devs[--bus->dev_count];
			break;
		}
}

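/*
 * Sketch of the registration side (my_dev/my_ops/my_write are
 * illustrative names, not existing kernel symbols): an in-kernel
 * device supplies a kvm_io_device_ops table whose handlers return 0
 * when they claim an access and -EOPNOTSUPP otherwise, which is
 * exactly what the bus walks above rely on.
 *
 *	static int my_write(struct kvm_io_device *this, gpa_t addr,
 *			    int len, const void *val)
 *	{
 *		if (!in_range(addr, len))
 *			return -EOPNOTSUPP;
 *		handle the access;
 *		return 0;
 *	}
 *
 *	static const struct kvm_io_device_ops my_ops = {
 *		.write = my_write,
 *	};
 *
 *	kvm_iodevice_init(&my_dev, &my_ops);
 *	kvm_io_bus_register_dev(kvm, &kvm->mmio_bus, &my_dev);
 */
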
static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);

	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static const struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;
pfn_t bad_pfn;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}

int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module)
{
	int r;
	int cpu;

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	on_each_cpu(hardware_enable, NULL, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_init_debug();

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
	on_each_cpu(hardware_disable, NULL, 1);
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

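/*
 * Usage sketch: an architecture module brings all of the above up from
 * its module_init; on x86/VMX, for example, that is roughly
 *
 *	r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
 */
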
void kvm_exit(void)
{
	tracepoint_synchronize_unregister();
	kvm_exit_debug();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	free_cpumask_var(cpus_hardware_enabled);
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);