// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 */
#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
#include <linux/lockdep.h>
#include <linux/kthread.h>

#include <asm/processor.h>
#include <asm/ioctl.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>

#include "coalesced_mmio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>
/* Worst case buffer size needed for holding an integer. */
#define ITOA_MAX_LEN 12

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/* Architectures should define their poll value according to the halt latency */
unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns);

/* Default doubles per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow);

/* The start value to grow halt_poll_ns from */
unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
module_param(halt_poll_ns_grow_start, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);

/* Default resets per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_shrink;
module_param(halt_poll_ns_shrink, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
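
/*
 * Together these parameters control the adaptive halt-polling window:
 * grow_halt_poll_ns() multiplies the per-vcpu value by halt_poll_ns_grow
 * (starting from halt_poll_ns_grow_start), and shrink_halt_poll_ns()
 * divides it by halt_poll_ns_shrink, or resets it to 0 when
 * halt_poll_ns_shrink is 0.  See kvm_vcpu_block() for how the window is
 * adjusted after each halt.
 */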
/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_MUTEX(kvm_lock);
static DEFINE_RAW_SPINLOCK(kvm_count_lock);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;
EXPORT_SYMBOL_GPL(kvm_debugfs_dir);

static int kvm_debugfs_num_entries;
static const struct file_operations *stat_fops_per_vm[];
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#define KVM_COMPAT(c)	.compat_ioctl	= (c)
#else
static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
				unsigned long arg) { return -EINVAL; }
#define KVM_COMPAT(c)	.compat_ioctl	= kvm_no_compat_ioctl
#endif

static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);

__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);

static bool largepages_enabled = true;
#define KVM_EVENT_CREATE_VM 0
#define KVM_EVENT_DESTROY_VM 1
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;
__weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
		unsigned long start, unsigned long end, bool blockable)

bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
	/*
	 * The metadata used by is_zone_device_page() to determine whether or
	 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
	 * the device has been pinned, e.g. by get_user_pages().  WARN if the
	 * page_count() is zero to help detect bad usage of this helper.
	 */
	if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn))))

	return is_zone_device_page(pfn_to_page(pfn));

bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
	/*
	 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
	 * perspective they are "normal" pages, albeit with slightly different
	 */
	return PageReserved(pfn_to_page(pfn)) &&
	       !kvm_is_zone_device_pfn(pfn);
/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
EXPORT_SYMBOL_GPL(vcpu_load);

void vcpu_put(struct kvm_vcpu *vcpu)
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
EXPORT_SYMBOL_GPL(vcpu_put);
/* TODO: merge with kvm_arch_vcpu_should_kick */
static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
	int mode = kvm_vcpu_exiting_guest_mode(vcpu);

	/*
	 * We need to wait for the VCPU to reenable interrupts and get out of
	 * READING_SHADOW_PAGE_TABLES mode.
	 */
	if (req & KVM_REQUEST_WAIT)
		return mode != OUTSIDE_GUEST_MODE;

	/*
	 * Need to kick a running VCPU, but otherwise there is nothing to do.
	 */
	return mode == IN_GUEST_MODE;

static void ack_flush(void *_completed)
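
/*
 * Kick (i.e. send an IPI to) the CPUs in @cpus; a NULL mask is treated as
 * all online CPUs, and an empty mask means there is nobody to kick.
 */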
static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
		cpus = cpu_online_mask;

	if (cpumask_empty(cpus))
		return false;

	smp_call_function_many(cpus, ack_flush, NULL, wait);
bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 unsigned long *vcpu_bitmap, cpumask_var_t tmp)
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))

		kvm_make_request(req, vcpu);

		if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))

		if (tmp != NULL && cpu != -1 && cpu != me &&
		    kvm_request_needs_ipi(vcpu, req))
			__cpumask_set_cpu(cpu, tmp);

	called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));

bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	called = kvm_make_vcpus_request_mask(kvm, req, NULL, cpus);

	free_cpumask_var(cpus);
#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
	/*
	 * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in
	 * kvm_make_all_cpus_request.
	 */
	long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);

	/*
	 * We want to publish modifications to the page tables before reading
	 * mode. Pairs with a memory barrier in arch-specific code.
	 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
	 *   and smp_mb in walk_shadow_page_lockless_begin/end.
	 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
	 *
	 * There is already an smp_mb__after_atomic() before
	 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
	 */
	if (!kvm_arch_flush_remote_tlb(kvm)
	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);

void kvm_reload_remote_mmus(struct kvm *kvm)
	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
	mutex_init(&vcpu->mutex);
	init_swait_queue_head(&vcpu->wq);
	kvm_async_pf_vcpu_init(vcpu);
	INIT_LIST_HEAD(&vcpu->blocked_vcpu_list);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	vcpu->run = page_address(page);

	kvm_vcpu_set_in_spin_loop(vcpu, false);
	kvm_vcpu_set_dy_eligible(vcpu, false);
	vcpu->preempted = false;

	r = kvm_arch_vcpu_init(vcpu);

	free_page((unsigned long)vcpu->run);
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
	/*
	 * no need for rcu_read_lock as VCPU_RUN is the only place that
	 * will change the vcpu->pid pointer and on uninit all file
	 * descriptors are already gone.
	 */
	put_pid(rcu_dereference_protected(vcpu->pid, 1));
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
	return container_of(mn, struct kvm, mmu_notifier);

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;

	if (kvm_set_spte_hva(kvm, address, pte))
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end);
	need_tlb_flush |= kvm->tlbs_dirty;
	/* we've to flush the tlb before the pages can be freed */
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);

	ret = kvm_arch_mmu_notifier_invalidate_range(kvm, range->start,
					mmu_notifier_range_blockable(range));

	srcu_read_unlock(&kvm->srcu, idx);

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, which is ensured by the smp_wmb above
	 * in conjunction with the smp_rmb in mmu_notifier_retry().
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	young = kvm_age_hva(kvm, start, end);
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
					struct mm_struct *mm,
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * Even though we do not flush TLB, this will still adversely
	 * affect performance on pre-Haswell Intel EPT, where there is
	 * no EPT Access Bit to clear so that we have to tear down EPT
	 * tables instead. If we find this unacceptable, we can always
	 * add a parameter to kvm_age_hva so that it effectively doesn't
	 * do anything on clear_young.
	 *
	 * Also note that currently we never issue secondary TLB flushes
	 * from clear_young, leaving this job up to the regular system
	 * cadence. If we find this inaccurate, we might come up with a
	 * more sophisticated heuristic later.
	 */
	young = kvm_age_hva(kvm, start, end);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	young = kvm_test_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_flush_shadow_all(kvm);
	srcu_read_unlock(&kvm->srcu, idx);

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.clear_young		= kvm_mmu_notifier_clear_young,
	.test_young		= kvm_mmu_notifier_test_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
static struct kvm_memslots *kvm_alloc_memslots(void)
	struct kvm_memslots *slots;

	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);

	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
		slots->id_to_index[i] = slots->memslots[i].id = i;

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
	if (!memslot->dirty_bitmap)

	kvfree(memslot->dirty_bitmap);
	memslot->dirty_bitmap = NULL;
/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			     struct kvm_memory_slot *dont)
	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		kvm_destroy_dirty_bitmap(free);

	kvm_arch_free_memslot(kvm, free, dont);

static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
	struct kvm_memory_slot *memslot;

	kvm_for_each_memslot(memslot, slots)
		kvm_free_memslot(kvm, memslot, NULL);
static void kvm_destroy_vm_debugfs(struct kvm *kvm)
	if (!kvm->debugfs_dentry)

	debugfs_remove_recursive(kvm->debugfs_dentry);

	if (kvm->debugfs_stat_data) {
		for (i = 0; i < kvm_debugfs_num_entries; i++)
			kfree(kvm->debugfs_stat_data[i]);
		kfree(kvm->debugfs_stat_data);

static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
	char dir_name[ITOA_MAX_LEN * 2];
	struct kvm_stat_data *stat_data;
	struct kvm_stats_debugfs_item *p;

	if (!debugfs_initialized())

	snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
	kvm->debugfs_dentry = debugfs_create_dir(dir_name, kvm_debugfs_dir);

	kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
					 sizeof(*kvm->debugfs_stat_data),
	if (!kvm->debugfs_stat_data)

	for (p = debugfs_entries; p->name; p++) {
		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);

		stat_data->kvm = kvm;
		stat_data->offset = p->offset;
		stat_data->mode = p->mode ? p->mode : 0644;
		kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
		debugfs_create_file(p->name, stat_data->mode, kvm->debugfs_dentry,
				    stat_data, stat_fops_per_vm[p->kind]);
/*
 * Called after the VM is otherwise initialized, but just before adding it to
 * the vm_list.
 */
int __weak kvm_arch_post_init_vm(struct kvm *kvm)

/*
 * Called just after removing the VM from the vm_list, but before doing any
 * other destruction.
 */
void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)

static struct kvm *kvm_create_vm(unsigned long type)
	struct kvm *kvm = kvm_arch_alloc_vm();

		return ERR_PTR(-ENOMEM);

	spin_lock_init(&kvm->mmu_lock);
	kvm->mm = current->mm;
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	refcount_set(&kvm->users_count, 1);
	INIT_LIST_HEAD(&kvm->devices);

	r = kvm_arch_init_vm(kvm, type);
		goto out_err_no_disable;

	r = hardware_enable_all();
		goto out_err_no_disable;

#ifdef CONFIG_HAVE_KVM_IRQFD
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);

	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		struct kvm_memslots *slots = kvm_alloc_memslots();

			goto out_err_no_srcu;
		/* Generations must be different for each address space. */
		slots->generation = i;
		rcu_assign_pointer(kvm->memslots[i], slots);

	if (init_srcu_struct(&kvm->srcu))
		goto out_err_no_srcu;
	if (init_srcu_struct(&kvm->irq_srcu))
		goto out_err_no_irq_srcu;
	for (i = 0; i < KVM_NR_BUSES; i++) {
		rcu_assign_pointer(kvm->buses[i],
			kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
			goto out_err_no_mmu_notifier;

	r = kvm_init_mmu_notifier(kvm);
		goto out_err_no_mmu_notifier;

	r = kvm_arch_post_init_vm(kvm);

	mutex_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	mutex_unlock(&kvm_lock);

	preempt_notifier_inc();

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	if (kvm->mmu_notifier.ops)
		mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
out_err_no_mmu_notifier:
	cleanup_srcu_struct(&kvm->irq_srcu);
	cleanup_srcu_struct(&kvm->srcu);
	hardware_disable_all();
	refcount_set(&kvm->users_count, 0);
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm_get_bus(kvm, i));
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
	kvm_arch_free_vm(kvm);
static void kvm_destroy_devices(struct kvm *kvm)
	struct kvm_device *dev, *tmp;

	/*
	 * We do not need to take the kvm->lock here, because nobody else
	 * has a reference to the struct kvm at this point and therefore
	 * cannot access the devices list anyhow.
	 */
	list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
		list_del(&dev->vm_node);
		dev->ops->destroy(dev);

static void kvm_destroy_vm(struct kvm *kvm)
	struct mm_struct *mm = kvm->mm;

	kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
	kvm_destroy_vm_debugfs(kvm);
	kvm_arch_sync_events(kvm);
	mutex_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	mutex_unlock(&kvm_lock);
	kvm_arch_pre_destroy_vm(kvm);

	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++) {
		struct kvm_io_bus *bus = kvm_get_bus(kvm, i);

			kvm_io_bus_destroy(bus);
		kvm->buses[i] = NULL;

	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);

	kvm_arch_flush_shadow_all(kvm);

	kvm_arch_destroy_vm(kvm);
	kvm_destroy_devices(kvm);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
	cleanup_srcu_struct(&kvm->irq_srcu);
	cleanup_srcu_struct(&kvm->srcu);
	kvm_arch_free_vm(kvm);
	preempt_notifier_dec();
	hardware_disable_all();
void kvm_get_kvm(struct kvm *kvm)
	refcount_inc(&kvm->users_count);
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
	if (refcount_dec_and_test(&kvm->users_count))
EXPORT_SYMBOL_GPL(kvm_put_kvm);

static int kvm_vm_release(struct inode *inode, struct file *filp)
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);
/*
 * Allocation size is twice as large as the actual dirty bitmap size.
 * See x86's kvm_vm_ioctl_get_dirty_log() why this is needed.
 */
static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);

	memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL_ACCOUNT);
	if (!memslot->dirty_bitmap)
/*
 * Insert memslot and re-sort memslots based on their GFN,
 * so binary search could be used to lookup GFN.
 * Sorting algorithm takes advantage of having initially
 * sorted array and known changed memslot position.
 */
static void update_memslots(struct kvm_memslots *slots,
			    struct kvm_memory_slot *new,
			    enum kvm_mr_change change)
	int i = slots->id_to_index[id];
	struct kvm_memory_slot *mslots = slots->memslots;

	WARN_ON(mslots[i].id != id);
		WARN_ON(mslots[i].npages || !new->npages);
		WARN_ON(new->npages || !mslots[i].npages);

	while (i < KVM_MEM_SLOTS_NUM - 1 &&
	       new->base_gfn <= mslots[i + 1].base_gfn) {
		if (!mslots[i + 1].npages)
		mslots[i] = mslots[i + 1];
		slots->id_to_index[mslots[i].id] = i;

	/*
	 * The ">=" is needed when creating a slot with base_gfn == 0,
	 * so that it moves before all those with base_gfn == npages == 0.
	 *
	 * On the other hand, if new->npages is zero, the above loop has
	 * already left i pointing to the beginning of the empty part of
	 * mslots, and the ">=" would move the hole backwards in this
	 * case---which is wrong.  So skip the loop when deleting a slot.
	 */
		       new->base_gfn >= mslots[i - 1].base_gfn) {
			mslots[i] = mslots[i - 1];
			slots->id_to_index[mslots[i].id] = i;

		WARN_ON_ONCE(i != slots->used_slots);

	slots->id_to_index[mslots[i].id] = i;
static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;

#ifdef __KVM_HAVE_READONLY_MEM
	valid_flags |= KVM_MEM_READONLY;

	if (mem->flags & ~valid_flags)
static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
		int as_id, struct kvm_memslots *slots)
	struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
	u64 gen = old_memslots->generation;

	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
	slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;

	rcu_assign_pointer(kvm->memslots[as_id], slots);
	synchronize_srcu_expedited(&kvm->srcu);

	/*
	 * Increment the new memslot generation a second time, dropping the
	 * update in-progress flag and incrementing the generation based on
	 * the number of address spaces.  This provides a unique and easily
	 * identifiable generation number while the memslots are in flux.
	 */
	gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;

	/*
	 * Generations must be unique even across address spaces.  We do not need
	 * a global counter for that, instead the generation space is evenly split
	 * across address spaces.  For example, with two address spaces, address
	 * space 0 will use generations 0, 2, 4, ... while address space 1 will
	 * use generations 1, 3, 5, ...
	 */
	gen += KVM_ADDRESS_SPACE_NUM;

	kvm_arch_memslots_updated(kvm, gen);

	slots->generation = gen;
/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->slots_lock for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem)
	unsigned long npages;
	struct kvm_memory_slot *slot;
	struct kvm_memory_slot old, new;
	struct kvm_memslots *slots = NULL, *old_memslots;
	enum kvm_mr_change change;

	r = check_memory_region_flags(mem);

	as_id = mem->slot >> 16;

	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
	/* We can read the guest memory with __xxx_user() later on. */
	if ((id < KVM_USER_MEM_SLOTS) &&
	    ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
	     !access_ok((void __user *)(unsigned long)mem->userspace_addr,
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)

	slot = id_to_memslot(__kvm_memslots(kvm, as_id), id);
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (npages > KVM_MEM_MAX_NR_PAGES)

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

			change = KVM_MR_CREATE;
		else { /* Modify an existing slot. */
			if ((mem->userspace_addr != old.userspace_addr) ||
			    (npages != old.npages) ||
			    ((new.flags ^ old.flags) & KVM_MEM_READONLY))

			if (base_gfn != old.base_gfn)
				change = KVM_MR_MOVE;
			else if (new.flags != old.flags)
				change = KVM_MR_FLAGS_ONLY;
			else { /* Nothing to change. */

			change = KVM_MR_DELETE;

	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
		/* Check for overlaps */
		kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
			if (!((base_gfn + npages <= slot->base_gfn) ||
			      (base_gfn >= slot->base_gfn + slot->npages)))

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	if (change == KVM_MR_CREATE) {
		new.userspace_addr = mem->userspace_addr;

		if (kvm_arch_create_memslot(kvm, &new, npages))

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		if (kvm_create_dirty_bitmap(&new) < 0)

	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
	memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots));

	if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
		slot = id_to_memslot(slots, id);
		slot->flags |= KVM_MEMSLOT_INVALID;

		old_memslots = install_new_memslots(kvm, as_id, slots);

		/* From this point no new shadow pages pointing to a deleted,
		 * or moved, memslot will be created.
		 *
		 * validation of sp->gfn happens in:
		 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
		 *	- kvm_is_visible_gfn (mmu_check_roots)
		 */
		kvm_arch_flush_shadow_memslot(kvm, slot);

		/*
		 * We can re-use the old_memslots from above, the only difference
		 * from the currently installed memslots is the invalid flag.  This
		 * will get overwritten by update_memslots anyway.
		 */
		slots = old_memslots;

	r = kvm_arch_prepare_memory_region(kvm, &new, mem, change);

	/* actual memory is freed via old in kvm_free_memslot below */
	if (change == KVM_MR_DELETE) {
		new.dirty_bitmap = NULL;
		memset(&new.arch, 0, sizeof(new.arch));

	update_memslots(slots, &new, change);
	old_memslots = install_new_memslots(kvm, as_id, slots);

	kvm_arch_commit_memory_region(kvm, mem, &old, &new, change);

	kvm_free_memslot(kvm, &old, &new);
	kvfree(old_memslots);

	kvm_free_memslot(kvm, &new, &old);
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem)
	mutex_lock(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem);
	mutex_unlock(&kvm->slots_lock);
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
					   struct kvm_userspace_memory_region *mem)
	if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)

	return kvm_set_memory_region(kvm, mem);
int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty)
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	unsigned long any = 0;

	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)

	slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);
	if (!memslot->dirty_bitmap)

	n = kvm_dirty_bitmap_bytes(memslot);

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
/**
 * kvm_get_dirty_log_protect - get a snapshot of dirty pages
 *	and reenable dirty page tracking for the corresponding pages.
 * @kvm:	pointer to kvm instance
 * @log:	slot id and address to which we copy the log
 * @flush:	true if TLB flush is needed by caller
 *
 * We need to keep it in mind that VCPU threads can write to the bitmap
 * concurrently. So, to avoid losing track of dirty pages we keep the
 * following order:
 *
 *    1. Take a snapshot of the bit and clear it if needed.
 *    2. Write protect the corresponding page.
 *    3. Copy the snapshot to the userspace.
 *    4. Upon return caller flushes TLB's if needed.
 *
 * Between 2 and 4, the guest may write to the page using the remaining TLB
 * entry.  This is not a problem because the page is reported dirty using
 * the snapshot taken before and step 4 ensures that writes done after
 * exiting to userspace will be logged for the next call.
 */
int kvm_get_dirty_log_protect(struct kvm *kvm,
			      struct kvm_dirty_log *log, bool *flush)
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	unsigned long *dirty_bitmap;
	unsigned long *dirty_bitmap_buffer;

	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)

	slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);

	dirty_bitmap = memslot->dirty_bitmap;

	n = kvm_dirty_bitmap_bytes(memslot);

	if (kvm->manual_dirty_log_protect) {
		/*
		 * Unlike kvm_get_dirty_log, we always return false in *flush,
		 * because no flush is needed until KVM_CLEAR_DIRTY_LOG.  There
		 * is some code duplication between this function and
		 * kvm_get_dirty_log, but hopefully all architectures
		 * transition to kvm_get_dirty_log_protect and kvm_get_dirty_log
		 * can be eliminated.
		 */
		dirty_bitmap_buffer = dirty_bitmap;

		dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
		memset(dirty_bitmap_buffer, 0, n);

		spin_lock(&kvm->mmu_lock);
		for (i = 0; i < n / sizeof(long); i++) {
			if (!dirty_bitmap[i])

			mask = xchg(&dirty_bitmap[i], 0);
			dirty_bitmap_buffer[i] = mask;

			offset = i * BITS_PER_LONG;
			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
		spin_unlock(&kvm->mmu_lock);

	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
EXPORT_SYMBOL_GPL(kvm_get_dirty_log_protect);
/**
 * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
 *	and reenable dirty page tracking for the corresponding pages.
 * @kvm:	pointer to kvm instance
 * @log:	slot id and address from which to fetch the bitmap of dirty pages
 * @flush:	true if TLB flush is needed by caller
 */
int kvm_clear_dirty_log_protect(struct kvm *kvm,
				struct kvm_clear_dirty_log *log, bool *flush)
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	unsigned long *dirty_bitmap;
	unsigned long *dirty_bitmap_buffer;

	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)

	if (log->first_page & 63)

	slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);

	dirty_bitmap = memslot->dirty_bitmap;

	n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;

	if (log->first_page > memslot->npages ||
	    log->num_pages > memslot->npages - log->first_page ||
	    (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))

	dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
	if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))

	spin_lock(&kvm->mmu_lock);
	for (offset = log->first_page, i = offset / BITS_PER_LONG,
	     n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
	     i++, offset += BITS_PER_LONG) {
		unsigned long mask = *dirty_bitmap_buffer++;
		atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];

		mask &= atomic_long_fetch_andnot(mask, p);

		/*
		 * mask contains the bits that really have been cleared.  This
		 * never includes any bits beyond the length of the memslot (if
		 * the length is not aligned to 64 pages), therefore it is not
		 * a problem if userspace sets them in log->dirty_bitmap.
		 */
			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
	spin_unlock(&kvm->mmu_lock);
EXPORT_SYMBOL_GPL(kvm_clear_dirty_log_protect);
bool kvm_largepages_enabled(void)
	return largepages_enabled;

void kvm_disable_largepages(void)
	largepages_enabled = false;
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
EXPORT_SYMBOL_GPL(gfn_to_memslot);

struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
	return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);

bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);

	if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS ||
	    memslot->flags & KVM_MEMSLOT_INVALID)
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
	struct vm_area_struct *vma;
	unsigned long addr, size;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);

	size = vma_kernel_pagesize(vma);

	up_read(&current->mm->mmap_sem);
static bool memslot_is_readonly(struct kvm_memory_slot *slot)
	return slot->flags & KVM_MEM_READONLY;

static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				       gfn_t *nr_pages, bool write)
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return KVM_HVA_ERR_BAD;

	if (memslot_is_readonly(slot) && write)
		return KVM_HVA_ERR_RO_BAD;

		*nr_pages = slot->npages - (gfn - slot->base_gfn);

	return __gfn_to_hva_memslot(slot, gfn);

static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
	return __gfn_to_hva_many(slot, gfn, nr_pages, true);

unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
	return gfn_to_hva_many(slot, gfn, NULL);
EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
EXPORT_SYMBOL_GPL(gfn_to_hva);

unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
	return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
/*
 * Return the hva of a @gfn and the R/W attribute if possible.
 *
 * @slot: the kvm_memory_slot which contains @gfn
 * @gfn: the gfn to be translated
 * @writable: used to return the read/write attribute of the @slot if the hva
 * is valid and @writable is not NULL
 */
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
				      gfn_t gfn, bool *writable)
	unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);

	if (!kvm_is_error_hva(hva) && writable)
		*writable = !memslot_is_readonly(slot);

unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return gfn_to_hva_memslot_prot(slot, gfn, writable);

unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return gfn_to_hva_memslot_prot(slot, gfn, writable);

static inline int check_user_page_hwpoison(unsigned long addr)
	int rc, flags = FOLL_HWPOISON | FOLL_WRITE;

	rc = get_user_pages(addr, 1, flags, NULL, NULL);
	return rc == -EHWPOISON;
/*
 * The fast path to get the writable pfn which will be stored in @pfn,
 * true indicates success, otherwise false is returned.  It's also the
 * only part that runs if we can be in atomic context.
 */
static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
			    bool *writable, kvm_pfn_t *pfn)
	struct page *page[1];

	/*
	 * Fast pin a writable pfn only if it is a write fault request
	 * or the caller allows to map a writable pfn for a read fault
	 */
	if (!(write_fault || writable))

	npages = __get_user_pages_fast(addr, 1, 1, page);
		*pfn = page_to_pfn(page[0]);

/*
 * The slow path to get the pfn of the specified host virtual address,
 * 1 indicates success, -errno is returned if error is detected.
 */
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
			   bool *writable, kvm_pfn_t *pfn)
	unsigned int flags = FOLL_HWPOISON;

		*writable = write_fault;

		flags |= FOLL_WRITE;
		flags |= FOLL_NOWAIT;

	npages = get_user_pages_unlocked(addr, 1, &page, flags);

	/* map read fault as writable if possible */
	if (unlikely(!write_fault) && writable) {
		if (__get_user_pages_fast(addr, 1, 1, &wpage) == 1) {
	*pfn = page_to_pfn(page);

static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
	if (unlikely(!(vma->vm_flags & VM_READ)))

	if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))

static int hva_to_pfn_remapped(struct vm_area_struct *vma,
			       unsigned long addr, bool *async,
			       bool write_fault, bool *writable,
	r = follow_pfn(vma, addr, &pfn);
		/*
		 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
		 * not call the fault handler, so do it here.
		 */
		bool unlocked = false;
		r = fixup_user_fault(current, current->mm, addr,
				     (write_fault ? FAULT_FLAG_WRITE : 0),

		r = follow_pfn(vma, addr, &pfn);

	/*
	 * Get a reference here because callers of *hva_to_pfn* and
	 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
	 * returned pfn.  This is only needed if the VMA has VM_MIXEDMAP
	 * set, but the kvm_get_pfn/kvm_release_pfn_clean pair will
	 * simply do nothing for reserved pfns.
	 *
	 * Whoever called remap_pfn_range is also going to call e.g.
	 * unmap_mapping_range before the underlying pages are freed,
	 * causing a call to our MMU notifier.
	 */
/*
 * Pin guest page in memory and return its pfn.
 * @addr: host virtual address which maps memory to the guest
 * @atomic: whether this function can sleep
 * @async: whether this function need to wait IO complete if the
 *	   host page is not in the memory
 * @write_fault: whether we should get a writable host page
 * @writable: whether it allows to map a writable host page for !@write_fault
 *
 * The function will map a writable host page for these two cases:
 * 1): @write_fault = true
 * 2): @write_fault = false && @writable, @writable will tell the caller
 *     whether the mapping is writable.
 */
static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
			    bool write_fault, bool *writable)
	struct vm_area_struct *vma;

	/* we can do it either atomically or asynchronously, not both */
	BUG_ON(atomic && async);

	if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))

		return KVM_PFN_ERR_FAULT;

	npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);

	down_read(&current->mm->mmap_sem);
	if (npages == -EHWPOISON ||
	    (!async && check_user_page_hwpoison(addr))) {
		pfn = KVM_PFN_ERR_HWPOISON;

	vma = find_vma_intersection(current->mm, addr, addr + 1);

		pfn = KVM_PFN_ERR_FAULT;
	else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
		r = hva_to_pfn_remapped(vma, addr, async, write_fault, writable, &pfn);
			pfn = KVM_PFN_ERR_FAULT;
		if (async && vma_is_valid(vma, write_fault))
		pfn = KVM_PFN_ERR_FAULT;

	up_read(&current->mm->mmap_sem);
kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
			       bool atomic, bool *async, bool write_fault,
	unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);

	if (addr == KVM_HVA_ERR_RO_BAD) {
		return KVM_PFN_ERR_RO_FAULT;

	if (kvm_is_error_hva(addr)) {
		return KVM_PFN_NOSLOT;

	/* Do not map writable pfn in the readonly memslot. */
	if (writable && memslot_is_readonly(slot)) {

	return hva_to_pfn(addr, atomic, async, write_fault,
EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);

kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
	return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
				    write_fault, writable);
EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);

kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
	return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);

kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
	return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);

kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
	return gfn_to_pfn_memslot_atomic(gfn_to_memslot(kvm, gfn), gfn);
EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);

kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
	return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);

kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
	return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
EXPORT_SYMBOL_GPL(gfn_to_pfn);

kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
	return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);
int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages)
	addr = gfn_to_hva_many(slot, gfn, &entry);
	if (kvm_is_error_hva(addr))

	if (entry < nr_pages)

	return __get_user_pages_fast(addr, nr_pages, 1, pages);
EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);

static struct page *kvm_pfn_to_page(kvm_pfn_t pfn)
	if (is_error_noslot_pfn(pfn))
		return KVM_ERR_PTR_BAD_PAGE;

	if (kvm_is_reserved_pfn(pfn)) {
		return KVM_ERR_PTR_BAD_PAGE;

	return pfn_to_page(pfn);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
	pfn = gfn_to_pfn(kvm, gfn);

	return kvm_pfn_to_page(pfn);
EXPORT_SYMBOL_GPL(gfn_to_page);
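
/*
 * __kvm_map_gfn() translates a gfn into a host mapping described by a
 * struct kvm_host_map: struct-page backed memory is recorded via its page,
 * while (with CONFIG_HAS_IOMEM) PFNs without a struct page are mapped with
 * memremap().  __kvm_unmap_gfn() releases the pfn, marking it dirty first
 * when requested.
 */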
static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
			 struct kvm_host_map *map)
	struct page *page = KVM_UNMAPPED_PAGE;
	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);

	pfn = gfn_to_pfn_memslot(slot, gfn);
	if (is_error_noslot_pfn(pfn))

	if (pfn_valid(pfn)) {
		page = pfn_to_page(pfn);
#ifdef CONFIG_HAS_IOMEM
		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);

int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map);
EXPORT_SYMBOL_GPL(kvm_map_gfn);

int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
	return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map);
EXPORT_SYMBOL_GPL(kvm_vcpu_map);

static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
			    struct kvm_host_map *map, bool dirty)
	if (map->page != KVM_UNMAPPED_PAGE)
#ifdef CONFIG_HAS_IOMEM

		mark_page_dirty_in_slot(memslot, map->gfn);
		kvm_release_pfn_dirty(map->pfn);
		kvm_release_pfn_clean(map->pfn);

int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
	__kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map, dirty);
EXPORT_SYMBOL_GPL(kvm_unmap_gfn);

void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
	__kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, dirty);
EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);

struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
	pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);

	return kvm_pfn_to_page(pfn);
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page);
void kvm_release_page_clean(struct page *page)
	WARN_ON(is_error_page(page));

	kvm_release_pfn_clean(page_to_pfn(page));
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(kvm_pfn_t pfn)
	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
		put_page(pfn_to_page(pfn));
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
	WARN_ON(is_error_page(page));

	kvm_release_pfn_dirty(page_to_pfn(page));
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(kvm_pfn_t pfn)
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_pfn_dirty(kvm_pfn_t pfn)
	if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(kvm_pfn_t pfn)
	if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(kvm_pfn_t pfn)
	if (!kvm_is_reserved_pfn(pfn))
		get_page(pfn_to_page(pfn));
EXPORT_SYMBOL_GPL(kvm_get_pfn);
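
/*
 * Return how many bytes of @len fit in the page starting at @offset, i.e.
 * the size of the next per-page segment of a guest memory access.
 */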
static int next_segment(unsigned long len, int offset)
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;

static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
				 void *data, int offset, int len)
	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
	if (kvm_is_error_hva(addr))
	r = __copy_from_user(data, (void __user *)addr + offset, len);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return __kvm_read_guest_page(slot, gfn, data, offset, len);
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
			     int offset, int len)
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return __kvm_read_guest_page(slot, gfn, data, offset, len);
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
				   void *data, int offset, unsigned long len)
	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
	if (kvm_is_error_hva(addr))
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
	int offset = offset_in_page(gpa);

	return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
EXPORT_SYMBOL_GPL(kvm_read_guest_atomic);

int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
			       void *data, unsigned long len)
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	int offset = offset_in_page(gpa);

	return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn,
				  const void *data, int offset, int len)
	addr = gfn_to_hva_memslot(memslot, gfn);
	if (kvm_is_error_hva(addr))
	r = __copy_to_user((void __user *)addr + offset, data, len);
	mark_page_dirty_in_slot(memslot, gfn);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
			 const void *data, int offset, int len)
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	return __kvm_write_guest_page(slot, gfn, data, offset, len);
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
			      const void *data, int offset, int len)
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

	return __kvm_write_guest_page(slot, gfn, data, offset, len);
EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
EXPORT_SYMBOL_GPL(kvm_write_guest);

int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
				       struct gfn_to_hva_cache *ghc,
				       gpa_t gpa, unsigned long len)
	int offset = offset_in_page(gpa);
	gfn_t start_gfn = gpa >> PAGE_SHIFT;
	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
	gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
	gfn_t nr_pages_avail;
	int r = start_gfn <= end_gfn ? 0 : -EINVAL;

	ghc->generation = slots->generation;
	ghc->hva = KVM_HVA_ERR_BAD;

	/*
	 * If the requested region crosses two memslots, we still
	 * verify that the entire region is valid here.
	 */
	while (!r && start_gfn <= end_gfn) {
		ghc->memslot = __gfn_to_memslot(slots, start_gfn);
		ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
		if (kvm_is_error_hva(ghc->hva))
		start_gfn += nr_pages_avail;

	/* Use the slow path for cross page reads and writes. */
	if (!r && nr_pages_needed == 1)
		ghc->memslot = NULL;

int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len)
	struct kvm_memslots *slots = kvm_memslots(kvm);
	return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
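
/*
 * The cached accessors below revalidate the gfn_to_hva_cache whenever the
 * memslot generation has changed, and fall back to the uncached
 * kvm_write_guest()/kvm_read_guest() path when the cached region spans
 * memslots (ghc->memslot == NULL).
 */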
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				  void *data, unsigned int offset,
	struct kvm_memslots *slots = kvm_memslots(kvm);
	gpa_t gpa = ghc->gpa + offset;

	BUG_ON(len + offset > ghc->len);

	if (slots->generation != ghc->generation)
		__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);

	if (unlikely(!ghc->memslot))
		return kvm_write_guest(kvm, gpa, data, len);

	if (kvm_is_error_hva(ghc->hva))

	r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
	mark_page_dirty_in_slot(ghc->memslot, gpa >> PAGE_SHIFT);
EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);

int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len)
	return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
EXPORT_SYMBOL_GPL(kvm_write_guest_cached);

int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len)
	struct kvm_memslots *slots = kvm_memslots(kvm);

	BUG_ON(len > ghc->len);

	if (slots->generation != ghc->generation)
		__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);

	if (unlikely(!ghc->memslot))
		return kvm_read_guest(kvm, ghc->gpa, data, len);

	if (kvm_is_error_hva(ghc->hva))

	r = __copy_from_user(data, (void __user *)ghc->hva, len);
EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));

	return kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
EXPORT_SYMBOL_GPL(kvm_clear_guest);
static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot,
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		set_bit_le(rel_gfn, memslot->dirty_bitmap);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
	struct kvm_memory_slot *memslot;

	memslot = gfn_to_memslot(kvm, gfn);
	mark_page_dirty_in_slot(memslot, gfn);
EXPORT_SYMBOL_GPL(mark_page_dirty);

void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
	struct kvm_memory_slot *memslot;

	memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	mark_page_dirty_in_slot(memslot, gfn);
EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
void kvm_sigset_activate(struct kvm_vcpu *vcpu)
	if (!vcpu->sigset_active)

	/*
	 * This does a lockless modification of ->real_blocked, which is fine
	 * because, only current can change ->real_blocked and all readers of
	 * ->real_blocked don't care as long ->real_blocked is always a subset
	 */
	sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);

void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
	if (!vcpu->sigset_active)

	sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
	sigemptyset(&current->real_blocked);
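
/*
 * Adaptive halt polling: grow_halt_poll_ns() scales the per-vcpu polling
 * window up by halt_poll_ns_grow, clamped between halt_poll_ns_grow_start
 * and the global halt_poll_ns ceiling; shrink_halt_poll_ns() scales it
 * back down (or resets it when halt_poll_ns_shrink is 0).
 */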
static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
	unsigned int old, val, grow, grow_start;

	old = val = vcpu->halt_poll_ns;
	grow_start = READ_ONCE(halt_poll_ns_grow_start);
	grow = READ_ONCE(halt_poll_ns_grow);

	if (val < grow_start)
	if (val > halt_poll_ns)

	vcpu->halt_poll_ns = val;

	trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);

static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
	unsigned int old, val, shrink;

	old = val = vcpu->halt_poll_ns;
	shrink = READ_ONCE(halt_poll_ns_shrink);

	vcpu->halt_poll_ns = val;
	trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
	int idx = srcu_read_lock(&vcpu->kvm->srcu);

	if (kvm_arch_vcpu_runnable(vcpu)) {
		kvm_make_request(KVM_REQ_UNHALT, vcpu);
	if (kvm_cpu_has_pending_timer(vcpu))
	if (signal_pending(current))

	srcu_read_unlock(&vcpu->kvm->srcu, idx);
/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	ktime_t start, cur;
	DECLARE_SWAITQUEUE(wait);
	bool waited = false;
	u64 block_ns;

	start = cur = ktime_get();
	if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) {
		ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);

		++vcpu->stat.halt_attempted_poll;
		do {
			/*
			 * This sets KVM_REQ_UNHALT if an interrupt
			 * arrives.
			 */
			if (kvm_vcpu_check_block(vcpu) < 0) {
				++vcpu->stat.halt_successful_poll;
				if (!vcpu_valid_wakeup(vcpu))
					++vcpu->stat.halt_poll_invalid;
				goto out;
			}
			cur = ktime_get();
		} while (single_task_running() && ktime_before(cur, stop));
	}

	kvm_arch_vcpu_blocking(vcpu);

	for (;;) {
		prepare_to_swait_exclusive(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_vcpu_check_block(vcpu) < 0)
			break;

		waited = true;
		schedule();
	}

	finish_swait(&vcpu->wq, &wait);
	cur = ktime_get();

	kvm_arch_vcpu_unblocking(vcpu);
out:
	block_ns = ktime_to_ns(cur) - ktime_to_ns(start);

	if (!vcpu_valid_wakeup(vcpu))
		shrink_halt_poll_ns(vcpu);
	else if (halt_poll_ns) {
		if (block_ns <= vcpu->halt_poll_ns)
			;
		/* we had a long block, shrink polling */
		else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns)
			shrink_halt_poll_ns(vcpu);
		/* we had a short halt and our poll time is too small */
		else if (vcpu->halt_poll_ns < halt_poll_ns &&
			block_ns < halt_poll_ns)
			grow_halt_poll_ns(vcpu);
	} else
		vcpu->halt_poll_ns = 0;

	trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu));
	kvm_arch_vcpu_block_finish(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_block);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
{
	struct swait_queue_head *wqp;

	wqp = kvm_arch_vcpu_wq(vcpu);
	if (swq_has_sleeper(wqp)) {
		swake_up_one(wqp);
		WRITE_ONCE(vcpu->ready, true);
		++vcpu->stat.halt_wakeup;
		return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);

#ifndef CONFIG_S390
/*
 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
 */
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;

	if (kvm_vcpu_wake_up(vcpu))
		return;

	me = get_cpu();
	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
		if (kvm_arch_vcpu_should_kick(vcpu))
			smp_send_reschedule(cpu);
	put_cpu();
}
EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
#endif /* !CONFIG_S390 */

int kvm_vcpu_yield_to(struct kvm_vcpu *target)
{
	struct pid *pid;
	struct task_struct *task = NULL;
	int ret = 0;

	rcu_read_lock();
	pid = rcu_dereference(target->pid);
	if (pid)
		task = get_pid_task(pid, PIDTYPE_PID);
	rcu_read_unlock();
	if (!task)
		return ret;
	ret = yield_to(task, 1);
	put_task_struct(task);

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
/*
 * Helper that checks whether a VCPU is eligible for directed yield.
 * Most eligible candidate to yield is decided by following heuristics:
 *
 *  (a) VCPU which has not done pl-exit or cpu relax intercepted recently
 *  (preempted lock holder), indicated by @in_spin_loop.
 *  Set at the beginning and cleared at the end of interception/PLE handler.
 *
 *  (b) VCPU which has done pl-exit/ cpu relax intercepted but did not get
 *  chance last time (mostly it has become eligible now since we have probably
 *  yielded to lockholder in last iteration. This is done by toggling
 *  @dy_eligible each time a VCPU checked for eligibility.)
 *
 *  Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding
 *  to preempted lock-holder could result in wrong VCPU selection and CPU
 *  burning. Giving priority for a potential lock-holder increases lock
 *  progress.
 *
 *  Since algorithm is based on heuristics, accessing another VCPU data without
 *  locking does not harm. It may result in trying to yield to same VCPU, fail
 *  and continue with next VCPU and so on.
 */
static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	bool eligible;

	eligible = !vcpu->spin_loop.in_spin_loop ||
		    vcpu->spin_loop.dy_eligible;

	if (vcpu->spin_loop.in_spin_loop)
		kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);

	return eligible;
#else
	return true;
#endif
}

/*
 * Unlike kvm_arch_vcpu_runnable, this function is called outside
 * a vcpu_load/vcpu_put pair.  However, for most architectures
 * kvm_arch_vcpu_runnable does not require vcpu_load.
 */
bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_runnable(vcpu);
}

static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
{
	if (kvm_arch_dy_runnable(vcpu))
		return true;

#ifdef CONFIG_KVM_ASYNC_PF
	if (!list_empty_careful(&vcpu->async_pf.done))
		return true;
#endif

	return false;
}
void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
{
	struct kvm *kvm = me->kvm;
	struct kvm_vcpu *vcpu;
	int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
	int yielded = 0;
	int try = 3;
	int pass;
	int i;

	kvm_vcpu_set_in_spin_loop(me, true);
	/*
	 * We boost the priority of a VCPU that is runnable but not
	 * currently running, because it got preempted by something
	 * else and called schedule in __vcpu_run.  Hopefully that
	 * VCPU is holding the lock that we need and will release it.
	 * We approximate round-robin by starting at the last boosted VCPU.
	 */
	for (pass = 0; pass < 2 && !yielded && try; pass++) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (!pass && i <= last_boosted_vcpu) {
				i = last_boosted_vcpu;
				continue;
			} else if (pass && i > last_boosted_vcpu)
				break;
			if (!READ_ONCE(vcpu->ready))
				continue;
			if (vcpu == me)
				continue;
			if (swait_active(&vcpu->wq) && !vcpu_dy_runnable(vcpu))
				continue;
			if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
				!kvm_arch_vcpu_in_kernel(vcpu))
				continue;
			if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
				continue;

			yielded = kvm_vcpu_yield_to(vcpu);
			if (yielded > 0) {
				kvm->last_boosted_vcpu = i;
				break;
			} else if (yielded < 0) {
				try--;
				if (!try)
					break;
			}
		}
	}
	kvm_vcpu_set_in_spin_loop(me, false);

	/* Ensure vcpu is not eligible during next spinloop */
	kvm_vcpu_set_dy_eligible(me, false);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef CONFIG_KVM_MMIO
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return kvm_arch_vcpu_fault(vcpu, vmf);
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	debugfs_remove_recursive(vcpu->debugfs_dentry);
	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
	.llseek		= noop_llseek,
	KVM_COMPAT(kvm_vcpu_compat_ioctl),
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	char name[8 + 1 + ITOA_MAX_LEN + 1];

	snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id);
	return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
}
static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
	char dir_name[ITOA_MAX_LEN * 2];

	if (!debugfs_initialized())
		return;

	snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
	vcpu->debugfs_dentry = debugfs_create_dir(dir_name,
						  vcpu->kvm->debugfs_dentry);

	kvm_arch_create_vcpu_debugfs(vcpu);
#endif
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
	int r;
	struct kvm_vcpu *vcpu;

	if (id >= KVM_MAX_VCPU_ID)
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus == KVM_MAX_VCPUS) {
		mutex_unlock(&kvm->lock);
		return -EINVAL;
	}

	kvm->created_vcpus++;
	mutex_unlock(&kvm->lock);

	vcpu = kvm_arch_vcpu_create(kvm, id);
	if (IS_ERR(vcpu)) {
		r = PTR_ERR(vcpu);
		goto vcpu_decrement;
	}

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		goto vcpu_destroy;

	kvm_create_vcpu_debugfs(vcpu);

	mutex_lock(&kvm->lock);
	if (kvm_get_vcpu_by_id(kvm, id)) {
		r = -EEXIST;
		goto unlock_vcpu_destroy;
	}

	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0) {
		kvm_put_kvm_no_destroy(kvm);
		goto unlock_vcpu_destroy;
	}

	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;

	/*
	 * Pairs with smp_rmb() in kvm_get_vcpu.  Write kvm->vcpus
	 * before kvm->online_vcpu's incremented value.
	 */
	smp_wmb();
	atomic_inc(&kvm->online_vcpus);

	mutex_unlock(&kvm->lock);
	kvm_arch_vcpu_postcreate(vcpu);
	return r;

unlock_vcpu_destroy:
	mutex_unlock(&kvm->lock);
	debugfs_remove_recursive(vcpu->debugfs_dentry);
vcpu_destroy:
	kvm_arch_vcpu_destroy(vcpu);
vcpu_decrement:
	mutex_lock(&kvm->lock);
	kvm->created_vcpus--;
	mutex_unlock(&kvm->lock);
	return r;
}
static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}
static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;

	if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
		return -EINVAL;

	/*
	 * Some architectures have vcpu ioctls that are asynchronous to vcpu
	 * execution; mutex_lock() would break them.
	 */
	r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
	if (r != -ENOIOCTLCMD)
		return r;

	if (mutex_lock_killable(&vcpu->mutex))
		return -EINTR;
	switch (ioctl) {
	case KVM_RUN: {
		struct pid *oldpid;
		r = -EINVAL;
		if (arg)
			goto out;
		oldpid = rcu_access_pointer(vcpu->pid);
		if (unlikely(oldpid != task_pid(current))) {
			/* The thread running this VCPU changed. */
			struct pid *newpid;

			r = kvm_arch_vcpu_run_pid_change(vcpu);
			if (r)
				break;

			newpid = get_task_pid(current, PIDTYPE_PID);
			rcu_assign_pointer(vcpu->pid, newpid);
			if (oldpid)
				synchronize_rcu();
			put_pid(oldpid);
		}
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
		break;
	}
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
		if (IS_ERR(kvm_regs)) {
			r = PTR_ERR(kvm_regs);
			goto out;
		}
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs),
				    GFP_KERNEL_ACCOUNT);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
		if (IS_ERR(kvm_sregs)) {
			r = PTR_ERR(kvm_sregs);
			kvm_sregs = NULL;
			goto out;
		}
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof(tr)))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof(tr)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof(dbg)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof(kvm_sigmask)))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof(sigset))
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof(sigset)))
				goto out;
			p = &sigset;
		}
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = memdup_user(argp, sizeof(*fpu));
		if (IS_ERR(fpu)) {
			r = PTR_ERR(fpu);
			fpu = NULL;
			goto out;
		}
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	mutex_unlock(&vcpu->mutex);
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *filp,
				  unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = compat_ptr(arg);
	int r;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;

	switch (ioctl) {
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset;

		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof(kvm_sigmask)))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof(compat_sigset_t))
				goto out;
			r = -EFAULT;
			if (get_compat_sigset(&sigset, (void *)sigmask_arg->sigset))
				goto out;
			r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
		} else
			r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
		break;
	}
	default:
		r = kvm_vcpu_ioctl(filp, ioctl, arg);
	}

out:
	return r;
}
#endif
static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct kvm_device *dev = filp->private_data;

	if (dev->ops->mmap)
		return dev->ops->mmap(dev, vma);

	return -ENODEV;
}

static int kvm_device_ioctl_attr(struct kvm_device *dev,
				 int (*accessor)(struct kvm_device *dev,
						 struct kvm_device_attr *attr),
				 unsigned long arg)
{
	struct kvm_device_attr attr;

	if (!accessor)
		return -EPERM;

	if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
		return -EFAULT;

	return accessor(dev, &attr);
}

static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
			     unsigned long arg)
{
	struct kvm_device *dev = filp->private_data;

	if (dev->kvm->mm != current->mm)
		return -EIO;

	switch (ioctl) {
	case KVM_SET_DEVICE_ATTR:
		return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
	case KVM_GET_DEVICE_ATTR:
		return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
	case KVM_HAS_DEVICE_ATTR:
		return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
	default:
		if (dev->ops->ioctl)
			return dev->ops->ioctl(dev, ioctl, arg);

		return -ENOTTY;
	}
}

static int kvm_device_release(struct inode *inode, struct file *filp)
{
	struct kvm_device *dev = filp->private_data;
	struct kvm *kvm = dev->kvm;

	if (dev->ops->release) {
		mutex_lock(&kvm->lock);
		list_del(&dev->vm_node);
		dev->ops->release(dev);
		mutex_unlock(&kvm->lock);
	}

	kvm_put_kvm(kvm);
	return 0;
}

static const struct file_operations kvm_device_fops = {
	.unlocked_ioctl = kvm_device_ioctl,
	.release = kvm_device_release,
	KVM_COMPAT(kvm_device_ioctl),
	.mmap = kvm_device_mmap,
};

struct kvm_device *kvm_device_from_filp(struct file *filp)
{
	if (filp->f_op != &kvm_device_fops)
		return NULL;

	return filp->private_data;
}

static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
#ifdef CONFIG_KVM_MPIC
	[KVM_DEV_TYPE_FSL_MPIC_20]	= &kvm_mpic_ops,
	[KVM_DEV_TYPE_FSL_MPIC_42]	= &kvm_mpic_ops,
#endif
};

int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type)
{
	if (type >= ARRAY_SIZE(kvm_device_ops_table))
		return -ENOSPC;

	if (kvm_device_ops_table[type] != NULL)
		return -EEXIST;

	kvm_device_ops_table[type] = ops;
	return 0;
}

void kvm_unregister_device_ops(u32 type)
{
	if (kvm_device_ops_table[type] != NULL)
		kvm_device_ops_table[type] = NULL;
}
static int kvm_ioctl_create_device(struct kvm *kvm,
				   struct kvm_create_device *cd)
{
	struct kvm_device_ops *ops = NULL;
	struct kvm_device *dev;
	bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
	int type;
	int ret;

	if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
		return -ENODEV;

	type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
	ops = kvm_device_ops_table[type];
	if (ops == NULL)
		return -ENODEV;

	if (test)
		return 0;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
	if (!dev)
		return -ENOMEM;

	dev->ops = ops;
	dev->kvm = kvm;

	mutex_lock(&kvm->lock);
	ret = ops->create(dev, type);
	if (ret < 0) {
		mutex_unlock(&kvm->lock);
		kfree(dev);
		return ret;
	}
	list_add(&dev->vm_node, &kvm->devices);
	mutex_unlock(&kvm->lock);

	if (ops->init)
		ops->init(dev);

	kvm_get_kvm(kvm);
	ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
	if (ret < 0) {
		kvm_put_kvm_no_destroy(kvm);
		mutex_lock(&kvm->lock);
		list_del(&dev->vm_node);
		mutex_unlock(&kvm->lock);
		ops->destroy(dev);
		return ret;
	}

	cd->fd = ret;
	return 0;
}
static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
{
	switch (arg) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
	case KVM_CAP_INTERNAL_ERROR_DATA:
#ifdef CONFIG_HAVE_KVM_MSI
	case KVM_CAP_SIGNAL_MSI:
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
	case KVM_CAP_IRQFD:
	case KVM_CAP_IRQFD_RESAMPLE:
#endif
	case KVM_CAP_IOEVENTFD_ANY_LENGTH:
	case KVM_CAP_CHECK_EXTENSION_VM:
	case KVM_CAP_ENABLE_CAP_VM:
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
	case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
#endif
		return 1;
#ifdef CONFIG_KVM_MMIO
	case KVM_CAP_COALESCED_MMIO:
		return KVM_COALESCED_MMIO_PAGE_OFFSET;
	case KVM_CAP_COALESCED_PIO:
		return 1;
#endif
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
	case KVM_CAP_IRQ_ROUTING:
		return KVM_MAX_IRQ_ROUTES;
#endif
#if KVM_ADDRESS_SPACE_NUM > 1
	case KVM_CAP_MULTI_ADDRESS_SPACE:
		return KVM_ADDRESS_SPACE_NUM;
#endif
	case KVM_CAP_NR_MEMSLOTS:
		return KVM_USER_MEM_SLOTS;
	default:
		break;
	}
	return kvm_vm_ioctl_check_extension(kvm, arg);
}
int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
						  struct kvm_enable_cap *cap)
{
	return -EINVAL;
}

static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
					   struct kvm_enable_cap *cap)
{
	switch (cap->cap) {
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
	case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
		if (cap->flags || (cap->args[0] & ~1))
			return -EINVAL;
		kvm->manual_dirty_log_protect = cap->args[0];
		return 0;
#endif
	default:
		return kvm_vm_ioctl_enable_cap(kvm, cap);
	}
}
static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		break;
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
		break;
	}
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof(kvm_userspace_mem)))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof(log)))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		break;
	}
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
	case KVM_CLEAR_DIRTY_LOG: {
		struct kvm_clear_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof(log)))
			goto out;
		r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
		break;
	}
#endif
#ifdef CONFIG_KVM_MMIO
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof(zone)))
			goto out;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;

		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof(zone)))
			goto out;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		break;
	}
#endif
	case KVM_IRQFD: {
		struct kvm_irqfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof(data)))
			goto out;
		r = kvm_irqfd(kvm, &data);
		break;
	}
	case KVM_IOEVENTFD: {
		struct kvm_ioeventfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof(data)))
			goto out;
		r = kvm_ioeventfd(kvm, &data);
		break;
	}
#ifdef CONFIG_HAVE_KVM_MSI
	case KVM_SIGNAL_MSI: {
		struct kvm_msi msi;

		r = -EFAULT;
		if (copy_from_user(&msi, argp, sizeof(msi)))
			goto out;
		r = kvm_send_userspace_msi(kvm, &msi);
		break;
	}
#endif
#ifdef __KVM_HAVE_IRQ_LINE
	case KVM_IRQ_LINE_STATUS:
	case KVM_IRQ_LINE: {
		struct kvm_irq_level irq_event;

		r = -EFAULT;
		if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
			goto out;

		r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
					  ioctl == KVM_IRQ_LINE_STATUS);
		if (r)
			goto out;

		r = -EFAULT;
		if (ioctl == KVM_IRQ_LINE_STATUS) {
			if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
				goto out;
		}

		r = 0;
		break;
	}
#endif
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
	case KVM_SET_GSI_ROUTING: {
		struct kvm_irq_routing routing;
		struct kvm_irq_routing __user *urouting;
		struct kvm_irq_routing_entry *entries = NULL;

		r = -EFAULT;
		if (copy_from_user(&routing, argp, sizeof(routing)))
			goto out;
		r = -EINVAL;
		if (!kvm_arch_can_set_irq_routing(kvm))
			goto out;
		if (routing.nr > KVM_MAX_IRQ_ROUTES)
			goto out;
		if (routing.flags)
			goto out;
		if (routing.nr) {
			r = -ENOMEM;
			entries = vmalloc(array_size(sizeof(*entries),
						     routing.nr));
			if (!entries)
				goto out;
			r = -EFAULT;
			urouting = argp;
			if (copy_from_user(entries, urouting->entries,
					   routing.nr * sizeof(*entries)))
				goto out_free_irq_routing;
		}
		r = kvm_set_irq_routing(kvm, entries, routing.nr,
					routing.flags);
out_free_irq_routing:
		vfree(entries);
		break;
	}
#endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
	case KVM_CREATE_DEVICE: {
		struct kvm_create_device cd;

		r = -EFAULT;
		if (copy_from_user(&cd, argp, sizeof(cd)))
			goto out;

		r = kvm_ioctl_create_device(kvm, &cd);
		if (r)
			goto out;

		r = -EFAULT;
		if (copy_to_user(argp, &cd, sizeof(cd)))
			goto out;

		r = 0;
		break;
	}
	case KVM_CHECK_EXTENSION:
		r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
		break;
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}
#ifdef CONFIG_KVM_COMPAT
struct compat_kvm_dirty_log {
	__u32 slot;
	__u32 padding1;
	union {
		compat_uptr_t dirty_bitmap; /* one bit per page */
		__u64 padding2;
	};
};

static long kvm_vm_compat_ioctl(struct file *filp,
				unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_GET_DIRTY_LOG: {
		struct compat_kvm_dirty_log compat_log;
		struct kvm_dirty_log log;

		if (copy_from_user(&compat_log, (void __user *)arg,
				   sizeof(compat_log)))
			return -EFAULT;
		log.slot	 = compat_log.slot;
		log.padding1	 = compat_log.padding1;
		log.padding2	 = compat_log.padding2;
		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);

		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		break;
	}
	default:
		r = kvm_vm_ioctl(filp, ioctl, arg);
	}
	return r;
}
#endif

static struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.llseek		= noop_llseek,
	KVM_COMPAT(kvm_vm_compat_ioctl),
};
static int kvm_dev_ioctl_create_vm(unsigned long type)
{
	int r;
	struct kvm *kvm;
	struct file *file;

	kvm = kvm_create_vm(type);
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
#ifdef CONFIG_KVM_MMIO
	r = kvm_coalesced_mmio_init(kvm);
	if (r < 0)
		goto put_kvm;
#endif
	r = get_unused_fd_flags(O_CLOEXEC);
	if (r < 0)
		goto put_kvm;

	file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
	if (IS_ERR(file)) {
		put_unused_fd(r);
		r = PTR_ERR(file);
		goto put_kvm;
	}

	/*
	 * Don't call kvm_put_kvm anymore at this point; file->f_op is
	 * already set, with ->release() being kvm_vm_release(). In error
	 * cases it will be called by the final fput(file) and will take
	 * care of doing kvm_put_kvm(kvm).
	 */
	if (kvm_create_vm_debugfs(kvm, r) < 0) {
		put_unused_fd(r);
		fput(file);
		return -ENOMEM;
	}
	kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);

	fd_install(r, file);
	return r;

put_kvm:
	kvm_put_kvm(kvm);
	return r;
}
static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = kvm_dev_ioctl_create_vm(arg);
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		if (arg)
			goto out;
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef CONFIG_KVM_MMIO
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = -EOPNOTSUPP;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}
static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.llseek		= noop_llseek,
	KVM_COMPAT(kvm_dev_ioctl),
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

static void hardware_enable_nolock(void *junk)
{
	int cpu = raw_smp_processor_id();
	int r;

	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;

	cpumask_set_cpu(cpu, cpus_hardware_enabled);

	r = kvm_arch_hardware_enable();

	if (r) {
		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
		atomic_inc(&hardware_enable_failed);
		pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu);
	}
}

static int kvm_starting_cpu(unsigned int cpu)
{
	raw_spin_lock(&kvm_count_lock);
	if (kvm_usage_count)
		hardware_enable_nolock(NULL);
	raw_spin_unlock(&kvm_count_lock);
	return 0;
}

static void hardware_disable_nolock(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable();
}

static int kvm_dying_cpu(unsigned int cpu)
{
	raw_spin_lock(&kvm_count_lock);
	if (kvm_usage_count)
		hardware_disable_nolock(NULL);
	raw_spin_unlock(&kvm_count_lock);
	return 0;
}

static void hardware_disable_all_nolock(void)
{
	BUG_ON(!kvm_usage_count);

	kvm_usage_count--;
	if (!kvm_usage_count)
		on_each_cpu(hardware_disable_nolock, NULL, 1);
}

static void hardware_disable_all(void)
{
	raw_spin_lock(&kvm_count_lock);
	hardware_disable_all_nolock();
	raw_spin_unlock(&kvm_count_lock);
}

static int hardware_enable_all(void)
{
	int r = 0;

	raw_spin_lock(&kvm_count_lock);

	kvm_usage_count++;
	if (kvm_usage_count == 1) {
		atomic_set(&hardware_enable_failed, 0);
		on_each_cpu(hardware_enable_nolock, NULL, 1);

		if (atomic_read(&hardware_enable_failed)) {
			hardware_disable_all_nolock();
			r = -EBUSY;
		}
	}

	raw_spin_unlock(&kvm_count_lock);

	return r;
}

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * And Intel TXT required VMX off for all cpu when system shutdown.
	 */
	pr_info("kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable_nolock, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};
static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->range[i].dev;

		kvm_iodevice_destructor(pos);
	}
	kfree(bus);
}

static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
				 const struct kvm_io_range *r2)
{
	gpa_t addr1 = r1->addr;
	gpa_t addr2 = r2->addr;

	if (addr1 < addr2)
		return -1;

	/* If r2->len == 0, match the exact address.  If r2->len != 0,
	 * accept any overlapping write.  Any order is acceptable for
	 * overlapping ranges, because kvm_io_bus_get_first_dev ensures
	 * we process all of them.
	 */
	if (r2->len) {
		addr1 += r1->len;
		addr2 += r2->len;
	}

	if (addr1 > addr2)
		return 1;

	return 0;
}

static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
{
	return kvm_io_bus_cmp(p1, p2);
}

static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
				    gpa_t addr, int len)
{
	struct kvm_io_range *range, key;
	int off;

	key = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
	};

	range = bsearch(&key, bus->range, bus->dev_count,
			sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
	if (range == NULL)
		return -ENOENT;

	off = range - bus->range;

	while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
		off--;

	return off;
}
static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
			      struct kvm_io_range *range, const void *val)
{
	int idx;

	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
	if (idx < 0)
		return -EOPNOTSUPP;

	while (idx < bus->dev_count &&
		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
		if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
					range->len, val))
			return idx;
		idx++;
	}

	return -EOPNOTSUPP;
}

/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val)
{
	struct kvm_io_bus *bus;
	struct kvm_io_range range;
	int r;

	range = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
	};

	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
	if (!bus)
		return -ENOMEM;
	r = __kvm_io_bus_write(vcpu, bus, &range, val);
	return r < 0 ? r : 0;
}
EXPORT_SYMBOL_GPL(kvm_io_bus_write);
/* kvm_io_bus_write_cookie - called under kvm->slots_lock */
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
			    gpa_t addr, int len, const void *val, long cookie)
{
	struct kvm_io_bus *bus;
	struct kvm_io_range range;

	range = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
	};

	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
	if (!bus)
		return -ENOMEM;

	/* First try the device referenced by cookie. */
	if ((cookie >= 0) && (cookie < bus->dev_count) &&
	    (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
		if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
					val))
			return cookie;

	/*
	 * cookie contained garbage; fall back to search and return the
	 * correct cookie value.
	 */
	return __kvm_io_bus_write(vcpu, bus, &range, val);
}

static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
			     struct kvm_io_range *range, void *val)
{
	int idx;

	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
	if (idx < 0)
		return -EOPNOTSUPP;

	while (idx < bus->dev_count &&
		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
		if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
				       range->len, val))
			return idx;
		idx++;
	}

	return -EOPNOTSUPP;
}

/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val)
{
	struct kvm_io_bus *bus;
	struct kvm_io_range range;
	int r;

	range = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
	};

	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
	if (!bus)
		return -ENOMEM;
	r = __kvm_io_bus_read(vcpu, bus, &range, val);
	return r < 0 ? r : 0;
}
/* Caller must hold slots_lock. */
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev)
{
	int i;
	struct kvm_io_bus *new_bus, *bus;
	struct kvm_io_range range;

	bus = kvm_get_bus(kvm, bus_idx);
	if (!bus)
		return -ENOMEM;

	/* exclude ioeventfd which is limited by maximum fd */
	if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
		return -ENOSPC;

	new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
			  GFP_KERNEL_ACCOUNT);
	if (!new_bus)
		return -ENOMEM;

	range = (struct kvm_io_range) {
		.addr = addr,
		.len = len,
		.dev = dev,
	};

	for (i = 0; i < bus->dev_count; i++)
		if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
			break;

	memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
	new_bus->dev_count++;
	new_bus->range[i] = range;
	memcpy(new_bus->range + i + 1, bus->range + i,
		(bus->dev_count - i) * sizeof(struct kvm_io_range));
	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);

	return 0;
}

/* Caller must hold slots_lock. */
void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			       struct kvm_io_device *dev)
{
	int i;
	struct kvm_io_bus *new_bus, *bus;

	bus = kvm_get_bus(kvm, bus_idx);
	if (!bus)
		return;

	for (i = 0; i < bus->dev_count; i++)
		if (bus->range[i].dev == dev) {
			break;
		}

	if (i == bus->dev_count)
		return;

	new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
			  GFP_KERNEL_ACCOUNT);
	if (!new_bus)  {
		pr_err("kvm: failed to shrink bus, removing it completely\n");
		goto broken;
	}

	memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
	new_bus->dev_count--;
	memcpy(new_bus->range + i, bus->range + i + 1,
	       (new_bus->dev_count - i) * sizeof(struct kvm_io_range));

broken:
	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
	synchronize_srcu_expedited(&kvm->srcu);
	kfree(bus);
	return;
}
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
					 gpa_t addr)
{
	struct kvm_io_bus *bus;
	int dev_idx, srcu_idx;
	struct kvm_io_device *iodev = NULL;

	srcu_idx = srcu_read_lock(&kvm->srcu);

	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
	if (!bus)
		goto out_unlock;

	dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
	if (dev_idx < 0)
		goto out_unlock;

	iodev = bus->range[dev_idx].dev;

out_unlock:
	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return iodev;
}
EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);
static int kvm_debugfs_open(struct inode *inode, struct file *file,
			    int (*get)(void *, u64 *), int (*set)(void *, u64),
			    const char *fmt)
{
	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
					  inode->i_private;

	/* The debugfs files are a reference to the kvm struct which
	 * is still valid when kvm_destroy_vm is called.
	 * To avoid the race between open and the removal of the debugfs
	 * directory we test against the users count.
	 */
	if (!refcount_inc_not_zero(&stat_data->kvm->users_count))
		return -ENOENT;

	if (simple_attr_open(inode, file, get,
			     stat_data->mode & S_IWUGO ? set : NULL,
			     fmt)) {
		kvm_put_kvm(stat_data->kvm);
		return -ENOMEM;
	}

	return 0;
}

static int kvm_debugfs_release(struct inode *inode, struct file *file)
{
	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
					  inode->i_private;

	simple_attr_release(inode, file);
	kvm_put_kvm(stat_data->kvm);

	return 0;
}

static int vm_stat_get_per_vm(void *data, u64 *val)
{
	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;

	*val = *(ulong *)((void *)stat_data->kvm + stat_data->offset);

	return 0;
}

static int vm_stat_clear_per_vm(void *data, u64 val)
{
	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;

	if (val)
		return -EINVAL;

	*(ulong *)((void *)stat_data->kvm + stat_data->offset) = 0;

	return 0;
}

static int vm_stat_get_per_vm_open(struct inode *inode, struct file *file)
{
	__simple_attr_check_format("%llu\n", 0ull);
	return kvm_debugfs_open(inode, file, vm_stat_get_per_vm,
				vm_stat_clear_per_vm, "%llu\n");
}

static const struct file_operations vm_stat_get_per_vm_fops = {
	.owner   = THIS_MODULE,
	.open    = vm_stat_get_per_vm_open,
	.release = kvm_debugfs_release,
	.read    = simple_attr_read,
	.write   = simple_attr_write,
	.llseek  = no_llseek,
};
static int vcpu_stat_get_per_vm(void *data, u64 *val)
{
	int i;
	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
	struct kvm_vcpu *vcpu;

	*val = 0;

	kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
		*val += *(u64 *)((void *)vcpu + stat_data->offset);

	return 0;
}

static int vcpu_stat_clear_per_vm(void *data, u64 val)
{
	int i;
	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
	struct kvm_vcpu *vcpu;

	if (val)
		return -EINVAL;

	kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
		*(u64 *)((void *)vcpu + stat_data->offset) = 0;

	return 0;
}

static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
{
	__simple_attr_check_format("%llu\n", 0ull);
	return kvm_debugfs_open(inode, file, vcpu_stat_get_per_vm,
				vcpu_stat_clear_per_vm, "%llu\n");
}

static const struct file_operations vcpu_stat_get_per_vm_fops = {
	.owner   = THIS_MODULE,
	.open    = vcpu_stat_get_per_vm_open,
	.release = kvm_debugfs_release,
	.read    = simple_attr_read,
	.write   = simple_attr_write,
	.llseek  = no_llseek,
};

static const struct file_operations *stat_fops_per_vm[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_get_per_vm_fops,
	[KVM_STAT_VM]   = &vm_stat_get_per_vm_fops,
};

static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_stat_data stat_tmp = {.offset = offset};
	u64 tmp_val;

	*val = 0;
	mutex_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		stat_tmp.kvm = kvm;
		vm_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
		*val += tmp_val;
	}
	mutex_unlock(&kvm_lock);
	return 0;
}

static int vm_stat_clear(void *_offset, u64 val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_stat_data stat_tmp = {.offset = offset};

	if (val)
		return -EINVAL;

	mutex_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		stat_tmp.kvm = kvm;
		vm_stat_clear_per_vm((void *)&stat_tmp, 0);
	}
	mutex_unlock(&kvm_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_stat_data stat_tmp = {.offset = offset};
	u64 tmp_val;

	*val = 0;
	mutex_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		stat_tmp.kvm = kvm;
		vcpu_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
		*val += tmp_val;
	}
	mutex_unlock(&kvm_lock);
	return 0;
}

static int vcpu_stat_clear(void *_offset, u64 val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_stat_data stat_tmp = {.offset = offset};

	if (val)
		return -EINVAL;

	mutex_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		stat_tmp.kvm = kvm;
		vcpu_stat_clear_per_vm((void *)&stat_tmp, 0);
	}
	mutex_unlock(&kvm_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
			"%llu\n");

static const struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
{
	struct kobj_uevent_env *env;
	unsigned long long created, active;

	if (!kvm_dev.this_device || !kvm)
		return;

	mutex_lock(&kvm_lock);
	if (type == KVM_EVENT_CREATE_VM) {
		kvm_createvm_count++;
		kvm_active_vms++;
	} else if (type == KVM_EVENT_DESTROY_VM) {
		kvm_active_vms--;
	}
	created = kvm_createvm_count;
	active = kvm_active_vms;
	mutex_unlock(&kvm_lock);

	env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
	if (!env)
		return;

	add_uevent_var(env, "CREATED=%llu", created);
	add_uevent_var(env, "COUNT=%llu", active);

	if (type == KVM_EVENT_CREATE_VM) {
		add_uevent_var(env, "EVENT=create");
		kvm->userspace_pid = task_pid_nr(current);
	} else if (type == KVM_EVENT_DESTROY_VM) {
		add_uevent_var(env, "EVENT=destroy");
	}
	add_uevent_var(env, "PID=%d", kvm->userspace_pid);

	if (!IS_ERR_OR_NULL(kvm->debugfs_dentry)) {
		char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);

		if (p) {
			tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
			if (!IS_ERR(tmp))
				add_uevent_var(env, "STATS_PATH=%s", tmp);
			kfree(p);
		}
	}
	/* no need for checks, since we are adding at most only 5 keys */
	env->envp[env->envp_idx++] = NULL;
	kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
	kfree(env);
}
static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);

	kvm_debugfs_num_entries = 0;
	for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) {
		int mode = p->mode ? p->mode : 0644;
		debugfs_create_file(p->name, mode, kvm_debugfs_dir,
				    (void *)(long)p->offset,
				    stat_fops[p->kind]);
	}
}

static int kvm_suspend(void)
{
	if (kvm_usage_count)
		hardware_disable_nolock(NULL);
	return 0;
}

static void kvm_resume(void)
{
	if (kvm_usage_count) {
#ifdef CONFIG_LOCKDEP
		WARN_ON(lockdep_is_held(&kvm_count_lock));
#endif
		hardware_enable_nolock(NULL);
	}
}

static struct syscore_ops kvm_syscore_ops = {
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	WRITE_ONCE(vcpu->preempted, false);
	WRITE_ONCE(vcpu->ready, false);

	kvm_arch_sched_in(vcpu, cpu);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	if (current->state == TASK_RUNNING) {
		WRITE_ONCE(vcpu->preempted, true);
		WRITE_ONCE(vcpu->ready, true);
	}
	kvm_arch_vcpu_put(vcpu);
}

static void check_processor_compat(void *rtn)
{
	*(int *)rtn = kvm_arch_check_processor_compat();
}
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	     struct module *module)
{
	int r;
	int cpu;

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	/*
	 * kvm_arch_init makes sure there's at most one caller
	 * for architectures that support multiple implementations,
	 * like intel and amd on x86.
	 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
	 * conflicts in case kvm is already setup for another implementation.
	 */
	r = kvm_irqfd_init();
	if (r)
		goto out_irqfd;

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu, check_processor_compat, &r, 1);
		if (r < 0)
			goto out_free_1;
	}

	r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting",
				      kvm_starting_cpu, kvm_dying_cpu);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	if (!vcpu_align)
		vcpu_align = __alignof__(struct kvm_vcpu);
	kvm_vcpu_cache =
		kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
					   SLAB_ACCOUNT,
					   offsetof(struct kvm_vcpu, arch),
					   sizeof_field(struct kvm_vcpu, arch),
					   NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_3;
	}

	r = kvm_async_pf_init();
	if (r)
		goto out_free;

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		pr_err("kvm: misc device register failed\n");
		goto out_unreg;
	}

	register_syscore_ops(&kvm_syscore_ops);

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_init_debug();

	r = kvm_vfio_ops_init();
	WARN_ON(r);

	return 0;

out_unreg:
	kvm_async_pf_deinit();
out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
out_free_2:
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	kvm_irqfd_exit();
out_irqfd:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);
void kvm_exit(void)
{
	debugfs_remove_recursive(kvm_debugfs_dir);
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	kvm_async_pf_deinit();
	unregister_syscore_ops(&kvm_syscore_ops);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
	on_each_cpu(hardware_disable_nolock, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	kvm_irqfd_exit();
	free_cpumask_var(cpus_hardware_enabled);
	kvm_vfio_ops_exit();
}
EXPORT_SYMBOL_GPL(kvm_exit);

struct kvm_vm_worker_thread_context {
	struct kvm *kvm;
	struct task_struct *parent;
	struct completion init_done;
	kvm_vm_thread_fn_t thread_fn;
	uintptr_t data;
	int err;
};
)
4449 * The init_context is allocated on the stack of the parent thread, so
4450 * we have to locally copy anything that is needed beyond initialization
4452 struct kvm_vm_worker_thread_context
*init_context
= context
;
4453 struct kvm
*kvm
= init_context
->kvm
;
4454 kvm_vm_thread_fn_t thread_fn
= init_context
->thread_fn
;
4455 uintptr_t data
= init_context
->data
;
4458 err
= kthread_park(current
);
4459 /* kthread_park(current) is never supposed to return an error */
4464 err
= cgroup_attach_task_all(init_context
->parent
, current
);
4466 kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
4471 set_user_nice(current
, task_nice(init_context
->parent
));
4474 init_context
->err
= err
;
4475 complete(&init_context
->init_done
);
4476 init_context
= NULL
;
4481 /* Wait to be woken up by the spawner before proceeding. */
4484 if (!kthread_should_stop())
4485 err
= thread_fn(kvm
, data
);
4490 int kvm_vm_create_worker_thread(struct kvm
*kvm
, kvm_vm_thread_fn_t thread_fn
,
4491 uintptr_t data
, const char *name
,
4492 struct task_struct
**thread_ptr
)
4494 struct kvm_vm_worker_thread_context init_context
= {};
4495 struct task_struct
*thread
;
4498 init_context
.kvm
= kvm
;
4499 init_context
.parent
= current
;
4500 init_context
.thread_fn
= thread_fn
;
4501 init_context
.data
= data
;
4502 init_completion(&init_context
.init_done
);
4504 thread
= kthread_run(kvm_vm_worker_thread
, &init_context
,
4505 "%s-%d", name
, task_pid_nr(current
));
4507 return PTR_ERR(thread
);
4509 /* kthread_run is never supposed to return NULL */
4510 WARN_ON(thread
== NULL
);
4512 wait_for_completion(&init_context
.init_done
);
4514 if (!init_context
.err
)
4515 *thread_ptr
= thread
;
4517 return init_context
.err
;