// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
#include <linux/io.h>
#include <linux/lockdep.h>
#include <linux/kthread.h>

#include <asm/processor.h>
#include <asm/ioctl.h>
#include <linux/uaccess.h>

#include "coalesced_mmio.h"
#include "async_pf.h"
#include "vfio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

/* Worst case buffer size needed for holding an integer. */
#define ITOA_MAX_LEN 12

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/* Architectures should define their poll value according to the halt latency */
unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns);

/* Default doubles per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow);

/* The start value to grow halt_poll_ns from */
unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
module_param(halt_poll_ns_grow_start, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);
/* Default resets per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_shrink;
module_param(halt_poll_ns_shrink, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);

/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_MUTEX(kvm_lock);
static DEFINE_RAW_SPINLOCK(kvm_count_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count;
static atomic_t hardware_enable_failed;

static struct kmem_cache *kvm_vcpu_cache;

static __read_mostly struct preempt_ops kvm_preempt_ops;
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);

struct dentry *kvm_debugfs_dir;
EXPORT_SYMBOL_GPL(kvm_debugfs_dir);

static int kvm_debugfs_num_entries;
static const struct file_operations stat_fops_per_vm;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#define KVM_COMPAT(c)	.compat_ioctl	= (c)
#else
/*
 * For architectures that don't implement a compat infrastructure,
 * adopt a double line of defense:
 * - Prevent a compat task from opening /dev/kvm
 * - If the open has been done by a 64bit task, and the KVM fd
 *   passed to a compat task, let the ioctls fail.
 */
static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
				unsigned long arg) { return -EINVAL; }

static int kvm_no_compat_open(struct inode *inode, struct file *file)
{
	return is_compat_task() ? -ENODEV : 0;
}
#define KVM_COMPAT(c)	.compat_ioctl	= kvm_no_compat_ioctl,	\
			.open		= kvm_no_compat_open
#endif
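/*
 * Illustrative usage sketch (the fops referenced here are defined further
 * down in this file): KVM_COMPAT() is expanded inside file_operations
 * initializers, e.g. a compat-capable build ends up with something like
 * ".compat_ioctl = kvm_vcpu_compat_ioctl", while other architectures get
 * the kvm_no_compat_open()/kvm_no_compat_ioctl() stubs above.
 */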
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);

__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);

#define KVM_EVENT_CREATE_VM 0
#define KVM_EVENT_DESTROY_VM 1
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;

__weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
		unsigned long start, unsigned long end, bool blockable)
{
	return 0;
}

bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
{
	/*
	 * The metadata used by is_zone_device_page() to determine whether or
	 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
	 * the device has been pinned, e.g. by get_user_pages().  WARN if the
	 * page_count() is zero to help detect bad usage of this helper.
	 */
	if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn))))
		return false;

	return is_zone_device_page(pfn_to_page(pfn));
}

bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
{
	/*
	 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
	 * perspective they are "normal" pages, albeit with slightly different
	 * usage rules.
	 */
	if (pfn_valid(pfn))
		return PageReserved(pfn_to_page(pfn)) &&
		       !is_zero_pfn(pfn) &&
		       !kvm_is_zone_device_pfn(pfn);

	return true;
}

bool kvm_is_transparent_hugepage(kvm_pfn_t pfn)
{
	struct page *page = pfn_to_page(pfn);

	if (!PageTransCompoundMap(page))
		return false;

	return is_transparent_hugepage(compound_head(page));
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu = get_cpu();

	__this_cpu_write(kvm_running_vcpu, vcpu);
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}
EXPORT_SYMBOL_GPL(vcpu_load);

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	__this_cpu_write(kvm_running_vcpu, NULL);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(vcpu_put);
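/*
 * Informal usage note: vcpu_load() and vcpu_put() are expected to be strictly
 * paired; between them the loaded vCPU is published in the per-CPU
 * kvm_running_vcpu variable declared above, so other code can query which
 * vCPU is currently active on this CPU.
 */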
/* TODO: merge with kvm_arch_vcpu_should_kick */
static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
{
	int mode = kvm_vcpu_exiting_guest_mode(vcpu);

	/*
	 * We need to wait for the VCPU to reenable interrupts and get out of
	 * READING_SHADOW_PAGE_TABLES mode.
	 */
	if (req & KVM_REQUEST_WAIT)
		return mode != OUTSIDE_GUEST_MODE;

	/*
	 * Need to kick a running VCPU, but otherwise there is nothing to do.
	 */
	return mode == IN_GUEST_MODE;
}

static void ack_flush(void *_completed)
{
}

static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
{
	if (unlikely(!cpus))
		cpus = cpu_online_mask;

	if (cpumask_empty(cpus))
		return false;

	smp_call_function_many(cpus, ack_flush, NULL, wait);
	return true;
}

bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 struct kvm_vcpu *except,
				 unsigned long *vcpu_bitmap, cpumask_var_t tmp)
{
	int i, cpu, me;
	struct kvm_vcpu *vcpu;
	bool called;

	me = get_cpu();

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if ((vcpu_bitmap && !test_bit(i, vcpu_bitmap)) ||
		    vcpu == except)
			continue;

		kvm_make_request(req, vcpu);
		cpu = vcpu->cpu;

		if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
			continue;

		if (tmp != NULL && cpu != -1 && cpu != me &&
		    kvm_request_needs_ipi(vcpu, req))
			__cpumask_set_cpu(cpu, tmp);
	}

	called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));
	put_cpu();

	return called;
}

bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
				      struct kvm_vcpu *except)
{
	cpumask_var_t cpus;
	bool called;

	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	called = kvm_make_vcpus_request_mask(kvm, req, except, NULL, cpus);

	free_cpumask_var(cpus);
	return called;
}

bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	return kvm_make_all_cpus_request_except(kvm, req, NULL);
}
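/*
 * Informal usage sketch: kvm_flush_remote_tlbs() below is a typical caller,
 * requesting a flush on every vCPU via
 * kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH).  Whether the resulting
 * IPI waits for the targets is governed by the KVM_REQUEST_WAIT bit carried
 * in the request value (see kvm_request_needs_ipi() and kvm_kick_many_cpus()
 * above).
 */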
#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	/*
	 * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in
	 * kvm_make_all_cpus_request.
	 */
	long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);

	/*
	 * We want to publish modifications to the page tables before reading
	 * mode. Pairs with a memory barrier in arch-specific code.
	 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
	 *   and smp_mb in walk_shadow_page_lockless_begin/end.
	 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
	 *
	 * There is already an smp_mb__after_atomic() before
	 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
	 * barrier here.
	 */
	if (!kvm_arch_flush_remote_tlb(kvm)
	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
#endif

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}
static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	vcpu->pid = NULL;
	rcuwait_init(&vcpu->wait);
	kvm_async_pf_vcpu_init(vcpu);

	vcpu->pre_pcpu = -1;
	INIT_LIST_HEAD(&vcpu->blocked_vcpu_list);

	kvm_vcpu_set_in_spin_loop(vcpu, false);
	kvm_vcpu_set_dy_eligible(vcpu, false);
	vcpu->preempted = false;
	vcpu->ready = false;
	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
}

void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_destroy(vcpu);

	/*
	 * No need for rcu_read_lock as VCPU_RUN is the only place that changes
	 * the vcpu->pid pointer, and at destruction time all file descriptors
	 * are already gone.
	 */
	put_pid(rcu_dereference_protected(vcpu->pid, 1));

	free_page((unsigned long)vcpu->run);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_destroy);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;

	if (kvm_set_spte_hva(kvm, address, pte))
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0, idx;
	int ret;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end);
	need_tlb_flush |= kvm->tlbs_dirty;
	/* we have to flush the tlb before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);

	ret = kvm_arch_mmu_notifier_invalidate_range(kvm, range->start,
					range->end,
					mmu_notifier_range_blockable(range));

	srcu_read_unlock(&kvm->srcu, idx);

	return ret;
}
static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	smp_wmb();
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, which is ensured by the smp_wmb above
	 * in conjunction with the smp_rmb in mmu_notifier_retry().
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start,
					      unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	young = kvm_age_hva(kvm, start, end);
	if (young)
		kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	/*
	 * Even though we do not flush TLB, this will still adversely
	 * affect performance on pre-Haswell Intel EPT, where there is
	 * no EPT Access Bit to clear so that we have to tear down EPT
	 * tables instead. If we find this unacceptable, we can always
	 * add a parameter to kvm_age_hva so that it effectively doesn't
	 * do anything on clear_young.
	 *
	 * Also note that currently we never issue secondary TLB flushes
	 * from clear_young, leaving this job up to the regular system
	 * cadence. If we find this inaccurate, we might come up with a
	 * more sophisticated heuristic later.
	 */
	young = kvm_age_hva(kvm, start, end);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young, idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);
	young = kvm_test_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	kvm_arch_flush_shadow_all(kvm);
	srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.clear_young		= kvm_mmu_notifier_clear_young,
	.test_young		= kvm_mmu_notifier_test_young,
	.change_pte		= kvm_mmu_notifier_change_pte,
	.release		= kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
	return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
static struct kvm_memslots *kvm_alloc_memslots(void)
{
	int i;
	struct kvm_memslots *slots;

	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
	if (!slots)
		return NULL;

	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
		slots->id_to_index[i] = -1;

	return slots;
}

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)
		return;

	kvfree(memslot->dirty_bitmap);
	memslot->dirty_bitmap = NULL;
}

static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvm_destroy_dirty_bitmap(slot);

	kvm_arch_free_memslot(kvm, slot);

	slot->flags = 0;
	slot->npages = 0;
}

static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
{
	struct kvm_memory_slot *memslot;

	if (!slots)
		return;

	kvm_for_each_memslot(memslot, slots)
		kvm_free_memslot(kvm, memslot);

	kvfree(slots);
}
static void kvm_destroy_vm_debugfs(struct kvm *kvm)
{
	int i;

	if (!kvm->debugfs_dentry)
		return;

	debugfs_remove_recursive(kvm->debugfs_dentry);

	if (kvm->debugfs_stat_data) {
		for (i = 0; i < kvm_debugfs_num_entries; i++)
			kfree(kvm->debugfs_stat_data[i]);
		kfree(kvm->debugfs_stat_data);
	}
}

static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
{
	char dir_name[ITOA_MAX_LEN * 2];
	struct kvm_stat_data *stat_data;
	struct kvm_stats_debugfs_item *p;

	if (!debugfs_initialized())
		return 0;

	snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
	kvm->debugfs_dentry = debugfs_create_dir(dir_name, kvm_debugfs_dir);

	kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
					 sizeof(*kvm->debugfs_stat_data),
					 GFP_KERNEL_ACCOUNT);
	if (!kvm->debugfs_stat_data)
		return -ENOMEM;

	for (p = debugfs_entries; p->name; p++) {
		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
		if (!stat_data)
			return -ENOMEM;

		stat_data->kvm = kvm;
		stat_data->dbgfs_item = p;
		kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
		debugfs_create_file(p->name, KVM_DBGFS_GET_MODE(p),
				    kvm->debugfs_dentry, stat_data,
				    &stat_fops_per_vm);
	}
	return 0;
}

/*
 * Called after the VM is otherwise initialized, but just before adding it to
 * the vm_list.
 */
int __weak kvm_arch_post_init_vm(struct kvm *kvm)
{
	return 0;
}

/*
 * Called just after removing the VM from the vm_list, but before doing any
 * other destruction.
 */
void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
{
}
static struct kvm *kvm_create_vm(unsigned long type)
{
	struct kvm *kvm = kvm_arch_alloc_vm();
	int r = -ENOMEM;
	int i;

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&kvm->mmu_lock);
	mmgrab(current->mm);
	kvm->mm = current->mm;
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	INIT_LIST_HEAD(&kvm->devices);

	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

	if (init_srcu_struct(&kvm->srcu))
		goto out_err_no_srcu;
	if (init_srcu_struct(&kvm->irq_srcu))
		goto out_err_no_irq_srcu;

	refcount_set(&kvm->users_count, 1);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		struct kvm_memslots *slots = kvm_alloc_memslots();

		if (!slots)
			goto out_err_no_arch_destroy_vm;
		/* Generations must be different for each address space. */
		slots->generation = i;
		rcu_assign_pointer(kvm->memslots[i], slots);
	}

	for (i = 0; i < KVM_NR_BUSES; i++) {
		rcu_assign_pointer(kvm->buses[i],
			kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
		if (!kvm->buses[i])
			goto out_err_no_arch_destroy_vm;
	}

	kvm->max_halt_poll_ns = halt_poll_ns;

	r = kvm_arch_init_vm(kvm, type);
	if (r)
		goto out_err_no_arch_destroy_vm;

	r = hardware_enable_all();
	if (r)
		goto out_err_no_disable;

#ifdef CONFIG_HAVE_KVM_IRQFD
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	r = kvm_init_mmu_notifier(kvm);
	if (r)
		goto out_err_no_mmu_notifier;

	r = kvm_arch_post_init_vm(kvm);
	if (r)
		goto out_err;

	mutex_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	mutex_unlock(&kvm_lock);

	preempt_notifier_inc();

	return kvm;

out_err:
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	if (kvm->mmu_notifier.ops)
		mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
#endif
out_err_no_mmu_notifier:
	hardware_disable_all();
out_err_no_disable:
	kvm_arch_destroy_vm(kvm);
out_err_no_arch_destroy_vm:
	WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm_get_bus(kvm, i));
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
	cleanup_srcu_struct(&kvm->irq_srcu);
out_err_no_irq_srcu:
	cleanup_srcu_struct(&kvm->srcu);
out_err_no_srcu:
	kvm_arch_free_vm(kvm);
	mmdrop(current->mm);
	return ERR_PTR(r);
}
static void kvm_destroy_devices(struct kvm *kvm)
{
	struct kvm_device *dev, *tmp;

	/*
	 * We do not need to take the kvm->lock here, because nobody else
	 * has a reference to the struct kvm at this point and therefore
	 * cannot access the devices list anyhow.
	 */
	list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
		list_del(&dev->vm_node);
		dev->ops->destroy(dev);
	}
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
	kvm_destroy_vm_debugfs(kvm);
	kvm_arch_sync_events(kvm);
	mutex_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	mutex_unlock(&kvm_lock);
	kvm_arch_pre_destroy_vm(kvm);

	kvm_free_irq_routing(kvm);
	for (i = 0; i < KVM_NR_BUSES; i++) {
		struct kvm_io_bus *bus = kvm_get_bus(kvm, i);

		if (bus)
			kvm_io_bus_destroy(bus);
		kvm->buses[i] = NULL;
	}
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow_all(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	kvm_destroy_devices(kvm);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
	cleanup_srcu_struct(&kvm->irq_srcu);
	cleanup_srcu_struct(&kvm->srcu);
	kvm_arch_free_vm(kvm);
	preempt_notifier_dec();
	hardware_disable_all();
	mmdrop(mm);
}
void kvm_get_kvm(struct kvm *kvm)
{
	refcount_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (refcount_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

/*
 * Used to put a reference that was taken on behalf of an object associated
 * with a user-visible file descriptor, e.g. a vcpu or device, if installation
 * of the new file descriptor fails and the reference cannot be transferred to
 * its final owner.  In such cases, the caller is still actively using @kvm and
 * will fail miserably if the refcount unexpectedly hits zero.
 */
void kvm_put_kvm_no_destroy(struct kvm *kvm)
{
	WARN_ON(refcount_dec_and_test(&kvm->users_count));
}
EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}
/*
 * Allocation size is twice as large as the actual dirty bitmap size.
 * See kvm_vm_ioctl_get_dirty_log() why this is needed.
 */
static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);

	memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL_ACCOUNT);
	if (!memslot->dirty_bitmap)
		return -ENOMEM;

	return 0;
}
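/*
 * Informal note: the second half of the doubled allocation is the scratch
 * area returned by kvm_second_dirty_bitmap(); kvm_get_dirty_log_protect()
 * and kvm_clear_dirty_log_protect() below use it to snapshot or stage dirty
 * bits while vCPU threads keep writing to the live bitmap.
 */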
/*
 * Delete a memslot by decrementing the number of used slots and shifting all
 * other entries in the array forward one spot.
 */
static inline void kvm_memslot_delete(struct kvm_memslots *slots,
				      struct kvm_memory_slot *memslot)
{
	struct kvm_memory_slot *mslots = slots->memslots;
	int i;

	if (WARN_ON(slots->id_to_index[memslot->id] == -1))
		return;

	slots->used_slots--;

	if (atomic_read(&slots->lru_slot) >= slots->used_slots)
		atomic_set(&slots->lru_slot, 0);

	for (i = slots->id_to_index[memslot->id]; i < slots->used_slots; i++) {
		mslots[i] = mslots[i + 1];
		slots->id_to_index[mslots[i].id] = i;
	}
	mslots[i] = *memslot;
	slots->id_to_index[memslot->id] = -1;
}

/*
 * "Insert" a new memslot by incrementing the number of used slots.  Returns
 * the new slot's initial index into the memslots array.
 */
static inline int kvm_memslot_insert_back(struct kvm_memslots *slots)
{
	return slots->used_slots++;
}

/*
 * Move a changed memslot backwards in the array by shifting existing slots
 * with a higher GFN toward the front of the array.  Note, the changed memslot
 * itself is not preserved in the array, i.e. not swapped at this time, only
 * its new index into the array is tracked.  Returns the changed memslot's
 * current index into the memslots array.
 */
static inline int kvm_memslot_move_backward(struct kvm_memslots *slots,
					    struct kvm_memory_slot *memslot)
{
	struct kvm_memory_slot *mslots = slots->memslots;
	int i;

	if (WARN_ON_ONCE(slots->id_to_index[memslot->id] == -1) ||
	    WARN_ON_ONCE(!slots->used_slots))
		return -1;

	/*
	 * Move the target memslot backward in the array by shifting existing
	 * memslots with a higher GFN (than the target memslot) towards the
	 * front of the array.
	 */
	for (i = slots->id_to_index[memslot->id]; i < slots->used_slots - 1; i++) {
		if (memslot->base_gfn > mslots[i + 1].base_gfn)
			break;

		WARN_ON_ONCE(memslot->base_gfn == mslots[i + 1].base_gfn);

		/* Shift the next memslot forward one and update its index. */
		mslots[i] = mslots[i + 1];
		slots->id_to_index[mslots[i].id] = i;
	}
	return i;
}

/*
 * Move a changed memslot forwards in the array by shifting existing slots with
 * a lower GFN toward the back of the array.  Note, the changed memslot itself
 * is not preserved in the array, i.e. not swapped at this time, only its new
 * index into the array is tracked.  Returns the changed memslot's final index
 * into the memslots array.
 */
static inline int kvm_memslot_move_forward(struct kvm_memslots *slots,
					   struct kvm_memory_slot *memslot,
					   int start)
{
	struct kvm_memory_slot *mslots = slots->memslots;
	int i;

	for (i = start; i > 0; i--) {
		if (memslot->base_gfn < mslots[i - 1].base_gfn)
			break;

		WARN_ON_ONCE(memslot->base_gfn == mslots[i - 1].base_gfn);

		/* Shift the next memslot back one and update its index. */
		mslots[i] = mslots[i - 1];
		slots->id_to_index[mslots[i].id] = i;
	}
	return i;
}

/*
 * Re-sort memslots based on their GFN to account for an added, deleted, or
 * moved memslot.  Sorting memslots by GFN allows using a binary search during
 * memslot lookup.
 *
 * IMPORTANT: Slots are sorted from highest GFN to lowest GFN!  I.e. the entry
 * at memslots[0] has the highest GFN.
 *
 * The sorting algorithm takes advantage of having initially sorted memslots
 * and knowing the position of the changed memslot.  Sorting is also optimized
 * by not swapping the updated memslot and instead only shifting other memslots
 * and tracking the new index for the updated memslot.  Only once its final
 * index is known is the updated memslot copied into its position in the array.
 *
 *  - When deleting a memslot, the deleted memslot simply needs to be moved to
 *    the end of the array.
 *
 *  - When creating a memslot, the algorithm "inserts" the new memslot at the
 *    end of the array and then moves it forward to its correct location.
 *
 *  - When moving a memslot, the algorithm first moves the updated memslot
 *    backward to handle the scenario where the memslot's GFN was changed to a
 *    lower value.  update_memslots() then falls through and runs the same flow
 *    as creating a memslot to move the memslot forward to handle the scenario
 *    where its GFN was changed to a higher value.
 *
 * Note, slots are sorted from highest->lowest instead of lowest->highest for
 * historical reasons.  Originally, invalid memslots were denoted by having
 * GFN=0, thus sorting from highest->lowest naturally sorted invalid memslots
 * to the end of the array.  The current algorithm uses dedicated logic to
 * delete a memslot and thus does not rely on invalid memslots having GFN=0.
 *
 * The other historical motivation for highest->lowest was to improve the
 * performance of memslot lookup.  KVM originally used a linear search starting
 * at memslots[0].  On x86, the largest memslot usually has one of the highest,
 * if not *the* highest, GFN, as the bulk of the guest's RAM is located in a
 * single memslot above the 4gb boundary.  As the largest memslot is also the
 * most likely to be referenced, sorting it to the front of the array was
 * advantageous.  The current binary search starts from the middle of the array
 * and uses an LRU pointer to improve performance for all memslots and GFNs.
 */
static void update_memslots(struct kvm_memslots *slots,
			    struct kvm_memory_slot *memslot,
			    enum kvm_mr_change change)
{
	int i;

	if (change == KVM_MR_DELETE) {
		kvm_memslot_delete(slots, memslot);
	} else {
		if (change == KVM_MR_CREATE)
			i = kvm_memslot_insert_back(slots);
		else
			i = kvm_memslot_move_backward(slots, memslot);
		i = kvm_memslot_move_forward(slots, memslot, i);

		/*
		 * Copy the memslot to its new position in memslots and update
		 * its index accordingly.
		 */
		slots->memslots[i] = *memslot;
		slots->id_to_index[memslot->id] = i;
	}
}
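/*
 * Worked example (illustrative only): with used_slots == 3 and base GFNs
 * {0x100, 0x80, 0x10}, creating a slot with base_gfn 0xc0 first "inserts" it
 * at index 3 via kvm_memslot_insert_back(); kvm_memslot_move_forward() then
 * shifts the lower-GFN slots back one position and returns index 1, so the
 * array ends up as {0x100, 0xc0, 0x80, 0x10}, preserving the descending-GFN
 * order described above.
 */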
static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
{
	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;

#ifdef __KVM_HAVE_READONLY_MEM
	valid_flags |= KVM_MEM_READONLY;
#endif

	if (mem->flags & ~valid_flags)
		return -EINVAL;

	return 0;
}

static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
		int as_id, struct kvm_memslots *slots)
{
	struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
	u64 gen = old_memslots->generation;

	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
	slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;

	rcu_assign_pointer(kvm->memslots[as_id], slots);
	synchronize_srcu_expedited(&kvm->srcu);

	/*
	 * Increment the new memslot generation a second time, dropping the
	 * update in-progress flag and incrementing the generation based on
	 * the number of address spaces.  This provides a unique and easily
	 * identifiable generation number while the memslots are in flux.
	 */
	gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;

	/*
	 * Generations must be unique even across address spaces.  We do not need
	 * a global counter for that, instead the generation space is evenly split
	 * across address spaces.  For example, with two address spaces, address
	 * space 0 will use generations 0, 2, 4, ... while address space 1 will
	 * use generations 1, 3, 5, ...
	 */
	gen += KVM_ADDRESS_SPACE_NUM;

	kvm_arch_memslots_updated(kvm, gen);

	slots->generation = gen;

	return old_memslots;
}
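/*
 * Informal sketch of the generation layout implied above: while an update is
 * in flight the generation carries KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS; once
 * the new memslots are visible the flag is dropped and the generation
 * advances by KVM_ADDRESS_SPACE_NUM, so each address space owns a disjoint
 * arithmetic progression (0, 2, 4, ... and 1, 3, 5, ... with two address
 * spaces).
 */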
/*
 * Note, at a minimum, the current number of used slots must be allocated, even
 * when deleting a memslot, as we need a complete duplicate of the memslots for
 * use when invalidating a memslot prior to deleting/moving the memslot.
 */
static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
					     enum kvm_mr_change change)
{
	struct kvm_memslots *slots;
	size_t old_size, new_size;

	old_size = sizeof(struct kvm_memslots) +
		   (sizeof(struct kvm_memory_slot) * old->used_slots);

	if (change == KVM_MR_CREATE)
		new_size = old_size + sizeof(struct kvm_memory_slot);
	else
		new_size = old_size;

	slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT);
	if (likely(slots))
		memcpy(slots, old, old_size);

	return slots;
}

static int kvm_set_memslot(struct kvm *kvm,
			   const struct kvm_userspace_memory_region *mem,
			   struct kvm_memory_slot *old,
			   struct kvm_memory_slot *new, int as_id,
			   enum kvm_mr_change change)
{
	struct kvm_memory_slot *slot;
	struct kvm_memslots *slots;
	int r;

	slots = kvm_dup_memslots(__kvm_memslots(kvm, as_id), change);
	if (!slots)
		return -ENOMEM;

	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
		/*
		 * Note, the INVALID flag needs to be in the appropriate entry
		 * in the freshly allocated memslots, not in @old or @new.
		 */
		slot = id_to_memslot(slots, old->id);
		slot->flags |= KVM_MEMSLOT_INVALID;

		/*
		 * We can re-use the old memslots, the only difference from the
		 * newly installed memslots is the invalid flag, which will get
		 * dropped by update_memslots anyway.  We'll also revert to the
		 * old memslots if preparing the new memory region fails.
		 */
		slots = install_new_memslots(kvm, as_id, slots);

		/* From this point no new shadow pages pointing to a deleted,
		 * or moved, memslot will be created.
		 *
		 * validation of sp->gfn happens in:
		 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
		 *	- kvm_is_visible_gfn (mmu_check_root)
		 */
		kvm_arch_flush_shadow_memslot(kvm, slot);
	}

	r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
	if (r)
		goto out_slots;

	update_memslots(slots, new, change);
	slots = install_new_memslots(kvm, as_id, slots);

	kvm_arch_commit_memory_region(kvm, mem, old, new, change);

	kvfree(slots);
	return 0;

out_slots:
	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
		slots = install_new_memslots(kvm, as_id, slots);
	kvfree(slots);
	return r;
}
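/*
 * Informal summary of the flow above: for DELETE and MOVE a working copy of
 * the memslots is first installed with the affected slot marked
 * KVM_MEMSLOT_INVALID so readers stop using it, the slot's shadow pages are
 * flushed, and only then is the final layout prepared by the architecture
 * code, re-sorted by update_memslots(), installed and committed.
 */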
static int kvm_delete_memslot(struct kvm *kvm,
			      const struct kvm_userspace_memory_region *mem,
			      struct kvm_memory_slot *old, int as_id)
{
	struct kvm_memory_slot new;
	int r;

	if (!old->npages)
		return -EINVAL;

	memset(&new, 0, sizeof(new));
	new.id = old->id;

	r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE);
	if (r)
		return r;

	kvm_free_memslot(kvm, old);
	return 0;
}
/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->slots_lock for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem)
{
	struct kvm_memory_slot old, new;
	struct kvm_memory_slot *tmp;
	enum kvm_mr_change change;
	int as_id, id;
	int r;

	r = check_memory_region_flags(mem);
	if (r)
		return r;

	as_id = mem->slot >> 16;
	id = (u16)mem->slot;

	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		return -EINVAL;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		return -EINVAL;
	/* We can read the guest memory with __xxx_user() later on. */
	if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
	     !access_ok((void __user *)(unsigned long)mem->userspace_addr,
			mem->memory_size))
		return -EINVAL;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
		return -EINVAL;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		return -EINVAL;

	/*
	 * Make a full copy of the old memslot, the pointer will become stale
	 * when the memslots are re-sorted by update_memslots(), and the old
	 * memslot needs to be referenced after calling update_memslots(), e.g.
	 * to free its resources and for arch specific behavior.
	 */
	tmp = id_to_memslot(__kvm_memslots(kvm, as_id), id);
	if (tmp) {
		old = *tmp;
		tmp = NULL;
	} else {
		memset(&old, 0, sizeof(old));
		old.id = id;
	}

	if (!mem->memory_size)
		return kvm_delete_memslot(kvm, mem, &old, as_id);

	new.id = id;
	new.base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	new.npages = mem->memory_size >> PAGE_SHIFT;
	new.flags = mem->flags;
	new.userspace_addr = mem->userspace_addr;

	if (new.npages > KVM_MEM_MAX_NR_PAGES)
		return -EINVAL;

	if (!old.npages) {
		change = KVM_MR_CREATE;
		new.dirty_bitmap = NULL;
		memset(&new.arch, 0, sizeof(new.arch));
	} else { /* Modify an existing slot. */
		if ((new.userspace_addr != old.userspace_addr) ||
		    (new.npages != old.npages) ||
		    ((new.flags ^ old.flags) & KVM_MEM_READONLY))
			return -EINVAL;

		if (new.base_gfn != old.base_gfn)
			change = KVM_MR_MOVE;
		else if (new.flags != old.flags)
			change = KVM_MR_FLAGS_ONLY;
		else /* Nothing to change. */
			return 0;

		/* Copy dirty_bitmap and arch from the current memslot. */
		new.dirty_bitmap = old.dirty_bitmap;
		memcpy(&new.arch, &old.arch, sizeof(new.arch));
	}

	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
		/* Check for overlaps */
		kvm_for_each_memslot(tmp, __kvm_memslots(kvm, as_id)) {
			if (tmp->id == id)
				continue;
			if (!((new.base_gfn + new.npages <= tmp->base_gfn) ||
			      (new.base_gfn >= tmp->base_gfn + tmp->npages)))
				return -EEXIST;
		}
	}

	/* Allocate/free page dirty bitmap as needed */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;
	else if (!new.dirty_bitmap) {
		r = kvm_alloc_dirty_bitmap(&new);
		if (r)
			return r;

		if (kvm_dirty_log_manual_protect_and_init_set(kvm))
			bitmap_set(new.dirty_bitmap, 0, new.npages);
	}

	r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change);
	if (r)
		goto out_bitmap;

	if (old.dirty_bitmap && !new.dirty_bitmap)
		kvm_destroy_dirty_bitmap(&old);
	return 0;

out_bitmap:
	if (new.dirty_bitmap && !old.dirty_bitmap)
		kvm_destroy_dirty_bitmap(&new);
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem)
{
	int r;

	mutex_lock(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem);
	mutex_unlock(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
					  struct kvm_userspace_memory_region *mem)
{
	if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	return kvm_set_memory_region(kvm, mem);
}
#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
/**
 * kvm_get_dirty_log - get a snapshot of dirty pages
 * @kvm: pointer to kvm instance
 * @log: slot id and address to which we copy the log
 * @is_dirty: set to '1' if any dirty pages were found
 * @memslot: set to the associated memslot, always valid on success
 */
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
		      int *is_dirty, struct kvm_memory_slot **memslot)
{
	struct kvm_memslots *slots;
	int i, as_id, id;
	unsigned long n;
	unsigned long any = 0;

	*memslot = NULL;
	*is_dirty = 0;

	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	slots = __kvm_memslots(kvm, as_id);
	*memslot = id_to_memslot(slots, id);
	if (!(*memslot) || !(*memslot)->dirty_bitmap)
		return -ENOENT;

	kvm_arch_sync_dirty_log(kvm, *memslot);

	n = kvm_dirty_bitmap_bytes(*memslot);

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = (*memslot)->dirty_bitmap[i];

	if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
		return -EFAULT;

	if (any)
		*is_dirty = 1;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
#else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
/**
 * kvm_get_dirty_log_protect - get a snapshot of dirty pages
 *	and reenable dirty page tracking for the corresponding pages.
 * @kvm: pointer to kvm instance
 * @log: slot id and address to which we copy the log
 *
 * We need to keep it in mind that VCPU threads can write to the bitmap
 * concurrently. So, to avoid losing track of dirty pages we keep the
 * following order:
 *
 *    1. Take a snapshot of the bit and clear it if needed.
 *    2. Write protect the corresponding page.
 *    3. Copy the snapshot to the userspace.
 *    4. Upon return caller flushes TLB's if needed.
 *
 * Between 2 and 4, the guest may write to the page using the remaining TLB
 * entry.  This is not a problem because the page is reported dirty using
 * the snapshot taken before and step 4 ensures that writes done after
 * exiting to userspace will be logged for the next call.
 */
static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int i, as_id, id;
	unsigned long n;
	unsigned long *dirty_bitmap;
	unsigned long *dirty_bitmap_buffer;
	bool flush;

	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);
	if (!memslot || !memslot->dirty_bitmap)
		return -ENOENT;

	dirty_bitmap = memslot->dirty_bitmap;

	kvm_arch_sync_dirty_log(kvm, memslot);

	n = kvm_dirty_bitmap_bytes(memslot);
	flush = false;
	if (kvm->manual_dirty_log_protect) {
		/*
		 * Unlike kvm_get_dirty_log, we always return false in *flush,
		 * because no flush is needed until KVM_CLEAR_DIRTY_LOG.  There
		 * is some code duplication between this function and
		 * kvm_get_dirty_log, but hopefully all architectures will
		 * transition to kvm_get_dirty_log_protect and kvm_get_dirty_log
		 * can be eliminated.
		 */
		dirty_bitmap_buffer = dirty_bitmap;
	} else {
		dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
		memset(dirty_bitmap_buffer, 0, n);

		spin_lock(&kvm->mmu_lock);
		for (i = 0; i < n / sizeof(long); i++) {
			unsigned long mask;
			gfn_t offset;

			if (!dirty_bitmap[i])
				continue;

			flush = true;
			mask = xchg(&dirty_bitmap[i], 0);
			dirty_bitmap_buffer[i] = mask;

			offset = i * BITS_PER_LONG;
			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
								offset, mask);
		}
		spin_unlock(&kvm->mmu_lock);
	}

	if (flush)
		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);

	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
		return -EFAULT;
	return 0;
}

/**
 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
 * @kvm: kvm instance
 * @log: slot id and address to which we copy the log
 *
 * Steps 1-4 below provide general overview of dirty page logging. See
 * kvm_get_dirty_log_protect() function description for additional details.
 *
 * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
 * always flush the TLB (step 4) even if previous step failed and the dirty
 * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
 * does not preclude user space subsequent dirty log read. Flushing TLB ensures
 * writes will be marked dirty for next log read.
 *
 *   1. Take a snapshot of the bit and clear it if needed.
 *   2. Write protect the corresponding page.
 *   3. Copy the snapshot to the userspace.
 *   4. Flush TLB's if needed.
 */
static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				      struct kvm_dirty_log *log)
{
	int r;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log_protect(kvm, log);

	mutex_unlock(&kvm->slots_lock);
	return r;
}
/**
 * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
 *	and reenable dirty page tracking for the corresponding pages.
 * @kvm: pointer to kvm instance
 * @log: slot id and address from which to fetch the bitmap of dirty pages
 */
static int kvm_clear_dirty_log_protect(struct kvm *kvm,
				       struct kvm_clear_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int as_id, id;
	gfn_t offset;
	unsigned long i, n;
	unsigned long *dirty_bitmap;
	unsigned long *dirty_bitmap_buffer;
	bool flush;

	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
		return -EINVAL;

	if (log->first_page & 63)
		return -EINVAL;

	slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);
	if (!memslot || !memslot->dirty_bitmap)
		return -ENOENT;

	dirty_bitmap = memslot->dirty_bitmap;

	n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;

	if (log->first_page > memslot->npages ||
	    log->num_pages > memslot->npages - log->first_page ||
	    (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
		return -EINVAL;

	kvm_arch_sync_dirty_log(kvm, memslot);

	flush = false;
	dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
	if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
		return -EFAULT;

	spin_lock(&kvm->mmu_lock);
	for (offset = log->first_page, i = offset / BITS_PER_LONG,
		 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
	     i++, offset += BITS_PER_LONG) {
		unsigned long mask = *dirty_bitmap_buffer++;
		atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
		if (!mask)
			continue;

		mask &= atomic_long_fetch_andnot(mask, p);

		/*
		 * mask contains the bits that really have been cleared.  This
		 * never includes any bits beyond the length of the memslot (if
		 * the length is not aligned to 64 pages), therefore it is not
		 * a problem if userspace sets them in log->dirty_bitmap.
		 */
		if (mask) {
			flush = true;
			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
								offset, mask);
		}
	}
	spin_unlock(&kvm->mmu_lock);

	if (flush)
		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);

	return 0;
}

static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
					struct kvm_clear_dirty_log *log)
{
	int r;

	mutex_lock(&kvm->slots_lock);

	r = kvm_clear_dirty_log_protect(kvm, log);

	mutex_unlock(&kvm->slots_lock);
	return r;
}
#endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
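/*
 * Informal note (the ioctl dispatch itself lives further down in this file):
 * kvm_vm_ioctl_get_dirty_log() and kvm_vm_ioctl_clear_dirty_log() are the
 * handlers behind the KVM_GET_DIRTY_LOG and KVM_CLEAR_DIRTY_LOG VM ioctls;
 * the clear variant is only meaningful when userspace has enabled manual
 * dirty-log protection (kvm->manual_dirty_log_protect).
 */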
ba0513b5 1602
49c7754c
GN
1603struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
1604{
1605 return __gfn_to_memslot(kvm_memslots(kvm), gfn);
1606}
a1f4d395 1607EXPORT_SYMBOL_GPL(gfn_to_memslot);
6aa8b732 1608
8e73485c
PB
1609struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
1610{
1611 return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
1612}
e72436bc 1613EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_memslot);
8e73485c 1614
33e94154 1615bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
e0d62c7f 1616{
bf3e05bc 1617 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
e0d62c7f 1618
c36b7150 1619 return kvm_is_visible_memslot(memslot);
e0d62c7f
IE
1620}
1621EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
1622
f9b84e19 1623unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
8f0b1ab6
JR
1624{
1625 struct vm_area_struct *vma;
1626 unsigned long addr, size;
1627
1628 size = PAGE_SIZE;
1629
42cde48b 1630 addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
8f0b1ab6
JR
1631 if (kvm_is_error_hva(addr))
1632 return PAGE_SIZE;
1633
d8ed45c5 1634 mmap_read_lock(current->mm);
8f0b1ab6
JR
1635 vma = find_vma(current->mm, addr);
1636 if (!vma)
1637 goto out;
1638
1639 size = vma_kernel_pagesize(vma);
1640
1641out:
d8ed45c5 1642 mmap_read_unlock(current->mm);
8f0b1ab6
JR
1643
1644 return size;
1645}
1646
4d8b81ab
XG
1647static bool memslot_is_readonly(struct kvm_memory_slot *slot)
1648{
1649 return slot->flags & KVM_MEM_READONLY;
1650}
1651
4d8b81ab
XG
1652static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
1653 gfn_t *nr_pages, bool write)
539cb660 1654{
bc6678a3 1655 if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
ca3a490c 1656 return KVM_HVA_ERR_BAD;
48987781 1657
4d8b81ab
XG
1658 if (memslot_is_readonly(slot) && write)
1659 return KVM_HVA_ERR_RO_BAD;
48987781
XG
1660
1661 if (nr_pages)
1662 *nr_pages = slot->npages - (gfn - slot->base_gfn);
1663
4d8b81ab 1664 return __gfn_to_hva_memslot(slot, gfn);
539cb660 1665}
48987781 1666
4d8b81ab
XG
1667static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
1668 gfn_t *nr_pages)
1669{
1670 return __gfn_to_hva_many(slot, gfn, nr_pages, true);
539cb660 1671}
48987781 1672
4d8b81ab 1673unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
7940876e 1674 gfn_t gfn)
4d8b81ab
XG
1675{
1676 return gfn_to_hva_many(slot, gfn, NULL);
1677}
1678EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);
1679
48987781
XG
1680unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
1681{
49c7754c 1682 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
48987781 1683}
0d150298 1684EXPORT_SYMBOL_GPL(gfn_to_hva);
539cb660 1685
8e73485c
PB
1686unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
1687{
1688 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
1689}
1690EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
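/*
 * Editor's note: a minimal illustrative sketch, not part of this file, of how
 * the gfn->hva helpers above are typically consumed -- translate the gfn,
 * validate the result with kvm_is_error_hva(), then use the normal user-copy
 * accessors on the returned address.  The function name is hypothetical and
 * the caller is assumed to be allowed to sleep.
 */
static inline int example_peek_guest_byte(struct kvm *kvm, gfn_t gfn, u8 *val)
{
	unsigned long hva = gfn_to_hva(kvm, gfn);

	if (kvm_is_error_hva(hva))
		return -EFAULT;

	return copy_from_user(val, (void __user *)hva, 1) ? -EFAULT : 0;
}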
1691
86ab8cff 1692/*
970c0d4b
WY
1693 * Return the hva of a @gfn and the R/W attribute if possible.
1694 *
1695 * @slot: the kvm_memory_slot which contains @gfn
1696 * @gfn: the gfn to be translated
1697 * @writable: used to return the read/write attribute of the @slot if the hva
1698 * is valid and @writable is not NULL
86ab8cff 1699 */
64d83126
CD
1700unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
1701 gfn_t gfn, bool *writable)
86ab8cff 1702{
a2ac07fe
GN
1703 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
1704
1705 if (!kvm_is_error_hva(hva) && writable)
ba6a3541
PB
1706 *writable = !memslot_is_readonly(slot);
1707
a2ac07fe 1708 return hva;
86ab8cff
XG
1709}
1710
64d83126
CD
1711unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
1712{
1713 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
1714
1715 return gfn_to_hva_memslot_prot(slot, gfn, writable);
1716}
1717
8e73485c
PB
1718unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
1719{
1720 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1721
1722 return gfn_to_hva_memslot_prot(slot, gfn, writable);
1723}
1724
fafc3dba
HY
1725static inline int check_user_page_hwpoison(unsigned long addr)
1726{
0d731759 1727 int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
fafc3dba 1728
0d731759 1729 rc = get_user_pages(addr, 1, flags, NULL, NULL);
fafc3dba
HY
1730 return rc == -EHWPOISON;
1731}
1732
2fc84311 1733/*
b9b33da2
PB
1734 * The fast path to get a writable pfn, which will be stored in @pfn;
1735 * true indicates success, otherwise false is returned. This is also the
311497e0 1736 * only path that may run in atomic context.
2fc84311 1737 */
b9b33da2
PB
1738static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
1739 bool *writable, kvm_pfn_t *pfn)
954bbbc2 1740{
8d4e1288 1741 struct page *page[1];
954bbbc2 1742
12ce13fe
XG
1743 /*
1744 * Fast pin a writable pfn only if it is a write fault request
1745 * or the caller allows mapping a writable pfn for a read fault
1746 * request.
1747 */
1748 if (!(write_fault || writable))
1749 return false;
612819c3 1750
dadbb612 1751 if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
2fc84311 1752 *pfn = page_to_pfn(page[0]);
612819c3 1753
2fc84311
XG
1754 if (writable)
1755 *writable = true;
1756 return true;
1757 }
af585b92 1758
2fc84311
XG
1759 return false;
1760}
612819c3 1761
2fc84311
XG
1762/*
1763 * The slow path to get the pfn of the specified host virtual address;
1764 * 1 indicates success, -errno is returned if an error is detected.
1765 */
1766static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
ba049e93 1767 bool *writable, kvm_pfn_t *pfn)
2fc84311 1768{
ce53053c
AV
1769 unsigned int flags = FOLL_HWPOISON;
1770 struct page *page;
2fc84311 1771 int npages = 0;
612819c3 1772
2fc84311
XG
1773 might_sleep();
1774
1775 if (writable)
1776 *writable = write_fault;
1777
ce53053c
AV
1778 if (write_fault)
1779 flags |= FOLL_WRITE;
1780 if (async)
1781 flags |= FOLL_NOWAIT;
d4944b0e 1782
ce53053c 1783 npages = get_user_pages_unlocked(addr, 1, &page, flags);
2fc84311
XG
1784 if (npages != 1)
1785 return npages;
1786
1787 /* map read fault as writable if possible */
12ce13fe 1788 if (unlikely(!write_fault) && writable) {
ce53053c 1789 struct page *wpage;
2fc84311 1790
dadbb612 1791 if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) {
2fc84311 1792 *writable = true;
ce53053c
AV
1793 put_page(page);
1794 page = wpage;
612819c3 1795 }
887c08ac 1796 }
ce53053c 1797 *pfn = page_to_pfn(page);
2fc84311
XG
1798 return npages;
1799}
539cb660 1800
4d8b81ab
XG
1801static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
1802{
1803 if (unlikely(!(vma->vm_flags & VM_READ)))
1804 return false;
2e2e3738 1805
4d8b81ab
XG
1806 if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
1807 return false;
887c08ac 1808
4d8b81ab
XG
1809 return true;
1810}
bf998156 1811
92176a8e
PB
1812static int hva_to_pfn_remapped(struct vm_area_struct *vma,
1813 unsigned long addr, bool *async,
a340b3e2
KA
1814 bool write_fault, bool *writable,
1815 kvm_pfn_t *p_pfn)
92176a8e 1816{
add6a0cd
PB
1817 unsigned long pfn;
1818 int r;
1819
1820 r = follow_pfn(vma, addr, &pfn);
1821 if (r) {
1822 /*
1823 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
1824 * not call the fault handler, so do it here.
1825 */
1826 bool unlocked = false;
1827 r = fixup_user_fault(current, current->mm, addr,
1828 (write_fault ? FAULT_FLAG_WRITE : 0),
1829 &unlocked);
a8387d0b
PB
1830 if (unlocked)
1831 return -EAGAIN;
add6a0cd
PB
1832 if (r)
1833 return r;
1834
1835 r = follow_pfn(vma, addr, &pfn);
1836 if (r)
1837 return r;
1838
1839 }
1840
a340b3e2
KA
1841 if (writable)
1842 *writable = true;
add6a0cd
PB
1843
1844 /*
1845 * Get a reference here because callers of *hva_to_pfn* and
1846 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
1847 * returned pfn. This is only needed if the VMA has VM_MIXEDMAP
1848 * set, but the kvm_get_pfn/kvm_release_pfn_clean pair will
1849 * simply do nothing for reserved pfns.
1850 *
1851 * Whoever called remap_pfn_range is also going to call e.g.
1852 * unmap_mapping_range before the underlying pages are freed,
1853 * causing a call to our MMU notifier.
1854 */
1855 kvm_get_pfn(pfn);
1856
1857 *p_pfn = pfn;
92176a8e
PB
1858 return 0;
1859}
1860
12ce13fe
XG
1861/*
1862 * Pin guest page in memory and return its pfn.
1863 * @addr: host virtual address which maps memory to the guest
1864 * @atomic: whether the caller is in atomic context and must not sleep
1865 * @async: whether this function may wait for IO to complete if the
1866 * host page is not present in memory
1867 * @write_fault: whether we should get a writable host page
1868 * @writable: whether the caller allows a writable host page for !@write_fault
1869 *
1870 * The function will map a writable host page for these two cases:
1871 * 1): @write_fault = true
1872 * 2): @write_fault = false && @writable, @writable will tell the caller
1873 * whether the mapping is writable.
1874 */
ba049e93 1875static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
2fc84311
XG
1876 bool write_fault, bool *writable)
1877{
1878 struct vm_area_struct *vma;
ba049e93 1879 kvm_pfn_t pfn = 0;
92176a8e 1880 int npages, r;
2e2e3738 1881
2fc84311
XG
1882 /* we can do it either atomically or asynchronously, not both */
1883 BUG_ON(atomic && async);
8d4e1288 1884
b9b33da2 1885 if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
2fc84311
XG
1886 return pfn;
1887
1888 if (atomic)
1889 return KVM_PFN_ERR_FAULT;
1890
1891 npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
1892 if (npages == 1)
1893 return pfn;
8d4e1288 1894
d8ed45c5 1895 mmap_read_lock(current->mm);
2fc84311
XG
1896 if (npages == -EHWPOISON ||
1897 (!async && check_user_page_hwpoison(addr))) {
1898 pfn = KVM_PFN_ERR_HWPOISON;
1899 goto exit;
1900 }
1901
a8387d0b 1902retry:
2fc84311
XG
1903 vma = find_vma_intersection(current->mm, addr, addr + 1);
1904
1905 if (vma == NULL)
1906 pfn = KVM_PFN_ERR_FAULT;
92176a8e 1907 else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
a340b3e2 1908 r = hva_to_pfn_remapped(vma, addr, async, write_fault, writable, &pfn);
a8387d0b
PB
1909 if (r == -EAGAIN)
1910 goto retry;
92176a8e
PB
1911 if (r < 0)
1912 pfn = KVM_PFN_ERR_FAULT;
2fc84311 1913 } else {
4d8b81ab 1914 if (async && vma_is_valid(vma, write_fault))
2fc84311
XG
1915 *async = true;
1916 pfn = KVM_PFN_ERR_FAULT;
1917 }
1918exit:
d8ed45c5 1919 mmap_read_unlock(current->mm);
2e2e3738 1920 return pfn;
35149e21
AL
1921}
1922
ba049e93
DW
1923kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
1924 bool atomic, bool *async, bool write_fault,
1925 bool *writable)
887c08ac 1926{
4d8b81ab
XG
1927 unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
1928
b2740d35
PB
1929 if (addr == KVM_HVA_ERR_RO_BAD) {
1930 if (writable)
1931 *writable = false;
4d8b81ab 1932 return KVM_PFN_ERR_RO_FAULT;
b2740d35 1933 }
4d8b81ab 1934
b2740d35
PB
1935 if (kvm_is_error_hva(addr)) {
1936 if (writable)
1937 *writable = false;
81c52c56 1938 return KVM_PFN_NOSLOT;
b2740d35 1939 }
4d8b81ab
XG
1940
1941 /* Do not map writable pfn in the readonly memslot. */
1942 if (writable && memslot_is_readonly(slot)) {
1943 *writable = false;
1944 writable = NULL;
1945 }
1946
1947 return hva_to_pfn(addr, atomic, async, write_fault,
1948 writable);
887c08ac 1949}
3520469d 1950EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
887c08ac 1951
ba049e93 1952kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
612819c3
MT
1953 bool *writable)
1954{
e37afc6e
PB
1955 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
1956 write_fault, writable);
612819c3
MT
1957}
1958EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
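/*
 * Editor's note: an illustrative sketch, not part of this file, of the
 * pin/use/release pattern for the pfn helpers above.  A pfn returned by
 * gfn_to_pfn_prot() must be dropped with kvm_release_pfn_clean() (or
 * kvm_release_pfn_dirty() after a write), both defined later in this file.
 * The function name is hypothetical.
 */
static inline int example_pin_for_read(struct kvm *kvm, gfn_t gfn)
{
	bool writable;
	kvm_pfn_t pfn = gfn_to_pfn_prot(kvm, gfn, false, &writable);

	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	/* ... read through the page, e.g. via pfn_to_page(pfn) ... */

	kvm_release_pfn_clean(pfn);
	return 0;
}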
1959
ba049e93 1960kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
506f0d6f 1961{
4d8b81ab 1962 return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
506f0d6f 1963}
e37afc6e 1964EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
506f0d6f 1965
ba049e93 1966kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
506f0d6f 1967{
4d8b81ab 1968 return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
506f0d6f 1969}
037d92dc 1970EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
506f0d6f 1971
ba049e93 1972kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
8e73485c
PB
1973{
1974 return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
1975}
1976EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);
1977
ba049e93 1978kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
e37afc6e
PB
1979{
1980 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
1981}
1982EXPORT_SYMBOL_GPL(gfn_to_pfn);
1983
ba049e93 1984kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
8e73485c
PB
1985{
1986 return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
1987}
1988EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);
1989
d9ef13c2
PB
1990int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
1991 struct page **pages, int nr_pages)
48987781
XG
1992{
1993 unsigned long addr;
076b925d 1994 gfn_t entry = 0;
48987781 1995
d9ef13c2 1996 addr = gfn_to_hva_many(slot, gfn, &entry);
48987781
XG
1997 if (kvm_is_error_hva(addr))
1998 return -1;
1999
2000 if (entry < nr_pages)
2001 return 0;
2002
dadbb612 2003 return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
48987781
XG
2004}
2005EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
2006
ba049e93 2007static struct page *kvm_pfn_to_page(kvm_pfn_t pfn)
a2766325 2008{
81c52c56 2009 if (is_error_noslot_pfn(pfn))
cb9aaa30 2010 return KVM_ERR_PTR_BAD_PAGE;
a2766325 2011
bf4bea8e 2012 if (kvm_is_reserved_pfn(pfn)) {
cb9aaa30 2013 WARN_ON(1);
6cede2e6 2014 return KVM_ERR_PTR_BAD_PAGE;
cb9aaa30 2015 }
a2766325
XG
2016
2017 return pfn_to_page(pfn);
2018}
2019
35149e21
AL
2020struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
2021{
ba049e93 2022 kvm_pfn_t pfn;
2e2e3738
AL
2023
2024 pfn = gfn_to_pfn(kvm, gfn);
2e2e3738 2025
a2766325 2026 return kvm_pfn_to_page(pfn);
954bbbc2
AK
2027}
2028EXPORT_SYMBOL_GPL(gfn_to_page);
2029
91724814
BO
2030void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
2031{
2032 if (pfn == 0)
2033 return;
2034
2035 if (cache)
2036 cache->pfn = cache->gfn = 0;
2037
2038 if (dirty)
2039 kvm_release_pfn_dirty(pfn);
2040 else
2041 kvm_release_pfn_clean(pfn);
2042}
2043
2044static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
2045 struct gfn_to_pfn_cache *cache, u64 gen)
2046{
2047 kvm_release_pfn(cache->pfn, cache->dirty, cache);
2048
2049 cache->pfn = gfn_to_pfn_memslot(slot, gfn);
2050 cache->gfn = gfn;
2051 cache->dirty = false;
2052 cache->generation = gen;
2053}
2054
1eff70a9 2055static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
91724814
BO
2056 struct kvm_host_map *map,
2057 struct gfn_to_pfn_cache *cache,
2058 bool atomic)
e45adf66
KA
2059{
2060 kvm_pfn_t pfn;
2061 void *hva = NULL;
2062 struct page *page = KVM_UNMAPPED_PAGE;
1eff70a9 2063 struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
91724814 2064 u64 gen = slots->generation;
e45adf66
KA
2065
2066 if (!map)
2067 return -EINVAL;
2068
91724814
BO
2069 if (cache) {
2070 if (!cache->pfn || cache->gfn != gfn ||
2071 cache->generation != gen) {
2072 if (atomic)
2073 return -EAGAIN;
2074 kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
2075 }
2076 pfn = cache->pfn;
2077 } else {
2078 if (atomic)
2079 return -EAGAIN;
2080 pfn = gfn_to_pfn_memslot(slot, gfn);
2081 }
e45adf66
KA
2082 if (is_error_noslot_pfn(pfn))
2083 return -EINVAL;
2084
2085 if (pfn_valid(pfn)) {
2086 page = pfn_to_page(pfn);
91724814
BO
2087 if (atomic)
2088 hva = kmap_atomic(page);
2089 else
2090 hva = kmap(page);
d30b214d 2091#ifdef CONFIG_HAS_IOMEM
91724814 2092 } else if (!atomic) {
e45adf66 2093 hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
91724814
BO
2094 } else {
2095 return -EINVAL;
d30b214d 2096#endif
e45adf66
KA
2097 }
2098
2099 if (!hva)
2100 return -EFAULT;
2101
2102 map->page = page;
2103 map->hva = hva;
2104 map->pfn = pfn;
2105 map->gfn = gfn;
2106
2107 return 0;
2108}
2109
91724814
BO
2110int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
2111 struct gfn_to_pfn_cache *cache, bool atomic)
1eff70a9 2112{
91724814
BO
2113 return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
2114 cache, atomic);
1eff70a9
BO
2115}
2116EXPORT_SYMBOL_GPL(kvm_map_gfn);
2117
e45adf66
KA
2118int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
2119{
91724814
BO
2120 return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
2121 NULL, false);
e45adf66
KA
2122}
2123EXPORT_SYMBOL_GPL(kvm_vcpu_map);
2124
1eff70a9 2125static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
91724814
BO
2126 struct kvm_host_map *map,
2127 struct gfn_to_pfn_cache *cache,
2128 bool dirty, bool atomic)
e45adf66
KA
2129{
2130 if (!map)
2131 return;
2132
2133 if (!map->hva)
2134 return;
2135
91724814
BO
2136 if (map->page != KVM_UNMAPPED_PAGE) {
2137 if (atomic)
2138 kunmap_atomic(map->hva);
2139 else
2140 kunmap(map->page);
2141 }
eb1f2f38 2142#ifdef CONFIG_HAS_IOMEM
91724814 2143 else if (!atomic)
e45adf66 2144 memunmap(map->hva);
91724814
BO
2145 else
2146 WARN_ONCE(1, "Unexpected unmapping in atomic context");
eb1f2f38 2147#endif
e45adf66 2148
91724814 2149 if (dirty)
1eff70a9 2150 mark_page_dirty_in_slot(memslot, map->gfn);
91724814
BO
2151
2152 if (cache)
2153 cache->dirty |= dirty;
2154 else
2155 kvm_release_pfn(map->pfn, dirty, NULL);
e45adf66
KA
2156
2157 map->hva = NULL;
2158 map->page = NULL;
2159}
1eff70a9 2160
91724814
BO
2161int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
2162 struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
1eff70a9 2163{
91724814
BO
2164 __kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map,
2165 cache, dirty, atomic);
1eff70a9
BO
2166 return 0;
2167}
2168EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
2169
2170void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
2171{
91724814
BO
2172 __kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, NULL,
2173 dirty, false);
1eff70a9 2174}
e45adf66
KA
2175EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
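/*
 * Editor's note: an illustrative sketch, not part of this file, of the
 * map/modify/unmap pattern served by kvm_vcpu_map()/kvm_vcpu_unmap() above.
 * Passing dirty == true on unmap marks the page dirty so it appears in the
 * dirty log.  The function name is hypothetical; @offset must be < PAGE_SIZE.
 */
static inline int example_patch_guest_byte(struct kvm_vcpu *vcpu, gfn_t gfn,
					   unsigned int offset, u8 byte)
{
	struct kvm_host_map map;

	if (kvm_vcpu_map(vcpu, gfn, &map))
		return -EFAULT;

	((u8 *)map.hva)[offset] = byte;

	kvm_vcpu_unmap(vcpu, &map, true);
	return 0;
}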
2176
8e73485c
PB
2177struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
2178{
ba049e93 2179 kvm_pfn_t pfn;
8e73485c
PB
2180
2181 pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);
2182
2183 return kvm_pfn_to_page(pfn);
2184}
2185EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page);
2186
b4231d61
IE
2187void kvm_release_page_clean(struct page *page)
2188{
32cad84f
XG
2189 WARN_ON(is_error_page(page));
2190
35149e21 2191 kvm_release_pfn_clean(page_to_pfn(page));
b4231d61
IE
2192}
2193EXPORT_SYMBOL_GPL(kvm_release_page_clean);
2194
ba049e93 2195void kvm_release_pfn_clean(kvm_pfn_t pfn)
35149e21 2196{
bf4bea8e 2197 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
2e2e3738 2198 put_page(pfn_to_page(pfn));
35149e21
AL
2199}
2200EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
2201
b4231d61 2202void kvm_release_page_dirty(struct page *page)
8a7ae055 2203{
a2766325
XG
2204 WARN_ON(is_error_page(page));
2205
35149e21
AL
2206 kvm_release_pfn_dirty(page_to_pfn(page));
2207}
2208EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
2209
f7a6509f 2210void kvm_release_pfn_dirty(kvm_pfn_t pfn)
35149e21
AL
2211{
2212 kvm_set_pfn_dirty(pfn);
2213 kvm_release_pfn_clean(pfn);
2214}
f7a6509f 2215EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
35149e21 2216
ba049e93 2217void kvm_set_pfn_dirty(kvm_pfn_t pfn)
35149e21 2218{
d29c03a5
ML
2219 if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
2220 SetPageDirty(pfn_to_page(pfn));
8a7ae055 2221}
35149e21
AL
2222EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
2223
ba049e93 2224void kvm_set_pfn_accessed(kvm_pfn_t pfn)
35149e21 2225{
a78986aa 2226 if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
2e2e3738 2227 mark_page_accessed(pfn_to_page(pfn));
35149e21
AL
2228}
2229EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
2230
ba049e93 2231void kvm_get_pfn(kvm_pfn_t pfn)
35149e21 2232{
bf4bea8e 2233 if (!kvm_is_reserved_pfn(pfn))
2e2e3738 2234 get_page(pfn_to_page(pfn));
35149e21
AL
2235}
2236EXPORT_SYMBOL_GPL(kvm_get_pfn);
8a7ae055 2237
195aefde
IE
2238static int next_segment(unsigned long len, int offset)
2239{
2240 if (len > PAGE_SIZE - offset)
2241 return PAGE_SIZE - offset;
2242 else
2243 return len;
2244}
2245
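/*
 * Editor's note, worked example: with PAGE_SIZE == 4096, a copy of
 * len = 5000 bytes starting at offset = 3000 within the first page is split
 * by next_segment() into a 1096-byte chunk (the rest of that page) followed
 * by a 3904-byte chunk, which is how the kvm_read_guest()/kvm_write_guest()
 * loops below walk a range that crosses a page boundary.
 */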
8e73485c
PB
2246static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
2247 void *data, int offset, int len)
195aefde 2248{
e0506bcb
IE
2249 int r;
2250 unsigned long addr;
195aefde 2251
8e73485c 2252 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
e0506bcb
IE
2253 if (kvm_is_error_hva(addr))
2254 return -EFAULT;
3180a7fc 2255 r = __copy_from_user(data, (void __user *)addr + offset, len);
e0506bcb 2256 if (r)
195aefde 2257 return -EFAULT;
195aefde
IE
2258 return 0;
2259}
8e73485c
PB
2260
2261int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
2262 int len)
2263{
2264 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2265
2266 return __kvm_read_guest_page(slot, gfn, data, offset, len);
2267}
195aefde
IE
2268EXPORT_SYMBOL_GPL(kvm_read_guest_page);
2269
8e73485c
PB
2270int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
2271 int offset, int len)
2272{
2273 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2274
2275 return __kvm_read_guest_page(slot, gfn, data, offset, len);
2276}
2277EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
2278
195aefde
IE
2279int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
2280{
2281 gfn_t gfn = gpa >> PAGE_SHIFT;
2282 int seg;
2283 int offset = offset_in_page(gpa);
2284 int ret;
2285
2286 while ((seg = next_segment(len, offset)) != 0) {
2287 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
2288 if (ret < 0)
2289 return ret;
2290 offset = 0;
2291 len -= seg;
2292 data += seg;
2293 ++gfn;
2294 }
2295 return 0;
2296}
2297EXPORT_SYMBOL_GPL(kvm_read_guest);
2298
8e73485c 2299int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
7ec54588 2300{
7ec54588 2301 gfn_t gfn = gpa >> PAGE_SHIFT;
8e73485c 2302 int seg;
7ec54588 2303 int offset = offset_in_page(gpa);
8e73485c
PB
2304 int ret;
2305
2306 while ((seg = next_segment(len, offset)) != 0) {
2307 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
2308 if (ret < 0)
2309 return ret;
2310 offset = 0;
2311 len -= seg;
2312 data += seg;
2313 ++gfn;
2314 }
2315 return 0;
2316}
2317EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
7ec54588 2318
8e73485c
PB
2319static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
2320 void *data, int offset, unsigned long len)
2321{
2322 int r;
2323 unsigned long addr;
2324
2325 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
7ec54588
MT
2326 if (kvm_is_error_hva(addr))
2327 return -EFAULT;
0aac03f0 2328 pagefault_disable();
3180a7fc 2329 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
0aac03f0 2330 pagefault_enable();
7ec54588
MT
2331 if (r)
2332 return -EFAULT;
2333 return 0;
2334}
7ec54588 2335
8e73485c
PB
2336int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
2337 void *data, unsigned long len)
2338{
2339 gfn_t gfn = gpa >> PAGE_SHIFT;
2340 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2341 int offset = offset_in_page(gpa);
2342
2343 return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
2344}
2345EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
2346
2347static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn,
2348 const void *data, int offset, int len)
195aefde 2349{
e0506bcb
IE
2350 int r;
2351 unsigned long addr;
195aefde 2352
251eb841 2353 addr = gfn_to_hva_memslot(memslot, gfn);
e0506bcb
IE
2354 if (kvm_is_error_hva(addr))
2355 return -EFAULT;
8b0cedff 2356 r = __copy_to_user((void __user *)addr + offset, data, len);
e0506bcb 2357 if (r)
195aefde 2358 return -EFAULT;
bc009e43 2359 mark_page_dirty_in_slot(memslot, gfn);
195aefde
IE
2360 return 0;
2361}
8e73485c
PB
2362
2363int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
2364 const void *data, int offset, int len)
2365{
2366 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2367
2368 return __kvm_write_guest_page(slot, gfn, data, offset, len);
2369}
195aefde
IE
2370EXPORT_SYMBOL_GPL(kvm_write_guest_page);
2371
8e73485c
PB
2372int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
2373 const void *data, int offset, int len)
2374{
2375 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2376
2377 return __kvm_write_guest_page(slot, gfn, data, offset, len);
2378}
2379EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
2380
195aefde
IE
2381int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
2382 unsigned long len)
2383{
2384 gfn_t gfn = gpa >> PAGE_SHIFT;
2385 int seg;
2386 int offset = offset_in_page(gpa);
2387 int ret;
2388
2389 while ((seg = next_segment(len, offset)) != 0) {
2390 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
2391 if (ret < 0)
2392 return ret;
2393 offset = 0;
2394 len -= seg;
2395 data += seg;
2396 ++gfn;
2397 }
2398 return 0;
2399}
ff651cb6 2400EXPORT_SYMBOL_GPL(kvm_write_guest);
195aefde 2401
8e73485c
PB
2402int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
2403 unsigned long len)
2404{
2405 gfn_t gfn = gpa >> PAGE_SHIFT;
2406 int seg;
2407 int offset = offset_in_page(gpa);
2408 int ret;
2409
2410 while ((seg = next_segment(len, offset)) != 0) {
2411 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
2412 if (ret < 0)
2413 return ret;
2414 offset = 0;
2415 len -= seg;
2416 data += seg;
2417 ++gfn;
2418 }
2419 return 0;
2420}
2421EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
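/*
 * Editor's note: an illustrative sketch, not part of this file, showing that
 * kvm_read_guest()/kvm_write_guest() above take arbitrary lengths; the
 * next_segment() loop splits the copy into page-sized chunks, so the data may
 * straddle a page boundary.  The struct and function names are hypothetical.
 */
struct example_shared_info {
	u64 sequence;
	u64 payload[8];
};

static inline int example_publish_info(struct kvm *kvm, gpa_t gpa,
				       const struct example_shared_info *info)
{
	/* each touched page is marked dirty by __kvm_write_guest_page() */
	return kvm_write_guest(kvm, gpa, info, sizeof(*info));
}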
2422
5a2d4365
PB
2423static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
2424 struct gfn_to_hva_cache *ghc,
2425 gpa_t gpa, unsigned long len)
49c7754c 2426{
49c7754c 2427 int offset = offset_in_page(gpa);
8f964525
AH
2428 gfn_t start_gfn = gpa >> PAGE_SHIFT;
2429 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
2430 gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
2431 gfn_t nr_pages_avail;
49c7754c 2432
6ad1e29f 2433 /* Update ghc->generation before performing any error checks. */
49c7754c 2434 ghc->generation = slots->generation;
6ad1e29f
SC
2435
2436 if (start_gfn > end_gfn) {
2437 ghc->hva = KVM_HVA_ERR_BAD;
2438 return -EINVAL;
2439 }
f1b9dd5e
JM
2440
2441 /*
2442 * If the requested region crosses two memslots, we still
2443 * verify that the entire region is valid here.
2444 */
6ad1e29f 2445 for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) {
f1b9dd5e
JM
2446 ghc->memslot = __gfn_to_memslot(slots, start_gfn);
2447 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
2448 &nr_pages_avail);
2449 if (kvm_is_error_hva(ghc->hva))
6ad1e29f 2450 return -EFAULT;
f1b9dd5e
JM
2451 }
2452
2453 /* Use the slow path for cross page reads and writes. */
6ad1e29f 2454 if (nr_pages_needed == 1)
49c7754c 2455 ghc->hva += offset;
f1b9dd5e 2456 else
8f964525 2457 ghc->memslot = NULL;
f1b9dd5e 2458
6ad1e29f
SC
2459 ghc->gpa = gpa;
2460 ghc->len = len;
2461 return 0;
49c7754c 2462}
5a2d4365 2463
4e335d9e 2464int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
5a2d4365
PB
2465 gpa_t gpa, unsigned long len)
2466{
4e335d9e 2467 struct kvm_memslots *slots = kvm_memslots(kvm);
5a2d4365
PB
2468 return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
2469}
4e335d9e 2470EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
49c7754c 2471
4e335d9e 2472int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
7a86dab8
JM
2473 void *data, unsigned int offset,
2474 unsigned long len)
49c7754c 2475{
4e335d9e 2476 struct kvm_memslots *slots = kvm_memslots(kvm);
49c7754c 2477 int r;
4ec6e863 2478 gpa_t gpa = ghc->gpa + offset;
49c7754c 2479
4ec6e863 2480 BUG_ON(len + offset > ghc->len);
8f964525 2481
dc9ce71e
SC
2482 if (slots->generation != ghc->generation) {
2483 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
2484 return -EFAULT;
2485 }
8f964525 2486
49c7754c
GN
2487 if (kvm_is_error_hva(ghc->hva))
2488 return -EFAULT;
2489
fcfbc617
SC
2490 if (unlikely(!ghc->memslot))
2491 return kvm_write_guest(kvm, gpa, data, len);
2492
4ec6e863 2493 r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
49c7754c
GN
2494 if (r)
2495 return -EFAULT;
4ec6e863 2496 mark_page_dirty_in_slot(ghc->memslot, gpa >> PAGE_SHIFT);
49c7754c
GN
2497
2498 return 0;
2499}
4e335d9e 2500EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
4ec6e863 2501
4e335d9e
PB
2502int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2503 void *data, unsigned long len)
4ec6e863 2504{
4e335d9e 2505 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
4ec6e863 2506}
4e335d9e 2507EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
49c7754c 2508
0958f0ce
VK
2509int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2510 void *data, unsigned int offset,
2511 unsigned long len)
e03b644f 2512{
4e335d9e 2513 struct kvm_memslots *slots = kvm_memslots(kvm);
e03b644f 2514 int r;
0958f0ce 2515 gpa_t gpa = ghc->gpa + offset;
e03b644f 2516
0958f0ce 2517 BUG_ON(len + offset > ghc->len);
8f964525 2518
dc9ce71e
SC
2519 if (slots->generation != ghc->generation) {
2520 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
2521 return -EFAULT;
2522 }
8f964525 2523
e03b644f
GN
2524 if (kvm_is_error_hva(ghc->hva))
2525 return -EFAULT;
2526
fcfbc617 2527 if (unlikely(!ghc->memslot))
0958f0ce 2528 return kvm_read_guest(kvm, gpa, data, len);
fcfbc617 2529
0958f0ce 2530 r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
e03b644f
GN
2531 if (r)
2532 return -EFAULT;
2533
2534 return 0;
2535}
0958f0ce
VK
2536EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached);
2537
2538int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2539 void *data, unsigned long len)
2540{
2541 return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
2542}
4e335d9e 2543EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
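/*
 * Editor's note: an illustrative sketch, not part of this file, of the
 * intended gfn_to_hva_cache usage -- initialize the cache once for a fixed
 * guest buffer, then use the *_cached() helpers above on the hot path; they
 * only redo the gfn->hva walk when the memslot generation changes.  The
 * function names are hypothetical.
 */
static inline int example_counter_setup(struct kvm *kvm,
					struct gfn_to_hva_cache *ghc, gpa_t gpa)
{
	/* done once, e.g. when the guest registers the buffer */
	return kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, sizeof(u64));
}

static inline int example_counter_update(struct kvm *kvm,
					 struct gfn_to_hva_cache *ghc, u64 val)
{
	/* hot path: no translation unless slots->generation changed */
	return kvm_write_guest_cached(kvm, ghc, &val, sizeof(val));
}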
e03b644f 2544
195aefde
IE
2545int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
2546{
8a3caa6d
HC
2547 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
2548
2549 return kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
195aefde
IE
2550}
2551EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
2552
2553int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
2554{
2555 gfn_t gfn = gpa >> PAGE_SHIFT;
2556 int seg;
2557 int offset = offset_in_page(gpa);
2558 int ret;
2559
bfda0e84 2560 while ((seg = next_segment(len, offset)) != 0) {
195aefde
IE
2561 ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
2562 if (ret < 0)
2563 return ret;
2564 offset = 0;
2565 len -= seg;
2566 ++gfn;
2567 }
2568 return 0;
2569}
2570EXPORT_SYMBOL_GPL(kvm_clear_guest);
2571
bc009e43 2572static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot,
7940876e 2573 gfn_t gfn)
6aa8b732 2574{
7e9d619d
RR
2575 if (memslot && memslot->dirty_bitmap) {
2576 unsigned long rel_gfn = gfn - memslot->base_gfn;
6aa8b732 2577
b74ca3b3 2578 set_bit_le(rel_gfn, memslot->dirty_bitmap);
6aa8b732
AK
2579 }
2580}
2581
49c7754c
GN
2582void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
2583{
2584 struct kvm_memory_slot *memslot;
2585
2586 memslot = gfn_to_memslot(kvm, gfn);
bc009e43 2587 mark_page_dirty_in_slot(memslot, gfn);
49c7754c 2588}
2ba9f0d8 2589EXPORT_SYMBOL_GPL(mark_page_dirty);
49c7754c 2590
8e73485c
PB
2591void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
2592{
2593 struct kvm_memory_slot *memslot;
2594
2595 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2596 mark_page_dirty_in_slot(memslot, gfn);
2597}
2598EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
2599
20b7035c
JS
2600void kvm_sigset_activate(struct kvm_vcpu *vcpu)
2601{
2602 if (!vcpu->sigset_active)
2603 return;
2604
2605 /*
2606 * This does a lockless modification of ->real_blocked, which is fine
2607 * because only current can change ->real_blocked and all readers of
2608 * ->real_blocked don't care as long as ->real_blocked is always a subset
2609 * of ->blocked.
2610 */
2611 sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
2612}
2613
2614void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
2615{
2616 if (!vcpu->sigset_active)
2617 return;
2618
2619 sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
2620 sigemptyset(&current->real_blocked);
2621}
2622
aca6ff29
WL
2623static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
2624{
dee339b5 2625 unsigned int old, val, grow, grow_start;
aca6ff29 2626
2cbd7824 2627 old = val = vcpu->halt_poll_ns;
dee339b5 2628 grow_start = READ_ONCE(halt_poll_ns_grow_start);
6b6de68c 2629 grow = READ_ONCE(halt_poll_ns_grow);
7fa08e71
NW
2630 if (!grow)
2631 goto out;
2632
dee339b5
NW
2633 val *= grow;
2634 if (val < grow_start)
2635 val = grow_start;
aca6ff29 2636
313f636d
DM
2637 if (val > halt_poll_ns)
2638 val = halt_poll_ns;
2639
aca6ff29 2640 vcpu->halt_poll_ns = val;
7fa08e71 2641out:
2cbd7824 2642 trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
aca6ff29
WL
2643}
2644
2645static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
2646{
6b6de68c 2647 unsigned int old, val, shrink;
aca6ff29 2648
2cbd7824 2649 old = val = vcpu->halt_poll_ns;
6b6de68c
CB
2650 shrink = READ_ONCE(halt_poll_ns_shrink);
2651 if (shrink == 0)
aca6ff29
WL
2652 val = 0;
2653 else
6b6de68c 2654 val /= shrink;
aca6ff29
WL
2655
2656 vcpu->halt_poll_ns = val;
2cbd7824 2657 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
aca6ff29
WL
2658}
2659
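/*
 * Editor's note, worked example of the grow/shrink arithmetic above, assuming
 * the usual module parameter defaults (halt_poll_ns_grow = 2,
 * halt_poll_ns_grow_start = 10000, halt_poll_ns_shrink = 0) and taking the
 * global halt_poll_ns cap as 200000 ns purely for illustration:
 *
 *   grow:   0 -> 10000 -> 20000 -> 40000 -> 80000 -> 160000 -> 200000 (capped)
 *   shrink: with halt_poll_ns_shrink == 0 the window is reset straight to 0;
 *           with e.g. halt_poll_ns_shrink == 2 it would be halved instead.
 */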
f7819512
PB
2660static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
2661{
50c28f21
JS
2662 int ret = -EINTR;
2663 int idx = srcu_read_lock(&vcpu->kvm->srcu);
2664
f7819512
PB
2665 if (kvm_arch_vcpu_runnable(vcpu)) {
2666 kvm_make_request(KVM_REQ_UNHALT, vcpu);
50c28f21 2667 goto out;
f7819512
PB
2668 }
2669 if (kvm_cpu_has_pending_timer(vcpu))
50c28f21 2670 goto out;
f7819512 2671 if (signal_pending(current))
50c28f21 2672 goto out;
f7819512 2673
50c28f21
JS
2674 ret = 0;
2675out:
2676 srcu_read_unlock(&vcpu->kvm->srcu, idx);
2677 return ret;
f7819512
PB
2678}
2679
cb953129
DM
2680static inline void
2681update_halt_poll_stats(struct kvm_vcpu *vcpu, u64 poll_ns, bool waited)
2682{
2683 if (waited)
2684 vcpu->stat.halt_poll_fail_ns += poll_ns;
2685 else
2686 vcpu->stat.halt_poll_success_ns += poll_ns;
2687}
2688
b6958ce4
ED
2689/*
2690 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
2691 */
8776e519 2692void kvm_vcpu_block(struct kvm_vcpu *vcpu)
d3bef15f 2693{
cb953129 2694 ktime_t start, cur, poll_end;
f7819512 2695 bool waited = false;
aca6ff29 2696 u64 block_ns;
f7819512 2697
07ab0f8d
MZ
2698 kvm_arch_vcpu_blocking(vcpu);
2699
cb953129 2700 start = cur = poll_end = ktime_get();
cdd6ad3a 2701 if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) {
19020f8a 2702 ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
f95ef0cd 2703
62bea5bf 2704 ++vcpu->stat.halt_attempted_poll;
f7819512
PB
2705 do {
2706 /*
2707 * This sets KVM_REQ_UNHALT if an interrupt
2708 * arrives.
2709 */
2710 if (kvm_vcpu_check_block(vcpu) < 0) {
2711 ++vcpu->stat.halt_successful_poll;
3491caf2
CB
2712 if (!vcpu_valid_wakeup(vcpu))
2713 ++vcpu->stat.halt_poll_invalid;
f7819512
PB
2714 goto out;
2715 }
cb953129 2716 poll_end = cur = ktime_get();
f7819512
PB
2717 } while (single_task_running() && ktime_before(cur, stop));
2718 }
e5c239cf 2719
da4ad88c 2720 prepare_to_rcuwait(&vcpu->wait);
e5c239cf 2721 for (;;) {
da4ad88c 2722 set_current_state(TASK_INTERRUPTIBLE);
e5c239cf 2723
f7819512 2724 if (kvm_vcpu_check_block(vcpu) < 0)
e5c239cf
MT
2725 break;
2726
f7819512 2727 waited = true;
b6958ce4 2728 schedule();
b6958ce4 2729 }
da4ad88c 2730 finish_rcuwait(&vcpu->wait);
f7819512 2731 cur = ktime_get();
f7819512 2732out:
07ab0f8d 2733 kvm_arch_vcpu_unblocking(vcpu);
aca6ff29
WL
2734 block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
2735
cb953129
DM
2736 update_halt_poll_stats(
2737 vcpu, ktime_to_ns(ktime_sub(poll_end, start)), waited);
2738
44551b2f
WL
2739 if (!kvm_arch_no_poll(vcpu)) {
2740 if (!vcpu_valid_wakeup(vcpu)) {
aca6ff29 2741 shrink_halt_poll_ns(vcpu);
acd05785 2742 } else if (vcpu->kvm->max_halt_poll_ns) {
44551b2f
WL
2743 if (block_ns <= vcpu->halt_poll_ns)
2744 ;
2745 /* we had a long block, shrink polling */
acd05785
DM
2746 else if (vcpu->halt_poll_ns &&
2747 block_ns > vcpu->kvm->max_halt_poll_ns)
44551b2f
WL
2748 shrink_halt_poll_ns(vcpu);
2749 /* we had a short halt and our poll time is too small */
acd05785
DM
2750 else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns &&
2751 block_ns < vcpu->kvm->max_halt_poll_ns)
44551b2f
WL
2752 grow_halt_poll_ns(vcpu);
2753 } else {
2754 vcpu->halt_poll_ns = 0;
2755 }
2756 }
aca6ff29 2757
3491caf2
CB
2758 trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu));
2759 kvm_arch_vcpu_block_finish(vcpu);
b6958ce4 2760}
2ba9f0d8 2761EXPORT_SYMBOL_GPL(kvm_vcpu_block);
b6958ce4 2762
178f02ff 2763bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
b6d33834 2764{
da4ad88c 2765 struct rcuwait *waitp;
b6d33834 2766
da4ad88c
DB
2767 waitp = kvm_arch_vcpu_get_wait(vcpu);
2768 if (rcuwait_wake_up(waitp)) {
d73eb57b 2769 WRITE_ONCE(vcpu->ready, true);
b6d33834 2770 ++vcpu->stat.halt_wakeup;
178f02ff 2771 return true;
b6d33834
CD
2772 }
2773
178f02ff 2774 return false;
dd1a4cc1
RK
2775}
2776EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
2777
0266c894 2778#ifndef CONFIG_S390
dd1a4cc1
RK
2779/*
2780 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
2781 */
2782void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
2783{
2784 int me;
2785 int cpu = vcpu->cpu;
2786
178f02ff
RK
2787 if (kvm_vcpu_wake_up(vcpu))
2788 return;
2789
b6d33834
CD
2790 me = get_cpu();
2791 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
2792 if (kvm_arch_vcpu_should_kick(vcpu))
2793 smp_send_reschedule(cpu);
2794 put_cpu();
2795}
a20ed54d 2796EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
0266c894 2797#endif /* !CONFIG_S390 */
b6d33834 2798
fa93384f 2799int kvm_vcpu_yield_to(struct kvm_vcpu *target)
41628d33
KW
2800{
2801 struct pid *pid;
2802 struct task_struct *task = NULL;
fa93384f 2803 int ret = 0;
41628d33
KW
2804
2805 rcu_read_lock();
2806 pid = rcu_dereference(target->pid);
2807 if (pid)
27fbe64b 2808 task = get_pid_task(pid, PIDTYPE_PID);
41628d33
KW
2809 rcu_read_unlock();
2810 if (!task)
c45c528e 2811 return ret;
c45c528e 2812 ret = yield_to(task, 1);
41628d33 2813 put_task_struct(task);
c45c528e
R
2814
2815 return ret;
41628d33
KW
2816}
2817EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
2818
06e48c51
R
2819/*
2820 * Helper that checks whether a VCPU is eligible for directed yield.
2821 * The most eligible candidate to yield to is decided by the following heuristics:
2822 *
2823 * (a) A VCPU which has not had a PLE exit or cpu-relax intercept recently
2824 * (a preempted lock holder), indicated by @in_spin_loop.
656012c7 2825 * Set at the beginning and cleared at the end of interception/PLE handler.
06e48c51
R
2826 *
2827 * (b) A VCPU which has had a PLE exit/cpu-relax intercept but did not get a
2828 * chance last time (it has most likely become eligible now, since we probably
2829 * yielded to the lock holder in the last iteration). This is tracked by
2830 * toggling @dy_eligible each time a VCPU is checked for eligibility.
2831 *
2832 * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding
2833 * to preempted lock-holder could result in wrong VCPU selection and CPU
2834 * burning. Giving priority for a potential lock-holder increases lock
2835 * progress.
2836 *
2837 * Since the algorithm is based on heuristics, accessing another VCPU's data
2838 * without locking does no harm. It may result in trying to yield to the same
2839 * VCPU, failing, and continuing with the next VCPU, and so on.
2840 */
7940876e 2841static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
06e48c51 2842{
4a55dd72 2843#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
06e48c51
R
2844 bool eligible;
2845
2846 eligible = !vcpu->spin_loop.in_spin_loop ||
34656113 2847 vcpu->spin_loop.dy_eligible;
06e48c51
R
2848
2849 if (vcpu->spin_loop.in_spin_loop)
2850 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
2851
2852 return eligible;
4a55dd72
SW
2853#else
2854 return true;
06e48c51 2855#endif
4a55dd72 2856}
c45c528e 2857
17e433b5
WL
2858/*
2859 * Unlike kvm_arch_vcpu_runnable, this function is called outside
2860 * a vcpu_load/vcpu_put pair. However, for most architectures
2861 * kvm_arch_vcpu_runnable does not require vcpu_load.
2862 */
2863bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
2864{
2865 return kvm_arch_vcpu_runnable(vcpu);
2866}
2867
2868static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
2869{
2870 if (kvm_arch_dy_runnable(vcpu))
2871 return true;
2872
2873#ifdef CONFIG_KVM_ASYNC_PF
2874 if (!list_empty_careful(&vcpu->async_pf.done))
2875 return true;
2876#endif
2877
2878 return false;
2879}
2880
199b5763 2881void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
d255f4f2 2882{
217ece61
RR
2883 struct kvm *kvm = me->kvm;
2884 struct kvm_vcpu *vcpu;
2885 int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
2886 int yielded = 0;
c45c528e 2887 int try = 3;
217ece61
RR
2888 int pass;
2889 int i;
d255f4f2 2890
4c088493 2891 kvm_vcpu_set_in_spin_loop(me, true);
217ece61
RR
2892 /*
2893 * We boost the priority of a VCPU that is runnable but not
2894 * currently running, because it got preempted by something
2895 * else and called schedule in __vcpu_run. Hopefully that
2896 * VCPU is holding the lock that we need and will release it.
2897 * We approximate round-robin by starting at the last boosted VCPU.
2898 */
c45c528e 2899 for (pass = 0; pass < 2 && !yielded && try; pass++) {
217ece61 2900 kvm_for_each_vcpu(i, vcpu, kvm) {
5cfc2aab 2901 if (!pass && i <= last_boosted_vcpu) {
217ece61
RR
2902 i = last_boosted_vcpu;
2903 continue;
2904 } else if (pass && i > last_boosted_vcpu)
2905 break;
d73eb57b 2906 if (!READ_ONCE(vcpu->ready))
7bc7ae25 2907 continue;
217ece61
RR
2908 if (vcpu == me)
2909 continue;
da4ad88c
DB
2910 if (rcuwait_active(&vcpu->wait) &&
2911 !vcpu_dy_runnable(vcpu))
217ece61 2912 continue;
046ddeed
WL
2913 if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
2914 !kvm_arch_vcpu_in_kernel(vcpu))
199b5763 2915 continue;
06e48c51
R
2916 if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
2917 continue;
c45c528e
R
2918
2919 yielded = kvm_vcpu_yield_to(vcpu);
2920 if (yielded > 0) {
217ece61 2921 kvm->last_boosted_vcpu = i;
217ece61 2922 break;
c45c528e
R
2923 } else if (yielded < 0) {
2924 try--;
2925 if (!try)
2926 break;
217ece61 2927 }
217ece61
RR
2928 }
2929 }
4c088493 2930 kvm_vcpu_set_in_spin_loop(me, false);
06e48c51
R
2931
2932 /* Ensure vcpu is not eligible during next spinloop */
2933 kvm_vcpu_set_dy_eligible(me, false);
d255f4f2
ZE
2934}
2935EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
2936
1499fa80 2937static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
9a2bb7f4 2938{
11bac800 2939 struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
9a2bb7f4
AK
2940 struct page *page;
2941
e4a533a4 2942 if (vmf->pgoff == 0)
039576c0 2943 page = virt_to_page(vcpu->run);
09566765 2944#ifdef CONFIG_X86
e4a533a4 2945 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
ad312c7c 2946 page = virt_to_page(vcpu->arch.pio_data);
5f94c174 2947#endif
4b4357e0 2948#ifdef CONFIG_KVM_MMIO
5f94c174
LV
2949 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
2950 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
09566765 2951#endif
039576c0 2952 else
5b1c1493 2953 return kvm_arch_vcpu_fault(vcpu, vmf);
9a2bb7f4 2954 get_page(page);
e4a533a4 2955 vmf->page = page;
2956 return 0;
9a2bb7f4
AK
2957}
2958
f0f37e2f 2959static const struct vm_operations_struct kvm_vcpu_vm_ops = {
e4a533a4 2960 .fault = kvm_vcpu_fault,
9a2bb7f4
AK
2961};
2962
2963static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
2964{
2965 vma->vm_ops = &kvm_vcpu_vm_ops;
2966 return 0;
2967}
2968
bccf2150
AK
2969static int kvm_vcpu_release(struct inode *inode, struct file *filp)
2970{
2971 struct kvm_vcpu *vcpu = filp->private_data;
2972
45b5939e 2973 debugfs_remove_recursive(vcpu->debugfs_dentry);
66c0b394 2974 kvm_put_kvm(vcpu->kvm);
bccf2150
AK
2975 return 0;
2976}
2977
3d3aab1b 2978static struct file_operations kvm_vcpu_fops = {
bccf2150
AK
2979 .release = kvm_vcpu_release,
2980 .unlocked_ioctl = kvm_vcpu_ioctl,
9a2bb7f4 2981 .mmap = kvm_vcpu_mmap,
6038f373 2982 .llseek = noop_llseek,
7ddfd3e0 2983 KVM_COMPAT(kvm_vcpu_compat_ioctl),
bccf2150
AK
2984};
2985
2986/*
2987 * Allocates an inode for the vcpu.
2988 */
2989static int create_vcpu_fd(struct kvm_vcpu *vcpu)
2990{
e46b4692
MY
2991 char name[8 + 1 + ITOA_MAX_LEN + 1];
2992
2993 snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id);
2994 return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
bccf2150
AK
2995}
2996
3e7093d0 2997static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
45b5939e 2998{
741cbbae 2999#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
45b5939e 3000 char dir_name[ITOA_MAX_LEN * 2];
45b5939e 3001
45b5939e 3002 if (!debugfs_initialized())
3e7093d0 3003 return;
45b5939e
LC
3004
3005 snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
3006 vcpu->debugfs_dentry = debugfs_create_dir(dir_name,
3e7093d0 3007 vcpu->kvm->debugfs_dentry);
45b5939e 3008
3e7093d0 3009 kvm_arch_create_vcpu_debugfs(vcpu);
741cbbae 3010#endif
45b5939e
LC
3011}
3012
c5ea7660
AK
3013/*
3014 * Creates some virtual cpus. Good luck creating more than one.
3015 */
73880c80 3016static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
c5ea7660
AK
3017{
3018 int r;
e09fefde 3019 struct kvm_vcpu *vcpu;
8bd826d6 3020 struct page *page;
c5ea7660 3021
0b1b1dfd 3022 if (id >= KVM_MAX_VCPU_ID)
338c7dba
AH
3023 return -EINVAL;
3024
6c7caebc
PB
3025 mutex_lock(&kvm->lock);
3026 if (kvm->created_vcpus == KVM_MAX_VCPUS) {
3027 mutex_unlock(&kvm->lock);
3028 return -EINVAL;
3029 }
3030
3031 kvm->created_vcpus++;
3032 mutex_unlock(&kvm->lock);
3033
897cc38e
SC
3034 r = kvm_arch_vcpu_precreate(kvm, id);
3035 if (r)
3036 goto vcpu_decrement;
3037
e529ef66
SC
3038 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
3039 if (!vcpu) {
3040 r = -ENOMEM;
6c7caebc
PB
3041 goto vcpu_decrement;
3042 }
c5ea7660 3043
fcd97ad5 3044 BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE);
8bd826d6
SC
3045 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
3046 if (!page) {
3047 r = -ENOMEM;
e529ef66 3048 goto vcpu_free;
8bd826d6
SC
3049 }
3050 vcpu->run = page_address(page);
3051
3052 kvm_vcpu_init(vcpu, kvm, id);
e529ef66
SC
3053
3054 r = kvm_arch_vcpu_create(vcpu);
3055 if (r)
8bd826d6 3056 goto vcpu_free_run_page;
e529ef66 3057
11ec2804 3058 mutex_lock(&kvm->lock);
e09fefde
DH
3059 if (kvm_get_vcpu_by_id(kvm, id)) {
3060 r = -EEXIST;
3061 goto unlock_vcpu_destroy;
3062 }
73880c80 3063
8750e72a
RK
3064 vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
3065 BUG_ON(kvm->vcpus[vcpu->vcpu_idx]);
c5ea7660 3066
fb3f0f51 3067 /* Now it's all set up, let userspace reach it */
66c0b394 3068 kvm_get_kvm(kvm);
bccf2150 3069 r = create_vcpu_fd(vcpu);
73880c80 3070 if (r < 0) {
149487bd 3071 kvm_put_kvm_no_destroy(kvm);
d780592b 3072 goto unlock_vcpu_destroy;
73880c80
GN
3073 }
3074
8750e72a 3075 kvm->vcpus[vcpu->vcpu_idx] = vcpu;
dd489240
PB
3076
3077 /*
3078 * Pairs with smp_rmb() in kvm_get_vcpu. Store kvm->vcpus
3079 * before the incremented value of kvm->online_vcpus.
3080 */
73880c80
GN
3081 smp_wmb();
3082 atomic_inc(&kvm->online_vcpus);
3083
73880c80 3084 mutex_unlock(&kvm->lock);
42897d86 3085 kvm_arch_vcpu_postcreate(vcpu);
63d04348 3086 kvm_create_vcpu_debugfs(vcpu);
fb3f0f51 3087 return r;
39c3b86e 3088
d780592b 3089unlock_vcpu_destroy:
7d8fece6 3090 mutex_unlock(&kvm->lock);
d40ccc62 3091 kvm_arch_vcpu_destroy(vcpu);
8bd826d6
SC
3092vcpu_free_run_page:
3093 free_page((unsigned long)vcpu->run);
e529ef66
SC
3094vcpu_free:
3095 kmem_cache_free(kvm_vcpu_cache, vcpu);
6c7caebc
PB
3096vcpu_decrement:
3097 mutex_lock(&kvm->lock);
3098 kvm->created_vcpus--;
3099 mutex_unlock(&kvm->lock);
c5ea7660
AK
3100 return r;
3101}
3102
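/*
 * Editor's note: an illustrative userspace sketch, not part of this file, of
 * the other end of kvm_vm_ioctl_create_vcpu()/create_vcpu_fd() above: create
 * a vCPU fd and mmap its shared kvm_run area, which kvm_vcpu_fault() above
 * serves at page offset 0.  Error handling is minimal; "kvm_fd" is the
 * /dev/kvm fd, "vm_fd" comes from KVM_CREATE_VM, and the function name is
 * hypothetical.
 */
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static struct kvm_run *example_create_and_map_vcpu(int kvm_fd, int vm_fd,
						   unsigned long id)
{
	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, id);
	long run_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	void *run;

	if (vcpu_fd < 0 || run_size <= 0)
		return NULL;

	run = mmap(NULL, run_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   vcpu_fd, 0);
	return run == MAP_FAILED ? NULL : run;
}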
1961d276
AK
3103static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
3104{
3105 if (sigset) {
3106 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
3107 vcpu->sigset_active = 1;
3108 vcpu->sigset = *sigset;
3109 } else
3110 vcpu->sigset_active = 0;
3111 return 0;
3112}
3113
bccf2150
AK
3114static long kvm_vcpu_ioctl(struct file *filp,
3115 unsigned int ioctl, unsigned long arg)
6aa8b732 3116{
bccf2150 3117 struct kvm_vcpu *vcpu = filp->private_data;
2f366987 3118 void __user *argp = (void __user *)arg;
313a3dc7 3119 int r;
fa3795a7
DH
3120 struct kvm_fpu *fpu = NULL;
3121 struct kvm_sregs *kvm_sregs = NULL;
6aa8b732 3122
6d4e4c4f
AK
3123 if (vcpu->kvm->mm != current->mm)
3124 return -EIO;
2122ff5e 3125
2ea75be3
DM
3126 if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
3127 return -EINVAL;
3128
2122ff5e 3129 /*
5cb0944c
PB
3130 * Some architectures have vcpu ioctls that are asynchronous to vcpu
3131 * execution; mutex_lock() would break them.
2122ff5e 3132 */
5cb0944c
PB
3133 r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
3134 if (r != -ENOIOCTLCMD)
9fc77441 3135 return r;
2122ff5e 3136
ec7660cc
CD
3137 if (mutex_lock_killable(&vcpu->mutex))
3138 return -EINTR;
6aa8b732 3139 switch (ioctl) {
0e4524a5
CB
3140 case KVM_RUN: {
3141 struct pid *oldpid;
f0fe5108
AK
3142 r = -EINVAL;
3143 if (arg)
3144 goto out;
0e4524a5 3145 oldpid = rcu_access_pointer(vcpu->pid);
71dbc8a9 3146 if (unlikely(oldpid != task_pid(current))) {
7a72f7a1 3147 /* The thread running this VCPU changed. */
bd2a6394 3148 struct pid *newpid;
f95ef0cd 3149
bd2a6394
CD
3150 r = kvm_arch_vcpu_run_pid_change(vcpu);
3151 if (r)
3152 break;
3153
3154 newpid = get_task_pid(current, PIDTYPE_PID);
7a72f7a1
CB
3155 rcu_assign_pointer(vcpu->pid, newpid);
3156 if (oldpid)
3157 synchronize_rcu();
3158 put_pid(oldpid);
3159 }
1b94f6f8 3160 r = kvm_arch_vcpu_ioctl_run(vcpu);
64be5007 3161 trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
6aa8b732 3162 break;
0e4524a5 3163 }
6aa8b732 3164 case KVM_GET_REGS: {
3e4bb3ac 3165 struct kvm_regs *kvm_regs;
6aa8b732 3166
3e4bb3ac 3167 r = -ENOMEM;
b12ce36a 3168 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT);
3e4bb3ac 3169 if (!kvm_regs)
6aa8b732 3170 goto out;
3e4bb3ac
XZ
3171 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
3172 if (r)
3173 goto out_free1;
6aa8b732 3174 r = -EFAULT;
3e4bb3ac
XZ
3175 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
3176 goto out_free1;
6aa8b732 3177 r = 0;
3e4bb3ac
XZ
3178out_free1:
3179 kfree(kvm_regs);
6aa8b732
AK
3180 break;
3181 }
3182 case KVM_SET_REGS: {
3e4bb3ac 3183 struct kvm_regs *kvm_regs;
6aa8b732 3184
ff5c2c03
SL
3185 kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
3186 if (IS_ERR(kvm_regs)) {
3187 r = PTR_ERR(kvm_regs);
6aa8b732 3188 goto out;
ff5c2c03 3189 }
3e4bb3ac 3190 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
3e4bb3ac 3191 kfree(kvm_regs);
6aa8b732
AK
3192 break;
3193 }
3194 case KVM_GET_SREGS: {
b12ce36a
BG
3195 kvm_sregs = kzalloc(sizeof(struct kvm_sregs),
3196 GFP_KERNEL_ACCOUNT);
fa3795a7
DH
3197 r = -ENOMEM;
3198 if (!kvm_sregs)
3199 goto out;
3200 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
6aa8b732
AK
3201 if (r)
3202 goto out;
3203 r = -EFAULT;
fa3795a7 3204 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
6aa8b732
AK
3205 goto out;
3206 r = 0;
3207 break;
3208 }
3209 case KVM_SET_SREGS: {
ff5c2c03
SL
3210 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
3211 if (IS_ERR(kvm_sregs)) {
3212 r = PTR_ERR(kvm_sregs);
18595411 3213 kvm_sregs = NULL;
6aa8b732 3214 goto out;
ff5c2c03 3215 }
fa3795a7 3216 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
6aa8b732
AK
3217 break;
3218 }
62d9f0db
MT
3219 case KVM_GET_MP_STATE: {
3220 struct kvm_mp_state mp_state;
3221
3222 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
3223 if (r)
3224 goto out;
3225 r = -EFAULT;
893bdbf1 3226 if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
62d9f0db
MT
3227 goto out;
3228 r = 0;
3229 break;
3230 }
3231 case KVM_SET_MP_STATE: {
3232 struct kvm_mp_state mp_state;
3233
3234 r = -EFAULT;
893bdbf1 3235 if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
62d9f0db
MT
3236 goto out;
3237 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
62d9f0db
MT
3238 break;
3239 }
6aa8b732
AK
3240 case KVM_TRANSLATE: {
3241 struct kvm_translation tr;
3242
3243 r = -EFAULT;
893bdbf1 3244 if (copy_from_user(&tr, argp, sizeof(tr)))
6aa8b732 3245 goto out;
8b006791 3246 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
6aa8b732
AK
3247 if (r)
3248 goto out;
3249 r = -EFAULT;
893bdbf1 3250 if (copy_to_user(argp, &tr, sizeof(tr)))
6aa8b732
AK
3251 goto out;
3252 r = 0;
3253 break;
3254 }
d0bfb940
JK
3255 case KVM_SET_GUEST_DEBUG: {
3256 struct kvm_guest_debug dbg;
6aa8b732
AK
3257
3258 r = -EFAULT;
893bdbf1 3259 if (copy_from_user(&dbg, argp, sizeof(dbg)))
6aa8b732 3260 goto out;
d0bfb940 3261 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
6aa8b732
AK
3262 break;
3263 }
1961d276
AK
3264 case KVM_SET_SIGNAL_MASK: {
3265 struct kvm_signal_mask __user *sigmask_arg = argp;
3266 struct kvm_signal_mask kvm_sigmask;
3267 sigset_t sigset, *p;
3268
3269 p = NULL;
3270 if (argp) {
3271 r = -EFAULT;
3272 if (copy_from_user(&kvm_sigmask, argp,
893bdbf1 3273 sizeof(kvm_sigmask)))
1961d276
AK
3274 goto out;
3275 r = -EINVAL;
893bdbf1 3276 if (kvm_sigmask.len != sizeof(sigset))
1961d276
AK
3277 goto out;
3278 r = -EFAULT;
3279 if (copy_from_user(&sigset, sigmask_arg->sigset,
893bdbf1 3280 sizeof(sigset)))
1961d276
AK
3281 goto out;
3282 p = &sigset;
3283 }
376d41ff 3284 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
1961d276
AK
3285 break;
3286 }
b8836737 3287 case KVM_GET_FPU: {
b12ce36a 3288 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT);
fa3795a7
DH
3289 r = -ENOMEM;
3290 if (!fpu)
3291 goto out;
3292 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
b8836737
AK
3293 if (r)
3294 goto out;
3295 r = -EFAULT;
fa3795a7 3296 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
b8836737
AK
3297 goto out;
3298 r = 0;
3299 break;
3300 }
3301 case KVM_SET_FPU: {
ff5c2c03
SL
3302 fpu = memdup_user(argp, sizeof(*fpu));
3303 if (IS_ERR(fpu)) {
3304 r = PTR_ERR(fpu);
18595411 3305 fpu = NULL;
b8836737 3306 goto out;
ff5c2c03 3307 }
fa3795a7 3308 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
b8836737
AK
3309 break;
3310 }
bccf2150 3311 default:
313a3dc7 3312 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
bccf2150
AK
3313 }
3314out:
ec7660cc 3315 mutex_unlock(&vcpu->mutex);
fa3795a7
DH
3316 kfree(fpu);
3317 kfree(kvm_sregs);
bccf2150
AK
3318 return r;
3319}
3320
de8e5d74 3321#ifdef CONFIG_KVM_COMPAT
1dda606c
AG
3322static long kvm_vcpu_compat_ioctl(struct file *filp,
3323 unsigned int ioctl, unsigned long arg)
3324{
3325 struct kvm_vcpu *vcpu = filp->private_data;
3326 void __user *argp = compat_ptr(arg);
3327 int r;
3328
3329 if (vcpu->kvm->mm != current->mm)
3330 return -EIO;
3331
3332 switch (ioctl) {
3333 case KVM_SET_SIGNAL_MASK: {
3334 struct kvm_signal_mask __user *sigmask_arg = argp;
3335 struct kvm_signal_mask kvm_sigmask;
1dda606c
AG
3336 sigset_t sigset;
3337
3338 if (argp) {
3339 r = -EFAULT;
3340 if (copy_from_user(&kvm_sigmask, argp,
893bdbf1 3341 sizeof(kvm_sigmask)))
1dda606c
AG
3342 goto out;
3343 r = -EINVAL;
3968cf62 3344 if (kvm_sigmask.len != sizeof(compat_sigset_t))
1dda606c
AG
3345 goto out;
3346 r = -EFAULT;
3968cf62 3347 if (get_compat_sigset(&sigset, (void *)sigmask_arg->sigset))
1dda606c 3348 goto out;
760a9a30
AC
3349 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
3350 } else
3351 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
1dda606c
AG
3352 break;
3353 }
3354 default:
3355 r = kvm_vcpu_ioctl(filp, ioctl, arg);
3356 }
3357
3358out:
3359 return r;
3360}
3361#endif
3362
a1cd3f08
CLG
3363static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
3364{
3365 struct kvm_device *dev = filp->private_data;
3366
3367 if (dev->ops->mmap)
3368 return dev->ops->mmap(dev, vma);
3369
3370 return -ENODEV;
3371}
3372
852b6d57
SW
3373static int kvm_device_ioctl_attr(struct kvm_device *dev,
3374 int (*accessor)(struct kvm_device *dev,
3375 struct kvm_device_attr *attr),
3376 unsigned long arg)
3377{
3378 struct kvm_device_attr attr;
3379
3380 if (!accessor)
3381 return -EPERM;
3382
3383 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
3384 return -EFAULT;
3385
3386 return accessor(dev, &attr);
3387}
3388
3389static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
3390 unsigned long arg)
3391{
3392 struct kvm_device *dev = filp->private_data;
3393
ddba9180
SC
3394 if (dev->kvm->mm != current->mm)
3395 return -EIO;
3396
852b6d57
SW
3397 switch (ioctl) {
3398 case KVM_SET_DEVICE_ATTR:
3399 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
3400 case KVM_GET_DEVICE_ATTR:
3401 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
3402 case KVM_HAS_DEVICE_ATTR:
3403 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
3404 default:
3405 if (dev->ops->ioctl)
3406 return dev->ops->ioctl(dev, ioctl, arg);
3407
3408 return -ENOTTY;
3409 }
3410}
3411
852b6d57
SW
3412static int kvm_device_release(struct inode *inode, struct file *filp)
3413{
3414 struct kvm_device *dev = filp->private_data;
3415 struct kvm *kvm = dev->kvm;
3416
2bde9b3e
CLG
3417 if (dev->ops->release) {
3418 mutex_lock(&kvm->lock);
3419 list_del(&dev->vm_node);
3420 dev->ops->release(dev);
3421 mutex_unlock(&kvm->lock);
3422 }
3423
852b6d57
SW
3424 kvm_put_kvm(kvm);
3425 return 0;
3426}
3427
3428static const struct file_operations kvm_device_fops = {
3429 .unlocked_ioctl = kvm_device_ioctl,
3430 .release = kvm_device_release,
7ddfd3e0 3431 KVM_COMPAT(kvm_device_ioctl),
a1cd3f08 3432 .mmap = kvm_device_mmap,
852b6d57
SW
3433};
3434
3435struct kvm_device *kvm_device_from_filp(struct file *filp)
3436{
3437 if (filp->f_op != &kvm_device_fops)
3438 return NULL;
3439
3440 return filp->private_data;
3441}
3442
8538cb22 3443static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
5df554ad 3444#ifdef CONFIG_KVM_MPIC
d60eacb0
WD
3445 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops,
3446 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops,
5975a2e0 3447#endif
d60eacb0
WD
3448};
3449
8538cb22 3450int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type)
d60eacb0
WD
3451{
3452 if (type >= ARRAY_SIZE(kvm_device_ops_table))
3453 return -ENOSPC;
3454
3455 if (kvm_device_ops_table[type] != NULL)
3456 return -EEXIST;
3457
3458 kvm_device_ops_table[type] = ops;
3459 return 0;
3460}
3461
571ee1b6
WL
3462void kvm_unregister_device_ops(u32 type)
3463{
3464 if (kvm_device_ops_table[type] != NULL)
3465 kvm_device_ops_table[type] = NULL;
3466}
3467
852b6d57
SW
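/*
 * Handle KVM_CREATE_DEVICE: look up the requested device type, let the
 * backend create the device and install an anonymous-inode fd for it.
 * With KVM_CREATE_DEVICE_TEST only the type lookup is performed.
 */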
3468static int kvm_ioctl_create_device(struct kvm *kvm,
3469 struct kvm_create_device *cd)
3470{
8538cb22 3471 const struct kvm_device_ops *ops = NULL;
852b6d57
SW
3472 struct kvm_device *dev;
3473 bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
1d487e9b 3474 int type;
852b6d57
SW
3475 int ret;
3476
d60eacb0
WD
3477 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
3478 return -ENODEV;
3479
1d487e9b
PB
3480 type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
3481 ops = kvm_device_ops_table[type];
d60eacb0 3482 if (ops == NULL)
852b6d57 3483 return -ENODEV;
852b6d57
SW
3484
3485 if (test)
3486 return 0;
3487
b12ce36a 3488 dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
852b6d57
SW
3489 if (!dev)
3490 return -ENOMEM;
3491
3492 dev->ops = ops;
3493 dev->kvm = kvm;
852b6d57 3494
a28ebea2 3495 mutex_lock(&kvm->lock);
1d487e9b 3496 ret = ops->create(dev, type);
852b6d57 3497 if (ret < 0) {
a28ebea2 3498 mutex_unlock(&kvm->lock);
852b6d57
SW
3499 kfree(dev);
3500 return ret;
3501 }
a28ebea2
CD
3502 list_add(&dev->vm_node, &kvm->devices);
3503 mutex_unlock(&kvm->lock);
852b6d57 3504
023e9fdd
CD
3505 if (ops->init)
3506 ops->init(dev);
3507
cfa39381 3508 kvm_get_kvm(kvm);
24009b05 3509 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
852b6d57 3510 if (ret < 0) {
149487bd 3511 kvm_put_kvm_no_destroy(kvm);
a28ebea2
CD
3512 mutex_lock(&kvm->lock);
3513 list_del(&dev->vm_node);
3514 mutex_unlock(&kvm->lock);
a0f1d21c 3515 ops->destroy(dev);
852b6d57
SW
3516 return ret;
3517 }
3518
852b6d57
SW
3519 cd->fd = ret;
3520 return 0;
3521}
3522
92b591a4
AG
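/*
 * Capabilities handled generically for all architectures; anything not
 * recognized here is forwarded to the arch-specific
 * kvm_vm_ioctl_check_extension().
 */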
3523static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
3524{
3525 switch (arg) {
3526 case KVM_CAP_USER_MEMORY:
3527 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
3528 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
92b591a4
AG
3529 case KVM_CAP_INTERNAL_ERROR_DATA:
3530#ifdef CONFIG_HAVE_KVM_MSI
3531 case KVM_CAP_SIGNAL_MSI:
3532#endif
297e2105 3533#ifdef CONFIG_HAVE_KVM_IRQFD
dc9be0fa 3534 case KVM_CAP_IRQFD:
92b591a4
AG
3535 case KVM_CAP_IRQFD_RESAMPLE:
3536#endif
e9ea5069 3537 case KVM_CAP_IOEVENTFD_ANY_LENGTH:
92b591a4 3538 case KVM_CAP_CHECK_EXTENSION_VM:
e5d83c74 3539 case KVM_CAP_ENABLE_CAP_VM:
acd05785 3540 case KVM_CAP_HALT_POLL:
92b591a4 3541 return 1;
4b4357e0 3542#ifdef CONFIG_KVM_MMIO
30422558
PB
3543 case KVM_CAP_COALESCED_MMIO:
3544 return KVM_COALESCED_MMIO_PAGE_OFFSET;
0804c849
PH
3545 case KVM_CAP_COALESCED_PIO:
3546 return 1;
30422558 3547#endif
3c9bd400
JZ
3548#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
3549 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
3550 return KVM_DIRTY_LOG_MANUAL_CAPS;
3551#endif
92b591a4
AG
3552#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
3553 case KVM_CAP_IRQ_ROUTING:
3554 return KVM_MAX_IRQ_ROUTES;
f481b069
PB
3555#endif
3556#if KVM_ADDRESS_SPACE_NUM > 1
3557 case KVM_CAP_MULTI_ADDRESS_SPACE:
3558 return KVM_ADDRESS_SPACE_NUM;
92b591a4 3559#endif
c110ae57
PB
3560 case KVM_CAP_NR_MEMSLOTS:
3561 return KVM_USER_MEM_SLOTS;
92b591a4
AG
3562 default:
3563 break;
3564 }
3565 return kvm_vm_ioctl_check_extension(kvm, arg);
3566}
3567
e5d83c74
PB
3568int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
3569 struct kvm_enable_cap *cap)
3570{
3571 return -EINVAL;
3572}
3573
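/*
 * Generic part of KVM_ENABLE_CAP on a VM fd; caps not handled here are
 * passed on to the (arch-overridable) kvm_vm_ioctl_enable_cap().
 */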
3574static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
3575 struct kvm_enable_cap *cap)
3576{
3577 switch (cap->cap) {
2a31b9db 3578#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
3c9bd400
JZ
3579 case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: {
3580 u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE;
3581
3582 if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE)
3583 allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS;
3584
3585 if (cap->flags || (cap->args[0] & ~allowed_options))
2a31b9db
PB
3586 return -EINVAL;
3587 kvm->manual_dirty_log_protect = cap->args[0];
3588 return 0;
3c9bd400 3589 }
2a31b9db 3590#endif
acd05785
DM
3591 case KVM_CAP_HALT_POLL: {
3592 if (cap->flags || cap->args[0] != (unsigned int)cap->args[0])
3593 return -EINVAL;
3594
3595 kvm->max_halt_poll_ns = cap->args[0];
3596 return 0;
3597 }
e5d83c74
PB
3598 default:
3599 return kvm_vm_ioctl_enable_cap(kvm, cap);
3600 }
3601}
3602
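/*
 * Illustrative userspace-side sketch (not part of kvm_main.c): capping the
 * per-VM halt-polling time via the KVM_CAP_HALT_POLL path handled by
 * kvm_vm_ioctl_enable_cap_generic() above.  Assumes <sys/ioctl.h> and
 * <linux/kvm.h>, plus a VM fd obtained earlier with KVM_CREATE_VM; the
 * 200us value is only an example.
 */
#if 0	/* userspace example, not built as part of the kernel */
static int limit_halt_poll(int vm_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_HALT_POLL,
		.args[0] = 200000,	/* max halt-poll time, in ns */
	};

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}
#endif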
bccf2150
AK
3603static long kvm_vm_ioctl(struct file *filp,
3604 unsigned int ioctl, unsigned long arg)
3605{
3606 struct kvm *kvm = filp->private_data;
3607 void __user *argp = (void __user *)arg;
1fe779f8 3608 int r;
bccf2150 3609
6d4e4c4f
AK
3610 if (kvm->mm != current->mm)
3611 return -EIO;
bccf2150
AK
3612 switch (ioctl) {
3613 case KVM_CREATE_VCPU:
3614 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
bccf2150 3615 break;
e5d83c74
PB
3616 case KVM_ENABLE_CAP: {
3617 struct kvm_enable_cap cap;
3618
3619 r = -EFAULT;
3620 if (copy_from_user(&cap, argp, sizeof(cap)))
3621 goto out;
3622 r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
3623 break;
3624 }
6fc138d2
IE
3625 case KVM_SET_USER_MEMORY_REGION: {
3626 struct kvm_userspace_memory_region kvm_userspace_mem;
3627
3628 r = -EFAULT;
3629 if (copy_from_user(&kvm_userspace_mem, argp,
893bdbf1 3630 sizeof(kvm_userspace_mem)))
6fc138d2
IE
3631 goto out;
3632
47ae31e2 3633 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
6aa8b732
AK
3634 break;
3635 }
3636 case KVM_GET_DIRTY_LOG: {
3637 struct kvm_dirty_log log;
3638
3639 r = -EFAULT;
893bdbf1 3640 if (copy_from_user(&log, argp, sizeof(log)))
6aa8b732 3641 goto out;
2c6f5df9 3642 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
6aa8b732
AK
3643 break;
3644 }
2a31b9db
PB
3645#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
3646 case KVM_CLEAR_DIRTY_LOG: {
3647 struct kvm_clear_dirty_log log;
3648
3649 r = -EFAULT;
3650 if (copy_from_user(&log, argp, sizeof(log)))
3651 goto out;
3652 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
3653 break;
3654 }
3655#endif
4b4357e0 3656#ifdef CONFIG_KVM_MMIO
5f94c174
LV
3657 case KVM_REGISTER_COALESCED_MMIO: {
3658 struct kvm_coalesced_mmio_zone zone;
f95ef0cd 3659
5f94c174 3660 r = -EFAULT;
893bdbf1 3661 if (copy_from_user(&zone, argp, sizeof(zone)))
5f94c174 3662 goto out;
5f94c174 3663 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
5f94c174
LV
3664 break;
3665 }
3666 case KVM_UNREGISTER_COALESCED_MMIO: {
3667 struct kvm_coalesced_mmio_zone zone;
f95ef0cd 3668
5f94c174 3669 r = -EFAULT;
893bdbf1 3670 if (copy_from_user(&zone, argp, sizeof(zone)))
5f94c174 3671 goto out;
5f94c174 3672 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
5f94c174
LV
3673 break;
3674 }
3675#endif
721eecbf
GH
3676 case KVM_IRQFD: {
3677 struct kvm_irqfd data;
3678
3679 r = -EFAULT;
893bdbf1 3680 if (copy_from_user(&data, argp, sizeof(data)))
721eecbf 3681 goto out;
d4db2935 3682 r = kvm_irqfd(kvm, &data);
721eecbf
GH
3683 break;
3684 }
d34e6b17
GH
3685 case KVM_IOEVENTFD: {
3686 struct kvm_ioeventfd data;
3687
3688 r = -EFAULT;
893bdbf1 3689 if (copy_from_user(&data, argp, sizeof(data)))
d34e6b17
GH
3690 goto out;
3691 r = kvm_ioeventfd(kvm, &data);
3692 break;
3693 }
07975ad3
JK
3694#ifdef CONFIG_HAVE_KVM_MSI
3695 case KVM_SIGNAL_MSI: {
3696 struct kvm_msi msi;
3697
3698 r = -EFAULT;
893bdbf1 3699 if (copy_from_user(&msi, argp, sizeof(msi)))
07975ad3
JK
3700 goto out;
3701 r = kvm_send_userspace_msi(kvm, &msi);
3702 break;
3703 }
23d43cf9
CD
3704#endif
3705#ifdef __KVM_HAVE_IRQ_LINE
3706 case KVM_IRQ_LINE_STATUS:
3707 case KVM_IRQ_LINE: {
3708 struct kvm_irq_level irq_event;
3709
3710 r = -EFAULT;
893bdbf1 3711 if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
23d43cf9
CD
3712 goto out;
3713
aa2fbe6d
YZ
3714 r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
3715 ioctl == KVM_IRQ_LINE_STATUS);
23d43cf9
CD
3716 if (r)
3717 goto out;
3718
3719 r = -EFAULT;
3720 if (ioctl == KVM_IRQ_LINE_STATUS) {
893bdbf1 3721 if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
23d43cf9
CD
3722 goto out;
3723 }
3724
3725 r = 0;
3726 break;
3727 }
73880c80 3728#endif
aa8d5944
AG
3729#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
3730 case KVM_SET_GSI_ROUTING: {
3731 struct kvm_irq_routing routing;
3732 struct kvm_irq_routing __user *urouting;
f8c1b85b 3733 struct kvm_irq_routing_entry *entries = NULL;
aa8d5944
AG
3734
3735 r = -EFAULT;
3736 if (copy_from_user(&routing, argp, sizeof(routing)))
3737 goto out;
3738 r = -EINVAL;
5c0aea0e
DH
3739 if (!kvm_arch_can_set_irq_routing(kvm))
3740 goto out;
caf1ff26 3741 if (routing.nr > KVM_MAX_IRQ_ROUTES)
aa8d5944
AG
3742 goto out;
3743 if (routing.flags)
3744 goto out;
f8c1b85b
PB
3745 if (routing.nr) {
3746 r = -ENOMEM;
42bc47b3
KC
3747 entries = vmalloc(array_size(sizeof(*entries),
3748 routing.nr));
f8c1b85b
PB
3749 if (!entries)
3750 goto out;
3751 r = -EFAULT;
3752 urouting = argp;
3753 if (copy_from_user(entries, urouting->entries,
3754 routing.nr * sizeof(*entries)))
3755 goto out_free_irq_routing;
3756 }
aa8d5944
AG
3757 r = kvm_set_irq_routing(kvm, entries, routing.nr,
3758 routing.flags);
a642a175 3759out_free_irq_routing:
aa8d5944
AG
3760 vfree(entries);
3761 break;
3762 }
3763#endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
852b6d57
SW
3764 case KVM_CREATE_DEVICE: {
3765 struct kvm_create_device cd;
3766
3767 r = -EFAULT;
3768 if (copy_from_user(&cd, argp, sizeof(cd)))
3769 goto out;
3770
3771 r = kvm_ioctl_create_device(kvm, &cd);
3772 if (r)
3773 goto out;
3774
3775 r = -EFAULT;
3776 if (copy_to_user(argp, &cd, sizeof(cd)))
3777 goto out;
3778
3779 r = 0;
3780 break;
3781 }
92b591a4
AG
3782 case KVM_CHECK_EXTENSION:
3783 r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
3784 break;
f17abe9a 3785 default:
1fe779f8 3786 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
f17abe9a
AK
3787 }
3788out:
3789 return r;
3790}
3791
de8e5d74 3792#ifdef CONFIG_KVM_COMPAT
6ff5894c
AB
3793struct compat_kvm_dirty_log {
3794 __u32 slot;
3795 __u32 padding1;
3796 union {
3797 compat_uptr_t dirty_bitmap; /* one bit per page */
3798 __u64 padding2;
3799 };
3800};
3801
3802static long kvm_vm_compat_ioctl(struct file *filp,
3803 unsigned int ioctl, unsigned long arg)
3804{
3805 struct kvm *kvm = filp->private_data;
3806 int r;
3807
3808 if (kvm->mm != current->mm)
3809 return -EIO;
3810 switch (ioctl) {
3811 case KVM_GET_DIRTY_LOG: {
3812 struct compat_kvm_dirty_log compat_log;
3813 struct kvm_dirty_log log;
3814
6ff5894c
AB
3815 if (copy_from_user(&compat_log, (void __user *)arg,
3816 sizeof(compat_log)))
f6a3b168 3817 return -EFAULT;
6ff5894c
AB
3818 log.slot = compat_log.slot;
3819 log.padding1 = compat_log.padding1;
3820 log.padding2 = compat_log.padding2;
3821 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
3822
3823 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
6ff5894c
AB
3824 break;
3825 }
3826 default:
3827 r = kvm_vm_ioctl(filp, ioctl, arg);
3828 }
6ff5894c
AB
3829 return r;
3830}
3831#endif
3832
3d3aab1b 3833static struct file_operations kvm_vm_fops = {
f17abe9a
AK
3834 .release = kvm_vm_release,
3835 .unlocked_ioctl = kvm_vm_ioctl,
6038f373 3836 .llseek = noop_llseek,
7ddfd3e0 3837 KVM_COMPAT(kvm_vm_compat_ioctl),
f17abe9a
AK
3838};
3839
e08b9637 3840static int kvm_dev_ioctl_create_vm(unsigned long type)
f17abe9a 3841{
aac87636 3842 int r;
f17abe9a 3843 struct kvm *kvm;
506cfba9 3844 struct file *file;
f17abe9a 3845
e08b9637 3846 kvm = kvm_create_vm(type);
d6d28168
AK
3847 if (IS_ERR(kvm))
3848 return PTR_ERR(kvm);
4b4357e0 3849#ifdef CONFIG_KVM_MMIO
6ce5a090 3850 r = kvm_coalesced_mmio_init(kvm);
78588335
ME
3851 if (r < 0)
3852 goto put_kvm;
6ce5a090 3853#endif
506cfba9 3854 r = get_unused_fd_flags(O_CLOEXEC);
78588335
ME
3855 if (r < 0)
3856 goto put_kvm;
3857
506cfba9
AV
3858 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
3859 if (IS_ERR(file)) {
3860 put_unused_fd(r);
78588335
ME
3861 r = PTR_ERR(file);
3862 goto put_kvm;
506cfba9 3863 }
536a6f88 3864
525df861
PB
3865 /*
3866 * Don't call kvm_put_kvm anymore at this point; file->f_op is
3867 * already set, with ->release() being kvm_vm_release(). In error
3868 * cases it will be called by the final fput(file) and will take
3869 * care of doing kvm_put_kvm(kvm).
3870 */
536a6f88 3871 if (kvm_create_vm_debugfs(kvm, r) < 0) {
506cfba9
AV
3872 put_unused_fd(r);
3873 fput(file);
536a6f88
JF
3874 return -ENOMEM;
3875 }
286de8f6 3876 kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
f17abe9a 3877
506cfba9 3878 fd_install(r, file);
aac87636 3879 return r;
78588335
ME
3880
3881put_kvm:
3882 kvm_put_kvm(kvm);
3883 return r;
f17abe9a
AK
3884}
3885
3886static long kvm_dev_ioctl(struct file *filp,
3887 unsigned int ioctl, unsigned long arg)
3888{
07c45a36 3889 long r = -EINVAL;
f17abe9a
AK
3890
3891 switch (ioctl) {
3892 case KVM_GET_API_VERSION:
f0fe5108
AK
3893 if (arg)
3894 goto out;
f17abe9a
AK
3895 r = KVM_API_VERSION;
3896 break;
3897 case KVM_CREATE_VM:
e08b9637 3898 r = kvm_dev_ioctl_create_vm(arg);
f17abe9a 3899 break;
018d00d2 3900 case KVM_CHECK_EXTENSION:
784aa3d7 3901 r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
5d308f45 3902 break;
07c45a36 3903 case KVM_GET_VCPU_MMAP_SIZE:
07c45a36
AK
3904 if (arg)
3905 goto out;
adb1ff46
AK
3906 r = PAGE_SIZE; /* struct kvm_run */
3907#ifdef CONFIG_X86
3908 r += PAGE_SIZE; /* pio data page */
5f94c174 3909#endif
4b4357e0 3910#ifdef CONFIG_KVM_MMIO
5f94c174 3911 r += PAGE_SIZE; /* coalesced mmio ring page */
adb1ff46 3912#endif
07c45a36 3913 break;
d4c9ff2d
FEL
3914 case KVM_TRACE_ENABLE:
3915 case KVM_TRACE_PAUSE:
3916 case KVM_TRACE_DISABLE:
2023a29c 3917 r = -EOPNOTSUPP;
d4c9ff2d 3918 break;
6aa8b732 3919 default:
043405e1 3920 return kvm_arch_dev_ioctl(filp, ioctl, arg);
6aa8b732
AK
3921 }
3922out:
3923 return r;
3924}
3925
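/*
 * Illustrative userspace sketch (not part of kvm_main.c) of the /dev/kvm
 * entry points handled by kvm_dev_ioctl() above: query the API version,
 * check a capability, create a VM and give it some memory.  Assumes
 * <fcntl.h>, <sys/ioctl.h>, <sys/mman.h> and <linux/kvm.h>; the slot
 * layout and 1 MiB size are examples only.
 */
#if 0	/* userspace example, not built as part of the kernel */
static int create_minimal_vm(void)
{
	struct kvm_userspace_memory_region region = {
		.slot = 0,
		.guest_phys_addr = 0,
		.memory_size = 0x100000,	/* 1 MiB of guest RAM */
	};
	int kvm_fd, vm_fd;
	void *mem;

	kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	if (kvm_fd < 0 || ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
		return -1;

	/* On /dev/kvm this reaches kvm_vm_ioctl_check_extension_generic(NULL, ...). */
	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY) != 1)
		return -1;

	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);	/* machine type 0 */
	if (vm_fd < 0)
		return -1;

	mem = mmap(NULL, region.memory_size, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (mem == MAP_FAILED)
		return -1;
	region.userspace_addr = (unsigned long)mem;

	/* Handled by kvm_vm_ioctl(KVM_SET_USER_MEMORY_REGION) above. */
	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}
#endif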
6aa8b732 3926static struct file_operations kvm_chardev_ops = {
6aa8b732 3927 .unlocked_ioctl = kvm_dev_ioctl,
6038f373 3928 .llseek = noop_llseek,
7ddfd3e0 3929 KVM_COMPAT(kvm_dev_ioctl),
6aa8b732
AK
3930};
3931
3932static struct miscdevice kvm_dev = {
bbe4432e 3933 KVM_MINOR,
6aa8b732
AK
3934 "kvm",
3935 &kvm_chardev_ops,
3936};
3937
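/*
 * Enable hardware virtualization on the current CPU unless it is already
 * enabled; called via on_each_cpu() IPIs and from the CPU hotplug/resume
 * paths, so it must not sleep.  Failures are counted in
 * hardware_enable_failed for hardware_enable_all() to check.
 */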
75b7127c 3938static void hardware_enable_nolock(void *junk)
1b6c0168
AK
3939{
3940 int cpu = raw_smp_processor_id();
10474ae8 3941 int r;
1b6c0168 3942
7f59f492 3943 if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
1b6c0168 3944 return;
10474ae8 3945
7f59f492 3946 cpumask_set_cpu(cpu, cpus_hardware_enabled);
10474ae8 3947
13a34e06 3948 r = kvm_arch_hardware_enable();
10474ae8
AG
3949
3950 if (r) {
3951 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
3952 atomic_inc(&hardware_enable_failed);
1170adc6 3953 pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu);
10474ae8 3954 }
1b6c0168
AK
3955}
3956
8c18b2d2 3957static int kvm_starting_cpu(unsigned int cpu)
75b7127c 3958{
4a937f96 3959 raw_spin_lock(&kvm_count_lock);
4fa92fb2
PB
3960 if (kvm_usage_count)
3961 hardware_enable_nolock(NULL);
4a937f96 3962 raw_spin_unlock(&kvm_count_lock);
8c18b2d2 3963 return 0;
75b7127c
TY
3964}
3965
3966static void hardware_disable_nolock(void *junk)
1b6c0168
AK
3967{
3968 int cpu = raw_smp_processor_id();
3969
7f59f492 3970 if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
1b6c0168 3971 return;
7f59f492 3972 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
13a34e06 3973 kvm_arch_hardware_disable();
1b6c0168
AK
3974}
3975
8c18b2d2 3976static int kvm_dying_cpu(unsigned int cpu)
75b7127c 3977{
4a937f96 3978 raw_spin_lock(&kvm_count_lock);
4fa92fb2
PB
3979 if (kvm_usage_count)
3980 hardware_disable_nolock(NULL);
4a937f96 3981 raw_spin_unlock(&kvm_count_lock);
8c18b2d2 3982 return 0;
75b7127c
TY
3983}
3984
10474ae8
AG
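/*
 * kvm_usage_count counts live VMs: virtualization is enabled on all CPUs
 * when the first VM is created and disabled again when the last one goes
 * away.  The *_nolock variants expect kvm_count_lock to be held.
 */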
3985static void hardware_disable_all_nolock(void)
3986{
3987 BUG_ON(!kvm_usage_count);
3988
3989 kvm_usage_count--;
3990 if (!kvm_usage_count)
75b7127c 3991 on_each_cpu(hardware_disable_nolock, NULL, 1);
10474ae8
AG
3992}
3993
3994static void hardware_disable_all(void)
3995{
4a937f96 3996 raw_spin_lock(&kvm_count_lock);
10474ae8 3997 hardware_disable_all_nolock();
4a937f96 3998 raw_spin_unlock(&kvm_count_lock);
10474ae8
AG
3999}
4000
4001static int hardware_enable_all(void)
4002{
4003 int r = 0;
4004
4a937f96 4005 raw_spin_lock(&kvm_count_lock);
10474ae8
AG
4006
4007 kvm_usage_count++;
4008 if (kvm_usage_count == 1) {
4009 atomic_set(&hardware_enable_failed, 0);
75b7127c 4010 on_each_cpu(hardware_enable_nolock, NULL, 1);
10474ae8
AG
4011
4012 if (atomic_read(&hardware_enable_failed)) {
4013 hardware_disable_all_nolock();
4014 r = -EBUSY;
4015 }
4016 }
4017
4a937f96 4018 raw_spin_unlock(&kvm_count_lock);
10474ae8
AG
4019
4020 return r;
4021}
4022
9a2b85c6 4023static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
d77c26fc 4024 void *v)
9a2b85c6 4025{
8e1c1815
SY
4026 /*
4027 * Some (well, at least mine) BIOSes hang on reboot if
4028 * in vmx root mode.
4029 *
4030 * Intel TXT also requires VMX to be off on all CPUs when the system shuts down.
4031 */
1170adc6 4032 pr_info("kvm: exiting hardware virtualization\n");
8e1c1815 4033 kvm_rebooting = true;
75b7127c 4034 on_each_cpu(hardware_disable_nolock, NULL, 1);
9a2b85c6
RR
4035 return NOTIFY_OK;
4036}
4037
4038static struct notifier_block kvm_reboot_notifier = {
4039 .notifier_call = kvm_reboot,
4040 .priority = 0,
4041};
4042
e93f8a0f 4043static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
2eeb2e94
GH
4044{
4045 int i;
4046
4047 for (i = 0; i < bus->dev_count; i++) {
743eeb0b 4048 struct kvm_io_device *pos = bus->range[i].dev;
2eeb2e94
GH
4049
4050 kvm_iodevice_destructor(pos);
4051 }
e93f8a0f 4052 kfree(bus);
2eeb2e94
GH
4053}
4054
c21fbff1 4055static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
20e87b72 4056 const struct kvm_io_range *r2)
743eeb0b 4057{
8f4216c7
JW
4058 gpa_t addr1 = r1->addr;
4059 gpa_t addr2 = r2->addr;
4060
4061 if (addr1 < addr2)
743eeb0b 4062 return -1;
8f4216c7
JW
4063
4064 /* If r2->len == 0, match the exact address. If r2->len != 0,
4065 * accept any overlapping write. Any order is acceptable for
4066 * overlapping ranges, because kvm_io_bus_get_first_dev ensures
4067 * we process all of them.
4068 */
4069 if (r2->len) {
4070 addr1 += r1->len;
4071 addr2 += r2->len;
4072 }
4073
4074 if (addr1 > addr2)
743eeb0b 4075 return 1;
8f4216c7 4076
743eeb0b
SL
4077 return 0;
4078}
4079
a343c9b7
PB
4080static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
4081{
c21fbff1 4082 return kvm_io_bus_cmp(p1, p2);
a343c9b7
PB
4083}
4084
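/*
 * Binary-search the sorted range[] array and return the index of the
 * first device overlapping [addr, addr+len), or -ENOENT if none matches.
 */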
39369f7a 4085static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
743eeb0b
SL
4086 gpa_t addr, int len)
4087{
4088 struct kvm_io_range *range, key;
4089 int off;
4090
4091 key = (struct kvm_io_range) {
4092 .addr = addr,
4093 .len = len,
4094 };
4095
4096 range = bsearch(&key, bus->range, bus->dev_count,
4097 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
4098 if (range == NULL)
4099 return -ENOENT;
4100
4101 off = range - bus->range;
4102
c21fbff1 4103 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
743eeb0b
SL
4104 off--;
4105
4106 return off;
4107}
4108
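/*
 * Offer the write to every device registered for the range until one
 * accepts it; returns that device's index or -EOPNOTSUPP.
 */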
e32edf4f 4109static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
126a5af5
CH
4110 struct kvm_io_range *range, const void *val)
4111{
4112 int idx;
4113
4114 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
4115 if (idx < 0)
4116 return -EOPNOTSUPP;
4117
4118 while (idx < bus->dev_count &&
c21fbff1 4119 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
e32edf4f 4120 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
126a5af5
CH
4121 range->len, val))
4122 return idx;
4123 idx++;
4124 }
4125
4126 return -EOPNOTSUPP;
4127}
4128
bda9020e 4129/* kvm_io_bus_write - called under kvm->slots_lock */
e32edf4f 4130int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
bda9020e 4131 int len, const void *val)
2eeb2e94 4132{
90d83dc3 4133 struct kvm_io_bus *bus;
743eeb0b 4134 struct kvm_io_range range;
126a5af5 4135 int r;
743eeb0b
SL
4136
4137 range = (struct kvm_io_range) {
4138 .addr = addr,
4139 .len = len,
4140 };
90d83dc3 4141
e32edf4f 4142 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
90db1043
DH
4143 if (!bus)
4144 return -ENOMEM;
e32edf4f 4145 r = __kvm_io_bus_write(vcpu, bus, &range, val);
126a5af5
CH
4146 return r < 0 ? r : 0;
4147}
a2420107 4148EXPORT_SYMBOL_GPL(kvm_io_bus_write);
126a5af5
CH
4149
4150/* kvm_io_bus_write_cookie - called under kvm->slots_lock */
e32edf4f
NN
4151int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
4152 gpa_t addr, int len, const void *val, long cookie)
126a5af5
CH
4153{
4154 struct kvm_io_bus *bus;
4155 struct kvm_io_range range;
4156
4157 range = (struct kvm_io_range) {
4158 .addr = addr,
4159 .len = len,
4160 };
4161
e32edf4f 4162 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
90db1043
DH
4163 if (!bus)
4164 return -ENOMEM;
126a5af5
CH
4165
4166 /* First try the device referenced by cookie. */
4167 if ((cookie >= 0) && (cookie < bus->dev_count) &&
c21fbff1 4168 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
e32edf4f 4169 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
126a5af5
CH
4170 val))
4171 return cookie;
4172
4173 /*
4174 * cookie contained garbage; fall back to search and return the
4175 * correct cookie value.
4176 */
e32edf4f 4177 return __kvm_io_bus_write(vcpu, bus, &range, val);
126a5af5
CH
4178}
4179
e32edf4f
NN
4180static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
4181 struct kvm_io_range *range, void *val)
126a5af5
CH
4182{
4183 int idx;
4184
4185 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
743eeb0b
SL
4186 if (idx < 0)
4187 return -EOPNOTSUPP;
4188
4189 while (idx < bus->dev_count &&
c21fbff1 4190 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
e32edf4f 4191 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
126a5af5
CH
4192 range->len, val))
4193 return idx;
743eeb0b
SL
4194 idx++;
4195 }
4196
bda9020e
MT
4197 return -EOPNOTSUPP;
4198}
2eeb2e94 4199
bda9020e 4200/* kvm_io_bus_read - called under kvm->slots_lock */
e32edf4f 4201int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
e93f8a0f 4202 int len, void *val)
bda9020e 4203{
90d83dc3 4204 struct kvm_io_bus *bus;
743eeb0b 4205 struct kvm_io_range range;
126a5af5 4206 int r;
743eeb0b
SL
4207
4208 range = (struct kvm_io_range) {
4209 .addr = addr,
4210 .len = len,
4211 };
e93f8a0f 4212
e32edf4f 4213 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
90db1043
DH
4214 if (!bus)
4215 return -ENOMEM;
e32edf4f 4216 r = __kvm_io_bus_read(vcpu, bus, &range, val);
126a5af5
CH
4217 return r < 0 ? r : 0;
4218}
743eeb0b 4219
79fac95e 4220/* Caller must hold slots_lock. */
743eeb0b
SL
4221int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
4222 int len, struct kvm_io_device *dev)
6c474694 4223{
d4c67a7a 4224 int i;
e93f8a0f 4225 struct kvm_io_bus *new_bus, *bus;
d4c67a7a 4226 struct kvm_io_range range;
090b7aff 4227
4a12f951 4228 bus = kvm_get_bus(kvm, bus_idx);
90db1043
DH
4229 if (!bus)
4230 return -ENOMEM;
4231
6ea34c9b
AK
4232 /* exclude ioeventfd which is limited by maximum fd */
4233 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
090b7aff 4234 return -ENOSPC;
2eeb2e94 4235
90952cd3 4236 new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
b12ce36a 4237 GFP_KERNEL_ACCOUNT);
e93f8a0f
MT
4238 if (!new_bus)
4239 return -ENOMEM;
d4c67a7a
GH
4240
4241 range = (struct kvm_io_range) {
4242 .addr = addr,
4243 .len = len,
4244 .dev = dev,
4245 };
4246
4247 for (i = 0; i < bus->dev_count; i++)
4248 if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
4249 break;
4250
4251 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
4252 new_bus->dev_count++;
4253 new_bus->range[i] = range;
4254 memcpy(new_bus->range + i + 1, bus->range + i,
4255 (bus->dev_count - i) * sizeof(struct kvm_io_range));
e93f8a0f
MT
4256 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
4257 synchronize_srcu_expedited(&kvm->srcu);
4258 kfree(bus);
090b7aff
GH
4259
4260 return 0;
4261}
4262
79fac95e 4263/* Caller must hold slots_lock. */
90db1043
DH
4264void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
4265 struct kvm_io_device *dev)
090b7aff 4266{
90db1043 4267 int i;
e93f8a0f 4268 struct kvm_io_bus *new_bus, *bus;
090b7aff 4269
4a12f951 4270 bus = kvm_get_bus(kvm, bus_idx);
df630b8c 4271 if (!bus)
90db1043 4272 return;
df630b8c 4273
a1300716
AK
4274 for (i = 0; i < bus->dev_count; i++)
4275 if (bus->range[i].dev == dev) {
090b7aff
GH
4276 break;
4277 }
e93f8a0f 4278
90db1043
DH
4279 if (i == bus->dev_count)
4280 return;
a1300716 4281
90952cd3 4282 new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
b12ce36a 4283 GFP_KERNEL_ACCOUNT);
90db1043
DH
4284 if (!new_bus) {
4285 pr_err("kvm: failed to shrink bus, removing it completely\n");
4286 goto broken;
4287 }
a1300716
AK
4288
4289 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
4290 new_bus->dev_count--;
4291 memcpy(new_bus->range + i, bus->range + i + 1,
4292 (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
e93f8a0f 4293
90db1043 4294broken:
e93f8a0f
MT
4295 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
4296 synchronize_srcu_expedited(&kvm->srcu);
4297 kfree(bus);
90db1043 4298 return;
2eeb2e94
GH
4299}
4300
8a39d006
AP
4301struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
4302 gpa_t addr)
4303{
4304 struct kvm_io_bus *bus;
4305 int dev_idx, srcu_idx;
4306 struct kvm_io_device *iodev = NULL;
4307
4308 srcu_idx = srcu_read_lock(&kvm->srcu);
4309
4310 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
90db1043
DH
4311 if (!bus)
4312 goto out_unlock;
8a39d006
AP
4313
4314 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
4315 if (dev_idx < 0)
4316 goto out_unlock;
4317
4318 iodev = bus->range[dev_idx].dev;
4319
4320out_unlock:
4321 srcu_read_unlock(&kvm->srcu, srcu_idx);
4322
4323 return iodev;
4324}
4325EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);
4326
536a6f88
JF
4327static int kvm_debugfs_open(struct inode *inode, struct file *file,
4328 int (*get)(void *, u64 *), int (*set)(void *, u64),
4329 const char *fmt)
4330{
4331 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
4332 inode->i_private;
4333
4334 /* The debugfs files hold a reference to the kvm struct, which
4335 * is still valid when kvm_destroy_vm is called.
4336 * To avoid a race between open and the removal of the debugfs
4337 * directory, we test against the users count.
4338 */
e3736c3e 4339 if (!refcount_inc_not_zero(&stat_data->kvm->users_count))
536a6f88
JF
4340 return -ENOENT;
4341
833b45de 4342 if (simple_attr_open(inode, file, get,
09cbcef6
MP
4343 KVM_DBGFS_GET_MODE(stat_data->dbgfs_item) & 0222
4344 ? set : NULL,
4345 fmt)) {
536a6f88
JF
4346 kvm_put_kvm(stat_data->kvm);
4347 return -ENOMEM;
4348 }
4349
4350 return 0;
4351}
4352
4353static int kvm_debugfs_release(struct inode *inode, struct file *file)
4354{
4355 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
4356 inode->i_private;
4357
4358 simple_attr_release(inode, file);
4359 kvm_put_kvm(stat_data->kvm);
4360
4361 return 0;
4362}
4363
09cbcef6 4364static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
536a6f88 4365{
09cbcef6 4366 *val = *(ulong *)((void *)kvm + offset);
536a6f88 4367
09cbcef6
MP
4368 return 0;
4369}
4370
4371static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
4372{
4373 *(ulong *)((void *)kvm + offset) = 0;
536a6f88
JF
4374
4375 return 0;
4376}
4377
09cbcef6 4378static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
ce35ef27 4379{
09cbcef6
MP
4380 int i;
4381 struct kvm_vcpu *vcpu;
ce35ef27 4382
09cbcef6 4383 *val = 0;
ce35ef27 4384
09cbcef6
MP
4385 kvm_for_each_vcpu(i, vcpu, kvm)
4386 *val += *(u64 *)((void *)vcpu + offset);
ce35ef27
SJS
4387
4388 return 0;
4389}
4390
09cbcef6 4391static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
536a6f88 4392{
09cbcef6
MP
4393 int i;
4394 struct kvm_vcpu *vcpu;
536a6f88 4395
09cbcef6
MP
4396 kvm_for_each_vcpu(i, vcpu, kvm)
4397 *(u64 *)((void *)vcpu + offset) = 0;
4398
4399 return 0;
4400}
536a6f88 4401
09cbcef6 4402static int kvm_stat_data_get(void *data, u64 *val)
536a6f88 4403{
09cbcef6 4404 int r = -EFAULT;
536a6f88 4405 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
536a6f88 4406
09cbcef6
MP
4407 switch (stat_data->dbgfs_item->kind) {
4408 case KVM_STAT_VM:
4409 r = kvm_get_stat_per_vm(stat_data->kvm,
4410 stat_data->dbgfs_item->offset, val);
4411 break;
4412 case KVM_STAT_VCPU:
4413 r = kvm_get_stat_per_vcpu(stat_data->kvm,
4414 stat_data->dbgfs_item->offset, val);
4415 break;
4416 }
536a6f88 4417
09cbcef6 4418 return r;
536a6f88
JF
4419}
4420
09cbcef6 4421static int kvm_stat_data_clear(void *data, u64 val)
ce35ef27 4422{
09cbcef6 4423 int r = -EFAULT;
ce35ef27 4424 struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
ce35ef27
SJS
4425
4426 if (val)
4427 return -EINVAL;
4428
09cbcef6
MP
4429 switch (stat_data->dbgfs_item->kind) {
4430 case KVM_STAT_VM:
4431 r = kvm_clear_stat_per_vm(stat_data->kvm,
4432 stat_data->dbgfs_item->offset);
4433 break;
4434 case KVM_STAT_VCPU:
4435 r = kvm_clear_stat_per_vcpu(stat_data->kvm,
4436 stat_data->dbgfs_item->offset);
4437 break;
4438 }
ce35ef27 4439
09cbcef6 4440 return r;
ce35ef27
SJS
4441}
4442
09cbcef6 4443static int kvm_stat_data_open(struct inode *inode, struct file *file)
536a6f88
JF
4444{
4445 __simple_attr_check_format("%llu\n", 0ull);
09cbcef6
MP
4446 return kvm_debugfs_open(inode, file, kvm_stat_data_get,
4447 kvm_stat_data_clear, "%llu\n");
536a6f88
JF
4448}
4449
09cbcef6
MP
4450static const struct file_operations stat_fops_per_vm = {
4451 .owner = THIS_MODULE,
4452 .open = kvm_stat_data_open,
536a6f88 4453 .release = kvm_debugfs_release,
09cbcef6
MP
4454 .read = simple_attr_read,
4455 .write = simple_attr_write,
4456 .llseek = no_llseek,
536a6f88
JF
4457};
4458
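/* Sum one per-VM statistic, identified by its offset, across all VMs. */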
8b88b099 4459static int vm_stat_get(void *_offset, u64 *val)
ba1389b7
AK
4460{
4461 unsigned offset = (long)_offset;
ba1389b7 4462 struct kvm *kvm;
536a6f88 4463 u64 tmp_val;
ba1389b7 4464
8b88b099 4465 *val = 0;
0d9ce162 4466 mutex_lock(&kvm_lock);
536a6f88 4467 list_for_each_entry(kvm, &vm_list, vm_list) {
09cbcef6 4468 kvm_get_stat_per_vm(kvm, offset, &tmp_val);
536a6f88
JF
4469 *val += tmp_val;
4470 }
0d9ce162 4471 mutex_unlock(&kvm_lock);
8b88b099 4472 return 0;
ba1389b7
AK
4473}
4474
ce35ef27
SJS
4475static int vm_stat_clear(void *_offset, u64 val)
4476{
4477 unsigned offset = (long)_offset;
4478 struct kvm *kvm;
ce35ef27
SJS
4479
4480 if (val)
4481 return -EINVAL;
4482
0d9ce162 4483 mutex_lock(&kvm_lock);
ce35ef27 4484 list_for_each_entry(kvm, &vm_list, vm_list) {
09cbcef6 4485 kvm_clear_stat_per_vm(kvm, offset);
ce35ef27 4486 }
0d9ce162 4487 mutex_unlock(&kvm_lock);
ce35ef27
SJS
4488
4489 return 0;
4490}
4491
4492DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");
ba1389b7 4493
8b88b099 4494static int vcpu_stat_get(void *_offset, u64 *val)
1165f5fe
AK
4495{
4496 unsigned offset = (long)_offset;
1165f5fe 4497 struct kvm *kvm;
536a6f88 4498 u64 tmp_val;
1165f5fe 4499
8b88b099 4500 *val = 0;
0d9ce162 4501 mutex_lock(&kvm_lock);
536a6f88 4502 list_for_each_entry(kvm, &vm_list, vm_list) {
09cbcef6 4503 kvm_get_stat_per_vcpu(kvm, offset, &tmp_val);
536a6f88
JF
4504 *val += tmp_val;
4505 }
0d9ce162 4506 mutex_unlock(&kvm_lock);
8b88b099 4507 return 0;
1165f5fe
AK
4508}
4509
ce35ef27
SJS
4510static int vcpu_stat_clear(void *_offset, u64 val)
4511{
4512 unsigned offset = (long)_offset;
4513 struct kvm *kvm;
ce35ef27
SJS
4514
4515 if (val)
4516 return -EINVAL;
4517
0d9ce162 4518 mutex_lock(&kvm_lock);
ce35ef27 4519 list_for_each_entry(kvm, &vm_list, vm_list) {
09cbcef6 4520 kvm_clear_stat_per_vcpu(kvm, offset);
ce35ef27 4521 }
0d9ce162 4522 mutex_unlock(&kvm_lock);
ce35ef27
SJS
4523
4524 return 0;
4525}
4526
4527DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
4528 "%llu\n");
ba1389b7 4529
828c0950 4530static const struct file_operations *stat_fops[] = {
ba1389b7
AK
4531 [KVM_STAT_VCPU] = &vcpu_stat_fops,
4532 [KVM_STAT_VM] = &vm_stat_fops,
4533};
1165f5fe 4534
286de8f6
CI
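/*
 * Send a KOBJ_CHANGE uevent on VM creation/destruction, reporting the
 * number of created and active VMs, the owning PID and, if available,
 * the VM's debugfs stats path.
 */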
4535static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
4536{
4537 struct kobj_uevent_env *env;
286de8f6
CI
4538 unsigned long long created, active;
4539
4540 if (!kvm_dev.this_device || !kvm)
4541 return;
4542
0d9ce162 4543 mutex_lock(&kvm_lock);
286de8f6
CI
4544 if (type == KVM_EVENT_CREATE_VM) {
4545 kvm_createvm_count++;
4546 kvm_active_vms++;
4547 } else if (type == KVM_EVENT_DESTROY_VM) {
4548 kvm_active_vms--;
4549 }
4550 created = kvm_createvm_count;
4551 active = kvm_active_vms;
0d9ce162 4552 mutex_unlock(&kvm_lock);
286de8f6 4553
b12ce36a 4554 env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
286de8f6
CI
4555 if (!env)
4556 return;
4557
4558 add_uevent_var(env, "CREATED=%llu", created);
4559 add_uevent_var(env, "COUNT=%llu", active);
4560
fdeaf7e3 4561 if (type == KVM_EVENT_CREATE_VM) {
286de8f6 4562 add_uevent_var(env, "EVENT=create");
fdeaf7e3
CI
4563 kvm->userspace_pid = task_pid_nr(current);
4564 } else if (type == KVM_EVENT_DESTROY_VM) {
286de8f6 4565 add_uevent_var(env, "EVENT=destroy");
fdeaf7e3
CI
4566 }
4567 add_uevent_var(env, "PID=%d", kvm->userspace_pid);
286de8f6 4568
8ed0579c 4569 if (!IS_ERR_OR_NULL(kvm->debugfs_dentry)) {
b12ce36a 4570 char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
fdeaf7e3
CI
4571
4572 if (p) {
4573 tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
4574 if (!IS_ERR(tmp))
4575 add_uevent_var(env, "STATS_PATH=%s", tmp);
4576 kfree(p);
286de8f6
CI
4577 }
4578 }
4579 /* no need for checks, since we are adding at most 5 keys */
4580 env->envp[env->envp_idx++] = NULL;
4581 kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
4582 kfree(env);
286de8f6
CI
4583}
4584
929f45e3 4585static void kvm_init_debug(void)
6aa8b732
AK
4586{
4587 struct kvm_stats_debugfs_item *p;
4588
76f7c879 4589 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
4f69b680 4590
536a6f88
JF
4591 kvm_debugfs_num_entries = 0;
4592 for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) {
09cbcef6
MP
4593 debugfs_create_file(p->name, KVM_DBGFS_GET_MODE(p),
4594 kvm_debugfs_dir, (void *)(long)p->offset,
929f45e3 4595 stat_fops[p->kind]);
4f69b680 4596 }
6aa8b732
AK
4597}
4598
fb3600cc 4599static int kvm_suspend(void)
59ae6c6b 4600{
10474ae8 4601 if (kvm_usage_count)
75b7127c 4602 hardware_disable_nolock(NULL);
59ae6c6b
AK
4603 return 0;
4604}
4605
fb3600cc 4606static void kvm_resume(void)
59ae6c6b 4607{
ca84d1a2 4608 if (kvm_usage_count) {
2eb06c30
WL
4609#ifdef CONFIG_LOCKDEP
4610 WARN_ON(lockdep_is_held(&kvm_count_lock));
4611#endif
75b7127c 4612 hardware_enable_nolock(NULL);
ca84d1a2 4613 }
59ae6c6b
AK
4614}
4615
fb3600cc 4616static struct syscore_ops kvm_syscore_ops = {
59ae6c6b
AK
4617 .suspend = kvm_suspend,
4618 .resume = kvm_resume,
4619};
4620
15ad7146
AK
4621static inline
4622struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
4623{
4624 return container_of(pn, struct kvm_vcpu, preempt_notifier);
4625}
4626
4627static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
4628{
4629 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
f95ef0cd 4630
046ddeed 4631 WRITE_ONCE(vcpu->preempted, false);
d73eb57b 4632 WRITE_ONCE(vcpu->ready, false);
15ad7146 4633
7495e22b 4634 __this_cpu_write(kvm_running_vcpu, vcpu);
e790d9ef 4635 kvm_arch_sched_in(vcpu, cpu);
e9b11c17 4636 kvm_arch_vcpu_load(vcpu, cpu);
15ad7146
AK
4637}
4638
4639static void kvm_sched_out(struct preempt_notifier *pn,
4640 struct task_struct *next)
4641{
4642 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
4643
d73eb57b 4644 if (current->state == TASK_RUNNING) {
046ddeed 4645 WRITE_ONCE(vcpu->preempted, true);
d73eb57b
WL
4646 WRITE_ONCE(vcpu->ready, true);
4647 }
e9b11c17 4648 kvm_arch_vcpu_put(vcpu);
7495e22b
PB
4649 __this_cpu_write(kvm_running_vcpu, NULL);
4650}
4651
4652/**
4653 * kvm_get_running_vcpu - get the vcpu running on the current CPU.
1f03b2bc
MZ
4654 *
4655 * We can disable preemption locally around accessing the per-CPU variable,
4656 * and use the resolved vcpu pointer after enabling preemption again,
4657 * because even if the current thread is migrated to another CPU, reading
4658 * the per-CPU value later will give us the same value as we update the
4659 * per-CPU variable in the preempt notifier handlers.
7495e22b
PB
4660 */
4661struct kvm_vcpu *kvm_get_running_vcpu(void)
4662{
1f03b2bc
MZ
4663 struct kvm_vcpu *vcpu;
4664
4665 preempt_disable();
4666 vcpu = __this_cpu_read(kvm_running_vcpu);
4667 preempt_enable();
4668
4669 return vcpu;
7495e22b 4670}
379a3c8e 4671EXPORT_SYMBOL_GPL(kvm_get_running_vcpu);
7495e22b
PB
4672
4673/**
4674 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
4675 */
4676struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
4677{
4678 return &kvm_running_vcpu;
15ad7146
AK
4679}
4680
b9904085
SC
4681struct kvm_cpu_compat_check {
4682 void *opaque;
4683 int *ret;
4684};
4685
4686static void check_processor_compat(void *data)
f257d6dc 4687{
b9904085
SC
4688 struct kvm_cpu_compat_check *c = data;
4689
4690 *c->ret = kvm_arch_check_processor_compat(c->opaque);
f257d6dc
SC
4691}
4692
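/*
 * Module-wide initialization, called from each architecture's module init:
 * set up the arch code, irqfds, hardware, CPU hotplug callbacks, the vcpu
 * slab cache, async page faults, the /dev/kvm misc device and debugfs.
 */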
0ee75bea 4693int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
c16f862d 4694 struct module *module)
6aa8b732 4695{
b9904085 4696 struct kvm_cpu_compat_check c;
6aa8b732 4697 int r;
002c7f7c 4698 int cpu;
6aa8b732 4699
f8c16bba
ZX
4700 r = kvm_arch_init(opaque);
4701 if (r)
d2308784 4702 goto out_fail;
cb498ea2 4703
7dac16c3
AH
4704 /*
4705 * kvm_arch_init makes sure there's at most one caller
4706 * for architectures that support multiple implementations,
4707 * like intel and amd on x86.
36343f6e
PB
4708 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
4709 * conflicts in case kvm is already setup for another implementation.
7dac16c3 4710 */
36343f6e
PB
4711 r = kvm_irqfd_init();
4712 if (r)
4713 goto out_irqfd;
7dac16c3 4714
8437a617 4715 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
7f59f492
RR
4716 r = -ENOMEM;
4717 goto out_free_0;
4718 }
4719
b9904085 4720 r = kvm_arch_hardware_setup(opaque);
6aa8b732 4721 if (r < 0)
faf0be22 4722 goto out_free_1;
6aa8b732 4723
b9904085
SC
4724 c.ret = &r;
4725 c.opaque = opaque;
002c7f7c 4726 for_each_online_cpu(cpu) {
b9904085 4727 smp_call_function_single(cpu, check_processor_compat, &c, 1);
002c7f7c 4728 if (r < 0)
faf0be22 4729 goto out_free_2;
002c7f7c
YS
4730 }
4731
73c1b41e 4732 r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting",
8c18b2d2 4733 kvm_starting_cpu, kvm_dying_cpu);
774c47f1 4734 if (r)
d2308784 4735 goto out_free_2;
6aa8b732
AK
4736 register_reboot_notifier(&kvm_reboot_notifier);
4737
c16f862d 4738 /* A kmem cache lets us meet the alignment requirements of fx_save. */
0ee75bea
AK
4739 if (!vcpu_align)
4740 vcpu_align = __alignof__(struct kvm_vcpu);
46515736
PB
4741 kvm_vcpu_cache =
4742 kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
4743 SLAB_ACCOUNT,
4744 offsetof(struct kvm_vcpu, arch),
4745 sizeof_field(struct kvm_vcpu, arch),
4746 NULL);
c16f862d
RR
4747 if (!kvm_vcpu_cache) {
4748 r = -ENOMEM;
fb3600cc 4749 goto out_free_3;
c16f862d
RR
4750 }
4751
af585b92
GN
4752 r = kvm_async_pf_init();
4753 if (r)
4754 goto out_free;
4755
6aa8b732 4756 kvm_chardev_ops.owner = module;
3d3aab1b
CB
4757 kvm_vm_fops.owner = module;
4758 kvm_vcpu_fops.owner = module;
6aa8b732
AK
4759
4760 r = misc_register(&kvm_dev);
4761 if (r) {
1170adc6 4762 pr_err("kvm: misc device register failed\n");
af585b92 4763 goto out_unreg;
6aa8b732
AK
4764 }
4765
fb3600cc
RW
4766 register_syscore_ops(&kvm_syscore_ops);
4767
15ad7146
AK
4768 kvm_preempt_ops.sched_in = kvm_sched_in;
4769 kvm_preempt_ops.sched_out = kvm_sched_out;
4770
929f45e3 4771 kvm_init_debug();
0ea4ed8e 4772
3c3c29fd
PB
4773 r = kvm_vfio_ops_init();
4774 WARN_ON(r);
4775
c7addb90 4776 return 0;
6aa8b732 4777
af585b92
GN
4778out_unreg:
4779 kvm_async_pf_deinit();
6aa8b732 4780out_free:
c16f862d 4781 kmem_cache_destroy(kvm_vcpu_cache);
d2308784 4782out_free_3:
6aa8b732 4783 unregister_reboot_notifier(&kvm_reboot_notifier);
8c18b2d2 4784 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
d2308784 4785out_free_2:
e9b11c17 4786 kvm_arch_hardware_unsetup();
faf0be22 4787out_free_1:
7f59f492 4788 free_cpumask_var(cpus_hardware_enabled);
d2308784 4789out_free_0:
a0f155e9 4790 kvm_irqfd_exit();
36343f6e 4791out_irqfd:
7dac16c3
AH
4792 kvm_arch_exit();
4793out_fail:
6aa8b732
AK
4794 return r;
4795}
cb498ea2 4796EXPORT_SYMBOL_GPL(kvm_init);
6aa8b732 4797
cb498ea2 4798void kvm_exit(void)
6aa8b732 4799{
4bd33b56 4800 debugfs_remove_recursive(kvm_debugfs_dir);
6aa8b732 4801 misc_deregister(&kvm_dev);
c16f862d 4802 kmem_cache_destroy(kvm_vcpu_cache);
af585b92 4803 kvm_async_pf_deinit();
fb3600cc 4804 unregister_syscore_ops(&kvm_syscore_ops);
6aa8b732 4805 unregister_reboot_notifier(&kvm_reboot_notifier);
8c18b2d2 4806 cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
75b7127c 4807 on_each_cpu(hardware_disable_nolock, NULL, 1);
e9b11c17 4808 kvm_arch_hardware_unsetup();
f8c16bba 4809 kvm_arch_exit();
a0f155e9 4810 kvm_irqfd_exit();
7f59f492 4811 free_cpumask_var(cpus_hardware_enabled);
571ee1b6 4812 kvm_vfio_ops_exit();
6aa8b732 4813}
cb498ea2 4814EXPORT_SYMBOL_GPL(kvm_exit);
c57c8046
JS
4815
4816struct kvm_vm_worker_thread_context {
4817 struct kvm *kvm;
4818 struct task_struct *parent;
4819 struct completion init_done;
4820 kvm_vm_thread_fn_t thread_fn;
4821 uintptr_t data;
4822 int err;
4823};
4824
4825static int kvm_vm_worker_thread(void *context)
4826{
4827 /*
4828 * The init_context is allocated on the stack of the parent thread, so
4829 * we have to locally copy anything that is needed beyond initialization
4830 */
4831 struct kvm_vm_worker_thread_context *init_context = context;
4832 struct kvm *kvm = init_context->kvm;
4833 kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
4834 uintptr_t data = init_context->data;
4835 int err;
4836
4837 err = kthread_park(current);
4838 /* kthread_park(current) is never supposed to return an error */
4839 WARN_ON(err != 0);
4840 if (err)
4841 goto init_complete;
4842
4843 err = cgroup_attach_task_all(init_context->parent, current);
4844 if (err) {
4845 kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
4846 __func__, err);
4847 goto init_complete;
4848 }
4849
4850 set_user_nice(current, task_nice(init_context->parent));
4851
4852init_complete:
4853 init_context->err = err;
4854 complete(&init_context->init_done);
4855 init_context = NULL;
4856
4857 if (err)
4858 return err;
4859
4860 /* Wait to be woken up by the spawner before proceeding. */
4861 kthread_parkme();
4862
4863 if (!kthread_should_stop())
4864 err = thread_fn(kvm, data);
4865
4866 return err;
4867}
4868
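/*
 * Spawn a kernel thread attached to the caller's cgroups that will run
 * @thread_fn(kvm, data) once it is unparked; on success the task is
 * returned through @thread_ptr.
 */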
4869int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
4870 uintptr_t data, const char *name,
4871 struct task_struct **thread_ptr)
4872{
4873 struct kvm_vm_worker_thread_context init_context = {};
4874 struct task_struct *thread;
4875
4876 *thread_ptr = NULL;
4877 init_context.kvm = kvm;
4878 init_context.parent = current;
4879 init_context.thread_fn = thread_fn;
4880 init_context.data = data;
4881 init_completion(&init_context.init_done);
4882
4883 thread = kthread_run(kvm_vm_worker_thread, &init_context,
4884 "%s-%d", name, task_pid_nr(current));
4885 if (IS_ERR(thread))
4886 return PTR_ERR(thread);
4887
4888 /* kthread_run is never supposed to return NULL */
4889 WARN_ON(thread == NULL);
4890
4891 wait_for_completion(&init_context.init_done);
4892
4893 if (!init_context.err)
4894 *thread_ptr = thread;
4895
4896 return init_context.err;
4897}