From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Christoffer Dall <christoffer.dall@linaro.org>
Date: Mon, 4 Dec 2017 21:35:23 +0100
Subject: [PATCH] KVM: Take vcpu->mutex outside vcpu_load

As we're about to call vcpu_load() from architecture-specific
implementations of the KVM vcpu ioctls, while still accessing data
structures protected by the vcpu->mutex in the generic code, factor
the locking out of vcpu_load().

x86 is the only architecture which calls vcpu_load() outside of the main
vcpu ioctl function, and these calls will no longer take the vcpu mutex
following this patch. However, with the exception of
kvm_arch_vcpu_postcreate (see below), the callers are either in the
creation or destruction path of the VCPU, which means there cannot be
any concurrent access to the data structure, because the file descriptor
is not yet accessible, or is already gone.

kvm_arch_vcpu_postcreate makes the newly created vcpu potentially
accessible by other in-kernel threads through the kvm->vcpus array, and
we therefore take the vcpu mutex in this case directly.

Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit ec7660ccdd2b71d8c7f0243f8590253713e9b75d)
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
---
 arch/x86/kvm/vmx.c       |  4 +---
 arch/x86/kvm/x86.c       | 20 +++++++-------------
 include/linux/kvm_host.h |  2 +-
 virt/kvm/kvm_main.c      | 17 ++++++-----------
 4 files changed, 15 insertions(+), 28 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 7c8341e2d3c1..921968c00dde 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9966,10 +9966,8 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
 static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	int r;
 
-	r = vcpu_load(vcpu);
-	BUG_ON(r);
+	vcpu_load(vcpu);
 	vmx_switch_vmcs(vcpu, &vmx->vmcs01);
 	free_nested(vmx);
 	vcpu_put(vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e68f7bd79df5..0c971dce8915 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7957,16 +7957,12 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
-	int r;
-
 	kvm_vcpu_mtrr_init(vcpu);
-	r = vcpu_load(vcpu);
-	if (r)
-		return r;
+	vcpu_load(vcpu);
 	kvm_vcpu_reset(vcpu, false);
 	kvm_mmu_setup(vcpu);
 	vcpu_put(vcpu);
-	return r;
+	return 0;
 }
 
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
@@ -7976,13 +7972,15 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 
 	kvm_hv_vcpu_postcreate(vcpu);
 
-	if (vcpu_load(vcpu))
+	if (mutex_lock_killable(&vcpu->mutex))
 		return;
+	vcpu_load(vcpu);
 	msr.data = 0x0;
 	msr.index = MSR_IA32_TSC;
 	msr.host_initiated = true;
 	kvm_write_tsc(vcpu, &msr);
 	vcpu_put(vcpu);
+	mutex_unlock(&vcpu->mutex);
 
 	if (!kvmclock_periodic_sync)
 		return;
@@ -7993,11 +7991,9 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
-	int r;
 	vcpu->arch.apf.msr_val = 0;
 
-	r = vcpu_load(vcpu);
-	BUG_ON(r);
+	vcpu_load(vcpu);
 	kvm_mmu_unload(vcpu);
 	vcpu_put(vcpu);
 
@@ -8371,9 +8367,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
 {
-	int r;
-	r = vcpu_load(vcpu);
-	BUG_ON(r);
+	vcpu_load(vcpu);
 	kvm_mmu_unload(vcpu);
 	vcpu_put(vcpu);
 }
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c807eab9c1d3..6684da3f197f 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -533,7 +533,7 @@ static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
 
-int __must_check vcpu_load(struct kvm_vcpu *vcpu);
+void vcpu_load(struct kvm_vcpu *vcpu);
 void vcpu_put(struct kvm_vcpu *vcpu);
 
 #ifdef __KVM_HAVE_IOAPIC
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d7a24fd29144..9cb73320866c 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -151,17 +151,12 @@ bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
 /*
  * Switches to specified vcpu, until a matching vcpu_put()
  */
-int vcpu_load(struct kvm_vcpu *vcpu)
+void vcpu_load(struct kvm_vcpu *vcpu)
 {
-	int cpu;
-
-	if (mutex_lock_killable(&vcpu->mutex))
-		return -EINTR;
-	cpu = get_cpu();
+	int cpu = get_cpu();
 	preempt_notifier_register(&vcpu->preempt_notifier);
 	kvm_arch_vcpu_load(vcpu, cpu);
 	put_cpu();
-	return 0;
 }
 EXPORT_SYMBOL_GPL(vcpu_load);
 
@@ -171,7 +166,6 @@ void vcpu_put(struct kvm_vcpu *vcpu)
 	kvm_arch_vcpu_put(vcpu);
 	preempt_notifier_unregister(&vcpu->preempt_notifier);
 	preempt_enable();
-	mutex_unlock(&vcpu->mutex);
 }
 EXPORT_SYMBOL_GPL(vcpu_put);
 
@@ -2562,9 +2556,9 @@ static long kvm_vcpu_ioctl(struct file *filp,
 #endif
 
 
-	r = vcpu_load(vcpu);
-	if (r)
-		return r;
+	if (mutex_lock_killable(&vcpu->mutex))
+		return -EINTR;
+	vcpu_load(vcpu);
 	switch (ioctl) {
 	case KVM_RUN: {
 		struct pid *oldpid;
@@ -2737,6 +2731,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
 	}
 out:
 	vcpu_put(vcpu);
+	mutex_unlock(&vcpu->mutex);
 	kfree(fpu);
 	kfree(kvm_sregs);
 	return r;
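
The pattern the hunks above converge on, both in kvm_vcpu_ioctl() and in kvm_arch_vcpu_postcreate(), is: the caller takes vcpu->mutex itself and then calls vcpu_load(), which now only registers the preemption notifier and runs the architecture load hook. As a rough sketch of that calling convention (kvm_example_vcpu_op is a hypothetical caller used purely for illustration, not code from this patch or from the kernel):

#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <linux/mutex.h>

/* Hypothetical caller following the convention established by this patch. */
static long kvm_example_vcpu_op(struct kvm_vcpu *vcpu)
{
	/* Serialization is now the caller's job, not vcpu_load()'s. */
	if (mutex_lock_killable(&vcpu->mutex))
		return -EINTR;
	vcpu_load(vcpu);	/* preempt notifier + kvm_arch_vcpu_load() */

	/* ... operate on vcpu state protected by vcpu->mutex ... */

	vcpu_put(vcpu);		/* kvm_arch_vcpu_put() + notifier unregister */
	mutex_unlock(&vcpu->mutex);
	return 0;
}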