/*
 * kvm asynchronous fault support
 *
 * Copyright 2010 Red Hat, Inc.
 *
 * Author:
 *      Gleb Natapov <gleb@redhat.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>

#include "async_pf.h"
#include <trace/events/kvm.h>
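
/*
 * Rough life cycle of an async page fault:
 *
 *      kvm_setup_async_pf()             allocate and queue a request and
 *                                       tell the guest the page is not
 *                                       present
 *        -> async_pf_execute()          fault the page in from a workqueue
 *                                       and move the request to "done"
 *      kvm_check_async_pf_completion()  tell the guest the page is present
 *                                       and free the request
 */
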
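/*
 * With CONFIG_KVM_ASYNC_PF_SYNC the "page present" notification is
 * delivered synchronously from the worker itself; without it, it is
 * delivered later from kvm_check_async_pf_completion(). Exactly one of
 * these two helpers is therefore a no-op in any given configuration.
 */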
static inline void kvm_async_page_present_sync(struct kvm_vcpu *vcpu,
                                               struct kvm_async_pf *work)
{
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
        kvm_arch_async_page_present(vcpu, work);
#endif
}

static inline void kvm_async_page_present_async(struct kvm_vcpu *vcpu,
                                                struct kvm_async_pf *work)
{
#ifndef CONFIG_KVM_ASYNC_PF_SYNC
        kvm_arch_async_page_present(vcpu, work);
#endif
}

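/* slab cache backing all struct kvm_async_pf allocations in this file */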
static struct kmem_cache *async_pf_cache;

int kvm_async_pf_init(void)
{
        async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);

        if (!async_pf_cache)
                return -ENOMEM;

        return 0;
}

void kvm_async_pf_deinit(void)
{
        kmem_cache_destroy(async_pf_cache);
        async_pf_cache = NULL;
}

void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
{
        INIT_LIST_HEAD(&vcpu->async_pf.done);
        INIT_LIST_HEAD(&vcpu->async_pf.queue);
        spin_lock_init(&vcpu->async_pf.lock);
}

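/*
 * Workqueue handler for requests queued by kvm_setup_async_pf(): fault
 * the page in on behalf of the guest, move the request to the vcpu's
 * "done" list and wake the vcpu in case it is blocked waiting for the
 * page.
 */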
static void async_pf_execute(struct work_struct *work)
{
        struct kvm_async_pf *apf =
                container_of(work, struct kvm_async_pf, work);
        struct mm_struct *mm = apf->mm;
        struct kvm_vcpu *vcpu = apf->vcpu;
        unsigned long addr = apf->addr;
        gva_t gva = apf->gva;
        int locked = 1;

        might_sleep();

        /*
         * This work is run asynchronously to the task which owns
         * mm and might be done in another context, so we must
         * access remotely.
         */
        down_read(&mm->mmap_sem);
        get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE, NULL, NULL,
                        &locked);
        /* get_user_pages_remote() may drop mmap_sem and clear "locked" */
        if (locked)
                up_read(&mm->mmap_sem);

        kvm_async_page_present_sync(vcpu, apf);

        spin_lock(&vcpu->async_pf.lock);
        list_add_tail(&apf->link, &vcpu->async_pf.done);
        apf->vcpu = NULL;
        spin_unlock(&vcpu->async_pf.lock);

        /*
         * apf may be freed by kvm_check_async_pf_completion() after
         * this point
         */

        trace_kvm_async_pf_completed(addr, gva);

        /*
         * This memory barrier pairs with prepare_to_wait's set_current_state()
         */
        smp_mb();
        if (swait_active(&vcpu->wq))
                swake_up(&vcpu->wq);

        mmput(mm);
        kvm_put_kvm(vcpu->kvm);
}

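/*
 * Drop all outstanding async page fault work for @vcpu: cancel (or,
 * with CONFIG_KVM_ASYNC_PF_SYNC, flush) everything still queued and
 * free all completed requests that were never delivered.
 */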
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
{
        spin_lock(&vcpu->async_pf.lock);

        /* cancel outstanding work queue items */
        while (!list_empty(&vcpu->async_pf.queue)) {
                struct kvm_async_pf *work =
                        list_first_entry(&vcpu->async_pf.queue,
                                         typeof(*work), queue);
                list_del(&work->queue);

                /*
                 * We know it's present in vcpu->async_pf.done, do
                 * nothing here.
                 */
                if (!work->vcpu)
                        continue;

                spin_unlock(&vcpu->async_pf.lock);
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
                flush_work(&work->work);
#else
                if (cancel_work_sync(&work->work)) {
                        mmput(work->mm);
                        kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
                        kmem_cache_free(async_pf_cache, work);
                }
#endif
                spin_lock(&vcpu->async_pf.lock);
        }

        while (!list_empty(&vcpu->async_pf.done)) {
                struct kvm_async_pf *work =
                        list_first_entry(&vcpu->async_pf.done,
                                         typeof(*work), link);
                list_del(&work->link);
                kmem_cache_free(async_pf_cache, work);
        }
        spin_unlock(&vcpu->async_pf.lock);

        vcpu->async_pf.queued = 0;
}

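/*
 * Deliver completed requests while the arch code can accept them:
 * notify the arch that the page is ready, let the guest know it is
 * present (in the non-sync configuration), and free the request.
 */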
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
        struct kvm_async_pf *work;

        while (!list_empty_careful(&vcpu->async_pf.done) &&
               kvm_arch_can_inject_async_page_present(vcpu)) {
                spin_lock(&vcpu->async_pf.lock);
                work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
                                        link);
                list_del(&work->link);
                spin_unlock(&vcpu->async_pf.lock);

                kvm_arch_async_page_ready(vcpu, work);
                kvm_async_page_present_async(vcpu, work);

                list_del(&work->queue);
                vcpu->async_pf.queued--;
                kmem_cache_free(async_pf_cache, work);
        }
}

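/*
 * Queue an async page fault for @gva (host address @hva): take
 * references on the mm and the VM, then hand the fault to a worker.
 * Returns 1 if the fault will be handled asynchronously, 0 if the
 * caller should fall back to handling it synchronously.
 */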
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
                       struct kvm_arch_async_pf *arch)
{
        struct kvm_async_pf *work;

        if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
                return 0;

        /* setup delayed work */

        /*
         * do alloc nowait: if the allocation would have to sleep, we may
         * as well sleep faulting the page in synchronously instead
         */
        work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
        if (!work)
                return 0;

        work->wakeup_all = false;
        work->vcpu = vcpu;
        work->gva = gva;
        work->addr = hva;
        work->arch = *arch;
        work->mm = current->mm;
        mmget(work->mm);
        kvm_get_kvm(work->vcpu->kvm);

        /*
         * this can't really happen, otherwise gfn_to_pfn_async would
         * succeed
         */
        if (unlikely(kvm_is_error_hva(work->addr)))
                goto retry_sync;

        INIT_WORK(&work->work, async_pf_execute);
        if (!schedule_work(&work->work))
                goto retry_sync;

        list_add_tail(&work->queue, &vcpu->async_pf.queue);
        vcpu->async_pf.queued++;
        kvm_arch_async_page_not_present(vcpu, work);
        return 1;
retry_sync:
        kvm_put_kvm(work->vcpu->kvm);
        mmput(work->mm);
        kmem_cache_free(async_pf_cache, work);
        return 0;
}

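/*
 * Queue a special "wakeup all" completion for @vcpu unless completions
 * are already pending. The allocation uses GFP_ATOMIC, so this is safe
 * to call where sleeping is not allowed.
 */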
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
{
        struct kvm_async_pf *work;

        if (!list_empty_careful(&vcpu->async_pf.done))
                return 0;

        work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
        if (!work)
                return -ENOMEM;

        work->wakeup_all = true;
        INIT_LIST_HEAD(&work->queue); /* for list_del to work */

        spin_lock(&vcpu->async_pf.lock);
        list_add_tail(&work->link, &vcpu->async_pf.done);
        spin_unlock(&vcpu->async_pf.lock);

        vcpu->async_pf.queued++;
        return 0;
}