virt/kvm/async_pf.c
/*
 * kvm asynchronous fault support
 *
 * Copyright 2010 Red Hat, Inc.
 *
 * Author:
 *      Gleb Natapov <gleb@redhat.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>

#include "async_pf.h"
#include <trace/events/kvm.h>
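/*
 * Exactly one of the two helpers below is active, depending on whether
 * the architecture selects CONFIG_KVM_ASYNC_PF_SYNC: with it, the
 * "page present" notification is delivered synchronously from the
 * worker; without it, delivery is deferred to
 * kvm_check_async_pf_completion() on the vcpu thread.
 */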
static inline void kvm_async_page_present_sync(struct kvm_vcpu *vcpu,
					       struct kvm_async_pf *work)
{
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
	kvm_arch_async_page_present(vcpu, work);
#endif
}
static inline void kvm_async_page_present_async(struct kvm_vcpu *vcpu,
						struct kvm_async_pf *work)
{
#ifndef CONFIG_KVM_ASYNC_PF_SYNC
	kvm_arch_async_page_present(vcpu, work);
#endif
}
static struct kmem_cache *async_pf_cache;
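/*
 * Module-wide setup/teardown of the slab cache backing all
 * struct kvm_async_pf allocations.
 */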
int kvm_async_pf_init(void)
{
	async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);

	if (!async_pf_cache)
		return -ENOMEM;

	return 0;
}
void kvm_async_pf_deinit(void)
{
	kmem_cache_destroy(async_pf_cache);
	async_pf_cache = NULL;
}
void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
{
	INIT_LIST_HEAD(&vcpu->async_pf.done);
	INIT_LIST_HEAD(&vcpu->async_pf.queue);
	spin_lock_init(&vcpu->async_pf.lock);
}
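/*
 * Workqueue callback: fault the page in on behalf of the guest, then
 * move the request onto the vcpu's "done" list and wake the vcpu if
 * it went to sleep waiting for this page.
 */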
static void async_pf_execute(struct work_struct *work)
{
	struct kvm_async_pf *apf =
		container_of(work, struct kvm_async_pf, work);
	struct mm_struct *mm = apf->mm;
	struct kvm_vcpu *vcpu = apf->vcpu;
	unsigned long addr = apf->addr;
	gva_t gva = apf->gva;

	might_sleep();

	/*
	 * This work is run asynchronously to the task which owns
	 * mm and might be done in another context, so we must
	 * access remotely.
	 */
	down_read(&mm->mmap_sem);
	get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE, NULL, NULL,
			NULL);
	up_read(&mm->mmap_sem);

	kvm_async_page_present_sync(vcpu, apf);

	spin_lock(&vcpu->async_pf.lock);
	list_add_tail(&apf->link, &vcpu->async_pf.done);
	apf->vcpu = NULL;
	spin_unlock(&vcpu->async_pf.lock);

	/*
	 * apf may be freed by kvm_check_async_pf_completion() after
	 * this point
	 */

	trace_kvm_async_pf_completed(addr, gva);

	/*
	 * This memory barrier pairs with prepare_to_wait's set_current_state()
	 */
	smp_mb();
	if (swait_active(&vcpu->wq))
		swake_up(&vcpu->wq);

	mmput(mm);
	kvm_put_kvm(vcpu->kvm);
}
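/*
 * Tear down all outstanding async page fault work for a vcpu: cancel
 * or flush every item still queued, free everything already completed,
 * and drop the mm and kvm references held by cancelled items.
 */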
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->async_pf.lock);

	/* cancel outstanding work queue item */
	while (!list_empty(&vcpu->async_pf.queue)) {
		struct kvm_async_pf *work =
			list_first_entry(&vcpu->async_pf.queue,
					 typeof(*work), queue);
		list_del(&work->queue);

		/*
		 * We know it's present in vcpu->async_pf.done, do
		 * nothing here.
		 */
		if (!work->vcpu)
			continue;

		spin_unlock(&vcpu->async_pf.lock);
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
		flush_work(&work->work);
#else
		if (cancel_work_sync(&work->work)) {
			mmput(work->mm);
			kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
			kmem_cache_free(async_pf_cache, work);
		}
#endif
		spin_lock(&vcpu->async_pf.lock);
	}

	while (!list_empty(&vcpu->async_pf.done)) {
		struct kvm_async_pf *work =
			list_first_entry(&vcpu->async_pf.done,
					 typeof(*work), link);
		list_del(&work->link);
		kmem_cache_free(async_pf_cache, work);
	}
	spin_unlock(&vcpu->async_pf.lock);

	vcpu->async_pf.queued = 0;
}
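/*
 * Called from the vcpu run loop: drain the "done" list, telling the
 * arch code each page is ready and, in the !CONFIG_KVM_ASYNC_PF_SYNC
 * case, injecting the "page present" notification into the guest.
 */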
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;

	while (!list_empty_careful(&vcpu->async_pf.done) &&
	      kvm_arch_can_inject_async_page_present(vcpu)) {
		spin_lock(&vcpu->async_pf.lock);
		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
					      link);
		list_del(&work->link);
		spin_unlock(&vcpu->async_pf.lock);

		kvm_arch_async_page_ready(vcpu, work);
		kvm_async_page_present_async(vcpu, work);

		list_del(&work->queue);
		vcpu->async_pf.queued--;
		kmem_cache_free(async_pf_cache, work);
	}
}
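/*
 * Queue an async page fault: allocate a work item, take references on
 * the mm and the VM so they outlive the worker, and schedule
 * async_pf_execute().  Returns 1 if the fault was queued, 0 otherwise.
 */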
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
		       struct kvm_arch_async_pf *arch)
{
	struct kvm_async_pf *work;

	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
		return 0;

	/* setup delayed work */

	/*
	 * do alloc nowait since if we are going to sleep anyway we
	 * may as well sleep faulting in page
	 */
	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
	if (!work)
		return 0;

	work->wakeup_all = false;
	work->vcpu = vcpu;
	work->gva = gva;
	work->addr = hva;
	work->arch = *arch;
	work->mm = current->mm;
	mmget(work->mm);
	kvm_get_kvm(work->vcpu->kvm);

	/* this can't really happen otherwise gfn_to_pfn_async
	   would succeed */
	if (unlikely(kvm_is_error_hva(work->addr)))
		goto retry_sync;

	INIT_WORK(&work->work, async_pf_execute);
	if (!schedule_work(&work->work))
		goto retry_sync;

	list_add_tail(&work->queue, &vcpu->async_pf.queue);
	vcpu->async_pf.queued++;
	kvm_arch_async_page_not_present(vcpu, work);
	return 1;
retry_sync:
	kvm_put_kvm(work->vcpu->kvm);
	mmput(work->mm);
	kmem_cache_free(async_pf_cache, work);
	return 0;
}
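/*
 * Queue a special "wakeup all" item on the done list; the arch code
 * uses it to wake every guest task sleeping on an async page fault
 * instead of reporting a single page as present.
 */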
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;

	if (!list_empty_careful(&vcpu->async_pf.done))
		return 0;

	work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	work->wakeup_all = true;
	INIT_LIST_HEAD(&work->queue); /* for list_del to work */

	spin_lock(&vcpu->async_pf.lock);
	list_add_tail(&work->link, &vcpu->async_pf.done);
	spin_unlock(&vcpu->async_pf.lock);

	vcpu->async_pf.queued++;
	return 0;
}