/*
 * VFIO-KVM bridge pseudo device
 *
 * Copyright (C) 2013 Red Hat, Inc.  All rights reserved.
 *	Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include "vfio.h"

#ifdef CONFIG_SPAPR_TCE_IOMMU
#include <asm/kvm_ppc.h>
#endif

struct kvm_vfio_group {
	struct list_head node;
	struct vfio_group *vfio_group;
};

struct kvm_vfio {
	struct list_head group_list;
	struct mutex lock;
	bool noncoherent;
};

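/*
 * The vfio symbols below are resolved at runtime via symbol_get() rather
 * than linked directly, so kvm carries no hard module dependency on vfio;
 * if vfio is not loaded these wrappers simply fail gracefully.
 */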
static struct vfio_group *kvm_vfio_group_get_external_user(struct file *filep)
{
	struct vfio_group *vfio_group;
	struct vfio_group *(*fn)(struct file *);

	fn = symbol_get(vfio_group_get_external_user);
	if (!fn)
		return ERR_PTR(-EINVAL);

	vfio_group = fn(filep);

	symbol_put(vfio_group_get_external_user);

	return vfio_group;
}

static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group)
{
	void (*fn)(struct vfio_group *);

	fn = symbol_get(vfio_group_put_external_user);
	if (!fn)
		return;

	fn(vfio_group);

	symbol_put(vfio_group_put_external_user);
}

static void kvm_vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
{
	void (*fn)(struct vfio_group *, struct kvm *);

	fn = symbol_get(vfio_group_set_kvm);
	if (!fn)
		return;

	fn(group, kvm);

	symbol_put(vfio_group_set_kvm);
}

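/*
 * A group counts as coherent when its container reports the
 * VFIO_DMA_CC_IOMMU extension, i.e. the IOMMU enforces DMA cache
 * coherency.  If the check cannot be made we conservatively report
 * noncoherent.
 */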
static bool kvm_vfio_group_is_coherent(struct vfio_group *vfio_group)
{
	long (*fn)(struct vfio_group *, unsigned long);
	long ret;

	fn = symbol_get(vfio_external_check_extension);
	if (!fn)
		return false;

	ret = fn(vfio_group, VFIO_DMA_CC_IOMMU);

	symbol_put(vfio_external_check_extension);

	return ret > 0;
}

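/*
 * sPAPR TCE support: resolve a VFIO group to its underlying IOMMU group
 * via the external-user IOMMU id, so KVM can attach and release the
 * in-kernel TCE tables used to accelerate H_PUT_TCE handling.
 */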
#ifdef CONFIG_SPAPR_TCE_IOMMU
static int kvm_vfio_external_user_iommu_id(struct vfio_group *vfio_group)
{
	int (*fn)(struct vfio_group *);
	int ret = -EINVAL;

	fn = symbol_get(vfio_external_user_iommu_id);
	if (!fn)
		return ret;

	ret = fn(vfio_group);

	symbol_put(vfio_external_user_iommu_id);

	return ret;
}

static struct iommu_group *kvm_vfio_group_get_iommu_group(
		struct vfio_group *group)
{
	int group_id = kvm_vfio_external_user_iommu_id(group);

	if (group_id < 0)
		return NULL;

	return iommu_group_get_by_id(group_id);
}

static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
		struct vfio_group *vfio_group)
{
	struct iommu_group *grp = kvm_vfio_group_get_iommu_group(vfio_group);

	if (WARN_ON_ONCE(!grp))
		return;

	kvm_spapr_tce_release_iommu_group(kvm, grp);
	iommu_group_put(grp);
}
#endif

/*
 * Groups can use the same or different IOMMU domains.  If the same then
 * adding a new group may change the coherency of groups we've previously
 * been told about.  We don't want to care about any of that so we retest
 * each group and bail as soon as we find one that's noncoherent.  This
 * means we only ever [un]register_noncoherent_dma once for the whole device.
 */
static void kvm_vfio_update_coherency(struct kvm_device *dev)
{
	struct kvm_vfio *kv = dev->private;
	bool noncoherent = false;
	struct kvm_vfio_group *kvg;

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		if (!kvm_vfio_group_is_coherent(kvg->vfio_group)) {
			noncoherent = true;
			break;
		}
	}

	if (noncoherent != kv->noncoherent) {
		kv->noncoherent = noncoherent;

		if (kv->noncoherent)
			kvm_arch_register_noncoherent_dma(dev->kvm);
		else
			kvm_arch_unregister_noncoherent_dma(dev->kvm);
	}

	mutex_unlock(&kv->lock);
}

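/*
 * Userspace drives everything below through the KVM device API: create a
 * KVM_DEV_TYPE_VFIO device on the VM, then pass VFIO group fds via
 * KVM_SET_DEVICE_ATTR.  Illustrative userspace-side sketch (not part of
 * this file; "/dev/vfio/26" is a made-up example group):
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *
 *	int32_t group_fd = open("/dev/vfio/26", O_RDWR);
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_VFIO_GROUP,
 *		.attr  = KVM_DEV_VFIO_GROUP_ADD,
 *		.addr  = (__u64)(unsigned long)&group_fd,
 *	};
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
 */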
static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
{
	struct kvm_vfio *kv = dev->private;
	struct vfio_group *vfio_group;
	struct kvm_vfio_group *kvg;
	int32_t __user *argp = (int32_t __user *)(unsigned long)arg;
	struct fd f;
	int32_t fd;
	int ret;

	switch (attr) {
	case KVM_DEV_VFIO_GROUP_ADD:
		if (get_user(fd, argp))
			return -EFAULT;

		f = fdget(fd);
		if (!f.file)
			return -EBADF;

		vfio_group = kvm_vfio_group_get_external_user(f.file);
		fdput(f);

		if (IS_ERR(vfio_group))
			return PTR_ERR(vfio_group);

		mutex_lock(&kv->lock);

		list_for_each_entry(kvg, &kv->group_list, node) {
			if (kvg->vfio_group == vfio_group) {
				mutex_unlock(&kv->lock);
				kvm_vfio_group_put_external_user(vfio_group);
				return -EEXIST;
			}
		}

		kvg = kzalloc(sizeof(*kvg), GFP_KERNEL);
		if (!kvg) {
			mutex_unlock(&kv->lock);
			kvm_vfio_group_put_external_user(vfio_group);
			return -ENOMEM;
		}

		list_add_tail(&kvg->node, &kv->group_list);
		kvg->vfio_group = vfio_group;

		kvm_arch_start_assignment(dev->kvm);

		mutex_unlock(&kv->lock);

		kvm_vfio_group_set_kvm(vfio_group, dev->kvm);

		kvm_vfio_update_coherency(dev);

		return 0;

	case KVM_DEV_VFIO_GROUP_DEL:
		if (get_user(fd, argp))
			return -EFAULT;

		f = fdget(fd);
		if (!f.file)
			return -EBADF;

		vfio_group = kvm_vfio_group_get_external_user(f.file);
		fdput(f);

		if (IS_ERR(vfio_group))
			return PTR_ERR(vfio_group);

		ret = -ENOENT;

		mutex_lock(&kv->lock);

		list_for_each_entry(kvg, &kv->group_list, node) {
			if (kvg->vfio_group != vfio_group)
				continue;

			list_del(&kvg->node);
			kvm_vfio_group_put_external_user(kvg->vfio_group);
			kfree(kvg);
			/*
			 * Drop the assignment count only when a group was
			 * actually removed, keeping it balanced with
			 * KVM_DEV_VFIO_GROUP_ADD.
			 */
			kvm_arch_end_assignment(dev->kvm);
			ret = 0;
			break;
		}

		mutex_unlock(&kv->lock);

#ifdef CONFIG_SPAPR_TCE_IOMMU
		kvm_spapr_tce_release_vfio_group(dev->kvm, vfio_group);
#endif
		kvm_vfio_group_set_kvm(vfio_group, NULL);

		kvm_vfio_group_put_external_user(vfio_group);

		kvm_vfio_update_coherency(dev);

		return ret;

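	/*
	 * KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE links a KVM TCE-table fd with the
	 * IOMMU tables of an already-added group, enabling in-kernel
	 * handling of the guest's H_PUT_TCE (and related) hypercalls.
	 */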
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE: {
		struct kvm_vfio_spapr_tce param;
		struct kvm_vfio *kv = dev->private;
		struct vfio_group *vfio_group;
		struct kvm_vfio_group *kvg;
		struct fd f;
		struct iommu_group *grp;

		if (copy_from_user(&param, (void __user *)arg,
				   sizeof(struct kvm_vfio_spapr_tce)))
			return -EFAULT;

		f = fdget(param.groupfd);
		if (!f.file)
			return -EBADF;

		vfio_group = kvm_vfio_group_get_external_user(f.file);
		fdput(f);

		if (IS_ERR(vfio_group))
			return PTR_ERR(vfio_group);

		grp = kvm_vfio_group_get_iommu_group(vfio_group);
		if (WARN_ON_ONCE(!grp)) {
			kvm_vfio_group_put_external_user(vfio_group);
			return -EIO;
		}

		ret = -ENOENT;

		mutex_lock(&kv->lock);

		list_for_each_entry(kvg, &kv->group_list, node) {
			if (kvg->vfio_group != vfio_group)
				continue;

			ret = kvm_spapr_tce_attach_iommu_group(dev->kvm,
							       param.tablefd,
							       grp);
			break;
		}

		mutex_unlock(&kv->lock);

		iommu_group_put(grp);
		kvm_vfio_group_put_external_user(vfio_group);

		return ret;
	}
#endif /* CONFIG_SPAPR_TCE_IOMMU */
	}

	return -ENXIO;
}

static int kvm_vfio_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_VFIO_GROUP:
		return kvm_vfio_set_group(dev, attr->attr, attr->addr);
	}

	return -ENXIO;
}

static int kvm_vfio_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_VFIO_GROUP:
		switch (attr->attr) {
		case KVM_DEV_VFIO_GROUP_ADD:
		case KVM_DEV_VFIO_GROUP_DEL:
#ifdef CONFIG_SPAPR_TCE_IOMMU
		case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
#endif
			return 0;
		}

		break;
	}

	return -ENXIO;
}

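/*
 * Device teardown: for each group still attached, mirror the
 * KVM_DEV_VFIO_GROUP_DEL path so the external-user reference, the kvm
 * back-pointer and the assignment count are all released in the same order.
 */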
static void kvm_vfio_destroy(struct kvm_device *dev)
{
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_group *kvg, *tmp;

	list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
#ifdef CONFIG_SPAPR_TCE_IOMMU
		kvm_spapr_tce_release_vfio_group(dev->kvm, kvg->vfio_group);
#endif
		kvm_vfio_group_set_kvm(kvg->vfio_group, NULL);
		kvm_vfio_group_put_external_user(kvg->vfio_group);
		list_del(&kvg->node);
		kfree(kvg);
		kvm_arch_end_assignment(dev->kvm);
	}

	kvm_vfio_update_coherency(dev);

	kfree(kv);
	kfree(dev);	/* alloc by kvm_ioctl_create_device, free by .destroy */
}

static int kvm_vfio_create(struct kvm_device *dev, u32 type);

static struct kvm_device_ops kvm_vfio_ops = {
	.name = "kvm-vfio",
	.create = kvm_vfio_create,
	.destroy = kvm_vfio_destroy,
	.set_attr = kvm_vfio_set_attr,
	.has_attr = kvm_vfio_has_attr,
};

static int kvm_vfio_create(struct kvm_device *dev, u32 type)
{
	struct kvm_device *tmp;
	struct kvm_vfio *kv;

	/* Only one VFIO "device" per VM */
	list_for_each_entry(tmp, &dev->kvm->devices, vm_node)
		if (tmp->ops == &kvm_vfio_ops)
			return -EBUSY;

	kv = kzalloc(sizeof(*kv), GFP_KERNEL);
	if (!kv)
		return -ENOMEM;

	INIT_LIST_HEAD(&kv->group_list);
	mutex_init(&kv->lock);

	dev->private = kv;

	return 0;
}

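/*
 * Module plumbing: kvm_init() and kvm_exit() call these to (un)register
 * the KVM_DEV_TYPE_VFIO device type with the generic KVM device framework.
 */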
int kvm_vfio_ops_init(void)
{
	return kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO);
}

void kvm_vfio_ops_exit(void)
{
	kvm_unregister_device_ops(KVM_DEV_TYPE_VFIO);
}