Commit | Line | Data |
---|---|---|
cba3345c AW |
1 | /* |
2 | * VFIO core | |
3 | * | |
4 | * Copyright (C) 2012 Red Hat, Inc. All rights reserved. | |
5 | * Author: Alex Williamson <alex.williamson@redhat.com> | |
6 | * | |
7 | * This program is free software; you can redistribute it and/or modify | |
8 | * it under the terms of the GNU General Public License version 2 as | |
9 | * published by the Free Software Foundation. | |
10 | * | |
11 | * Derived from original vfio: | |
12 | * Copyright 2010 Cisco Systems, Inc. All rights reserved. | |
13 | * Author: Tom Lyon, pugs@cisco.com | |
14 | */ | |
15 | ||
16 | #include <linux/cdev.h> | |
17 | #include <linux/compat.h> | |
18 | #include <linux/device.h> | |
19 | #include <linux/file.h> | |
20 | #include <linux/anon_inodes.h> | |
21 | #include <linux/fs.h> | |
22 | #include <linux/idr.h> | |
23 | #include <linux/iommu.h> | |
24 | #include <linux/list.h> | |
25 | #include <linux/module.h> | |
26 | #include <linux/mutex.h> | |
9587f44a | 27 | #include <linux/rwsem.h> |
cba3345c AW |
28 | #include <linux/sched.h> |
29 | #include <linux/slab.h> | |
664e9386 | 30 | #include <linux/stat.h> |
cba3345c AW |
31 | #include <linux/string.h> |
32 | #include <linux/uaccess.h> | |
33 | #include <linux/vfio.h> | |
34 | #include <linux/wait.h> | |
35 | ||
36 | #define DRIVER_VERSION "0.3" | |
37 | #define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>" | |
38 | #define DRIVER_DESC "VFIO - User Level meta-driver" | |
39 | ||
40 | static struct vfio { | |
41 | struct class *class; | |
42 | struct list_head iommu_drivers_list; | |
43 | struct mutex iommu_drivers_lock; | |
44 | struct list_head group_list; | |
45 | struct idr group_idr; | |
46 | struct mutex group_lock; | |
47 | struct cdev group_cdev; | |
48 | struct device *dev; | |
49 | dev_t devt; | |
50 | struct cdev cdev; | |
51 | wait_queue_head_t release_q; | |
52 | } vfio; | |
53 | ||
54 | struct vfio_iommu_driver { | |
55 | const struct vfio_iommu_driver_ops *ops; | |
56 | struct list_head vfio_next; | |
57 | }; | |
58 | ||
59 | struct vfio_container { | |
60 | struct kref kref; | |
61 | struct list_head group_list; | |
9587f44a | 62 | struct rw_semaphore group_lock; |
cba3345c AW |
63 | struct vfio_iommu_driver *iommu_driver; |
64 | void *iommu_data; | |
65 | }; | |
66 | ||
67 | struct vfio_group { | |
68 | struct kref kref; | |
69 | int minor; | |
70 | atomic_t container_users; | |
71 | struct iommu_group *iommu_group; | |
72 | struct vfio_container *container; | |
73 | struct list_head device_list; | |
74 | struct mutex device_lock; | |
75 | struct device *dev; | |
76 | struct notifier_block nb; | |
77 | struct list_head vfio_next; | |
78 | struct list_head container_next; | |
6d6768c6 | 79 | atomic_t opened; |
cba3345c AW |
80 | }; |
81 | ||
82 | struct vfio_device { | |
83 | struct kref kref; | |
84 | struct device *dev; | |
85 | const struct vfio_device_ops *ops; | |
86 | struct vfio_group *group; | |
87 | struct list_head group_next; | |
88 | void *device_data; | |
89 | }; | |
90 | ||
91 | /** | |
92 | * IOMMU driver registration | |
93 | */ | |
94 | int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops) | |
95 | { | |
96 | struct vfio_iommu_driver *driver, *tmp; | |
97 | ||
98 | driver = kzalloc(sizeof(*driver), GFP_KERNEL); | |
99 | if (!driver) | |
100 | return -ENOMEM; | |
101 | ||
102 | driver->ops = ops; | |
103 | ||
104 | mutex_lock(&vfio.iommu_drivers_lock); | |
105 | ||
106 | /* Check for duplicates */ | |
107 | list_for_each_entry(tmp, &vfio.iommu_drivers_list, vfio_next) { | |
108 | if (tmp->ops == ops) { | |
109 | mutex_unlock(&vfio.iommu_drivers_lock); | |
110 | kfree(driver); | |
111 | return -EINVAL; | |
112 | } | |
113 | } | |
114 | ||
115 | list_add(&driver->vfio_next, &vfio.iommu_drivers_list); | |
116 | ||
117 | mutex_unlock(&vfio.iommu_drivers_lock); | |
118 | ||
119 | return 0; | |
120 | } | |
121 | EXPORT_SYMBOL_GPL(vfio_register_iommu_driver); | |
122 | ||
123 | void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops) | |
124 | { | |
125 | struct vfio_iommu_driver *driver; | |
126 | ||
127 | mutex_lock(&vfio.iommu_drivers_lock); | |
128 | list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) { | |
129 | if (driver->ops == ops) { | |
130 | list_del(&driver->vfio_next); | |
131 | mutex_unlock(&vfio.iommu_drivers_lock); | |
132 | kfree(driver); | |
133 | return; | |
134 | } | |
135 | } | |
136 | mutex_unlock(&vfio.iommu_drivers_lock); | |
137 | } | |
138 | EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver); | |
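/*
 * Illustrative sketch, not part of this file: an IOMMU backend is expected
 * to register its ops from module init and unregister them on exit (the
 * "my_iommu" names below are hypothetical; vfio_iommu_type1 follows this
 * pattern).
 *
 *	static const struct vfio_iommu_driver_ops my_iommu_ops = {
 *		.name		= "my_iommu",
 *		.owner		= THIS_MODULE,
 *		.open		= my_iommu_open,
 *		.release	= my_iommu_release,
 *		.ioctl		= my_iommu_ioctl,
 *		.attach_group	= my_iommu_attach_group,
 *		.detach_group	= my_iommu_detach_group,
 *	};
 *
 *	static int __init my_iommu_init(void)
 *	{
 *		return vfio_register_iommu_driver(&my_iommu_ops);
 *	}
 *
 *	static void __exit my_iommu_exit(void)
 *	{
 *		vfio_unregister_iommu_driver(&my_iommu_ops);
 *	}
 */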
139 | ||
140 | /** | |
141 | * Group minor allocation/free - both called with vfio.group_lock held | |
142 | */ | |
143 | static int vfio_alloc_group_minor(struct vfio_group *group) | |
144 | { | |
cba3345c | 145 | /* index 0 is used by /dev/vfio/vfio */ |
a1c36b16 | 146 | return idr_alloc(&vfio.group_idr, group, 1, MINORMASK + 1, GFP_KERNEL); |
cba3345c AW |
147 | } |
148 | ||
149 | static void vfio_free_group_minor(int minor) | |
150 | { | |
151 | idr_remove(&vfio.group_idr, minor); | |
152 | } | |
153 | ||
154 | static int vfio_iommu_group_notifier(struct notifier_block *nb, | |
155 | unsigned long action, void *data); | |
156 | static void vfio_group_get(struct vfio_group *group); | |
157 | ||
158 | /** | |
159 | * Container objects - containers are created when /dev/vfio/vfio is | |
160 | * opened, but their lifecycle extends until the last user is done, so | |
161 | * it's freed via kref. Must support container/group/device being | |
162 | * closed in any order. | |
163 | */ | |
164 | static void vfio_container_get(struct vfio_container *container) | |
165 | { | |
166 | kref_get(&container->kref); | |
167 | } | |
168 | ||
169 | static void vfio_container_release(struct kref *kref) | |
170 | { | |
171 | struct vfio_container *container; | |
172 | container = container_of(kref, struct vfio_container, kref); | |
173 | ||
174 | kfree(container); | |
175 | } | |
176 | ||
177 | static void vfio_container_put(struct vfio_container *container) | |
178 | { | |
179 | kref_put(&container->kref, vfio_container_release); | |
180 | } | |
181 | ||
9df7b25a JL |
182 | static void vfio_group_unlock_and_free(struct vfio_group *group) |
183 | { | |
184 | mutex_unlock(&vfio.group_lock); | |
185 | /* | |
186 | * Unregister outside of lock. A spurious callback is harmless now | |
187 | * that the group is no longer in vfio.group_list. | |
188 | */ | |
189 | iommu_group_unregister_notifier(group->iommu_group, &group->nb); | |
190 | kfree(group); | |
191 | } | |
192 | ||
cba3345c AW |
193 | /** |
194 | * Group objects - create, release, get, put, search | |
195 | */ | |
196 | static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group) | |
197 | { | |
198 | struct vfio_group *group, *tmp; | |
199 | struct device *dev; | |
200 | int ret, minor; | |
201 | ||
202 | group = kzalloc(sizeof(*group), GFP_KERNEL); | |
203 | if (!group) | |
204 | return ERR_PTR(-ENOMEM); | |
205 | ||
206 | kref_init(&group->kref); | |
207 | INIT_LIST_HEAD(&group->device_list); | |
208 | mutex_init(&group->device_lock); | |
209 | atomic_set(&group->container_users, 0); | |
6d6768c6 | 210 | atomic_set(&group->opened, 0); |
cba3345c AW |
211 | group->iommu_group = iommu_group; |
212 | ||
213 | group->nb.notifier_call = vfio_iommu_group_notifier; | |
214 | ||
215 | /* | |
216 | * blocking notifiers acquire a rwsem around registering and hold | |
217 | * it around callback. Therefore, need to register outside of | |
218 | * vfio.group_lock to avoid A-B/B-A contention. Our callback won't | |
219 | * do anything unless it can find the group in vfio.group_list, so | |
220 | * no harm in registering early. | |
221 | */ | |
222 | ret = iommu_group_register_notifier(iommu_group, &group->nb); | |
223 | if (ret) { | |
224 | kfree(group); | |
225 | return ERR_PTR(ret); | |
226 | } | |
227 | ||
228 | mutex_lock(&vfio.group_lock); | |
229 | ||
230 | minor = vfio_alloc_group_minor(group); | |
231 | if (minor < 0) { | |
9df7b25a | 232 | vfio_group_unlock_and_free(group); |
cba3345c AW |
233 | return ERR_PTR(minor); |
234 | } | |
235 | ||
236 | /* Did we race creating this group? */ | |
237 | list_for_each_entry(tmp, &vfio.group_list, vfio_next) { | |
238 | if (tmp->iommu_group == iommu_group) { | |
239 | vfio_group_get(tmp); | |
240 | vfio_free_group_minor(minor); | |
9df7b25a | 241 | vfio_group_unlock_and_free(group); |
cba3345c AW |
242 | return tmp; |
243 | } | |
244 | } | |
245 | ||
246 | dev = device_create(vfio.class, NULL, MKDEV(MAJOR(vfio.devt), minor), | |
247 | group, "%d", iommu_group_id(iommu_group)); | |
248 | if (IS_ERR(dev)) { | |
249 | vfio_free_group_minor(minor); | |
9df7b25a | 250 | vfio_group_unlock_and_free(group); |
cba3345c AW |
251 | return (struct vfio_group *)dev; /* ERR_PTR */ |
252 | } | |
253 | ||
254 | group->minor = minor; | |
255 | group->dev = dev; | |
256 | ||
257 | list_add(&group->vfio_next, &vfio.group_list); | |
258 | ||
259 | mutex_unlock(&vfio.group_lock); | |
260 | ||
261 | return group; | |
262 | } | |
263 | ||
6d2cd3ce | 264 | /* called with vfio.group_lock held */ |
cba3345c AW |
265 | static void vfio_group_release(struct kref *kref) |
266 | { | |
267 | struct vfio_group *group = container_of(kref, struct vfio_group, kref); | |
268 | ||
269 | WARN_ON(!list_empty(&group->device_list)); | |
270 | ||
271 | device_destroy(vfio.class, MKDEV(MAJOR(vfio.devt), group->minor)); | |
272 | list_del(&group->vfio_next); | |
273 | vfio_free_group_minor(group->minor); | |
9df7b25a | 274 | vfio_group_unlock_and_free(group); |
cba3345c AW |
275 | } |
276 | ||
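/*
 * The final reference must be dropped with vfio.group_lock held because
 * vfio_group_release() unlinks the group from vfio.group_list and frees the
 * minor.  kref_put_mutex() below only acquires the mutex when the count is
 * about to reach zero; vfio_group_unlock_and_free() drops it again.
 */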
277 | static void vfio_group_put(struct vfio_group *group) | |
278 | { | |
6d2cd3ce | 279 | kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock); |
cba3345c AW |
280 | } |
281 | ||
282 | /* Assume group_lock or group reference is held */ | |
283 | static void vfio_group_get(struct vfio_group *group) | |
284 | { | |
285 | kref_get(&group->kref); | |
286 | } | |
287 | ||
288 | /* | |
289 | * Not really a try as we will sleep on the mutex, but we need to make | |
290 | * sure the group pointer is valid under lock and get a reference. | |
291 | */ | |
292 | static struct vfio_group *vfio_group_try_get(struct vfio_group *group) | |
293 | { | |
294 | struct vfio_group *target = group; | |
295 | ||
296 | mutex_lock(&vfio.group_lock); | |
297 | list_for_each_entry(group, &vfio.group_list, vfio_next) { | |
298 | if (group == target) { | |
299 | vfio_group_get(group); | |
300 | mutex_unlock(&vfio.group_lock); | |
301 | return group; | |
302 | } | |
303 | } | |
304 | mutex_unlock(&vfio.group_lock); | |
305 | ||
306 | return NULL; | |
307 | } | |
308 | ||
309 | static | |
310 | struct vfio_group *vfio_group_get_from_iommu(struct iommu_group *iommu_group) | |
311 | { | |
312 | struct vfio_group *group; | |
313 | ||
314 | mutex_lock(&vfio.group_lock); | |
315 | list_for_each_entry(group, &vfio.group_list, vfio_next) { | |
316 | if (group->iommu_group == iommu_group) { | |
317 | vfio_group_get(group); | |
318 | mutex_unlock(&vfio.group_lock); | |
319 | return group; | |
320 | } | |
321 | } | |
322 | mutex_unlock(&vfio.group_lock); | |
323 | ||
324 | return NULL; | |
325 | } | |
326 | ||
327 | static struct vfio_group *vfio_group_get_from_minor(int minor) | |
328 | { | |
329 | struct vfio_group *group; | |
330 | ||
331 | mutex_lock(&vfio.group_lock); | |
332 | group = idr_find(&vfio.group_idr, minor); | |
333 | if (!group) { | |
334 | mutex_unlock(&vfio.group_lock); | |
335 | return NULL; | |
336 | } | |
337 | vfio_group_get(group); | |
338 | mutex_unlock(&vfio.group_lock); | |
339 | ||
340 | return group; | |
341 | } | |
342 | ||
343 | /** | |
344 | * Device objects - create, release, get, put, search | |
345 | */ | |
346 | static | |
347 | struct vfio_device *vfio_group_create_device(struct vfio_group *group, | |
348 | struct device *dev, | |
349 | const struct vfio_device_ops *ops, | |
350 | void *device_data) | |
351 | { | |
352 | struct vfio_device *device; | |
353 | int ret; | |
354 | ||
355 | device = kzalloc(sizeof(*device), GFP_KERNEL); | |
356 | if (!device) | |
357 | return ERR_PTR(-ENOMEM); | |
358 | ||
359 | kref_init(&device->kref); | |
360 | device->dev = dev; | |
361 | device->group = group; | |
362 | device->ops = ops; | |
363 | device->device_data = device_data; | |
364 | ||
365 | ret = dev_set_drvdata(dev, device); | |
366 | if (ret) { | |
367 | kfree(device); | |
368 | return ERR_PTR(ret); | |
369 | } | |
370 | ||
371 | /* No need to get group_lock, caller has group reference */ | |
372 | vfio_group_get(group); | |
373 | ||
374 | mutex_lock(&group->device_lock); | |
375 | list_add(&device->group_next, &group->device_list); | |
376 | mutex_unlock(&group->device_lock); | |
377 | ||
378 | return device; | |
379 | } | |
380 | ||
381 | static void vfio_device_release(struct kref *kref) | |
382 | { | |
383 | struct vfio_device *device = container_of(kref, | |
384 | struct vfio_device, kref); | |
385 | struct vfio_group *group = device->group; | |
386 | ||
cba3345c AW |
387 | list_del(&device->group_next); |
388 | mutex_unlock(&group->device_lock); | |
389 | ||
390 | dev_set_drvdata(device->dev, NULL); | |
391 | ||
392 | kfree(device); | |
393 | ||
394 | /* vfio_del_group_dev may be waiting for this device */ | |
395 | wake_up(&vfio.release_q); | |
396 | } | |
397 | ||
398 | /* Device reference always implies a group reference */ | |
44f50716 | 399 | void vfio_device_put(struct vfio_device *device) |
cba3345c | 400 | { |
934ad4c2 | 401 | struct vfio_group *group = device->group; |
90b1253e | 402 | kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock); |
934ad4c2 | 403 | vfio_group_put(group); |
cba3345c | 404 | } |
44f50716 | 405 | EXPORT_SYMBOL_GPL(vfio_device_put); |
cba3345c AW |
406 | |
407 | static void vfio_device_get(struct vfio_device *device) | |
408 | { | |
409 | vfio_group_get(device->group); | |
410 | kref_get(&device->kref); | |
411 | } | |
412 | ||
413 | static struct vfio_device *vfio_group_get_device(struct vfio_group *group, | |
414 | struct device *dev) | |
415 | { | |
416 | struct vfio_device *device; | |
417 | ||
418 | mutex_lock(&group->device_lock); | |
419 | list_for_each_entry(device, &group->device_list, group_next) { | |
420 | if (device->dev == dev) { | |
421 | vfio_device_get(device); | |
422 | mutex_unlock(&group->device_lock); | |
423 | return device; | |
424 | } | |
425 | } | |
426 | mutex_unlock(&group->device_lock); | |
427 | return NULL; | |
428 | } | |
429 | ||
430 | /* | |
431 | * Whitelist some drivers that we know are safe (no dma) or just sit on | |
432 | * a device. It's not always practical to leave a device within a group | |
433 | * driverless as it could get re-bound to something unsafe. | |
434 | */ | |
2b489a45 | 435 | static const char * const vfio_driver_whitelist[] = { "pci-stub", "pcieport" }; |
cba3345c AW |
436 | |
437 | static bool vfio_whitelisted_driver(struct device_driver *drv) | |
438 | { | |
439 | int i; | |
440 | ||
441 | for (i = 0; i < ARRAY_SIZE(vfio_driver_whitelist); i++) { | |
442 | if (!strcmp(drv->name, vfio_driver_whitelist[i])) | |
443 | return true; | |
444 | } | |
445 | ||
446 | return false; | |
447 | } | |
448 | ||
449 | /* | |
450 | * A vfio group is viable for use by userspace if all devices are either | |
451 | * driver-less or bound to a vfio or whitelisted driver. We test the | |
452 | * latter by the existence of a struct vfio_device matching the dev. | |
453 | */ | |
454 | static int vfio_dev_viable(struct device *dev, void *data) | |
455 | { | |
456 | struct vfio_group *group = data; | |
457 | struct vfio_device *device; | |
de2b3eea | 458 | struct device_driver *drv = ACCESS_ONCE(dev->driver); |
cba3345c | 459 | |
de2b3eea | 460 | if (!drv || vfio_whitelisted_driver(drv)) |
cba3345c AW |
461 | return 0; |
462 | ||
463 | device = vfio_group_get_device(group, dev); | |
464 | if (device) { | |
465 | vfio_device_put(device); | |
466 | return 0; | |
467 | } | |
468 | ||
469 | return -EINVAL; | |
470 | } | |
471 | ||
472 | /** | |
473 | * Async device support | |
474 | */ | |
475 | static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev) | |
476 | { | |
477 | struct vfio_device *device; | |
478 | ||
479 | /* Do we already know about it? We shouldn't */ | |
480 | device = vfio_group_get_device(group, dev); | |
481 | if (WARN_ON_ONCE(device)) { | |
482 | vfio_device_put(device); | |
483 | return 0; | |
484 | } | |
485 | ||
486 | /* Nothing to do for idle groups */ | |
487 | if (!atomic_read(&group->container_users)) | |
488 | return 0; | |
489 | ||
490 | /* TODO Prevent device auto probing */ | |
491 | WARN(1, "Device %s added to live group %d!\n", dev_name(dev), | |
492 | iommu_group_id(group->iommu_group)); | |
493 | ||
494 | return 0; | |
495 | } | |
496 | ||
cba3345c AW |
497 | static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev) |
498 | { | |
499 | /* We don't care what happens when the group isn't in use */ | |
500 | if (!atomic_read(&group->container_users)) | |
501 | return 0; | |
502 | ||
503 | return vfio_dev_viable(dev, group); | |
504 | } | |
505 | ||
506 | static int vfio_iommu_group_notifier(struct notifier_block *nb, | |
507 | unsigned long action, void *data) | |
508 | { | |
509 | struct vfio_group *group = container_of(nb, struct vfio_group, nb); | |
510 | struct device *dev = data; | |
511 | ||
512 | /* | |
513 | * Need to go through a group_lock lookup to get a reference or | |
514 | * we risk racing a group being removed. Leave a WARN_ON for | |
515 | * debugging, but if the group no longer exists, a spurious notify | |
516 | * is harmless. | |
517 | */ | |
518 | group = vfio_group_try_get(group); | |
519 | if (WARN_ON(!group)) | |
520 | return NOTIFY_OK; | |
521 | ||
522 | switch (action) { | |
523 | case IOMMU_GROUP_NOTIFY_ADD_DEVICE: | |
524 | vfio_group_nb_add_dev(group, dev); | |
525 | break; | |
526 | case IOMMU_GROUP_NOTIFY_DEL_DEVICE: | |
de9c7602 AW |
527 | /* |
528 | * Nothing to do here. If the device is in use, then the | |
529 | * vfio sub-driver should block the remove callback until | |
530 | * it is unused. If the device is unused or attached to a | |
531 | * stub driver, then it should be released and we don't | |
532 | * care that it will be going away. | |
533 | */ | |
cba3345c AW |
534 | break; |
535 | case IOMMU_GROUP_NOTIFY_BIND_DRIVER: | |
536 | pr_debug("%s: Device %s, group %d binding to driver\n", | |
537 | __func__, dev_name(dev), | |
538 | iommu_group_id(group->iommu_group)); | |
539 | break; | |
540 | case IOMMU_GROUP_NOTIFY_BOUND_DRIVER: | |
541 | pr_debug("%s: Device %s, group %d bound to driver %s\n", | |
542 | __func__, dev_name(dev), | |
543 | iommu_group_id(group->iommu_group), dev->driver->name); | |
544 | BUG_ON(vfio_group_nb_verify(group, dev)); | |
545 | break; | |
546 | case IOMMU_GROUP_NOTIFY_UNBIND_DRIVER: | |
547 | pr_debug("%s: Device %s, group %d unbinding from driver %s\n", | |
548 | __func__, dev_name(dev), | |
549 | iommu_group_id(group->iommu_group), dev->driver->name); | |
550 | break; | |
551 | case IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER: | |
552 | pr_debug("%s: Device %s, group %d unbound from driver\n", | |
553 | __func__, dev_name(dev), | |
554 | iommu_group_id(group->iommu_group)); | |
555 | /* | |
556 | * XXX An unbound device in a live group is ok, but we'd | |
557 | * really like to avoid the above BUG_ON by preventing other | |
558 | * drivers from binding to it. Once that occurs, we have to | |
559 | * stop the system to maintain isolation. At a minimum, we'd | |
560 | * want a toggle to disable driver auto probe for this device. | |
561 | */ | |
562 | break; | |
563 | } | |
564 | ||
565 | vfio_group_put(group); | |
566 | return NOTIFY_OK; | |
567 | } | |
568 | ||
569 | /** | |
570 | * VFIO driver API | |
571 | */ | |
572 | int vfio_add_group_dev(struct device *dev, | |
573 | const struct vfio_device_ops *ops, void *device_data) | |
574 | { | |
575 | struct iommu_group *iommu_group; | |
576 | struct vfio_group *group; | |
577 | struct vfio_device *device; | |
578 | ||
579 | iommu_group = iommu_group_get(dev); | |
580 | if (!iommu_group) | |
581 | return -EINVAL; | |
582 | ||
583 | group = vfio_group_get_from_iommu(iommu_group); | |
584 | if (!group) { | |
585 | group = vfio_create_group(iommu_group); | |
586 | if (IS_ERR(group)) { | |
587 | iommu_group_put(iommu_group); | |
588 | return PTR_ERR(group); | |
589 | } | |
590 | } | |
591 | ||
592 | device = vfio_group_get_device(group, dev); | |
593 | if (device) { | |
594 | WARN(1, "Device %s already exists on group %d\n", | |
595 | dev_name(dev), iommu_group_id(iommu_group)); | |
596 | vfio_device_put(device); | |
597 | vfio_group_put(group); | |
598 | iommu_group_put(iommu_group); | |
599 | return -EBUSY; | |
600 | } | |
601 | ||
602 | device = vfio_group_create_device(group, dev, ops, device_data); | |
603 | if (IS_ERR(device)) { | |
604 | vfio_group_put(group); | |
605 | iommu_group_put(iommu_group); | |
606 | return PTR_ERR(device); | |
607 | } | |
608 | ||
609 | /* | |
610 | * Added device holds reference to iommu_group and vfio_device | |
611 | * (which in turn holds reference to vfio_group). Drop extra | |
612 | * group reference used while acquiring device. | |
613 | */ | |
614 | vfio_group_put(group); | |
615 | ||
616 | return 0; | |
617 | } | |
618 | EXPORT_SYMBOL_GPL(vfio_add_group_dev); | |
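/*
 * Illustrative sketch, not part of this file: a vfio bus driver pairs
 * vfio_add_group_dev() in its probe path with vfio_del_group_dev() in its
 * remove path (the "my_vfio" names are hypothetical; vfio-pci follows this
 * pattern):
 *
 *	static int my_vfio_probe(struct pci_dev *pdev,
 *				 const struct pci_device_id *id)
 *	{
 *		struct my_vfio_device *vdev;
 *		int ret;
 *
 *		vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
 *		if (!vdev)
 *			return -ENOMEM;
 *
 *		ret = vfio_add_group_dev(&pdev->dev, &my_vfio_ops, vdev);
 *		if (ret)
 *			kfree(vdev);
 *		return ret;
 *	}
 *
 *	static void my_vfio_remove(struct pci_dev *pdev)
 *	{
 *		struct my_vfio_device *vdev = vfio_del_group_dev(&pdev->dev);
 *
 *		kfree(vdev);
 *	}
 */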
619 | ||
44f50716 VMP |
620 | /** |
621 | * Get a reference to the vfio_device for a device that is known to | |
622 | * be bound to a vfio driver. The driver implicitly holds a | |
623 | * vfio_device reference between vfio_add_group_dev and | |
624 | * vfio_del_group_dev. We can therefore use drvdata to increment | |
625 | * that reference from the struct device. This additional | |
626 | * reference must be released by calling vfio_device_put. | |
627 | */ | |
628 | struct vfio_device *vfio_device_get_from_dev(struct device *dev) | |
629 | { | |
630 | struct vfio_device *device = dev_get_drvdata(dev); | |
631 | ||
632 | vfio_device_get(device); | |
633 | ||
634 | return device; | |
635 | } | |
636 | EXPORT_SYMBOL_GPL(vfio_device_get_from_dev); | |
637 | ||
638 | /* | |
639 | * Caller must hold a reference to the vfio_device | |
640 | */ | |
641 | void *vfio_device_data(struct vfio_device *device) | |
642 | { | |
643 | return device->device_data; | |
644 | } | |
645 | EXPORT_SYMBOL_GPL(vfio_device_data); | |
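/*
 * Usage sketch for an external module that needs temporary access to a
 * vfio-bound device (assumes the struct device is known to be bound to a
 * vfio bus driver, as the comment above requires):
 *
 *	struct vfio_device *device = vfio_device_get_from_dev(dev);
 *	void *device_data = vfio_device_data(device);
 *
 *	... use device_data ...
 *
 *	vfio_device_put(device);
 */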
646 | ||
e014e944 AW |
647 | /* Given a referenced group, check if it contains the device */ |
648 | static bool vfio_dev_present(struct vfio_group *group, struct device *dev) | |
cba3345c | 649 | { |
cba3345c AW |
650 | struct vfio_device *device; |
651 | ||
cba3345c | 652 | device = vfio_group_get_device(group, dev); |
e014e944 | 653 | if (!device) |
cba3345c | 654 | return false; |
cba3345c AW |
655 | |
656 | vfio_device_put(device); | |
cba3345c AW |
657 | return true; |
658 | } | |
659 | ||
660 | /* | |
661 | * Decrement the device reference count and wait for the device to be | |
662 | * removed. Open file descriptors for the device... */ | |
663 | void *vfio_del_group_dev(struct device *dev) | |
664 | { | |
665 | struct vfio_device *device = dev_get_drvdata(dev); | |
666 | struct vfio_group *group = device->group; | |
667 | struct iommu_group *iommu_group = group->iommu_group; | |
668 | void *device_data = device->device_data; | |
669 | ||
e014e944 AW |
670 | /* |
671 | * The group exists so long as we have a device reference. Get | |
672 | * a group reference and use it to scan for the device going away. | |
673 | */ | |
674 | vfio_group_get(group); | |
675 | ||
cba3345c AW |
676 | vfio_device_put(device); |
677 | ||
678 | /* TODO send a signal to encourage this to be released */ | |
e014e944 AW |
679 | wait_event(vfio.release_q, !vfio_dev_present(group, dev)); |
680 | ||
681 | vfio_group_put(group); | |
cba3345c AW |
682 | |
683 | iommu_group_put(iommu_group); | |
684 | ||
685 | return device_data; | |
686 | } | |
687 | EXPORT_SYMBOL_GPL(vfio_del_group_dev); | |
688 | ||
689 | /** | |
690 | * VFIO base fd, /dev/vfio/vfio | |
691 | */ | |
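/*
 * Typical userspace flow, sketched from Documentation/vfio.txt (the group
 * number and device name are examples):
 *
 *	int container, group, device;
 *
 *	container = open("/dev/vfio/vfio", O_RDWR);
 *	ioctl(container, VFIO_GET_API_VERSION);          expect VFIO_API_VERSION
 *	ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU);   expect > 0
 *
 *	group = open("/dev/vfio/26", O_RDWR);
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 *
 *	device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
 */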
692 | static long vfio_ioctl_check_extension(struct vfio_container *container, | |
693 | unsigned long arg) | |
694 | { | |
0b43c082 | 695 | struct vfio_iommu_driver *driver; |
cba3345c AW |
696 | long ret = 0; |
697 | ||
0b43c082 AW |
698 | down_read(&container->group_lock); |
699 | ||
700 | driver = container->iommu_driver; | |
701 | ||
cba3345c AW |
702 | switch (arg) { |
703 | /* No base extensions yet */ | |
704 | default: | |
705 | /* | |
706 | * If no driver is set, poll all registered drivers for | |
707 | * extensions and return the first positive result. If | |
708 | * a driver is already set, further queries will be passed | |
709 | * only to that driver. | |
710 | */ | |
711 | if (!driver) { | |
712 | mutex_lock(&vfio.iommu_drivers_lock); | |
713 | list_for_each_entry(driver, &vfio.iommu_drivers_list, | |
714 | vfio_next) { | |
715 | if (!try_module_get(driver->ops->owner)) | |
716 | continue; | |
717 | ||
718 | ret = driver->ops->ioctl(NULL, | |
719 | VFIO_CHECK_EXTENSION, | |
720 | arg); | |
721 | module_put(driver->ops->owner); | |
722 | if (ret > 0) | |
723 | break; | |
724 | } | |
725 | mutex_unlock(&vfio.iommu_drivers_lock); | |
726 | } else | |
727 | ret = driver->ops->ioctl(container->iommu_data, | |
728 | VFIO_CHECK_EXTENSION, arg); | |
729 | } | |
730 | ||
0b43c082 AW |
731 | up_read(&container->group_lock); |
732 | ||
cba3345c AW |
733 | return ret; |
734 | } | |
735 | ||
9587f44a | 736 | /* hold write lock on container->group_lock */ |
cba3345c AW |
737 | static int __vfio_container_attach_groups(struct vfio_container *container, |
738 | struct vfio_iommu_driver *driver, | |
739 | void *data) | |
740 | { | |
741 | struct vfio_group *group; | |
742 | int ret = -ENODEV; | |
743 | ||
744 | list_for_each_entry(group, &container->group_list, container_next) { | |
745 | ret = driver->ops->attach_group(data, group->iommu_group); | |
746 | if (ret) | |
747 | goto unwind; | |
748 | } | |
749 | ||
750 | return ret; | |
751 | ||
752 | unwind: | |
753 | list_for_each_entry_continue_reverse(group, &container->group_list, | |
754 | container_next) { | |
755 | driver->ops->detach_group(data, group->iommu_group); | |
756 | } | |
757 | ||
758 | return ret; | |
759 | } | |
760 | ||
761 | static long vfio_ioctl_set_iommu(struct vfio_container *container, | |
762 | unsigned long arg) | |
763 | { | |
764 | struct vfio_iommu_driver *driver; | |
765 | long ret = -ENODEV; | |
766 | ||
9587f44a | 767 | down_write(&container->group_lock); |
cba3345c AW |
768 | |
769 | /* | |
770 | * The container is designed to be an unprivileged interface while | |
771 | * the group can be assigned to specific users. Therefore, only by | |
772 | * adding a group to a container does the user get the privilege of | |
773 | * enabling the iommu, which may allocate finite resources. There | |
774 | * is no unset_iommu, but by removing all the groups from a container, | |
775 | * the container is deprivileged and returns to an unset state. | |
776 | */ | |
777 | if (list_empty(&container->group_list) || container->iommu_driver) { | |
9587f44a | 778 | up_write(&container->group_lock); |
cba3345c AW |
779 | return -EINVAL; |
780 | } | |
781 | ||
782 | mutex_lock(&vfio.iommu_drivers_lock); | |
783 | list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) { | |
784 | void *data; | |
785 | ||
786 | if (!try_module_get(driver->ops->owner)) | |
787 | continue; | |
788 | ||
789 | /* | |
790 | * The arg magic for SET_IOMMU is the same as CHECK_EXTENSION, | |
791 | * so test which iommu driver reported support for this | |
792 | * extension and call open on them. We also pass them the | |
793 | * magic, allowing a single driver to support multiple | |
794 | * interfaces if they'd like. | |
795 | */ | |
796 | if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) { | |
797 | module_put(driver->ops->owner); | |
798 | continue; | |
799 | } | |
800 | ||
801 | /* module reference holds the driver we're working on */ | |
802 | mutex_unlock(&vfio.iommu_drivers_lock); | |
803 | ||
804 | data = driver->ops->open(arg); | |
805 | if (IS_ERR(data)) { | |
806 | ret = PTR_ERR(data); | |
807 | module_put(driver->ops->owner); | |
808 | goto skip_drivers_unlock; | |
809 | } | |
810 | ||
811 | ret = __vfio_container_attach_groups(container, driver, data); | |
812 | if (!ret) { | |
813 | container->iommu_driver = driver; | |
814 | container->iommu_data = data; | |
815 | } else { | |
816 | driver->ops->release(data); | |
817 | module_put(driver->ops->owner); | |
818 | } | |
819 | ||
820 | goto skip_drivers_unlock; | |
821 | } | |
822 | ||
823 | mutex_unlock(&vfio.iommu_drivers_lock); | |
824 | skip_drivers_unlock: | |
9587f44a | 825 | up_write(&container->group_lock); |
cba3345c AW |
826 | |
827 | return ret; | |
828 | } | |
829 | ||
830 | static long vfio_fops_unl_ioctl(struct file *filep, | |
831 | unsigned int cmd, unsigned long arg) | |
832 | { | |
833 | struct vfio_container *container = filep->private_data; | |
834 | struct vfio_iommu_driver *driver; | |
835 | void *data; | |
836 | long ret = -EINVAL; | |
837 | ||
838 | if (!container) | |
839 | return ret; | |
840 | ||
cba3345c AW |
841 | switch (cmd) { |
842 | case VFIO_GET_API_VERSION: | |
843 | ret = VFIO_API_VERSION; | |
844 | break; | |
845 | case VFIO_CHECK_EXTENSION: | |
846 | ret = vfio_ioctl_check_extension(container, arg); | |
847 | break; | |
848 | case VFIO_SET_IOMMU: | |
849 | ret = vfio_ioctl_set_iommu(container, arg); | |
850 | break; | |
851 | default: | |
0b43c082 AW |
852 | down_read(&container->group_lock); |
853 | ||
854 | driver = container->iommu_driver; | |
855 | data = container->iommu_data; | |
856 | ||
cba3345c AW |
857 | if (driver) /* passthrough all unrecognized ioctls */ |
858 | ret = driver->ops->ioctl(data, cmd, arg); | |
0b43c082 AW |
859 | |
860 | up_read(&container->group_lock); | |
cba3345c AW |
861 | } |
862 | ||
863 | return ret; | |
864 | } | |
865 | ||
866 | #ifdef CONFIG_COMPAT | |
867 | static long vfio_fops_compat_ioctl(struct file *filep, | |
868 | unsigned int cmd, unsigned long arg) | |
869 | { | |
870 | arg = (unsigned long)compat_ptr(arg); | |
871 | return vfio_fops_unl_ioctl(filep, cmd, arg); | |
872 | } | |
873 | #endif /* CONFIG_COMPAT */ | |
874 | ||
875 | static int vfio_fops_open(struct inode *inode, struct file *filep) | |
876 | { | |
877 | struct vfio_container *container; | |
878 | ||
879 | container = kzalloc(sizeof(*container), GFP_KERNEL); | |
880 | if (!container) | |
881 | return -ENOMEM; | |
882 | ||
883 | INIT_LIST_HEAD(&container->group_list); | |
9587f44a | 884 | init_rwsem(&container->group_lock); |
cba3345c AW |
885 | kref_init(&container->kref); |
886 | ||
887 | filep->private_data = container; | |
888 | ||
889 | return 0; | |
890 | } | |
891 | ||
892 | static int vfio_fops_release(struct inode *inode, struct file *filep) | |
893 | { | |
894 | struct vfio_container *container = filep->private_data; | |
895 | ||
896 | filep->private_data = NULL; | |
897 | ||
898 | vfio_container_put(container); | |
899 | ||
900 | return 0; | |
901 | } | |
902 | ||
903 | /* | |
904 | * Once an iommu driver is set, we optionally pass read/write/mmap | |
905 | * on to the driver, allowing management interfaces beyond ioctl. | |
906 | */ | |
907 | static ssize_t vfio_fops_read(struct file *filep, char __user *buf, | |
908 | size_t count, loff_t *ppos) | |
909 | { | |
910 | struct vfio_container *container = filep->private_data; | |
0b43c082 AW |
911 | struct vfio_iommu_driver *driver; |
912 | ssize_t ret = -EINVAL; | |
cba3345c | 913 | |
0b43c082 AW |
914 | down_read(&container->group_lock); |
915 | ||
916 | driver = container->iommu_driver; | |
917 | if (likely(driver && driver->ops->read)) | |
918 | ret = driver->ops->read(container->iommu_data, | |
919 | buf, count, ppos); | |
cba3345c | 920 | |
0b43c082 AW |
921 | up_read(&container->group_lock); |
922 | ||
923 | return ret; | |
cba3345c AW |
924 | } |
925 | ||
926 | static ssize_t vfio_fops_write(struct file *filep, const char __user *buf, | |
927 | size_t count, loff_t *ppos) | |
928 | { | |
929 | struct vfio_container *container = filep->private_data; | |
0b43c082 AW |
930 | struct vfio_iommu_driver *driver; |
931 | ssize_t ret = -EINVAL; | |
cba3345c | 932 | |
0b43c082 AW |
933 | down_read(&container->group_lock); |
934 | ||
935 | driver = container->iommu_driver; | |
936 | if (likely(driver && driver->ops->write)) | |
937 | ret = driver->ops->write(container->iommu_data, | |
938 | buf, count, ppos); | |
939 | ||
940 | up_read(&container->group_lock); | |
cba3345c | 941 | |
0b43c082 | 942 | return ret; |
cba3345c AW |
943 | } |
944 | ||
945 | static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma) | |
946 | { | |
947 | struct vfio_container *container = filep->private_data; | |
0b43c082 AW |
948 | struct vfio_iommu_driver *driver; |
949 | int ret = -EINVAL; | |
cba3345c | 950 | |
0b43c082 | 951 | down_read(&container->group_lock); |
cba3345c | 952 | |
0b43c082 AW |
953 | driver = container->iommu_driver; |
954 | if (likely(driver && driver->ops->mmap)) | |
955 | ret = driver->ops->mmap(container->iommu_data, vma); | |
956 | ||
957 | up_read(&container->group_lock); | |
958 | ||
959 | return ret; | |
cba3345c AW |
960 | } |
961 | ||
962 | static const struct file_operations vfio_fops = { | |
963 | .owner = THIS_MODULE, | |
964 | .open = vfio_fops_open, | |
965 | .release = vfio_fops_release, | |
966 | .read = vfio_fops_read, | |
967 | .write = vfio_fops_write, | |
968 | .unlocked_ioctl = vfio_fops_unl_ioctl, | |
969 | #ifdef CONFIG_COMPAT | |
970 | .compat_ioctl = vfio_fops_compat_ioctl, | |
971 | #endif | |
972 | .mmap = vfio_fops_mmap, | |
973 | }; | |
974 | ||
975 | /** | |
976 | * VFIO Group fd, /dev/vfio/$GROUP | |
977 | */ | |
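/*
 * Unlike /dev/vfio/vfio, which is world read/write (see vfio_devnode()),
 * the group character devices are where access control happens: granting a
 * user ownership of /dev/vfio/$GROUP delegates every device in that IOMMU
 * group to that user.
 */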
978 | static void __vfio_group_unset_container(struct vfio_group *group) | |
979 | { | |
980 | struct vfio_container *container = group->container; | |
981 | struct vfio_iommu_driver *driver; | |
982 | ||
9587f44a | 983 | down_write(&container->group_lock); |
cba3345c AW |
984 | |
985 | driver = container->iommu_driver; | |
986 | if (driver) | |
987 | driver->ops->detach_group(container->iommu_data, | |
988 | group->iommu_group); | |
989 | ||
990 | group->container = NULL; | |
991 | list_del(&group->container_next); | |
992 | ||
993 | /* Detaching the last group deprivileges a container, remove iommu */ | |
994 | if (driver && list_empty(&container->group_list)) { | |
995 | driver->ops->release(container->iommu_data); | |
996 | module_put(driver->ops->owner); | |
997 | container->iommu_driver = NULL; | |
998 | container->iommu_data = NULL; | |
999 | } | |
1000 | ||
9587f44a | 1001 | up_write(&container->group_lock); |
cba3345c AW |
1002 | |
1003 | vfio_container_put(container); | |
1004 | } | |
1005 | ||
1006 | /* | |
1007 | * VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or | |
1008 | * if there was no container to unset. Since the ioctl is called on | |
1009 | * the group, we know the group still exists, therefore the only valid | |
1010 | * transition here is 1->0. | |
1011 | */ | |
1012 | static int vfio_group_unset_container(struct vfio_group *group) | |
1013 | { | |
1014 | int users = atomic_cmpxchg(&group->container_users, 1, 0); | |
1015 | ||
1016 | if (!users) | |
1017 | return -EINVAL; | |
1018 | if (users != 1) | |
1019 | return -EBUSY; | |
1020 | ||
1021 | __vfio_group_unset_container(group); | |
1022 | ||
1023 | return 0; | |
1024 | } | |
1025 | ||
1026 | /* | |
1027 | * When removing container users, anything that removes the last user | |
1028 | * implicitly removes the group from the container. That is, if the | |
1029 | * group file descriptor is closed, as well as any device file descriptors, | |
1030 | * the group is free. | |
1031 | */ | |
1032 | static void vfio_group_try_dissolve_container(struct vfio_group *group) | |
1033 | { | |
1034 | if (0 == atomic_dec_if_positive(&group->container_users)) | |
1035 | __vfio_group_unset_container(group); | |
1036 | } | |
1037 | ||
1038 | static int vfio_group_set_container(struct vfio_group *group, int container_fd) | |
1039 | { | |
2903ff01 | 1040 | struct fd f; |
cba3345c AW |
1041 | struct vfio_container *container; |
1042 | struct vfio_iommu_driver *driver; | |
2903ff01 | 1043 | int ret = 0; |
cba3345c AW |
1044 | |
1045 | if (atomic_read(&group->container_users)) | |
1046 | return -EINVAL; | |
1047 | ||
2903ff01 AV |
1048 | f = fdget(container_fd); |
1049 | if (!f.file) | |
cba3345c AW |
1050 | return -EBADF; |
1051 | ||
1052 | /* Sanity check, is this really our fd? */ | |
2903ff01 AV |
1053 | if (f.file->f_op != &vfio_fops) { |
1054 | fdput(f); | |
cba3345c AW |
1055 | return -EINVAL; |
1056 | } | |
1057 | ||
2903ff01 | 1058 | container = f.file->private_data; |
cba3345c AW |
1059 | WARN_ON(!container); /* fget ensures we don't race vfio_release */ |
1060 | ||
9587f44a | 1061 | down_write(&container->group_lock); |
cba3345c AW |
1062 | |
1063 | driver = container->iommu_driver; | |
1064 | if (driver) { | |
1065 | ret = driver->ops->attach_group(container->iommu_data, | |
1066 | group->iommu_group); | |
1067 | if (ret) | |
1068 | goto unlock_out; | |
1069 | } | |
1070 | ||
1071 | group->container = container; | |
1072 | list_add(&group->container_next, &container->group_list); | |
1073 | ||
1074 | /* Get a reference on the container and mark a user within the group */ | |
1075 | vfio_container_get(container); | |
1076 | atomic_inc(&group->container_users); | |
1077 | ||
1078 | unlock_out: | |
9587f44a | 1079 | up_write(&container->group_lock); |
2903ff01 | 1080 | fdput(f); |
cba3345c AW |
1081 | return ret; |
1082 | } | |
1083 | ||
1084 | static bool vfio_group_viable(struct vfio_group *group) | |
1085 | { | |
1086 | return (iommu_group_for_each_dev(group->iommu_group, | |
1087 | group, vfio_dev_viable) == 0); | |
1088 | } | |
1089 | ||
1090 | static const struct file_operations vfio_device_fops; | |
1091 | ||
1092 | static int vfio_group_get_device_fd(struct vfio_group *group, char *buf) | |
1093 | { | |
1094 | struct vfio_device *device; | |
1095 | struct file *filep; | |
1096 | int ret = -ENODEV; | |
1097 | ||
1098 | if (0 == atomic_read(&group->container_users) || | |
1099 | !group->container->iommu_driver || !vfio_group_viable(group)) | |
1100 | return -EINVAL; | |
1101 | ||
1102 | mutex_lock(&group->device_lock); | |
1103 | list_for_each_entry(device, &group->device_list, group_next) { | |
1104 | if (strcmp(dev_name(device->dev), buf)) | |
1105 | continue; | |
1106 | ||
1107 | ret = device->ops->open(device->device_data); | |
1108 | if (ret) | |
1109 | break; | |
1110 | /* | |
1111 | * We can't use anon_inode_getfd() because we need to modify | |
1112 | * the f_mode flags directly to allow more than just ioctls | |
1113 | */ | |
1114 | ret = get_unused_fd(); | |
1115 | if (ret < 0) { | |
1116 | device->ops->release(device->device_data); | |
1117 | break; | |
1118 | } | |
1119 | ||
1120 | filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops, | |
1121 | device, O_RDWR); | |
1122 | if (IS_ERR(filep)) { | |
1123 | put_unused_fd(ret); | |
1124 | ret = PTR_ERR(filep); | |
1125 | device->ops->release(device->device_data); | |
1126 | break; | |
1127 | } | |
1128 | ||
1129 | /* | |
1130 | * TODO: add an anon_inode interface to do this. | |
1131 | * Appears to be missing by lack of need rather than | |
1132 | * explicitly prevented. Now there's need. | |
1133 | */ | |
1134 | filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); | |
1135 | ||
cba3345c AW |
1136 | vfio_device_get(device); |
1137 | atomic_inc(&group->container_users); | |
31605deb AV |
1138 | |
1139 | fd_install(ret, filep); | |
cba3345c AW |
1140 | break; |
1141 | } | |
1142 | mutex_unlock(&group->device_lock); | |
1143 | ||
1144 | return ret; | |
1145 | } | |
1146 | ||
1147 | static long vfio_group_fops_unl_ioctl(struct file *filep, | |
1148 | unsigned int cmd, unsigned long arg) | |
1149 | { | |
1150 | struct vfio_group *group = filep->private_data; | |
1151 | long ret = -ENOTTY; | |
1152 | ||
1153 | switch (cmd) { | |
1154 | case VFIO_GROUP_GET_STATUS: | |
1155 | { | |
1156 | struct vfio_group_status status; | |
1157 | unsigned long minsz; | |
1158 | ||
1159 | minsz = offsetofend(struct vfio_group_status, flags); | |
1160 | ||
1161 | if (copy_from_user(&status, (void __user *)arg, minsz)) | |
1162 | return -EFAULT; | |
1163 | ||
1164 | if (status.argsz < minsz) | |
1165 | return -EINVAL; | |
1166 | ||
1167 | status.flags = 0; | |
1168 | ||
1169 | if (vfio_group_viable(group)) | |
1170 | status.flags |= VFIO_GROUP_FLAGS_VIABLE; | |
1171 | ||
1172 | if (group->container) | |
1173 | status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET; | |
1174 | ||
1175 | if (copy_to_user((void __user *)arg, &status, minsz)) | |
1176 | return -EFAULT; | |
1177 | ||
1178 | ret = 0; | |
1179 | break; | |
1180 | } | |
1181 | case VFIO_GROUP_SET_CONTAINER: | |
1182 | { | |
1183 | int fd; | |
1184 | ||
1185 | if (get_user(fd, (int __user *)arg)) | |
1186 | return -EFAULT; | |
1187 | ||
1188 | if (fd < 0) | |
1189 | return -EINVAL; | |
1190 | ||
1191 | ret = vfio_group_set_container(group, fd); | |
1192 | break; | |
1193 | } | |
1194 | case VFIO_GROUP_UNSET_CONTAINER: | |
1195 | ret = vfio_group_unset_container(group); | |
1196 | break; | |
1197 | case VFIO_GROUP_GET_DEVICE_FD: | |
1198 | { | |
1199 | char *buf; | |
1200 | ||
1201 | buf = strndup_user((const char __user *)arg, PAGE_SIZE); | |
1202 | if (IS_ERR(buf)) | |
1203 | return PTR_ERR(buf); | |
1204 | ||
1205 | ret = vfio_group_get_device_fd(group, buf); | |
1206 | kfree(buf); | |
1207 | break; | |
1208 | } | |
1209 | } | |
1210 | ||
1211 | return ret; | |
1212 | } | |
1213 | ||
1214 | #ifdef CONFIG_COMPAT | |
1215 | static long vfio_group_fops_compat_ioctl(struct file *filep, | |
1216 | unsigned int cmd, unsigned long arg) | |
1217 | { | |
1218 | arg = (unsigned long)compat_ptr(arg); | |
1219 | return vfio_group_fops_unl_ioctl(filep, cmd, arg); | |
1220 | } | |
1221 | #endif /* CONFIG_COMPAT */ | |
1222 | ||
1223 | static int vfio_group_fops_open(struct inode *inode, struct file *filep) | |
1224 | { | |
1225 | struct vfio_group *group; | |
6d6768c6 | 1226 | int opened; |
cba3345c AW |
1227 | |
1228 | group = vfio_group_get_from_minor(iminor(inode)); | |
1229 | if (!group) | |
1230 | return -ENODEV; | |
1231 | ||
6d6768c6 AW |
1232 | /* Do we need multiple instances of the group open? Seems not. */ |
1233 | opened = atomic_cmpxchg(&group->opened, 0, 1); | |
1234 | if (opened) { | |
1235 | vfio_group_put(group); | |
1236 | return -EBUSY; | |
1237 | } | |
1238 | ||
1239 | /* Is something still in use from a previous open? */ | |
cba3345c | 1240 | if (group->container) { |
6d6768c6 | 1241 | atomic_dec(&group->opened); |
cba3345c AW |
1242 | vfio_group_put(group); |
1243 | return -EBUSY; | |
1244 | } | |
1245 | ||
1246 | filep->private_data = group; | |
1247 | ||
1248 | return 0; | |
1249 | } | |
1250 | ||
1251 | static int vfio_group_fops_release(struct inode *inode, struct file *filep) | |
1252 | { | |
1253 | struct vfio_group *group = filep->private_data; | |
1254 | ||
1255 | filep->private_data = NULL; | |
1256 | ||
1257 | vfio_group_try_dissolve_container(group); | |
1258 | ||
6d6768c6 AW |
1259 | atomic_dec(&group->opened); |
1260 | ||
cba3345c AW |
1261 | vfio_group_put(group); |
1262 | ||
1263 | return 0; | |
1264 | } | |
1265 | ||
1266 | static const struct file_operations vfio_group_fops = { | |
1267 | .owner = THIS_MODULE, | |
1268 | .unlocked_ioctl = vfio_group_fops_unl_ioctl, | |
1269 | #ifdef CONFIG_COMPAT | |
1270 | .compat_ioctl = vfio_group_fops_compat_ioctl, | |
1271 | #endif | |
1272 | .open = vfio_group_fops_open, | |
1273 | .release = vfio_group_fops_release, | |
1274 | }; | |
1275 | ||
1276 | /** | |
1277 | * VFIO Device fd | |
1278 | */ | |
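/*
 * Everything on the device fd is passed straight through to the vfio bus
 * driver's vfio_device_ops; it is the bus driver that implements the
 * VFIO_DEVICE_* ioctls (GET_INFO, GET_REGION_INFO, GET_IRQ_INFO, SET_IRQS,
 * RESET) as well as region read/write/mmap.
 */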
1279 | static int vfio_device_fops_release(struct inode *inode, struct file *filep) | |
1280 | { | |
1281 | struct vfio_device *device = filep->private_data; | |
1282 | ||
1283 | device->ops->release(device->device_data); | |
1284 | ||
1285 | vfio_group_try_dissolve_container(device->group); | |
1286 | ||
1287 | vfio_device_put(device); | |
1288 | ||
1289 | return 0; | |
1290 | } | |
1291 | ||
1292 | static long vfio_device_fops_unl_ioctl(struct file *filep, | |
1293 | unsigned int cmd, unsigned long arg) | |
1294 | { | |
1295 | struct vfio_device *device = filep->private_data; | |
1296 | ||
1297 | if (unlikely(!device->ops->ioctl)) | |
1298 | return -EINVAL; | |
1299 | ||
1300 | return device->ops->ioctl(device->device_data, cmd, arg); | |
1301 | } | |
1302 | ||
1303 | static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf, | |
1304 | size_t count, loff_t *ppos) | |
1305 | { | |
1306 | struct vfio_device *device = filep->private_data; | |
1307 | ||
1308 | if (unlikely(!device->ops->read)) | |
1309 | return -EINVAL; | |
1310 | ||
1311 | return device->ops->read(device->device_data, buf, count, ppos); | |
1312 | } | |
1313 | ||
1314 | static ssize_t vfio_device_fops_write(struct file *filep, | |
1315 | const char __user *buf, | |
1316 | size_t count, loff_t *ppos) | |
1317 | { | |
1318 | struct vfio_device *device = filep->private_data; | |
1319 | ||
1320 | if (unlikely(!device->ops->write)) | |
1321 | return -EINVAL; | |
1322 | ||
1323 | return device->ops->write(device->device_data, buf, count, ppos); | |
1324 | } | |
1325 | ||
1326 | static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma) | |
1327 | { | |
1328 | struct vfio_device *device = filep->private_data; | |
1329 | ||
1330 | if (unlikely(!device->ops->mmap)) | |
1331 | return -EINVAL; | |
1332 | ||
1333 | return device->ops->mmap(device->device_data, vma); | |
1334 | } | |
1335 | ||
1336 | #ifdef CONFIG_COMPAT | |
1337 | static long vfio_device_fops_compat_ioctl(struct file *filep, | |
1338 | unsigned int cmd, unsigned long arg) | |
1339 | { | |
1340 | arg = (unsigned long)compat_ptr(arg); | |
1341 | return vfio_device_fops_unl_ioctl(filep, cmd, arg); | |
1342 | } | |
1343 | #endif /* CONFIG_COMPAT */ | |
1344 | ||
1345 | static const struct file_operations vfio_device_fops = { | |
1346 | .owner = THIS_MODULE, | |
1347 | .release = vfio_device_fops_release, | |
1348 | .read = vfio_device_fops_read, | |
1349 | .write = vfio_device_fops_write, | |
1350 | .unlocked_ioctl = vfio_device_fops_unl_ioctl, | |
1351 | #ifdef CONFIG_COMPAT | |
1352 | .compat_ioctl = vfio_device_fops_compat_ioctl, | |
1353 | #endif | |
1354 | .mmap = vfio_device_fops_mmap, | |
1355 | }; | |
1356 | ||
1357 | /** | |
1358 | * Module/class support | |
1359 | */ | |
1360 | static char *vfio_devnode(struct device *dev, umode_t *mode) | |
1361 | { | |
9a6aa279 | 1362 | if (mode && (MINOR(dev->devt) == 0)) |
664e9386 AW |
1363 | *mode = S_IRUGO | S_IWUGO; |
1364 | ||
cba3345c AW |
1365 | return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev)); |
1366 | } | |
1367 | ||
1368 | static int __init vfio_init(void) | |
1369 | { | |
1370 | int ret; | |
1371 | ||
1372 | idr_init(&vfio.group_idr); | |
1373 | mutex_init(&vfio.group_lock); | |
1374 | mutex_init(&vfio.iommu_drivers_lock); | |
1375 | INIT_LIST_HEAD(&vfio.group_list); | |
1376 | INIT_LIST_HEAD(&vfio.iommu_drivers_list); | |
1377 | init_waitqueue_head(&vfio.release_q); | |
1378 | ||
1379 | vfio.class = class_create(THIS_MODULE, "vfio"); | |
1380 | if (IS_ERR(vfio.class)) { | |
1381 | ret = PTR_ERR(vfio.class); | |
1382 | goto err_class; | |
1383 | } | |
1384 | ||
1385 | vfio.class->devnode = vfio_devnode; | |
1386 | ||
1387 | ret = alloc_chrdev_region(&vfio.devt, 0, MINORMASK, "vfio"); | |
1388 | if (ret) | |
1389 | goto err_base_chrdev; | |
1390 | ||
1391 | cdev_init(&vfio.cdev, &vfio_fops); | |
1392 | ret = cdev_add(&vfio.cdev, vfio.devt, 1); | |
1393 | if (ret) | |
1394 | goto err_base_cdev; | |
1395 | ||
1396 | vfio.dev = device_create(vfio.class, NULL, vfio.devt, NULL, "vfio"); | |
1397 | if (IS_ERR(vfio.dev)) { | |
1398 | ret = PTR_ERR(vfio.dev); | |
1399 | goto err_base_dev; | |
1400 | } | |
1401 | ||
1402 | /* /dev/vfio/$GROUP */ | |
1403 | cdev_init(&vfio.group_cdev, &vfio_group_fops); | |
1404 | ret = cdev_add(&vfio.group_cdev, | |
1405 | MKDEV(MAJOR(vfio.devt), 1), MINORMASK - 1); | |
1406 | if (ret) | |
1407 | goto err_groups_cdev; | |
1408 | ||
1409 | pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); | |
1410 | ||
73fa0d10 AW |
1411 | /* |
1412 | * Attempt to load known iommu-drivers. This gives us a working | |
1413 | * environment without the user needing to explicitly load iommu | |
1414 | * drivers. | |
1415 | */ | |
1416 | request_module_nowait("vfio_iommu_type1"); | |
5ffd229c | 1417 | request_module_nowait("vfio_iommu_spapr_tce"); |
73fa0d10 | 1418 | |
cba3345c AW |
1419 | return 0; |
1420 | ||
1421 | err_groups_cdev: | |
1422 | device_destroy(vfio.class, vfio.devt); | |
1423 | err_base_dev: | |
1424 | cdev_del(&vfio.cdev); | |
1425 | err_base_cdev: | |
1426 | unregister_chrdev_region(vfio.devt, MINORMASK); | |
1427 | err_base_chrdev: | |
1428 | class_destroy(vfio.class); | |
1429 | vfio.class = NULL; | |
1430 | err_class: | |
1431 | return ret; | |
1432 | } | |
1433 | ||
1434 | static void __exit vfio_cleanup(void) | |
1435 | { | |
1436 | WARN_ON(!list_empty(&vfio.group_list)); | |
1437 | ||
1438 | idr_destroy(&vfio.group_idr); | |
1439 | cdev_del(&vfio.group_cdev); | |
1440 | device_destroy(vfio.class, vfio.devt); | |
1441 | cdev_del(&vfio.cdev); | |
1442 | unregister_chrdev_region(vfio.devt, MINORMASK); | |
1443 | class_destroy(vfio.class); | |
1444 | vfio.class = NULL; | |
1445 | } | |
1446 | ||
1447 | module_init(vfio_init); | |
1448 | module_exit(vfio_cleanup); | |
1449 | ||
1450 | MODULE_VERSION(DRIVER_VERSION); | |
1451 | MODULE_LICENSE("GPL v2"); | |
1452 | MODULE_AUTHOR(DRIVER_AUTHOR); | |
1453 | MODULE_DESCRIPTION(DRIVER_DESC); |