// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO core
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>

#define DRIVER_VERSION	"0.3"
#define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC	"VFIO - User Level meta-driver"

static struct vfio {
	struct class			*class;
	struct list_head		iommu_drivers_list;
	struct mutex			iommu_drivers_lock;
	struct list_head		group_list;
	struct idr			group_idr;
	struct mutex			group_lock;
	struct cdev			group_cdev;
	dev_t				group_devt;
	wait_queue_head_t		release_q;
} vfio;

struct vfio_iommu_driver {
	const struct vfio_iommu_driver_ops	*ops;
	struct list_head			vfio_next;
};

struct vfio_container {
	struct kref			kref;
	struct list_head		group_list;
	struct rw_semaphore		group_lock;
	struct vfio_iommu_driver	*iommu_driver;
	void				*iommu_data;
	bool				noiommu;
};

struct vfio_unbound_dev {
	struct device			*dev;
	struct list_head		unbound_next;
};

struct vfio_group {
	struct kref			kref;
	int				minor;
	atomic_t			container_users;
	struct iommu_group		*iommu_group;
	struct vfio_container		*container;
	struct list_head		device_list;
	struct mutex			device_lock;
	struct device			*dev;
	struct notifier_block		nb;
	struct list_head		vfio_next;
	struct list_head		container_next;
	struct list_head		unbound_list;
	struct mutex			unbound_lock;
	atomic_t			opened;
	wait_queue_head_t		container_q;
	bool				noiommu;
	unsigned int			dev_counter;
	struct kvm			*kvm;
	struct blocking_notifier_head	notifier;
};

struct vfio_device {
	struct kref			kref;
	struct device			*dev;
	const struct vfio_device_ops	*ops;
	struct vfio_group		*group;
	struct list_head		group_next;
	void				*device_data;
};
102 #ifdef CONFIG_VFIO_NOIOMMU
103 static bool noiommu __read_mostly
;
104 module_param_named(enable_unsafe_noiommu_mode
,
105 noiommu
, bool, S_IRUGO
| S_IWUSR
);
106 MODULE_PARM_DESC(enable_unsafe_noiommu_mode
, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away. (default: false)");

/*
 * vfio_iommu_group_{get,put} are only intended for VFIO bus driver probe
 * and remove functions, any use cases other than acquiring the first
 * reference for the purpose of calling vfio_add_group_dev() or removing
 * that symmetric reference after vfio_del_group_dev() should use the raw
 * iommu_group_{get,put} functions.  In particular, vfio_iommu_group_put()
 * removes the device from the dummy group and cannot be nested.
 */
struct iommu_group *vfio_iommu_group_get(struct device *dev)
{
	struct iommu_group *group;
	int __maybe_unused ret;

	group = iommu_group_get(dev);

#ifdef CONFIG_VFIO_NOIOMMU
	/*
	 * With noiommu enabled, an IOMMU group will be created for a device
	 * that doesn't already have one and doesn't have an iommu_ops on their
	 * bus.  We set iommudata simply to be able to identify these groups
	 * as special use and for reclamation later.
	 */
	if (group || !noiommu || iommu_present(dev->bus))
		return group;

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return NULL;

	iommu_group_set_name(group, "vfio-noiommu");
	iommu_group_set_iommudata(group, &noiommu, NULL);
	ret = iommu_group_add_device(group, dev);
	if (ret) {
		iommu_group_put(group);
		return NULL;
	}

	/*
	 * Where to taint?  At this point we've added an IOMMU group for a
	 * device that is not backed by iommu_ops, therefore any iommu_
	 * callback using iommu_ops can legitimately Oops.  So, while we may
	 * be about to give a DMA capable device to a user without IOMMU
	 * protection, which is clearly taint-worthy, let's go ahead and do
	 * it here.
	 */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n");
#endif

	return group;
}
EXPORT_SYMBOL_GPL(vfio_iommu_group_get);

void vfio_iommu_group_put(struct iommu_group *group, struct device *dev)
{
#ifdef CONFIG_VFIO_NOIOMMU
	if (iommu_group_get_iommudata(group) == &noiommu)
		iommu_group_remove_device(dev);
#endif

	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_iommu_group_put);
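
/*
 * Illustrative sketch, not part of this file: a VFIO bus driver is
 * expected to use these helpers symmetrically around device
 * registration.  The "foo" names below are hypothetical.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		struct iommu_group *group = vfio_iommu_group_get(dev);
 *		int ret;
 *
 *		if (!group)
 *			return -EINVAL;
 *
 *		ret = vfio_add_group_dev(dev, &foo_vfio_ops, foo_data);
 *		if (ret)
 *			vfio_iommu_group_put(group, dev);
 *		return ret;
 *	}
 *
 *	static void foo_remove(struct device *dev)
 *	{
 *		void *device_data = vfio_del_group_dev(dev);
 *
 *		vfio_iommu_group_put(dev->iommu_group, dev);
 *		kfree(device_data);
 *	}
 */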

#ifdef CONFIG_VFIO_NOIOMMU
static void *vfio_noiommu_open(unsigned long arg)
{
	if (arg != VFIO_NOIOMMU_IOMMU)
		return ERR_PTR(-EINVAL);
	if (!capable(CAP_SYS_RAWIO))
		return ERR_PTR(-EPERM);

	return NULL;
}

static void vfio_noiommu_release(void *iommu_data)
{
}

static long vfio_noiommu_ioctl(void *iommu_data,
			       unsigned int cmd, unsigned long arg)
{
	if (cmd == VFIO_CHECK_EXTENSION)
		return noiommu && (arg == VFIO_NOIOMMU_IOMMU) ? 1 : 0;

	return -ENOTTY;
}

static int vfio_noiommu_attach_group(void *iommu_data,
				     struct iommu_group *iommu_group)
{
	return iommu_group_get_iommudata(iommu_group) == &noiommu ? 0 : -EINVAL;
}

static void vfio_noiommu_detach_group(void *iommu_data,
				      struct iommu_group *iommu_group)
{
}

static const struct vfio_iommu_driver_ops vfio_noiommu_ops = {
	.name = "vfio-noiommu",
	.owner = THIS_MODULE,
	.open = vfio_noiommu_open,
	.release = vfio_noiommu_release,
	.ioctl = vfio_noiommu_ioctl,
	.attach_group = vfio_noiommu_attach_group,
	.detach_group = vfio_noiommu_detach_group,
};
#endif

/**
 * IOMMU driver registration
 */
int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
	struct vfio_iommu_driver *driver, *tmp;

	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
	if (!driver)
		return -ENOMEM;

	driver->ops = ops;

	mutex_lock(&vfio.iommu_drivers_lock);

	/* Check for duplicates */
	list_for_each_entry(tmp, &vfio.iommu_drivers_list, vfio_next) {
		if (tmp->ops == ops) {
			mutex_unlock(&vfio.iommu_drivers_lock);
			kfree(driver);
			return -EINVAL;
		}
	}

	list_add(&driver->vfio_next, &vfio.iommu_drivers_list);

	mutex_unlock(&vfio.iommu_drivers_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_register_iommu_driver);

void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
	struct vfio_iommu_driver *driver;

	mutex_lock(&vfio.iommu_drivers_lock);
	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
		if (driver->ops == ops) {
			list_del(&driver->vfio_next);
			mutex_unlock(&vfio.iommu_drivers_lock);
			kfree(driver);
			return;
		}
	}
	mutex_unlock(&vfio.iommu_drivers_lock);
}
EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);

/**
 * Group minor allocation/free - both called with vfio.group_lock held
 */
static int vfio_alloc_group_minor(struct vfio_group *group)
{
	return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL);
}

static void vfio_free_group_minor(int minor)
{
	idr_remove(&vfio.group_idr, minor);
}

static int vfio_iommu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data);
static void vfio_group_get(struct vfio_group *group);

/**
 * Container objects - containers are created when /dev/vfio/vfio is
 * opened, but their lifecycle extends until the last user is done, so
 * it's freed via kref.  Must support container/group/device being
 * closed in any order.
 */
static void vfio_container_get(struct vfio_container *container)
{
	kref_get(&container->kref);
}

static void vfio_container_release(struct kref *kref)
{
	struct vfio_container *container;
	container = container_of(kref, struct vfio_container, kref);

	kfree(container);
}

static void vfio_container_put(struct vfio_container *container)
{
	kref_put(&container->kref, vfio_container_release);
}

static void vfio_group_unlock_and_free(struct vfio_group *group)
{
	mutex_unlock(&vfio.group_lock);
	/*
	 * Unregister outside of lock.  A spurious callback is harmless now
	 * that the group is no longer in vfio.group_list.
	 */
	iommu_group_unregister_notifier(group->iommu_group, &group->nb);
	kfree(group);
}

/**
 * Group objects - create, release, get, put, search
 */
static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
{
	struct vfio_group *group, *tmp;
	struct device *dev;
	int ret, minor;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	kref_init(&group->kref);
	INIT_LIST_HEAD(&group->device_list);
	mutex_init(&group->device_lock);
	INIT_LIST_HEAD(&group->unbound_list);
	mutex_init(&group->unbound_lock);
	atomic_set(&group->container_users, 0);
	atomic_set(&group->opened, 0);
	init_waitqueue_head(&group->container_q);
	group->iommu_group = iommu_group;
#ifdef CONFIG_VFIO_NOIOMMU
	group->noiommu = (iommu_group_get_iommudata(iommu_group) == &noiommu);
#endif
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	group->nb.notifier_call = vfio_iommu_group_notifier;

	/*
	 * blocking notifiers acquire a rwsem around registering and hold
	 * it around callback.  Therefore, need to register outside of
	 * vfio.group_lock to avoid A-B/B-A contention.  Our callback won't
	 * do anything unless it can find the group in vfio.group_list, so
	 * no harm in registering early.
	 */
	ret = iommu_group_register_notifier(iommu_group, &group->nb);
	if (ret) {
		kfree(group);
		return ERR_PTR(ret);
	}

	mutex_lock(&vfio.group_lock);

	/* Did we race creating this group? */
	list_for_each_entry(tmp, &vfio.group_list, vfio_next) {
		if (tmp->iommu_group == iommu_group) {
			vfio_group_get(tmp);
			vfio_group_unlock_and_free(group);
			return tmp;
		}
	}

	minor = vfio_alloc_group_minor(group);
	if (minor < 0) {
		vfio_group_unlock_and_free(group);
		return ERR_PTR(minor);
	}

	dev = device_create(vfio.class, NULL,
			    MKDEV(MAJOR(vfio.group_devt), minor),
			    group, "%s%d", group->noiommu ? "noiommu-" : "",
			    iommu_group_id(iommu_group));
	if (IS_ERR(dev)) {
		vfio_free_group_minor(minor);
		vfio_group_unlock_and_free(group);
		return ERR_CAST(dev);
	}

	group->minor = minor;
	group->dev = dev;

	list_add(&group->vfio_next, &vfio.group_list);

	mutex_unlock(&vfio.group_lock);

	return group;
}

/* called with vfio.group_lock held */
static void vfio_group_release(struct kref *kref)
{
	struct vfio_group *group = container_of(kref, struct vfio_group, kref);
	struct vfio_unbound_dev *unbound, *tmp;
	struct iommu_group *iommu_group = group->iommu_group;

	WARN_ON(!list_empty(&group->device_list));
	WARN_ON(group->notifier.head);

	list_for_each_entry_safe(unbound, tmp,
				 &group->unbound_list, unbound_next) {
		list_del(&unbound->unbound_next);
		kfree(unbound);
	}

	device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor));
	list_del(&group->vfio_next);
	vfio_free_group_minor(group->minor);
	vfio_group_unlock_and_free(group);
	iommu_group_put(iommu_group);
}

static void vfio_group_put(struct vfio_group *group)
{
	kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
}

struct vfio_group_put_work {
	struct work_struct work;
	struct vfio_group *group;
};

static void vfio_group_put_bg(struct work_struct *work)
{
	struct vfio_group_put_work *do_work;

	do_work = container_of(work, struct vfio_group_put_work, work);

	vfio_group_put(do_work->group);
	kfree(do_work);
}

static void vfio_group_schedule_put(struct vfio_group *group)
{
	struct vfio_group_put_work *do_work;

	do_work = kmalloc(sizeof(*do_work), GFP_KERNEL);
	if (WARN_ON(!do_work))
		return;

	INIT_WORK(&do_work->work, vfio_group_put_bg);
	do_work->group = group;
	schedule_work(&do_work->work);
}

/* Assume group_lock or group reference is held */
static void vfio_group_get(struct vfio_group *group)
{
	kref_get(&group->kref);
}

/*
 * Not really a try as we will sleep for mutex, but we need to make
 * sure the group pointer is valid under lock and get a reference.
 */
static struct vfio_group *vfio_group_try_get(struct vfio_group *group)
{
	struct vfio_group *target = group;

	mutex_lock(&vfio.group_lock);
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group == target) {
			vfio_group_get(group);
			mutex_unlock(&vfio.group_lock);
			return group;
		}
	}
	mutex_unlock(&vfio.group_lock);

	return NULL;
}

static
struct vfio_group *vfio_group_get_from_iommu(struct iommu_group *iommu_group)
{
	struct vfio_group *group;

	mutex_lock(&vfio.group_lock);
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group->iommu_group == iommu_group) {
			vfio_group_get(group);
			mutex_unlock(&vfio.group_lock);
			return group;
		}
	}
	mutex_unlock(&vfio.group_lock);

	return NULL;
}

static struct vfio_group *vfio_group_get_from_minor(int minor)
{
	struct vfio_group *group;

	mutex_lock(&vfio.group_lock);
	group = idr_find(&vfio.group_idr, minor);
	if (!group) {
		mutex_unlock(&vfio.group_lock);
		return NULL;
	}
	vfio_group_get(group);
	mutex_unlock(&vfio.group_lock);

	return group;
}

static struct vfio_group *vfio_group_get_from_dev(struct device *dev)
{
	struct iommu_group *iommu_group;
	struct vfio_group *group;

	iommu_group = iommu_group_get(dev);
	if (!iommu_group)
		return NULL;

	group = vfio_group_get_from_iommu(iommu_group);
	iommu_group_put(iommu_group);

	return group;
}

/**
 * Device objects - create, release, get, put, search
 */
static
struct vfio_device *vfio_group_create_device(struct vfio_group *group,
					     struct device *dev,
					     const struct vfio_device_ops *ops,
					     void *device_data)
{
	struct vfio_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return ERR_PTR(-ENOMEM);

	kref_init(&device->kref);
	device->dev = dev;
	device->group = group;
	device->ops = ops;
	device->device_data = device_data;
	dev_set_drvdata(dev, device);

	/* No need to get group_lock, caller has group reference */
	vfio_group_get(group);

	mutex_lock(&group->device_lock);
	list_add(&device->group_next, &group->device_list);
	group->dev_counter++;
	mutex_unlock(&group->device_lock);

	return device;
}

static void vfio_device_release(struct kref *kref)
{
	struct vfio_device *device = container_of(kref,
						  struct vfio_device, kref);
	struct vfio_group *group = device->group;

	list_del(&device->group_next);
	group->dev_counter--;
	mutex_unlock(&group->device_lock);

	dev_set_drvdata(device->dev, NULL);

	kfree(device);

	/* vfio_del_group_dev may be waiting for this device */
	wake_up(&vfio.release_q);
}

/* Device reference always implies a group reference */
void vfio_device_put(struct vfio_device *device)
{
	struct vfio_group *group = device->group;
	kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock);
	vfio_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_device_put);

static void vfio_device_get(struct vfio_device *device)
{
	vfio_group_get(device->group);
	kref_get(&device->kref);
}

static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
						 struct device *dev)
{
	struct vfio_device *device;

	mutex_lock(&group->device_lock);
	list_for_each_entry(device, &group->device_list, group_next) {
		if (device->dev == dev) {
			vfio_device_get(device);
			mutex_unlock(&group->device_lock);
			return device;
		}
	}
	mutex_unlock(&group->device_lock);
	return NULL;
}

/*
 * Some drivers, like pci-stub, are only used to prevent other drivers from
 * claiming a device and are therefore perfectly legitimate for a user owned
 * group.  The pci-stub driver has no dependencies on DMA or the IOVA mapping
 * of the device, but it does prevent the user from having direct access to
 * the device, which is useful in some circumstances.
 *
 * We also assume that we can include PCI interconnect devices, ie. bridges.
 * IOMMU grouping on PCI necessitates that if we lack isolation on a bridge
 * then all of the downstream devices will be part of the same IOMMU group as
 * the bridge.  Thus, if placing the bridge into the user owned IOVA space
 * breaks anything, it only does so for user owned devices downstream.  Note
 * that error notification via MSI can be affected for platforms that handle
 * MSI within the same IOVA space as DMA.
 */
static const char * const vfio_driver_allowed[] = { "pci-stub" };

static bool vfio_dev_driver_allowed(struct device *dev,
				    struct device_driver *drv)
{
	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
			return true;
	}

	return match_string(vfio_driver_allowed,
			    ARRAY_SIZE(vfio_driver_allowed),
			    drv->name) >= 0;
}

/*
 * A vfio group is viable for use by userspace if all devices are in
 * one of the following states:
 *  - driver-less
 *  - bound to a vfio driver
 *  - bound to an otherwise allowed driver
 *  - a PCI interconnect device
 *
 * We use two methods to determine whether a device is bound to a vfio
 * driver.  The first is to test whether the device exists in the vfio
 * group.  The second is to test if the device exists on the group
 * unbound_list, indicating it's in the middle of transitioning from
 * a vfio driver to driver-less.
 */
static int vfio_dev_viable(struct device *dev, void *data)
{
	struct vfio_group *group = data;
	struct vfio_device *device;
	struct device_driver *drv = READ_ONCE(dev->driver);
	struct vfio_unbound_dev *unbound;
	int ret = -EINVAL;

	mutex_lock(&group->unbound_lock);
	list_for_each_entry(unbound, &group->unbound_list, unbound_next) {
		if (dev == unbound->dev) {
			ret = 0;
			break;
		}
	}
	mutex_unlock(&group->unbound_lock);

	if (!ret || !drv || vfio_dev_driver_allowed(dev, drv))
		return 0;

	device = vfio_group_get_device(group, dev);
	if (device) {
		vfio_device_put(device);
		return 0;
	}

	return ret;
}

/**
 * Async device support
 */
static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
{
	struct vfio_device *device;

	/* Do we already know about it?  We shouldn't */
	device = vfio_group_get_device(group, dev);
	if (WARN_ON_ONCE(device)) {
		vfio_device_put(device);
		return 0;
	}

	/* Nothing to do for idle groups */
	if (!atomic_read(&group->container_users))
		return 0;

	/* TODO Prevent device auto probing */
	dev_WARN(dev, "Device added to live group %d!\n",
		 iommu_group_id(group->iommu_group));

	return 0;
}

static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev)
{
	/* We don't care what happens when the group isn't in use */
	if (!atomic_read(&group->container_users))
		return 0;

	return vfio_dev_viable(dev, group);
}

static int vfio_iommu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct vfio_group *group = container_of(nb, struct vfio_group, nb);
	struct device *dev = data;
	struct vfio_unbound_dev *unbound;

	/*
	 * Need to go through a group_lock lookup to get a reference or we
	 * risk racing a group being removed.  Ignore spurious notifies.
	 */
	group = vfio_group_try_get(group);
	if (!group)
		return NOTIFY_OK;

	switch (action) {
	case IOMMU_GROUP_NOTIFY_ADD_DEVICE:
		vfio_group_nb_add_dev(group, dev);
		break;
	case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
		/*
		 * Nothing to do here.  If the device is in use, then the
		 * vfio sub-driver should block the remove callback until
		 * it is unused.  If the device is unused or attached to a
		 * stub driver, then it should be released and we don't
		 * care that it will be going away.
		 */
		break;
	case IOMMU_GROUP_NOTIFY_BIND_DRIVER:
		dev_dbg(dev, "%s: group %d binding to driver\n", __func__,
			iommu_group_id(group->iommu_group));
		break;
	case IOMMU_GROUP_NOTIFY_BOUND_DRIVER:
		dev_dbg(dev, "%s: group %d bound to driver %s\n", __func__,
			iommu_group_id(group->iommu_group), dev->driver->name);
		BUG_ON(vfio_group_nb_verify(group, dev));
		break;
	case IOMMU_GROUP_NOTIFY_UNBIND_DRIVER:
		dev_dbg(dev, "%s: group %d unbinding from driver %s\n",
			__func__, iommu_group_id(group->iommu_group),
			dev->driver->name);
		break;
	case IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER:
		dev_dbg(dev, "%s: group %d unbound from driver\n", __func__,
			iommu_group_id(group->iommu_group));
		/*
		 * XXX An unbound device in a live group is ok, but we'd
		 * really like to avoid the above BUG_ON by preventing other
		 * drivers from binding to it.  Once that occurs, we have to
		 * stop the system to maintain isolation.  At a minimum, we'd
		 * want a toggle to disable driver auto probe for this device.
		 */

		mutex_lock(&group->unbound_lock);
		list_for_each_entry(unbound,
				    &group->unbound_list, unbound_next) {
			if (dev == unbound->dev) {
				list_del(&unbound->unbound_next);
				kfree(unbound);
				break;
			}
		}
		mutex_unlock(&group->unbound_lock);
		break;
	}

	/*
	 * If we're the last reference to the group, the group will be
	 * released, which includes unregistering the iommu group notifier.
	 * We hold a read-lock on that notifier list, unregistering needs
	 * a write-lock... deadlock.  Release our reference asynchronously
	 * to avoid that situation.
	 */
	vfio_group_schedule_put(group);
	return NOTIFY_OK;
}

/**
 * VFIO driver API
 */
int vfio_add_group_dev(struct device *dev,
		       const struct vfio_device_ops *ops, void *device_data)
{
	struct iommu_group *iommu_group;
	struct vfio_group *group;
	struct vfio_device *device;

	iommu_group = iommu_group_get(dev);
	if (!iommu_group)
		return -EINVAL;

	group = vfio_group_get_from_iommu(iommu_group);
	if (!group) {
		group = vfio_create_group(iommu_group);
		if (IS_ERR(group)) {
			iommu_group_put(iommu_group);
			return PTR_ERR(group);
		}
	} else {
		/*
		 * A found vfio_group already holds a reference to the
		 * iommu_group.  A created vfio_group keeps the reference.
		 */
		iommu_group_put(iommu_group);
	}

	device = vfio_group_get_device(group, dev);
	if (device) {
		dev_WARN(dev, "Device already exists on group %d\n",
			 iommu_group_id(iommu_group));
		vfio_device_put(device);
		vfio_group_put(group);
		return -EBUSY;
	}

	device = vfio_group_create_device(group, dev, ops, device_data);
	if (IS_ERR(device)) {
		vfio_group_put(group);
		return PTR_ERR(device);
	}

	/*
	 * Drop all but the vfio_device reference.  The vfio_device holds
	 * a reference to the vfio_group, which holds a reference to the
	 * iommu_group.
	 */
	vfio_group_put(group);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_add_group_dev);
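
/*
 * Illustrative sketch, not part of this file: a minimal vfio_device_ops
 * that a bus driver might hand to vfio_add_group_dev().  The "foo_*"
 * callbacks are hypothetical; struct vfio_device_ops comes from
 * <linux/vfio.h>.
 *
 *	static const struct vfio_device_ops foo_vfio_ops = {
 *		.name		= "vfio-foo",
 *		.open		= foo_open,
 *		.release	= foo_release,
 *		.ioctl		= foo_ioctl,
 *		.read		= foo_read,
 *		.write		= foo_write,
 *		.mmap		= foo_mmap,
 *		.request	= foo_request,
 *	};
 *
 *	ret = vfio_add_group_dev(dev, &foo_vfio_ops, foo_device_data);
 */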

/*
 * Get a reference to the vfio_device for a device.  Even if the
 * caller thinks they own the device, they could be racing with a
 * release call path, so we can't trust drvdata for the shortcut.
 * Go the long way around, from the iommu_group to the vfio_group
 * to the vfio_device.
 */
struct vfio_device *vfio_device_get_from_dev(struct device *dev)
{
	struct vfio_group *group;
	struct vfio_device *device;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return NULL;

	device = vfio_group_get_device(group, dev);
	vfio_group_put(group);

	return device;
}
EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);
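
/*
 * Illustrative sketch, not part of this file: pairing this lookup with
 * vfio_device_put() and vfio_device_data() from another module.
 *
 *	struct vfio_device *device = vfio_device_get_from_dev(dev);
 *
 *	if (!device)
 *		return -ENODEV;
 *	data = vfio_device_data(device);
 *	...use data...
 *	vfio_device_put(device);
 */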

static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
						     char *buf)
{
	struct vfio_device *it, *device = ERR_PTR(-ENODEV);

	mutex_lock(&group->device_lock);
	list_for_each_entry(it, &group->device_list, group_next) {
		int ret;

		if (it->ops->match) {
			ret = it->ops->match(it->device_data, buf);
			if (ret < 0) {
				device = ERR_PTR(ret);
				break;
			}
		} else {
			ret = !strcmp(dev_name(it->dev), buf);
		}

		if (ret) {
			device = it;
			vfio_device_get(device);
			break;
		}
	}
	mutex_unlock(&group->device_lock);

	return device;
}

/*
 * Caller must hold a reference to the vfio_device
 */
void *vfio_device_data(struct vfio_device *device)
{
	return device->device_data;
}
EXPORT_SYMBOL_GPL(vfio_device_data);

/*
 * Decrement the device reference count and wait for the device to be
 * removed.  Open file descriptors for the device... */
void *vfio_del_group_dev(struct device *dev)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct vfio_device *device = dev_get_drvdata(dev);
	struct vfio_group *group = device->group;
	void *device_data = device->device_data;
	struct vfio_unbound_dev *unbound;
	unsigned int i = 0;
	bool interrupted = false;

	/*
	 * The group exists so long as we have a device reference.  Get
	 * a group reference and use it to scan for the device going away.
	 */
	vfio_group_get(group);

	/*
	 * When the device is removed from the group, the group suddenly
	 * becomes non-viable; the device has a driver (until the unbind
	 * completes), but it's not present in the group.  This is bad news
	 * for any external users that need to re-acquire a group reference
	 * in order to match and release their existing reference.  To
	 * solve this, we track such devices on the unbound_list to bridge
	 * the gap until they're fully unbound.
	 */
	unbound = kzalloc(sizeof(*unbound), GFP_KERNEL);
	if (unbound) {
		unbound->dev = dev;
		mutex_lock(&group->unbound_lock);
		list_add(&unbound->unbound_next, &group->unbound_list);
		mutex_unlock(&group->unbound_lock);
	}
	WARN_ON(!unbound);

	vfio_device_put(device);

	/*
	 * If the device is still present in the group after the above
	 * 'put', then it is in use and we need to request it from the
	 * bus driver.  The driver may in turn need to request the
	 * device from the user.  We send the request on an arbitrary
	 * interval with counter to allow the driver to take escalating
	 * measures to release the device if it has the ability to do so.
	 */
	add_wait_queue(&vfio.release_q, &wait);

	do {
		device = vfio_group_get_device(group, dev);
		if (!device)
			break;

		if (device->ops->request)
			device->ops->request(device_data, i++);

		vfio_device_put(device);

		if (interrupted) {
			wait_woken(&wait, TASK_UNINTERRUPTIBLE, HZ * 10);
		} else {
			wait_woken(&wait, TASK_INTERRUPTIBLE, HZ * 10);
			if (signal_pending(current)) {
				interrupted = true;
				dev_warn(dev,
					 "Device is currently in use, task"
					 " \"%s\" (%d) "
					 "blocked until device is released",
					 current->comm, task_pid_nr(current));
			}
		}

	} while (1);

	remove_wait_queue(&vfio.release_q, &wait);
	/*
	 * In order to support multiple devices per group, devices can be
	 * plucked from the group while other devices in the group are still
	 * in use.  The container persists with this group and those remaining
	 * devices still attached.  If the user creates an isolation violation
	 * by binding this device to another driver while the group is still in
	 * use, that's their fault.  However, in the case of removing the last,
	 * or potentially the only, device in the group there can be no other
	 * in-use devices in the group.  The user has done their due diligence
	 * and we should lay no claims to those devices.  In order to do that,
	 * we need to make sure the group is detached from the container.
	 * Without this stall, we're potentially racing with a user process
	 * that may attempt to immediately bind this device to another driver.
	 */
	if (list_empty(&group->device_list))
		wait_event(group->container_q, !group->container);

	vfio_group_put(group);

	return device_data;
}
EXPORT_SYMBOL_GPL(vfio_del_group_dev);

/**
 * VFIO base fd, /dev/vfio/vfio
 */
static long vfio_ioctl_check_extension(struct vfio_container *container,
				       unsigned long arg)
{
	struct vfio_iommu_driver *driver;
	long ret = 0;

	down_read(&container->group_lock);

	driver = container->iommu_driver;

	switch (arg) {
		/* No base extensions yet */
	default:
		/*
		 * If no driver is set, poll all registered drivers for
		 * extensions and return the first positive result.  If
		 * a driver is already set, further queries will be passed
		 * only to that driver.
		 */
		if (!driver) {
			mutex_lock(&vfio.iommu_drivers_lock);
			list_for_each_entry(driver, &vfio.iommu_drivers_list,
					    vfio_next) {

#ifdef CONFIG_VFIO_NOIOMMU
				if (!list_empty(&container->group_list) &&
				    (container->noiommu !=
				     (driver->ops == &vfio_noiommu_ops)))
					continue;
#endif

				if (!try_module_get(driver->ops->owner))
					continue;

				ret = driver->ops->ioctl(NULL,
							 VFIO_CHECK_EXTENSION,
							 arg);
				module_put(driver->ops->owner);
				if (ret > 0)
					break;
			}
			mutex_unlock(&vfio.iommu_drivers_lock);
		} else
			ret = driver->ops->ioctl(container->iommu_data,
						 VFIO_CHECK_EXTENSION, arg);
	}

	up_read(&container->group_lock);

	return ret;
}

/* hold write lock on container->group_lock */
static int __vfio_container_attach_groups(struct vfio_container *container,
					  struct vfio_iommu_driver *driver,
					  void *data)
{
	struct vfio_group *group;
	int ret = -ENODEV;

	list_for_each_entry(group, &container->group_list, container_next) {
		ret = driver->ops->attach_group(data, group->iommu_group);
		if (ret)
			goto unwind;
	}

	return ret;

unwind:
	list_for_each_entry_continue_reverse(group, &container->group_list,
					     container_next) {
		driver->ops->detach_group(data, group->iommu_group);
	}

	return ret;
}

static long vfio_ioctl_set_iommu(struct vfio_container *container,
				 unsigned long arg)
{
	struct vfio_iommu_driver *driver;
	long ret = -ENODEV;

	down_write(&container->group_lock);

	/*
	 * The container is designed to be an unprivileged interface while
	 * the group can be assigned to specific users.  Therefore, only by
	 * adding a group to a container does the user get the privilege of
	 * enabling the iommu, which may allocate finite resources.  There
	 * is no unset_iommu, but by removing all the groups from a container,
	 * the container is deprivileged and returns to an unset state.
	 */
	if (list_empty(&container->group_list) || container->iommu_driver) {
		up_write(&container->group_lock);
		return -EINVAL;
	}

	mutex_lock(&vfio.iommu_drivers_lock);
	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
		void *data;

#ifdef CONFIG_VFIO_NOIOMMU
		/*
		 * Only noiommu containers can use vfio-noiommu and noiommu
		 * containers can only use vfio-noiommu.
		 */
		if (container->noiommu != (driver->ops == &vfio_noiommu_ops))
			continue;
#endif

		if (!try_module_get(driver->ops->owner))
			continue;

		/*
		 * The arg magic for SET_IOMMU is the same as CHECK_EXTENSION,
		 * so test which iommu driver reported support for this
		 * extension and call open on them.  We also pass them the
		 * magic, allowing a single driver to support multiple
		 * interfaces if they'd like.
		 */
		if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) {
			module_put(driver->ops->owner);
			continue;
		}

		data = driver->ops->open(arg);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			module_put(driver->ops->owner);
			continue;
		}

		ret = __vfio_container_attach_groups(container, driver, data);
		if (ret) {
			driver->ops->release(data);
			module_put(driver->ops->owner);
			continue;
		}

		container->iommu_driver = driver;
		container->iommu_data = data;
		break;
	}

	mutex_unlock(&vfio.iommu_drivers_lock);
	up_write(&container->group_lock);

	return ret;
}

static long vfio_fops_unl_ioctl(struct file *filep,
				unsigned int cmd, unsigned long arg)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	void *data;
	long ret = -EINVAL;

	if (!container)
		return ret;

	switch (cmd) {
	case VFIO_GET_API_VERSION:
		ret = VFIO_API_VERSION;
		break;
	case VFIO_CHECK_EXTENSION:
		ret = vfio_ioctl_check_extension(container, arg);
		break;
	case VFIO_SET_IOMMU:
		ret = vfio_ioctl_set_iommu(container, arg);
		break;
	default:
		driver = container->iommu_driver;
		data = container->iommu_data;

		if (driver) /* passthrough all unrecognized ioctls */
			ret = driver->ops->ioctl(data, cmd, arg);
	}

	return ret;
}

static int vfio_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_container *container;

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return -ENOMEM;

	INIT_LIST_HEAD(&container->group_list);
	init_rwsem(&container->group_lock);
	kref_init(&container->kref);

	filep->private_data = container;

	return 0;
}

static int vfio_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver = container->iommu_driver;

	if (driver && driver->ops->notify)
		driver->ops->notify(container->iommu_data,
				    VFIO_IOMMU_CONTAINER_CLOSE);

	filep->private_data = NULL;

	vfio_container_put(container);

	return 0;
}

/*
 * Once an iommu driver is set, we optionally pass read/write/mmap
 * on to the driver, allowing management interfaces beyond ioctl.
 */
static ssize_t vfio_fops_read(struct file *filep, char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	ssize_t ret = -EINVAL;

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->read))
		ret = driver->ops->read(container->iommu_data,
					buf, count, ppos);

	return ret;
}

static ssize_t vfio_fops_write(struct file *filep, const char __user *buf,
			       size_t count, loff_t *ppos)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	ssize_t ret = -EINVAL;

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->write))
		ret = driver->ops->write(container->iommu_data,
					 buf, count, ppos);

	return ret;
}

static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	int ret = -EINVAL;

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->mmap))
		ret = driver->ops->mmap(container->iommu_data, vma);

	return ret;
}

static const struct file_operations vfio_fops = {
	.owner		= THIS_MODULE,
	.open		= vfio_fops_open,
	.release	= vfio_fops_release,
	.read		= vfio_fops_read,
	.write		= vfio_fops_write,
	.unlocked_ioctl	= vfio_fops_unl_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.mmap		= vfio_fops_mmap,
};

/**
 * VFIO Group fd, /dev/vfio/$GROUP
 */
static void __vfio_group_unset_container(struct vfio_group *group)
{
	struct vfio_container *container = group->container;
	struct vfio_iommu_driver *driver;

	down_write(&container->group_lock);

	driver = container->iommu_driver;
	if (driver)
		driver->ops->detach_group(container->iommu_data,
					  group->iommu_group);

	group->container = NULL;
	wake_up(&group->container_q);
	list_del(&group->container_next);

	/* Detaching the last group deprivileges a container, remove iommu */
	if (driver && list_empty(&container->group_list)) {
		driver->ops->release(container->iommu_data);
		module_put(driver->ops->owner);
		container->iommu_driver = NULL;
		container->iommu_data = NULL;
	}

	up_write(&container->group_lock);

	vfio_container_put(container);
}

/*
 * VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or
 * if there was no container to unset.  Since the ioctl is called on
 * the group, we know that still exists, therefore the only valid
 * transition here is 1->0.
 */
static int vfio_group_unset_container(struct vfio_group *group)
{
	int users = atomic_cmpxchg(&group->container_users, 1, 0);

	if (!users)
		return -EINVAL;
	if (users != 1)
		return -EBUSY;

	__vfio_group_unset_container(group);

	return 0;
}

/*
 * When removing container users, anything that removes the last user
 * implicitly removes the group from the container.  That is, if the
 * group file descriptor is closed, as well as any device file descriptors,
 * the group is free.
 */
static void vfio_group_try_dissolve_container(struct vfio_group *group)
{
	if (0 == atomic_dec_if_positive(&group->container_users))
		__vfio_group_unset_container(group);
}

static int vfio_group_set_container(struct vfio_group *group, int container_fd)
{
	struct fd f;
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret = 0;

	if (atomic_read(&group->container_users))
		return -EINVAL;

	if (group->noiommu && !capable(CAP_SYS_RAWIO))
		return -EPERM;

	f = fdget(container_fd);
	if (!f.file)
		return -EBADF;

	/* Sanity check, is this really our fd? */
	if (f.file->f_op != &vfio_fops) {
		fdput(f);
		return -EINVAL;
	}

	container = f.file->private_data;
	WARN_ON(!container); /* fget ensures we don't race vfio_release */

	down_write(&container->group_lock);

	/* Real groups and fake groups cannot mix */
	if (!list_empty(&container->group_list) &&
	    container->noiommu != group->noiommu) {
		ret = -EPERM;
		goto unlock_out;
	}

	driver = container->iommu_driver;
	if (driver) {
		ret = driver->ops->attach_group(container->iommu_data,
						group->iommu_group);
		if (ret)
			goto unlock_out;
	}

	group->container = container;
	container->noiommu = group->noiommu;
	list_add(&group->container_next, &container->group_list);

	/* Get a reference on the container and mark a user within the group */
	vfio_container_get(container);
	atomic_inc(&group->container_users);

unlock_out:
	up_write(&container->group_lock);
	fdput(f);
	return ret;
}

static bool vfio_group_viable(struct vfio_group *group)
{
	return (iommu_group_for_each_dev(group->iommu_group,
					 group, vfio_dev_viable) == 0);
}

static int vfio_group_add_container_user(struct vfio_group *group)
{
	if (!atomic_inc_not_zero(&group->container_users))
		return -EINVAL;

	if (group->noiommu) {
		atomic_dec(&group->container_users);
		return -EPERM;
	}
	if (!group->container->iommu_driver || !vfio_group_viable(group)) {
		atomic_dec(&group->container_users);
		return -EINVAL;
	}

	return 0;
}

static const struct file_operations vfio_device_fops;

static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
{
	struct vfio_device *device;
	struct file *filep;
	int ret;

	if (0 == atomic_read(&group->container_users) ||
	    !group->container->iommu_driver || !vfio_group_viable(group))
		return -EINVAL;

	if (group->noiommu && !capable(CAP_SYS_RAWIO))
		return -EPERM;

	device = vfio_device_get_from_name(group, buf);
	if (IS_ERR(device))
		return PTR_ERR(device);

	ret = device->ops->open(device->device_data);
	if (ret) {
		vfio_device_put(device);
		return ret;
	}

	/*
	 * We can't use anon_inode_getfd() because we need to modify
	 * the f_mode flags directly to allow more than just ioctls
	 */
	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0) {
		device->ops->release(device->device_data);
		vfio_device_put(device);
		return ret;
	}

	filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
				   device, O_RDWR);
	if (IS_ERR(filep)) {
		put_unused_fd(ret);
		ret = PTR_ERR(filep);
		device->ops->release(device->device_data);
		vfio_device_put(device);
		return ret;
	}

	/*
	 * TODO: add an anon_inode interface to do this.
	 * Appears to be missing by lack of need rather than
	 * explicitly prevented.  Now there's need.
	 */
	filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);

	atomic_inc(&group->container_users);

	fd_install(ret, filep);

	if (group->noiommu)
		dev_warn(device->dev, "vfio-noiommu device opened by user "
			 "(%s:%d)\n", current->comm, task_pid_nr(current));

	return ret;
}

static long vfio_group_fops_unl_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vfio_group *group = filep->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case VFIO_GROUP_GET_STATUS:
	{
		struct vfio_group_status status;
		unsigned long minsz;

		minsz = offsetofend(struct vfio_group_status, flags);

		if (copy_from_user(&status, (void __user *)arg, minsz))
			return -EFAULT;

		if (status.argsz < minsz)
			return -EINVAL;

		status.flags = 0;

		if (vfio_group_viable(group))
			status.flags |= VFIO_GROUP_FLAGS_VIABLE;

		if (group->container)
			status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET;

		if (copy_to_user((void __user *)arg, &status, minsz))
			return -EFAULT;

		ret = 0;
		break;
	}
	case VFIO_GROUP_SET_CONTAINER:
	{
		int fd;

		if (get_user(fd, (int __user *)arg))
			return -EFAULT;

		if (fd < 0)
			return -EINVAL;

		ret = vfio_group_set_container(group, fd);
		break;
	}
	case VFIO_GROUP_UNSET_CONTAINER:
		ret = vfio_group_unset_container(group);
		break;
	case VFIO_GROUP_GET_DEVICE_FD:
	{
		char *buf;

		buf = strndup_user((const char __user *)arg, PAGE_SIZE);
		if (IS_ERR(buf))
			return PTR_ERR(buf);

		ret = vfio_group_get_device_fd(group, buf);
		kfree(buf);
		break;
	}
	}

	return ret;
}

static int vfio_group_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_group *group;
	int opened;

	group = vfio_group_get_from_minor(iminor(inode));
	if (!group)
		return -ENODEV;

	if (group->noiommu && !capable(CAP_SYS_RAWIO)) {
		vfio_group_put(group);
		return -EPERM;
	}

	/* Do we need multiple instances of the group open?  Seems not. */
	opened = atomic_cmpxchg(&group->opened, 0, 1);
	if (opened) {
		vfio_group_put(group);
		return -EBUSY;
	}

	/* Is something still in use from a previous open? */
	if (group->container) {
		atomic_dec(&group->opened);
		vfio_group_put(group);
		return -EBUSY;
	}

	/* Warn if previous user didn't cleanup and re-init to drop them */
	if (WARN_ON(group->notifier.head))
		BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	filep->private_data = group;

	return 0;
}

static int vfio_group_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_group *group = filep->private_data;

	filep->private_data = NULL;

	vfio_group_try_dissolve_container(group);

	atomic_dec(&group->opened);

	vfio_group_put(group);

	return 0;
}

static const struct file_operations vfio_group_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= vfio_group_fops_unl_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.open		= vfio_group_fops_open,
	.release	= vfio_group_fops_release,
};

/**
 * VFIO Device fd
 */
static int vfio_device_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_device *device = filep->private_data;

	device->ops->release(device->device_data);

	vfio_group_try_dissolve_container(device->group);

	vfio_device_put(device);

	return 0;
}

static long vfio_device_fops_unl_ioctl(struct file *filep,
				       unsigned int cmd, unsigned long arg)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->ioctl))
		return -EINVAL;

	return device->ops->ioctl(device->device_data, cmd, arg);
}

static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->read))
		return -EINVAL;

	return device->ops->read(device->device_data, buf, count, ppos);
}

static ssize_t vfio_device_fops_write(struct file *filep,
				      const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->write))
		return -EINVAL;

	return device->ops->write(device->device_data, buf, count, ppos);
}

static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->mmap))
		return -EINVAL;

	return device->ops->mmap(device->device_data, vma);
}

static const struct file_operations vfio_device_fops = {
	.owner		= THIS_MODULE,
	.release	= vfio_device_fops_release,
	.read		= vfio_device_fops_read,
	.write		= vfio_device_fops_write,
	.unlocked_ioctl	= vfio_device_fops_unl_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.mmap		= vfio_device_fops_mmap,
};

/**
 * External user API, exported by symbols to be linked dynamically.
 *
 * The protocol includes:
 *  1. do normal VFIO init operation:
 *	- opening a new container;
 *	- attaching group(s) to it;
 *	- setting an IOMMU driver for a container.
 * When IOMMU is set for a container, all groups in it are
 * considered ready to use by an external user.
 *
 * 2. User space passes a group fd to an external user.
 * The external user calls vfio_group_get_external_user()
 * to verify that:
 *	- the group is initialized;
 *	- IOMMU is set for it.
 * If both checks passed, vfio_group_get_external_user()
 * increments the container user counter to prevent
 * the VFIO group from disposal before KVM exits.
 *
 * 3. The external user calls vfio_external_user_iommu_id()
 * to know an IOMMU ID.
 *
 * 4. When the external KVM finishes, it calls
 * vfio_group_put_external_user() to release the VFIO group.
 * This call decrements the container user counter.
 */
struct vfio_group *vfio_group_get_external_user(struct file *filep)
{
	struct vfio_group *group = filep->private_data;
	int ret;

	if (filep->f_op != &vfio_group_fops)
		return ERR_PTR(-EINVAL);

	ret = vfio_group_add_container_user(group);
	if (ret)
		return ERR_PTR(ret);

	vfio_group_get(group);

	return group;
}
EXPORT_SYMBOL_GPL(vfio_group_get_external_user);
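
/*
 * Illustrative sketch, not part of this file: the KVM-style sequence
 * described above, with most error handling elided.
 *
 *	struct file *filep = fget(group_fd);	// group fd from userspace
 *	struct vfio_group *group;
 *	int iommu_id;
 *
 *	group = vfio_group_get_external_user(filep);
 *	if (IS_ERR(group))
 *		return PTR_ERR(group);
 *	iommu_id = vfio_external_user_iommu_id(group);
 *	...use the group...
 *	vfio_group_put_external_user(group);
 */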

/*
 * External user API, exported by symbols to be linked dynamically.
 * The external user passes in a device pointer
 * to verify that:
 *	- A VFIO group is associated with the device;
 *	- IOMMU is set for the group.
 * If both checks passed, vfio_group_get_external_user_from_dev()
 * increments the container user counter to prevent the VFIO group
 * from disposal before external user exits and returns the pointer
 * to the VFIO group.
 *
 * When the external user finishes using the VFIO group, it calls
 * vfio_group_put_external_user() to release the VFIO group and
 * decrement the container user counter.
 *
 * @dev [in]	: device
 * Return error PTR or pointer to VFIO group.
 */
struct vfio_group *vfio_group_get_external_user_from_dev(struct device *dev)
{
	struct vfio_group *group;
	int ret;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return ERR_PTR(-ENODEV);

	ret = vfio_group_add_container_user(group);
	if (ret) {
		vfio_group_put(group);
		return ERR_PTR(ret);
	}

	return group;
}
EXPORT_SYMBOL_GPL(vfio_group_get_external_user_from_dev);

void vfio_group_put_external_user(struct vfio_group *group)
{
	vfio_group_try_dissolve_container(group);
	vfio_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_group_put_external_user);

bool vfio_external_group_match_file(struct vfio_group *test_group,
				    struct file *filep)
{
	struct vfio_group *group = filep->private_data;

	return (filep->f_op == &vfio_group_fops) && (group == test_group);
}
EXPORT_SYMBOL_GPL(vfio_external_group_match_file);

int vfio_external_user_iommu_id(struct vfio_group *group)
{
	return iommu_group_id(group->iommu_group);
}
EXPORT_SYMBOL_GPL(vfio_external_user_iommu_id);

long vfio_external_check_extension(struct vfio_group *group, unsigned long arg)
{
	return vfio_ioctl_check_extension(group->container, arg);
}
EXPORT_SYMBOL_GPL(vfio_external_check_extension);

/**
 * Sub-module support
 */
/*
 * Helper for managing a buffer of info chain capabilities, allocate or
 * reallocate a buffer with additional @size, filling in @id and @version
 * of the capability.  A pointer to the new capability is returned.
 *
 * NB. The chain is based at the head of the buffer, so new entries are
 * added to the tail, vfio_info_cap_shift() should be called to fixup the
 * next offsets prior to copying to the user buffer.
 */
struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
					       size_t size, u16 id, u16 version)
{
	void *buf;
	struct vfio_info_cap_header *header, *tmp;

	buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL);
	if (!buf) {
		kfree(caps->buf);
		caps->size = 0;
		return ERR_PTR(-ENOMEM);
	}

	caps->buf = buf;
	header = buf + caps->size;

	/* Eventually copied to user buffer, zero */
	memset(header, 0, size);

	header->id = id;
	header->version = version;

	/* Add to the end of the capability chain */
	for (tmp = buf; tmp->next; tmp = buf + tmp->next)
		; /* nothing */

	tmp->next = caps->size;
	caps->size += size;

	return header;
}
EXPORT_SYMBOL_GPL(vfio_info_cap_add);

void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset)
{
	struct vfio_info_cap_header *tmp;
	void *buf = (void *)caps->buf;

	for (tmp = buf; tmp->next; tmp = buf + tmp->next - offset)
		tmp->next += offset;
}
EXPORT_SYMBOL(vfio_info_cap_shift);
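
/*
 * Illustrative sketch, not part of this file: how an ioctl handler
 * might build a capability chain behind an "info" structure before
 * copying both to the user buffer.  "info", "size", "id" and "version"
 * are hypothetical placeholders.
 *
 *	struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
 *	struct vfio_info_cap_header *header;
 *
 *	header = vfio_info_cap_add(&caps, size, id, version);
 *	if (IS_ERR(header))
 *		return PTR_ERR(header);
 *	...fill in the capability body following header...
 *	if (caps.size) {
 *		info.cap_offset = sizeof(info);
 *		vfio_info_cap_shift(&caps, sizeof(info));
 *		...copy info, then caps.buf, to the user buffer...
 *		kfree(caps.buf);
 *	}
 */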

int vfio_info_add_capability(struct vfio_info_cap *caps,
			     struct vfio_info_cap_header *cap, size_t size)
{
	struct vfio_info_cap_header *header;

	header = vfio_info_cap_add(caps, size, cap->id, cap->version);
	if (IS_ERR(header))
		return PTR_ERR(header);

	memcpy(header + 1, cap + 1, size - sizeof(*header));

	return 0;
}
EXPORT_SYMBOL(vfio_info_add_capability);

int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs,
				       int max_irq_type, size_t *data_size)
{
	unsigned long minsz;
	size_t size;

	minsz = offsetofend(struct vfio_irq_set, count);

	if ((hdr->argsz < minsz) || (hdr->index >= max_irq_type) ||
	    (hdr->count >= (U32_MAX - hdr->start)) ||
	    (hdr->flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
			    VFIO_IRQ_SET_ACTION_TYPE_MASK)))
		return -EINVAL;

	if (data_size)
		*data_size = 0;

	if (hdr->start >= num_irqs || hdr->start + hdr->count > num_irqs)
		return -EINVAL;

	switch (hdr->flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
	case VFIO_IRQ_SET_DATA_NONE:
		size = 0;
		break;
	case VFIO_IRQ_SET_DATA_BOOL:
		size = sizeof(uint8_t);
		break;
	case VFIO_IRQ_SET_DATA_EVENTFD:
		size = sizeof(int32_t);
		break;
	default:
		return -EINVAL;
	}

	if (size) {
		if (hdr->argsz - minsz < hdr->count * size)
			return -EINVAL;

		if (!data_size)
			return -EINVAL;

		*data_size = hdr->count * size;
	}

	return 0;
}
EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);
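
/*
 * Illustrative sketch, not part of this file: typical use from a
 * driver's VFIO_DEVICE_SET_IRQS handler, where "hdr" was copied from
 * userspace and "minsz" is offsetofend(struct vfio_irq_set, count).
 *
 *	size_t data_size = 0;
 *	u8 *data = NULL;
 *
 *	ret = vfio_set_irqs_validate_and_prepare(&hdr, max_irqs,
 *						 num_irq_types, &data_size);
 *	if (ret)
 *		return ret;
 *	if (data_size) {
 *		data = memdup_user((void __user *)(arg + minsz), data_size);
 *		if (IS_ERR(data))
 *			return PTR_ERR(data);
 *	}
 */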

/*
 * Pin a set of guest PFNs and return their associated host PFNs for local
 * domain only.
 * @dev [in]	 : device
 * @user_pfn [in]: array of user/guest PFNs to be pinned.
 * @npage [in]	 : count of elements in user_pfn array.  This count should not
 *		   be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
 * @prot [in]	 : protection flags
 * @phys_pfn[out]: array of host PFNs
 * Return error or number of pages pinned.
 */
int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
		   int prot, unsigned long *phys_pfn)
{
	struct vfio_container *container;
	struct vfio_group *group;
	struct vfio_iommu_driver *driver;
	int ret;

	if (!dev || !user_pfn || !phys_pfn || !npage)
		return -EINVAL;

	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
		return -E2BIG;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return -ENODEV;

	if (group->dev_counter > 1) {
		ret = -EINVAL;
		goto err_pin_pages;
	}

	ret = vfio_group_add_container_user(group);
	if (ret)
		goto err_pin_pages;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->pin_pages))
		ret = driver->ops->pin_pages(container->iommu_data,
					     group->iommu_group, user_pfn,
					     npage, prot, phys_pfn);
	else
		ret = -ENOTTY;

	vfio_group_try_dissolve_container(group);

err_pin_pages:
	vfio_group_put(group);
	return ret;
}
EXPORT_SYMBOL(vfio_pin_pages);
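
/*
 * Illustrative sketch, not part of this file: a mediated-device vendor
 * driver pinning a single guest page before programming DMA, assuming
 * mdev_dev() provides the struct device.
 *
 *	unsigned long gfn = ..., pfn;
 *	int ret;
 *
 *	ret = vfio_pin_pages(mdev_dev(mdev), &gfn, 1,
 *			     IOMMU_READ | IOMMU_WRITE, &pfn);
 *	if (ret != 1)
 *		return ret < 0 ? ret : -EFAULT;
 *	...program hardware with pfn...
 *	vfio_unpin_pages(mdev_dev(mdev), &gfn, 1);
 */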

/*
 * Unpin set of host PFNs for local domain only.
 * @dev [in]	 : device
 * @user_pfn [in]: array of user/guest PFNs to be unpinned.  Number of user/guest
 *		   PFNs should not be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
 * @npage [in]	 : count of elements in user_pfn array.  This count should not
 *		   be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
 * Return error or number of pages unpinned.
 */
int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage)
{
	struct vfio_container *container;
	struct vfio_group *group;
	struct vfio_iommu_driver *driver;
	int ret;

	if (!dev || !user_pfn || !npage)
		return -EINVAL;

	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
		return -E2BIG;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return -ENODEV;

	ret = vfio_group_add_container_user(group);
	if (ret)
		goto err_unpin_pages;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->unpin_pages))
		ret = driver->ops->unpin_pages(container->iommu_data, user_pfn,
					       npage);
	else
		ret = -ENOTTY;

	vfio_group_try_dissolve_container(group);

err_unpin_pages:
	vfio_group_put(group);
	return ret;
}
EXPORT_SYMBOL(vfio_unpin_pages);

/*
 * Pin a set of guest IOVA PFNs and return their associated host PFNs for a
 * VFIO group.
 *
 * The caller needs to call vfio_group_get_external_user() or
 * vfio_group_get_external_user_from_dev() prior to calling this interface,
 * so as to prevent the VFIO group from disposal in the middle of the call.
 * But it can keep the reference to the VFIO group for several calls into
 * this interface.
 * After finishing using of the VFIO group, the caller needs to release the
 * VFIO group by calling vfio_group_put_external_user().
 *
 * @group [in]		: VFIO group
 * @user_iova_pfn [in]	: array of user/guest IOVA PFNs to be pinned.
 * @npage [in]		: count of elements in user_iova_pfn array.
 *			  This count should not be greater than
 *			  VFIO_PIN_PAGES_MAX_ENTRIES.
 * @prot [in]		: protection flags
 * @phys_pfn [out]	: array of host PFNs
 * Return error or number of pages pinned.
 */
int vfio_group_pin_pages(struct vfio_group *group,
			 unsigned long *user_iova_pfn, int npage,
			 int prot, unsigned long *phys_pfn)
{
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret;

	if (!group || !user_iova_pfn || !phys_pfn || !npage)
		return -EINVAL;

	if (group->dev_counter > 1)
		return -EINVAL;

	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
		return -E2BIG;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->pin_pages))
		ret = driver->ops->pin_pages(container->iommu_data,
					     group->iommu_group, user_iova_pfn,
					     npage, prot, phys_pfn);
	else
		ret = -ENOTTY;

	return ret;
}
EXPORT_SYMBOL(vfio_group_pin_pages);

/*
 * Unpin a set of guest IOVA PFNs for a VFIO group.
 *
 * The caller needs to call vfio_group_get_external_user() or
 * vfio_group_get_external_user_from_dev() prior to calling this interface,
 * so as to prevent the VFIO group from disposal in the middle of the call.
 * But it can keep the reference to the VFIO group for several calls into
 * this interface.
 * After finishing using of the VFIO group, the caller needs to release the
 * VFIO group by calling vfio_group_put_external_user().
 *
 * @group [in]		: vfio group
 * @user_iova_pfn [in]	: array of user/guest IOVA PFNs to be unpinned.
 * @npage [in]		: count of elements in user_iova_pfn array.
 *			  This count should not be greater than
 *			  VFIO_PIN_PAGES_MAX_ENTRIES.
 * Return error or number of pages unpinned.
 */
int vfio_group_unpin_pages(struct vfio_group *group,
			   unsigned long *user_iova_pfn, int npage)
{
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret;

	if (!group || !user_iova_pfn || !npage)
		return -EINVAL;

	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
		return -E2BIG;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->unpin_pages))
		ret = driver->ops->unpin_pages(container->iommu_data,
					       user_iova_pfn, npage);
	else
		ret = -ENOTTY;

	return ret;
}
EXPORT_SYMBOL(vfio_group_unpin_pages);

/*
 * This interface allows the CPUs to perform some sort of virtual DMA on
 * behalf of the device.
 *
 * CPUs read/write from/into a range of IOVAs pointing to user space memory
 * into/from a kernel buffer.
 *
 * As the read/write of user space memory is conducted via the CPUs and is
 * not a real device DMA, it is not necessary to pin the user space memory.
 *
 * The caller needs to call vfio_group_get_external_user() or
 * vfio_group_get_external_user_from_dev() prior to calling this interface,
 * so as to prevent the VFIO group from disposal in the middle of the call.
 * But it can keep the reference to the VFIO group for several calls into
 * this interface.
 * After finishing using of the VFIO group, the caller needs to release the
 * VFIO group by calling vfio_group_put_external_user().
 *
 * @group [in]		: VFIO group
 * @user_iova [in]	: base IOVA of a user space buffer
 * @data [in]		: pointer to kernel buffer
 * @len [in]		: kernel buffer length
 * @write		: indicate read or write
 * Return error code on failure or 0 on success.
 */
int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova,
		void *data, size_t len, bool write)
{
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret = 0;

	if (!group || !data || len <= 0)
		return -EINVAL;

	container = group->container;
	driver = container->iommu_driver;

	if (likely(driver && driver->ops->dma_rw))
		ret = driver->ops->dma_rw(container->iommu_data,
					  user_iova, data, len, write);
	else
		ret = -ENOTTY;

	return ret;
}
EXPORT_SYMBOL(vfio_dma_rw);
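
/*
 * Illustrative sketch, not part of this file: reading a hypothetical
 * guest descriptor through the container mappings; the caller already
 * holds an external-user group reference.
 *
 *	struct foo_desc desc;
 *	int ret;
 *
 *	ret = vfio_dma_rw(group, desc_iova, &desc, sizeof(desc), false);
 *	if (ret)
 *		return ret;
 */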

static int vfio_register_iommu_notifier(struct vfio_group *group,
					unsigned long *events,
					struct notifier_block *nb)
{
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret;

	ret = vfio_group_add_container_user(group);
	if (ret)
		return -EINVAL;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->register_notifier))
		ret = driver->ops->register_notifier(container->iommu_data,
						     events, nb);
	else
		ret = -ENOTTY;

	vfio_group_try_dissolve_container(group);

	return ret;
}

static int vfio_unregister_iommu_notifier(struct vfio_group *group,
					  struct notifier_block *nb)
{
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret;

	ret = vfio_group_add_container_user(group);
	if (ret)
		return -EINVAL;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->unregister_notifier))
		ret = driver->ops->unregister_notifier(container->iommu_data,
						       nb);
	else
		ret = -ENOTTY;

	vfio_group_try_dissolve_container(group);

	return ret;
}

void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
{
	group->kvm = kvm;
	blocking_notifier_call_chain(&group->notifier,
				VFIO_GROUP_NOTIFY_SET_KVM, kvm);
}
EXPORT_SYMBOL_GPL(vfio_group_set_kvm);

static int vfio_register_group_notifier(struct vfio_group *group,
					unsigned long *events,
					struct notifier_block *nb)
{
	int ret;
	bool set_kvm = false;

	if (*events & VFIO_GROUP_NOTIFY_SET_KVM)
		set_kvm = true;

	/* clear known events */
	*events &= ~VFIO_GROUP_NOTIFY_SET_KVM;

	/* refuse to continue if still events remaining */
	if (*events)
		return -EINVAL;

	ret = vfio_group_add_container_user(group);
	if (ret)
		return -EINVAL;

	ret = blocking_notifier_chain_register(&group->notifier, nb);

	/*
	 * The attaching of kvm and vfio_group might already happen, so
	 * here we replay once upon registration.
	 */
	if (!ret && set_kvm && group->kvm)
		blocking_notifier_call_chain(&group->notifier,
					VFIO_GROUP_NOTIFY_SET_KVM, group->kvm);

	vfio_group_try_dissolve_container(group);

	return ret;
}

static int vfio_unregister_group_notifier(struct vfio_group *group,
					  struct notifier_block *nb)
{
	int ret;

	ret = vfio_group_add_container_user(group);
	if (ret)
		return -EINVAL;

	ret = blocking_notifier_chain_unregister(&group->notifier, nb);

	vfio_group_try_dissolve_container(group);

	return ret;
}

int vfio_register_notifier(struct device *dev, enum vfio_notify_type type,
			   unsigned long *events, struct notifier_block *nb)
{
	struct vfio_group *group;
	int ret;

	if (!dev || !nb || !events || (*events == 0))
		return -EINVAL;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return -ENODEV;

	switch (type) {
	case VFIO_IOMMU_NOTIFY:
		ret = vfio_register_iommu_notifier(group, events, nb);
		break;
	case VFIO_GROUP_NOTIFY:
		ret = vfio_register_group_notifier(group, events, nb);
		break;
	default:
		ret = -EINVAL;
	}

	vfio_group_put(group);
	return ret;
}
EXPORT_SYMBOL(vfio_register_notifier);
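
/*
 * Illustrative sketch, not part of this file: subscribing to DMA unmap
 * events, as a vfio sub-driver holding pinned pages might do.  The
 * "foo" names are hypothetical.
 *
 *	static int foo_iommu_notifier(struct notifier_block *nb,
 *				      unsigned long action, void *data)
 *	{
 *		if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
 *			struct vfio_iommu_type1_dma_unmap *unmap = data;
 *			...invalidate anything pinned in [iova, iova + size)...
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
 *
 *	foo->nb.notifier_call = foo_iommu_notifier;
 *	ret = vfio_register_notifier(dev, VFIO_IOMMU_NOTIFY, &events,
 *				     &foo->nb);
 */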

int vfio_unregister_notifier(struct device *dev, enum vfio_notify_type type,
			     struct notifier_block *nb)
{
	struct vfio_group *group;
	int ret;

	if (!dev || !nb)
		return -EINVAL;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return -ENODEV;

	switch (type) {
	case VFIO_IOMMU_NOTIFY:
		ret = vfio_unregister_iommu_notifier(group, nb);
		break;
	case VFIO_GROUP_NOTIFY:
		ret = vfio_unregister_group_notifier(group, nb);
		break;
	default:
		ret = -EINVAL;
	}

	vfio_group_put(group);
	return ret;
}
EXPORT_SYMBOL(vfio_unregister_notifier);

struct iommu_domain *vfio_group_iommu_domain(struct vfio_group *group)
{
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;

	if (!group)
		return ERR_PTR(-EINVAL);

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->group_iommu_domain))
		return driver->ops->group_iommu_domain(container->iommu_data,
						       group->iommu_group);

	return ERR_PTR(-ENOTTY);
}
EXPORT_SYMBOL_GPL(vfio_group_iommu_domain);
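
/*
 * Illustrative sketch, not part of this file: a caller that needs the
 * backing iommu_domain, e.g. to query domain attributes.
 *
 *	struct iommu_domain *domain = vfio_group_iommu_domain(group);
 *
 *	if (IS_ERR(domain))
 *		return PTR_ERR(domain);
 */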

/**
 * Module/class support
 */
static char *vfio_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
}

static struct miscdevice vfio_dev = {
	.minor = VFIO_MINOR,
	.name = "vfio",
	.fops = &vfio_fops,
	.nodename = "vfio/vfio",
	.mode = S_IRUGO | S_IWUGO,
};

static int __init vfio_init(void)
{
	int ret;

	idr_init(&vfio.group_idr);
	mutex_init(&vfio.group_lock);
	mutex_init(&vfio.iommu_drivers_lock);
	INIT_LIST_HEAD(&vfio.group_list);
	INIT_LIST_HEAD(&vfio.iommu_drivers_list);
	init_waitqueue_head(&vfio.release_q);

	ret = misc_register(&vfio_dev);
	if (ret) {
		pr_err("vfio: misc device register failed\n");
		return ret;
	}

	/* /dev/vfio/$GROUP */
	vfio.class = class_create(THIS_MODULE, "vfio");
	if (IS_ERR(vfio.class)) {
		ret = PTR_ERR(vfio.class);
		goto err_class;
	}

	vfio.class->devnode = vfio_devnode;

	ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK + 1, "vfio");
	if (ret)
		goto err_alloc_chrdev;

	cdev_init(&vfio.group_cdev, &vfio_group_fops);
	ret = cdev_add(&vfio.group_cdev, vfio.group_devt, MINORMASK + 1);
	if (ret)
		goto err_cdev_add;

	pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");

#ifdef CONFIG_VFIO_NOIOMMU
	vfio_register_iommu_driver(&vfio_noiommu_ops);
#endif
	return 0;

err_cdev_add:
	unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
err_alloc_chrdev:
	class_destroy(vfio.class);
	vfio.class = NULL;
err_class:
	misc_deregister(&vfio_dev);
	return ret;
}
vfio_cleanup(void)
2427 WARN_ON(!list_empty(&vfio
.group_list
));
2429 #ifdef CONFIG_VFIO_NOIOMMU
2430 vfio_unregister_iommu_driver(&vfio_noiommu_ops
);
2432 idr_destroy(&vfio
.group_idr
);
2433 cdev_del(&vfio
.group_cdev
);
2434 unregister_chrdev_region(vfio
.group_devt
, MINORMASK
+ 1);
2435 class_destroy(vfio
.class);
2437 misc_deregister(&vfio_dev
);
2440 module_init(vfio_init
);
2441 module_exit(vfio_cleanup
);
2443 MODULE_VERSION(DRIVER_VERSION
);
2444 MODULE_LICENSE("GPL v2");
2445 MODULE_AUTHOR(DRIVER_AUTHOR
);
2446 MODULE_DESCRIPTION(DRIVER_DESC
);
2447 MODULE_ALIAS_MISCDEV(VFIO_MINOR
);
2448 MODULE_ALIAS("devname:vfio/vfio");
2449 MODULE_SOFTDEP("post: vfio_iommu_type1 vfio_iommu_spapr_tce");