1 /* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */
12 #include <linux/iommu.h>
14 #include <linux/workqueue.h>
15 #include <linux/poll.h>
16 #include <uapi/linux/vfio.h>
20 const struct vfio_device_ops
*ops
;
21 struct vfio_group
*group
;
23 /* Members below here are private, not for driver use */
25 struct completion comp
;
26 struct list_head group_next
;
/**
 * struct vfio_device_ops - VFIO bus driver device callbacks
 *
 * @open: Called when userspace creates new file descriptor for device
 * @release: Called when userspace releases file descriptor for device
 * @read: Perform read(2) on device file descriptor
 * @write: Perform write(2) on device file descriptor
 * @ioctl: Perform ioctl(2) on device file descriptor, supporting VFIO_DEVICE_*
 *         operations documented below
 * @mmap: Perform mmap(2) on a region of the device file descriptor
 * @request: Request for the bus driver to release the device
 * @match: Optional device name match callback (return: 0 for no-match, >0 for
 *         match, -errno for abort (ex. match with insufficient or incorrect
 *         additional args)
 */
44 struct vfio_device_ops
{
46 int (*open
)(struct vfio_device
*vdev
);
47 void (*release
)(struct vfio_device
*vdev
);
48 ssize_t (*read
)(struct vfio_device
*vdev
, char __user
*buf
,
49 size_t count
, loff_t
*ppos
);
50 ssize_t (*write
)(struct vfio_device
*vdev
, const char __user
*buf
,
51 size_t count
, loff_t
*size
);
52 long (*ioctl
)(struct vfio_device
*vdev
, unsigned int cmd
,
54 int (*mmap
)(struct vfio_device
*vdev
, struct vm_area_struct
*vma
);
55 void (*request
)(struct vfio_device
*vdev
, unsigned int count
);
56 int (*match
)(struct vfio_device
*vdev
, char *buf
);
/* IOMMU group lookup and release for a struct device. */
extern struct iommu_group *vfio_iommu_group_get(struct device *dev);
extern void vfio_iommu_group_put(struct iommu_group *group, struct device *dev);

/*
 * Core device registration: initialize a vfio_device with its device and
 * ops table, then register/unregister it with the VFIO core.
 */
void vfio_init_group_dev(struct vfio_device *device, struct device *dev,
			 const struct vfio_device_ops *ops);
int vfio_register_group_dev(struct vfio_device *device);
void vfio_unregister_group_dev(struct vfio_device *device);

/* Look up the vfio_device for a struct device; release with vfio_device_put(). */
extern struct vfio_device *vfio_device_get_from_dev(struct device *dev);
extern void vfio_device_put(struct vfio_device *device);
/* events for the backend driver notify callback */
enum vfio_iommu_notify_type {
	/* presumably raised when the container is torn down -- verify at call sites */
	VFIO_IOMMU_CONTAINER_CLOSE = 0,
};	/* NOTE(review): closing "};" was lost in this copy; restored */
/*
 * struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks
 */
77 struct vfio_iommu_driver_ops
{
80 void *(*open
)(unsigned long arg
);
81 void (*release
)(void *iommu_data
);
82 ssize_t (*read
)(void *iommu_data
, char __user
*buf
,
83 size_t count
, loff_t
*ppos
);
84 ssize_t (*write
)(void *iommu_data
, const char __user
*buf
,
85 size_t count
, loff_t
*size
);
86 long (*ioctl
)(void *iommu_data
, unsigned int cmd
,
88 int (*mmap
)(void *iommu_data
, struct vm_area_struct
*vma
);
89 int (*attach_group
)(void *iommu_data
,
90 struct iommu_group
*group
);
91 void (*detach_group
)(void *iommu_data
,
92 struct iommu_group
*group
);
93 int (*pin_pages
)(void *iommu_data
,
94 struct iommu_group
*group
,
95 unsigned long *user_pfn
,
97 unsigned long *phys_pfn
);
98 int (*unpin_pages
)(void *iommu_data
,
99 unsigned long *user_pfn
, int npage
);
100 int (*register_notifier
)(void *iommu_data
,
101 unsigned long *events
,
102 struct notifier_block
*nb
);
103 int (*unregister_notifier
)(void *iommu_data
,
104 struct notifier_block
*nb
);
105 int (*dma_rw
)(void *iommu_data
, dma_addr_t user_iova
,
106 void *data
, size_t count
, bool write
);
107 struct iommu_domain
*(*group_iommu_domain
)(void *iommu_data
,
108 struct iommu_group
*group
);
109 void (*notify
)(void *iommu_data
,
110 enum vfio_iommu_notify_type event
);
/* Register/unregister an IOMMU backend driver with the VFIO core. */
extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
extern void vfio_unregister_iommu_driver(
				const struct vfio_iommu_driver_ops *ops);
121 extern struct vfio_group
*vfio_group_get_external_user(struct file
*filep
);
122 extern void vfio_group_put_external_user(struct vfio_group
*group
);
123 extern struct vfio_group
*vfio_group_get_external_user_from_dev(struct device
125 extern bool vfio_external_group_match_file(struct vfio_group
*group
,
127 extern int vfio_external_user_iommu_id(struct vfio_group
*group
);
128 extern long vfio_external_check_extension(struct vfio_group
*group
,
131 #define VFIO_PIN_PAGES_MAX_ENTRIES (PAGE_SIZE/sizeof(unsigned long))
133 extern int vfio_pin_pages(struct device
*dev
, unsigned long *user_pfn
,
134 int npage
, int prot
, unsigned long *phys_pfn
);
135 extern int vfio_unpin_pages(struct device
*dev
, unsigned long *user_pfn
,
138 extern int vfio_group_pin_pages(struct vfio_group
*group
,
139 unsigned long *user_iova_pfn
, int npage
,
140 int prot
, unsigned long *phys_pfn
);
141 extern int vfio_group_unpin_pages(struct vfio_group
*group
,
142 unsigned long *user_iova_pfn
, int npage
);
144 extern int vfio_dma_rw(struct vfio_group
*group
, dma_addr_t user_iova
,
145 void *data
, size_t len
, bool write
);
147 extern struct iommu_domain
*vfio_group_iommu_domain(struct vfio_group
*group
);
/* each type has independent events */
enum vfio_notify_type {
	VFIO_IOMMU_NOTIFY = 0,
	VFIO_GROUP_NOTIFY = 1,
};	/* NOTE(review): closing "};" was lost in this copy; restored */
/* events for VFIO_IOMMU_NOTIFY */
#define VFIO_IOMMU_NOTIFY_DMA_UNMAP	BIT(0)

/* events for VFIO_GROUP_NOTIFY */
#define VFIO_GROUP_NOTIFY_SET_KVM	BIT(0)

/*
 * Register/unregister a notifier block of the given type; the caller
 * supplies the events it needs in *required_events.
 */
extern int vfio_register_notifier(struct device *dev,
				  enum vfio_notify_type type,
				  unsigned long *required_events,
				  struct notifier_block *nb);
extern int vfio_unregister_notifier(struct device *dev,
				    enum vfio_notify_type type,
				    struct notifier_block *nb);

/*
 * Attach a KVM instance to the group -- presumably delivered to listeners
 * via VFIO_GROUP_NOTIFY_SET_KVM; verify against vfio.c.
 */
extern void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm);
175 struct vfio_info_cap
{
176 struct vfio_info_cap_header
*buf
;
179 extern struct vfio_info_cap_header
*vfio_info_cap_add(
180 struct vfio_info_cap
*caps
, size_t size
, u16 id
, u16 version
);
181 extern void vfio_info_cap_shift(struct vfio_info_cap
*caps
, size_t offset
);
183 extern int vfio_info_add_capability(struct vfio_info_cap
*caps
,
184 struct vfio_info_cap_header
*cap
,
187 extern int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set
*hdr
,
188 int num_irqs
, int max_irq_type
,
192 #if IS_ENABLED(CONFIG_VFIO_SPAPR_EEH)
193 extern void vfio_spapr_pci_eeh_open(struct pci_dev
*pdev
);
194 extern void vfio_spapr_pci_eeh_release(struct pci_dev
*pdev
);
195 extern long vfio_spapr_iommu_eeh_ioctl(struct iommu_group
*group
,
199 static inline void vfio_spapr_pci_eeh_open(struct pci_dev
*pdev
)
203 static inline void vfio_spapr_pci_eeh_release(struct pci_dev
*pdev
)
207 static inline long vfio_spapr_iommu_eeh_ioctl(struct iommu_group
*group
,
213 #endif /* CONFIG_VFIO_SPAPR_EEH */
220 struct eventfd_ctx
*eventfd
;
221 int (*handler
)(void *, void *);
222 void (*thread
)(void *, void *);
224 struct work_struct inject
;
225 wait_queue_entry_t wait
;
227 struct work_struct shutdown
;
228 struct virqfd
**pvirqfd
;
/*
 * Attach an eventfd (fd) so that its signals invoke @handler and,
 * optionally, @thread; *pvirqfd tracks the created instance and is what
 * vfio_virqfd_disable() tears down.
 */
extern int vfio_virqfd_enable(void *opaque,
			      int (*handler)(void *, void *),
			      void (*thread)(void *, void *),
			      void *data, struct virqfd **pvirqfd, int fd);
extern void vfio_virqfd_disable(struct virqfd **pvirqfd);