1 /* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VDPA_H
#define _LINUX_VDPA_H

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/vhost_iotlb.h>
11 * struct vdpa_calllback - vDPA callback definition.
12 * @callback: interrupt callback function
13 * @private: the data passed to the callback function
15 struct vdpa_callback
{
16 irqreturn_t (*callback
)(void *data
);
21 * struct vdpa_notification_area - vDPA notification area
22 * @addr: base address of the notification area
23 * @size: size of the notification area
25 struct vdpa_notification_area
{
31 * struct vdpa_vq_state_split - vDPA split virtqueue state
32 * @avail_index: available index
34 struct vdpa_vq_state_split
{
39 * struct vdpa_vq_state_packed - vDPA packed virtqueue state
40 * @last_avail_counter: last driver ring wrap counter observed by device
41 * @last_avail_idx: device available index
42 * @last_used_counter: device ring wrap counter
43 * @last_used_idx: used index
45 struct vdpa_vq_state_packed
{
46 u16 last_avail_counter
:1;
47 u16 last_avail_idx
:15;
48 u16 last_used_counter
:1;
52 struct vdpa_vq_state
{
54 struct vdpa_vq_state_split split
;
55 struct vdpa_vq_state_packed packed
;
62 * struct vdpa_device - representation of a vDPA device
63 * @dev: underlying device
64 * @dma_dev: the actual device that is performing DMA
65 * @config: the configuration ops for this device.
66 * @index: device index
67 * @features_valid: were features initialized? for legacy guests
68 * @nvqs: maximum number of supported virtqueues
69 * @mdev: management device pointer; caller must setup when registering device as part
70 * of dev_add() mgmtdev ops callback before invoking _vdpa_register_device().
74 struct device
*dma_dev
;
75 const struct vdpa_config_ops
*config
;
79 struct vdpa_mgmt_dev
*mdev
;
83 * struct vdpa_iova_range - the IOVA range support by the device
84 * @first: start of the IOVA range
85 * @last: end of the IOVA range
87 struct vdpa_iova_range
{
93 * struct vdpa_config_ops - operations for configuring a vDPA device.
94 * Note: vDPA device drivers are required to implement all of the
95 * operations unless it is mentioned to be optional in the following
98 * @set_vq_address: Set the address of virtqueue
100 * @idx: virtqueue index
101 * @desc_area: address of desc area
102 * @driver_area: address of driver area
103 * @device_area: address of device area
104 * Returns integer: success (0) or error (< 0)
105 * @set_vq_num: Set the size of virtqueue
107 * @idx: virtqueue index
108 * @num: the size of virtqueue
109 * @kick_vq: Kick the virtqueue
111 * @idx: virtqueue index
112 * @set_vq_cb: Set the interrupt callback function for
115 * @idx: virtqueue index
116 * @cb: virtio-vdev interrupt callback structure
117 * @set_vq_ready: Set ready status for a virtqueue
119 * @idx: virtqueue index
120 * @ready: ready (true) not ready(false)
121 * @get_vq_ready: Get ready status for a virtqueue
123 * @idx: virtqueue index
124 * Returns boolean: ready (true) or not (false)
125 * @set_vq_state: Set the state for a virtqueue
127 * @idx: virtqueue index
128 * @state: pointer to set virtqueue state (last_avail_idx)
129 * Returns integer: success (0) or error (< 0)
130 * @get_vq_state: Get the state for a virtqueue
132 * @idx: virtqueue index
133 * @state: pointer to returned state (last_avail_idx)
134 * @get_vq_notification: Get the notification area for a virtqueue
136 * @idx: virtqueue index
137 * Returns the notifcation area
138 * @get_vq_irq: Get the irq number of a virtqueue (optional,
139 * but must implemented if require vq irq offloading)
141 * @idx: virtqueue index
142 * Returns int: irq number of a virtqueue,
143 * negative number if no irq assigned.
144 * @get_vq_align: Get the virtqueue align requirement
147 * Returns virtqueue algin requirement
148 * @get_features: Get virtio features supported by the device
150 * Returns the virtio features support by the
152 * @set_features: Set virtio features supported by the driver
154 * @features: feature support by the driver
155 * Returns integer: success (0) or error (< 0)
156 * @set_config_cb: Set the config interrupt callback
158 * @cb: virtio-vdev interrupt callback structure
159 * @get_vq_num_max: Get the max size of virtqueue
161 * Returns u16: max size of virtqueue
162 * @get_device_id: Get virtio device id
164 * Returns u32: virtio device id
165 * @get_vendor_id: Get id for the vendor that provides this device
167 * Returns u32: virtio vendor id
168 * @get_status: Get the device status
170 * Returns u8: virtio device status
171 * @set_status: Set the device status
173 * @status: virtio device status
174 * @get_config_size: Get the size of the configuration space
176 * Returns size_t: configuration size
177 * @get_config: Read from device specific configuration space
179 * @offset: offset from the beginning of
180 * configuration space
181 * @buf: buffer used to read to
182 * @len: the length to read from
183 * configuration space
184 * @set_config: Write to device specific configuration space
186 * @offset: offset from the beginning of
187 * configuration space
188 * @buf: buffer used to write from
189 * @len: the length to write to
190 * configuration space
191 * @get_generation: Get device config generation (optional)
193 * Returns u32: device generation
194 * @get_iova_range: Get supported iova range (optional)
196 * Returns the iova range supported by
198 * @set_map: Set device memory mapping (optional)
199 * Needed for device that using device
200 * specific DMA translation (on-chip IOMMU)
202 * @iotlb: vhost memory mapping to be
204 * Returns integer: success (0) or error (< 0)
205 * @dma_map: Map an area of PA to IOVA (optional)
206 * Needed for device that using device
207 * specific DMA translation (on-chip IOMMU)
208 * and preferring incremental map.
210 * @iova: iova to be mapped
211 * @size: size of the area
212 * @pa: physical address for the map
213 * @perm: device access permission (VHOST_MAP_XX)
214 * Returns integer: success (0) or error (< 0)
215 * @dma_unmap: Unmap an area of IOVA (optional but
216 * must be implemented with dma_map)
217 * Needed for device that using device
218 * specific DMA translation (on-chip IOMMU)
219 * and preferring incremental unmap.
221 * @iova: iova to be unmapped
222 * @size: size of the area
223 * Returns integer: success (0) or error (< 0)
224 * @free: Free resources that belongs to vDPA (optional)
227 struct vdpa_config_ops
{
229 int (*set_vq_address
)(struct vdpa_device
*vdev
,
230 u16 idx
, u64 desc_area
, u64 driver_area
,
232 void (*set_vq_num
)(struct vdpa_device
*vdev
, u16 idx
, u32 num
);
233 void (*kick_vq
)(struct vdpa_device
*vdev
, u16 idx
);
234 void (*set_vq_cb
)(struct vdpa_device
*vdev
, u16 idx
,
235 struct vdpa_callback
*cb
);
236 void (*set_vq_ready
)(struct vdpa_device
*vdev
, u16 idx
, bool ready
);
237 bool (*get_vq_ready
)(struct vdpa_device
*vdev
, u16 idx
);
238 int (*set_vq_state
)(struct vdpa_device
*vdev
, u16 idx
,
239 const struct vdpa_vq_state
*state
);
240 int (*get_vq_state
)(struct vdpa_device
*vdev
, u16 idx
,
241 struct vdpa_vq_state
*state
);
242 struct vdpa_notification_area
243 (*get_vq_notification
)(struct vdpa_device
*vdev
, u16 idx
);
244 /* vq irq is not expected to be changed once DRIVER_OK is set */
245 int (*get_vq_irq
)(struct vdpa_device
*vdv
, u16 idx
);
248 u32 (*get_vq_align
)(struct vdpa_device
*vdev
);
249 u64 (*get_features
)(struct vdpa_device
*vdev
);
250 int (*set_features
)(struct vdpa_device
*vdev
, u64 features
);
251 void (*set_config_cb
)(struct vdpa_device
*vdev
,
252 struct vdpa_callback
*cb
);
253 u16 (*get_vq_num_max
)(struct vdpa_device
*vdev
);
254 u32 (*get_device_id
)(struct vdpa_device
*vdev
);
255 u32 (*get_vendor_id
)(struct vdpa_device
*vdev
);
256 u8 (*get_status
)(struct vdpa_device
*vdev
);
257 void (*set_status
)(struct vdpa_device
*vdev
, u8 status
);
258 size_t (*get_config_size
)(struct vdpa_device
*vdev
);
259 void (*get_config
)(struct vdpa_device
*vdev
, unsigned int offset
,
260 void *buf
, unsigned int len
);
261 void (*set_config
)(struct vdpa_device
*vdev
, unsigned int offset
,
262 const void *buf
, unsigned int len
);
263 u32 (*get_generation
)(struct vdpa_device
*vdev
);
264 struct vdpa_iova_range (*get_iova_range
)(struct vdpa_device
*vdev
);
267 int (*set_map
)(struct vdpa_device
*vdev
, struct vhost_iotlb
*iotlb
);
268 int (*dma_map
)(struct vdpa_device
*vdev
, u64 iova
, u64 size
,
270 int (*dma_unmap
)(struct vdpa_device
*vdev
, u64 iova
, u64 size
);
272 /* Free device resources */
273 void (*free
)(struct vdpa_device
*vdev
);
276 struct vdpa_device
*__vdpa_alloc_device(struct device
*parent
,
277 const struct vdpa_config_ops
*config
,
278 size_t size
, const char *name
);
/**
 * vdpa_alloc_device - allocate and initialize a vDPA device
 *
 * @dev_struct: the type of the parent structure
 * @member: the name of struct vdpa_device within the @dev_struct
 * @parent: the parent device
 * @config: the bus operations that is supported by this device
 * @name: name of the vdpa device
 *
 * Return allocated data structure or ERR_PTR upon error
 */
#define vdpa_alloc_device(dev_struct, member, parent, config, name)   \
			  container_of(__vdpa_alloc_device( \
				       parent, config, \
				       sizeof(dev_struct) + \
				       BUILD_BUG_ON_ZERO(offsetof( \
				       dev_struct, member)), name), \
				       dev_struct, member)
/* Register/unregister a vdpa device on the vdpa bus. */
int vdpa_register_device(struct vdpa_device *vdev, int nvqs);
void vdpa_unregister_device(struct vdpa_device *vdev);

/*
 * Underscore-prefixed variants: for use from vdpa_mgmtdev_ops dev_add()/
 * dev_del() callbacks (see struct vdpa_mgmtdev_ops below).
 */
int _vdpa_register_device(struct vdpa_device *vdev, int nvqs);
void _vdpa_unregister_device(struct vdpa_device *vdev);
306 * struct vdpa_driver - operations for a vDPA driver
307 * @driver: underlying device driver
308 * @probe: the function to call when a device is found. Returns 0 or -errno.
309 * @remove: the function to call when a device is removed.
312 struct device_driver driver
;
313 int (*probe
)(struct vdpa_device
*vdev
);
314 void (*remove
)(struct vdpa_device
*vdev
);
#define vdpa_register_driver(drv) \
	__vdpa_register_driver(drv, THIS_MODULE)
int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner);
void vdpa_unregister_driver(struct vdpa_driver *drv);

/* Boilerplate: module init/exit that just (un)registers a vdpa driver. */
#define module_vdpa_driver(__vdpa_driver) \
	module_driver(__vdpa_driver, vdpa_register_driver,	\
		      vdpa_unregister_driver)
326 static inline struct vdpa_driver
*drv_to_vdpa(struct device_driver
*driver
)
328 return container_of(driver
, struct vdpa_driver
, driver
);
331 static inline struct vdpa_device
*dev_to_vdpa(struct device
*_dev
)
333 return container_of(_dev
, struct vdpa_device
, dev
);
336 static inline void *vdpa_get_drvdata(const struct vdpa_device
*vdev
)
338 return dev_get_drvdata(&vdev
->dev
);
341 static inline void vdpa_set_drvdata(struct vdpa_device
*vdev
, void *data
)
343 dev_set_drvdata(&vdev
->dev
, data
);
346 static inline struct device
*vdpa_get_dma_dev(struct vdpa_device
*vdev
)
348 return vdev
->dma_dev
;
351 static inline void vdpa_reset(struct vdpa_device
*vdev
)
353 const struct vdpa_config_ops
*ops
= vdev
->config
;
355 vdev
->features_valid
= false;
356 ops
->set_status(vdev
, 0);
359 static inline int vdpa_set_features(struct vdpa_device
*vdev
, u64 features
)
361 const struct vdpa_config_ops
*ops
= vdev
->config
;
363 vdev
->features_valid
= true;
364 return ops
->set_features(vdev
, features
);
368 static inline void vdpa_get_config(struct vdpa_device
*vdev
, unsigned offset
,
369 void *buf
, unsigned int len
)
371 const struct vdpa_config_ops
*ops
= vdev
->config
;
374 * Config accesses aren't supposed to trigger before features are set.
375 * If it does happen we assume a legacy guest.
377 if (!vdev
->features_valid
)
378 vdpa_set_features(vdev
, 0);
379 ops
->get_config(vdev
, offset
, buf
, len
);
/**
 * struct vdpa_mgmtdev_ops - vdpa device ops
 * @dev_add: Add a vdpa device using alloc and register
 *	     @mdev: parent device to use for device addition
 *	     @name: name of the new vdpa device
 *	     Drivers need to add a new device using _vdpa_register_device()
 *	     after fully initializing the vdpa device. Drivers must return 0
 *	     on success or appropriate error code.
 * @dev_del: Remove a vdpa device using unregister
 *	     @mdev: parent device to use for device removal
 *	     @dev: vdpa device to remove
 *	     Drivers need to remove the specified device by calling
 *	     _vdpa_unregister_device().
 */
struct vdpa_mgmtdev_ops {
	int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name);
	void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev);
};
401 struct vdpa_mgmt_dev
{
402 struct device
*device
;
403 const struct vdpa_mgmtdev_ops
*ops
;
404 const struct virtio_device_id
*id_table
; /* supported ids */
405 struct list_head list
;
/* Register/unregister a management device with the vdpa core. */
int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev);
void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev);
411 #endif /* _LINUX_VDPA_H */