/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VDPA_H
#define _LINUX_VDPA_H

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/vhost_iotlb.h>

/**
 * struct vdpa_callback - vDPA callback definition.
 * @callback: interrupt callback function
 * @private: the data passed to the callback function
 */
struct vdpa_callback {
	irqreturn_t (*callback)(void *data);
	void *private;
};

/**
 * struct vdpa_notification_area - vDPA notification area
 * @addr: base address of the notification area
 * @size: size of the notification area
 */
struct vdpa_notification_area {
	resource_size_t addr;
	resource_size_t size;
};

/**
 * struct vdpa_vq_state_split - vDPA split virtqueue state
 * @avail_index: available index
 */
struct vdpa_vq_state_split {
	u16 avail_index;
};

/**
 * struct vdpa_vq_state_packed - vDPA packed virtqueue state
 * @last_avail_counter: last driver ring wrap counter observed by device
 * @last_avail_idx: device available index
 * @last_used_counter: device ring wrap counter
 * @last_used_idx: used index
 */
struct vdpa_vq_state_packed {
	u16 last_avail_counter:1;
	u16 last_avail_idx:15;
	u16 last_used_counter:1;
	u16 last_used_idx:15;
};

struct vdpa_vq_state {
	union {
		struct vdpa_vq_state_split split;
		struct vdpa_vq_state_packed packed;
	};
};

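/*
 * Illustrative sketch only (not part of the API contract): a parent driver
 * saving virtqueue state fills either the split or the packed member,
 * depending on the negotiated ring layout. "last_avail" below stands for a
 * hypothetical 16-bit value tracked by the driver, with the wrap counter in
 * bit 15 for packed rings.
 *
 *	struct vdpa_vq_state state = { 0 };
 *
 *	if (packed_ring) {
 *		state.packed.last_avail_idx = last_avail & 0x7fff;
 *		state.packed.last_avail_counter = !!(last_avail & 0x8000);
 *	} else {
 *		state.split.avail_index = last_avail;
 *	}
 */
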
struct vdpa_mgmt_dev;

/**
 * struct vdpa_device - representation of a vDPA device
 * @dev: underlying device
 * @dma_dev: the actual device that is performing DMA
 * @config: the configuration ops for this device.
 * @index: device index
 * @features_valid: were the features initialized? used for legacy guests
 * @use_va: indicate whether virtual addresses must be used by this device
 * @nvqs: maximum number of supported virtqueues
 * @mdev: management device pointer; the caller must set it up when registering
 *	  the device as part of the dev_add() mgmtdev ops callback, before
 *	  invoking _vdpa_register_device().
 */
struct vdpa_device {
	struct device dev;
	struct device *dma_dev;
	const struct vdpa_config_ops *config;
	unsigned int index;
	bool features_valid;
	bool use_va;
	int nvqs;
	struct vdpa_mgmt_dev *mdev;
};

/**
 * struct vdpa_iova_range - the IOVA range supported by the device
 * @first: start of the IOVA range
 * @last: end of the IOVA range
 */
struct vdpa_iova_range {
	u64 first;
	u64 last;
};

/**
 * struct vdpa_map_file - file area backing a device memory mapping
 * @file: vma->vm_file for the mapping
 * @offset: mapping offset in the vm_file
 */
struct vdpa_map_file {
	struct file *file;
	u64 offset;
};

/**
 * struct vdpa_config_ops - operations for configuring a vDPA device.
 * Note: vDPA device drivers are required to implement all of the
 * operations unless an operation is marked optional in the
 * following list.
 *
 * @set_vq_address:		Set the address of a virtqueue
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				@desc_area: address of desc area
 *				@driver_area: address of driver area
 *				@device_area: address of device area
 *				Returns integer: success (0) or error (< 0)
 * @set_vq_num:			Set the size of a virtqueue
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				@num: the size of the virtqueue
 * @kick_vq:			Kick the virtqueue
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 * @set_vq_cb:			Set the interrupt callback function for
 *				a virtqueue
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				@cb: virtio-vdev interrupt callback structure
 * @set_vq_ready:		Set ready status for a virtqueue
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				@ready: ready (true) or not ready (false)
 * @get_vq_ready:		Get ready status for a virtqueue
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				Returns boolean: ready (true) or not (false)
 * @set_vq_state:		Set the state for a virtqueue
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				@state: pointer to the virtqueue state to set (last_avail_idx)
 *				Returns integer: success (0) or error (< 0)
 * @get_vq_state:		Get the state for a virtqueue
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				@state: pointer to the returned state (last_avail_idx)
 * @get_vq_notification:	Get the notification area for a virtqueue
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				Returns the notification area
 * @get_vq_irq:			Get the irq number of a virtqueue (optional,
 *				but must be implemented if vq irq offloading
 *				is required)
 *				@vdev: vdpa device
 *				@idx: virtqueue index
 *				Returns int: irq number of a virtqueue,
 *				negative number if no irq is assigned.
 * @get_vq_align:		Get the virtqueue alignment requirement
 *				for the device
 *				@vdev: vdpa device
 *				Returns the virtqueue alignment requirement
 * @get_features:		Get virtio features supported by the device
 *				@vdev: vdpa device
 *				Returns the virtio features supported by the
 *				device
 * @set_features:		Set virtio features supported by the driver
 *				@vdev: vdpa device
 *				@features: features supported by the driver
 *				Returns integer: success (0) or error (< 0)
 * @set_config_cb:		Set the config interrupt callback
 *				@vdev: vdpa device
 *				@cb: virtio-vdev interrupt callback structure
 * @get_vq_num_max:		Get the max size of the virtqueue
 *				@vdev: vdpa device
 *				Returns u16: max size of the virtqueue
 * @get_device_id:		Get virtio device id
 *				@vdev: vdpa device
 *				Returns u32: virtio device id
 * @get_vendor_id:		Get id for the vendor that provides this device
 *				@vdev: vdpa device
 *				Returns u32: virtio vendor id
 * @get_status:			Get the device status
 *				@vdev: vdpa device
 *				Returns u8: virtio device status
 * @set_status:			Set the device status
 *				@vdev: vdpa device
 *				@status: virtio device status
 * @reset:			Reset device
 *				@vdev: vdpa device
 *				Returns integer: success (0) or error (< 0)
 * @get_config_size:		Get the size of the configuration space
 *				@vdev: vdpa device
 *				Returns size_t: configuration size
 * @get_config:			Read from device specific configuration space
 *				@vdev: vdpa device
 *				@offset: offset from the beginning of
 *				configuration space
 *				@buf: buffer used to read to
 *				@len: the length to read from
 *				configuration space
 * @set_config:			Write to device specific configuration space
 *				@vdev: vdpa device
 *				@offset: offset from the beginning of
 *				configuration space
 *				@buf: buffer used to write from
 *				@len: the length to write to
 *				configuration space
 * @get_generation:		Get device config generation (optional)
 *				@vdev: vdpa device
 *				Returns u32: device generation
 * @get_iova_range:		Get supported iova range (optional)
 *				@vdev: vdpa device
 *				Returns the iova range supported by
 *				the device.
 * @set_map:			Set device memory mapping (optional)
 *				Needed for devices that use device-specific
 *				DMA translation (on-chip IOMMU)
 *				@vdev: vdpa device
 *				@iotlb: vhost memory mapping to be
 *				used by the vDPA
 *				Returns integer: success (0) or error (< 0)
 * @dma_map:			Map an area of PA to IOVA (optional)
 *				Needed for devices that use device-specific
 *				DMA translation (on-chip IOMMU) and prefer
 *				incremental map.
 *				@vdev: vdpa device
 *				@iova: iova to be mapped
 *				@size: size of the area
 *				@pa: physical address for the map
 *				@perm: device access permission (VHOST_MAP_XX)
 *				Returns integer: success (0) or error (< 0)
 * @dma_unmap:			Unmap an area of IOVA (optional but
 *				must be implemented with dma_map)
 *				Needed for devices that use device-specific
 *				DMA translation (on-chip IOMMU) and prefer
 *				incremental unmap.
 *				@vdev: vdpa device
 *				@iova: iova to be unmapped
 *				@size: size of the area
 *				Returns integer: success (0) or error (< 0)
 * @free:			Free resources that belong to vDPA (optional)
 *				@vdev: vdpa device
 */
struct vdpa_config_ops {
	/* Virtqueue ops */
	int (*set_vq_address)(struct vdpa_device *vdev,
			      u16 idx, u64 desc_area, u64 driver_area,
			      u64 device_area);
	void (*set_vq_num)(struct vdpa_device *vdev, u16 idx, u32 num);
	void (*kick_vq)(struct vdpa_device *vdev, u16 idx);
	void (*set_vq_cb)(struct vdpa_device *vdev, u16 idx,
			  struct vdpa_callback *cb);
	void (*set_vq_ready)(struct vdpa_device *vdev, u16 idx, bool ready);
	bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx);
	int (*set_vq_state)(struct vdpa_device *vdev, u16 idx,
			    const struct vdpa_vq_state *state);
	int (*get_vq_state)(struct vdpa_device *vdev, u16 idx,
			    struct vdpa_vq_state *state);
	struct vdpa_notification_area
	(*get_vq_notification)(struct vdpa_device *vdev, u16 idx);
	/* vq irq is not expected to be changed once DRIVER_OK is set */
	int (*get_vq_irq)(struct vdpa_device *vdev, u16 idx);

	/* Device ops */
	u32 (*get_vq_align)(struct vdpa_device *vdev);
	u64 (*get_features)(struct vdpa_device *vdev);
	int (*set_features)(struct vdpa_device *vdev, u64 features);
	void (*set_config_cb)(struct vdpa_device *vdev,
			      struct vdpa_callback *cb);
	u16 (*get_vq_num_max)(struct vdpa_device *vdev);
	u32 (*get_device_id)(struct vdpa_device *vdev);
	u32 (*get_vendor_id)(struct vdpa_device *vdev);
	u8 (*get_status)(struct vdpa_device *vdev);
	void (*set_status)(struct vdpa_device *vdev, u8 status);
	int (*reset)(struct vdpa_device *vdev);
	size_t (*get_config_size)(struct vdpa_device *vdev);
	void (*get_config)(struct vdpa_device *vdev, unsigned int offset,
			   void *buf, unsigned int len);
	void (*set_config)(struct vdpa_device *vdev, unsigned int offset,
			   const void *buf, unsigned int len);
	u32 (*get_generation)(struct vdpa_device *vdev);
	struct vdpa_iova_range (*get_iova_range)(struct vdpa_device *vdev);

	/* DMA ops */
	int (*set_map)(struct vdpa_device *vdev, struct vhost_iotlb *iotlb);
	int (*dma_map)(struct vdpa_device *vdev, u64 iova, u64 size,
		       u64 pa, u32 perm, void *opaque);
	int (*dma_unmap)(struct vdpa_device *vdev, u64 iova, u64 size);

	/* Free device resources */
	void (*free)(struct vdpa_device *vdev);
};

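/*
 * Sketch of how a parent driver might wire up a subset of these ops; names
 * prefixed with "my_" are hypothetical and not part of this header. A real
 * driver must implement every op that is not marked optional above.
 *
 *	static u16 my_get_vq_num_max(struct vdpa_device *vdev)
 *	{
 *		return 256;
 *	}
 *
 *	static u32 my_get_device_id(struct vdpa_device *vdev)
 *	{
 *		return VIRTIO_ID_NET;
 *	}
 *
 *	static const struct vdpa_config_ops my_vdpa_ops = {
 *		.get_vq_num_max	= my_get_vq_num_max,
 *		.get_device_id	= my_get_device_id,
 *		...
 *	};
 */
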
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
					const struct vdpa_config_ops *config,
					size_t size, const char *name,
					bool use_va);

/**
 * vdpa_alloc_device - allocate and initialize a vDPA device
 *
 * @dev_struct: the type of the parent structure
 * @member: the name of struct vdpa_device within the @dev_struct
 * @parent: the parent device
 * @config: the bus operations that are supported by this device
 * @name: name of the vdpa device
 * @use_va: indicate whether virtual addresses must be used by this device
 *
 * Return allocated data structure or ERR_PTR upon error
 */
#define vdpa_alloc_device(dev_struct, member, parent, config, name, use_va)   \
			  container_of(__vdpa_alloc_device(                   \
				       parent, config,                        \
				       sizeof(dev_struct) +                   \
				       BUILD_BUG_ON_ZERO(offsetof(            \
				       dev_struct, member)), name, use_va),   \
				       dev_struct, member)

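/*
 * Illustrative use of vdpa_alloc_device() (struct my_vdpa, my_vdpa_ops and
 * "parent" are hypothetical): the vdpa_device is embedded in the driver's own
 * structure and the containing structure is returned. The BUILD_BUG_ON_ZERO()
 * above forces @member to sit at offset zero, so an ERR_PTR returned by
 * __vdpa_alloc_device() survives the container_of() and IS_ERR() still works.
 *
 *	struct my_vdpa {
 *		struct vdpa_device vdpa;
 *		void *priv;
 *	};
 *
 *	struct my_vdpa *my;
 *
 *	my = vdpa_alloc_device(struct my_vdpa, vdpa, parent,
 *			       &my_vdpa_ops, "my-vdpa-0", false);
 *	if (IS_ERR(my))
 *		return PTR_ERR(my);
 */
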
int vdpa_register_device(struct vdpa_device *vdev, int nvqs);
void vdpa_unregister_device(struct vdpa_device *vdev);

int _vdpa_register_device(struct vdpa_device *vdev, int nvqs);
void _vdpa_unregister_device(struct vdpa_device *vdev);

/**
 * struct vdpa_driver - operations for a vDPA driver
 * @driver: underlying device driver
 * @probe: the function to call when a device is found. Returns 0 or -errno.
 * @remove: the function to call when a device is removed.
 */
struct vdpa_driver {
	struct device_driver driver;
	int (*probe)(struct vdpa_device *vdev);
	void (*remove)(struct vdpa_device *vdev);
};

#define vdpa_register_driver(drv) \
	__vdpa_register_driver(drv, THIS_MODULE)
int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner);
void vdpa_unregister_driver(struct vdpa_driver *drv);

#define module_vdpa_driver(__vdpa_driver) \
	module_driver(__vdpa_driver, vdpa_register_driver, \
		      vdpa_unregister_driver)

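/*
 * Minimal sketch of a vDPA bus driver (the "my_" names are hypothetical);
 * module_vdpa_driver() expands to the usual module_init()/module_exit()
 * boilerplate around vdpa_register_driver()/vdpa_unregister_driver().
 *
 *	static int my_vdpa_probe(struct vdpa_device *vdev)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_vdpa_remove(struct vdpa_device *vdev)
 *	{
 *	}
 *
 *	static struct vdpa_driver my_vdpa_driver = {
 *		.driver = {
 *			.name = "my_vdpa_bus_driver",
 *		},
 *		.probe = my_vdpa_probe,
 *		.remove = my_vdpa_remove,
 *	};
 *	module_vdpa_driver(my_vdpa_driver);
 */
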
static inline struct vdpa_driver *drv_to_vdpa(struct device_driver *driver)
{
	return container_of(driver, struct vdpa_driver, driver);
}

static inline struct vdpa_device *dev_to_vdpa(struct device *_dev)
{
	return container_of(_dev, struct vdpa_device, dev);
}

static inline void *vdpa_get_drvdata(const struct vdpa_device *vdev)
{
	return dev_get_drvdata(&vdev->dev);
}

static inline void vdpa_set_drvdata(struct vdpa_device *vdev, void *data)
{
	dev_set_drvdata(&vdev->dev, data);
}

static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev)
{
	return vdev->dma_dev;
}

static inline int vdpa_reset(struct vdpa_device *vdev)
{
	const struct vdpa_config_ops *ops = vdev->config;

	vdev->features_valid = false;
	return ops->reset(vdev);
}

static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features)
{
	const struct vdpa_config_ops *ops = vdev->config;

	vdev->features_valid = true;
	return ops->set_features(vdev, features);
}

static inline void vdpa_get_config(struct vdpa_device *vdev,
				   unsigned int offset, void *buf,
				   unsigned int len)
{
	const struct vdpa_config_ops *ops = vdev->config;

	/*
	 * Config accesses aren't supposed to trigger before features are set.
	 * If it does happen we assume a legacy guest.
	 */
	if (!vdev->features_valid)
		vdpa_set_features(vdev, 0);
	ops->get_config(vdev, offset, buf, len);
}

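/*
 * Illustrative call (not a new API): a virtio-net oriented bus driver could
 * read the MAC address out of the device configuration space through the
 * helper above, relying on it to negotiate features first for legacy guests.
 *
 *	u8 mac[ETH_ALEN];
 *
 *	vdpa_get_config(vdev, offsetof(struct virtio_net_config, mac),
 *			mac, sizeof(mac));
 */
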
/**
 * struct vdpa_mgmtdev_ops - vdpa management device operations
 * @dev_add: Add a vdpa device using alloc and register
 *	     @mdev: parent device to use for device addition
 *	     @name: name of the new vdpa device
 *	     The driver must add the new device using _vdpa_register_device()
 *	     after fully initializing the vdpa device and must return 0 on
 *	     success or an appropriate error code.
 * @dev_del: Remove a vdpa device using unregister
 *	     @mdev: parent device to use for device removal
 *	     @dev: vdpa device to remove
 *	     The driver must remove the specified device by calling
 *	     _vdpa_unregister_device().
 */
struct vdpa_mgmtdev_ops {
	int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name);
	void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev);
};

struct vdpa_mgmt_dev {
	struct device *device;
	const struct vdpa_mgmtdev_ops *ops;
	const struct virtio_device_id *id_table; /* supported ids */
	struct list_head list;
};

int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev);
void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev);

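/*
 * Sketch of how a parent driver exposes a management device so that vdpa
 * devices can later be created through the management API; the "my_" names
 * and the parent "dev" pointer are hypothetical.
 *
 *	static const struct virtio_device_id my_id_table[] = {
 *		{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
 *		{ 0 },
 *	};
 *
 *	static const struct vdpa_mgmtdev_ops my_mgmtdev_ops = {
 *		.dev_add = my_dev_add,
 *		.dev_del = my_dev_del,
 *	};
 *
 *	static struct vdpa_mgmt_dev my_mgmt_dev = {
 *		.ops = &my_mgmtdev_ops,
 *		.id_table = my_id_table,
 *	};
 *
 *	my_mgmt_dev.device = dev;
 *	err = vdpa_mgmtdev_register(&my_mgmt_dev);
 */
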
#endif /* _LINUX_VDPA_H */