// SPDX-License-Identifier: GPL-2.0-only
/*
 * VIRTIO based driver for vDPA device
 *
 * Copyright (c) 2020, Red Hat. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 */
10 #include <linux/init.h>
11 #include <linux/module.h>
12 #include <linux/device.h>
13 #include <linux/kernel.h>
14 #include <linux/slab.h>
15 #include <linux/uuid.h>
16 #include <linux/virtio.h>
17 #include <linux/vdpa.h>
18 #include <linux/virtio_config.h>
19 #include <linux/virtio_ring.h>
21 #define MOD_VERSION "0.1"
22 #define MOD_AUTHOR "Jason Wang <jasowang@redhat.com>"
23 #define MOD_DESC "vDPA bus driver for virtio devices"
24 #define MOD_LICENSE "GPL v2"
26 struct virtio_vdpa_device
{
27 struct virtio_device vdev
;
28 struct vdpa_device
*vdpa
;
31 /* The lock to protect virtqueue list */
33 /* List of virtio_vdpa_vq_info */
34 struct list_head virtqueues
;
37 struct virtio_vdpa_vq_info
{
38 /* the actual virtqueue */
41 /* the list node for the virtqueues list */
42 struct list_head node
;
45 static inline struct virtio_vdpa_device
*
46 to_virtio_vdpa_device(struct virtio_device
*dev
)
48 return container_of(dev
, struct virtio_vdpa_device
, vdev
);
51 static struct vdpa_device
*vd_get_vdpa(struct virtio_device
*vdev
)
53 return to_virtio_vdpa_device(vdev
)->vdpa
;
/* virtio config op: read @len bytes of device config space at @offset into @buf. */
static void virtio_vdpa_get(struct virtio_device *vdev, unsigned offset,
			    void *buf, unsigned len)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);

	vdpa_get_config(vdpa, offset, buf, len);
}
64 static void virtio_vdpa_set(struct virtio_device
*vdev
, unsigned offset
,
65 const void *buf
, unsigned len
)
67 struct vdpa_device
*vdpa
= vd_get_vdpa(vdev
);
68 const struct vdpa_config_ops
*ops
= vdpa
->config
;
70 ops
->set_config(vdpa
, offset
, buf
, len
);
73 static u32
virtio_vdpa_generation(struct virtio_device
*vdev
)
75 struct vdpa_device
*vdpa
= vd_get_vdpa(vdev
);
76 const struct vdpa_config_ops
*ops
= vdpa
->config
;
78 if (ops
->get_generation
)
79 return ops
->get_generation(vdpa
);
84 static u8
virtio_vdpa_get_status(struct virtio_device
*vdev
)
86 struct vdpa_device
*vdpa
= vd_get_vdpa(vdev
);
87 const struct vdpa_config_ops
*ops
= vdpa
->config
;
89 return ops
->get_status(vdpa
);
92 static void virtio_vdpa_set_status(struct virtio_device
*vdev
, u8 status
)
94 struct vdpa_device
*vdpa
= vd_get_vdpa(vdev
);
95 const struct vdpa_config_ops
*ops
= vdpa
->config
;
97 return ops
->set_status(vdpa
, status
);
/*
 * virtio config op: reset the device.
 * NOTE(review): the original body was truncated here; vdpa_reset() is the
 * core helper that matches the vdpa_get_config()/vdpa_set_features()
 * wrappers used elsewhere in this file — confirm against the tree.
 */
static void virtio_vdpa_reset(struct virtio_device *vdev)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);

	vdpa_reset(vdpa);
}
107 static bool virtio_vdpa_notify(struct virtqueue
*vq
)
109 struct vdpa_device
*vdpa
= vd_get_vdpa(vq
->vdev
);
110 const struct vdpa_config_ops
*ops
= vdpa
->config
;
112 ops
->kick_vq(vdpa
, vq
->index
);
117 static irqreturn_t
virtio_vdpa_config_cb(void *private)
119 struct virtio_vdpa_device
*vd_dev
= private;
121 virtio_config_changed(&vd_dev
->vdev
);
126 static irqreturn_t
virtio_vdpa_virtqueue_cb(void *private)
128 struct virtio_vdpa_vq_info
*info
= private;
130 return vring_interrupt(0, info
->vq
);
133 static struct virtqueue
*
134 virtio_vdpa_setup_vq(struct virtio_device
*vdev
, unsigned int index
,
135 void (*callback
)(struct virtqueue
*vq
),
136 const char *name
, bool ctx
)
138 struct virtio_vdpa_device
*vd_dev
= to_virtio_vdpa_device(vdev
);
139 struct vdpa_device
*vdpa
= vd_get_vdpa(vdev
);
140 const struct vdpa_config_ops
*ops
= vdpa
->config
;
141 struct virtio_vdpa_vq_info
*info
;
142 struct vdpa_callback cb
;
143 struct virtqueue
*vq
;
144 u64 desc_addr
, driver_addr
, device_addr
;
152 /* Queue shouldn't already be set up. */
153 if (ops
->get_vq_ready(vdpa
, index
))
154 return ERR_PTR(-ENOENT
);
156 /* Allocate and fill out our active queue description */
157 info
= kmalloc(sizeof(*info
), GFP_KERNEL
);
159 return ERR_PTR(-ENOMEM
);
161 num
= ops
->get_vq_num_max(vdpa
);
164 goto error_new_virtqueue
;
167 /* Create the vring */
168 align
= ops
->get_vq_align(vdpa
);
169 vq
= vring_create_virtqueue(index
, num
, align
, vdev
,
171 virtio_vdpa_notify
, callback
, name
);
174 goto error_new_virtqueue
;
177 /* Setup virtqueue callback */
178 cb
.callback
= virtio_vdpa_virtqueue_cb
;
180 ops
->set_vq_cb(vdpa
, index
, &cb
);
181 ops
->set_vq_num(vdpa
, index
, virtqueue_get_vring_size(vq
));
183 desc_addr
= virtqueue_get_desc_addr(vq
);
184 driver_addr
= virtqueue_get_avail_addr(vq
);
185 device_addr
= virtqueue_get_used_addr(vq
);
187 if (ops
->set_vq_address(vdpa
, index
,
188 desc_addr
, driver_addr
,
194 ops
->set_vq_ready(vdpa
, index
, 1);
199 spin_lock_irqsave(&vd_dev
->lock
, flags
);
200 list_add(&info
->node
, &vd_dev
->virtqueues
);
201 spin_unlock_irqrestore(&vd_dev
->lock
, flags
);
206 vring_del_virtqueue(vq
);
208 ops
->set_vq_ready(vdpa
, index
, 0);
209 /* VDPA driver should make sure vq is stopeed here */
210 WARN_ON(ops
->get_vq_ready(vdpa
, index
));
215 static void virtio_vdpa_del_vq(struct virtqueue
*vq
)
217 struct virtio_vdpa_device
*vd_dev
= to_virtio_vdpa_device(vq
->vdev
);
218 struct vdpa_device
*vdpa
= vd_dev
->vdpa
;
219 const struct vdpa_config_ops
*ops
= vdpa
->config
;
220 struct virtio_vdpa_vq_info
*info
= vq
->priv
;
221 unsigned int index
= vq
->index
;
224 spin_lock_irqsave(&vd_dev
->lock
, flags
);
225 list_del(&info
->node
);
226 spin_unlock_irqrestore(&vd_dev
->lock
, flags
);
228 /* Select and deactivate the queue (best effort) */
229 ops
->set_vq_ready(vdpa
, index
, 0);
231 vring_del_virtqueue(vq
);
236 static void virtio_vdpa_del_vqs(struct virtio_device
*vdev
)
238 struct virtqueue
*vq
, *n
;
240 list_for_each_entry_safe(vq
, n
, &vdev
->vqs
, list
)
241 virtio_vdpa_del_vq(vq
);
244 static int virtio_vdpa_find_vqs(struct virtio_device
*vdev
, unsigned nvqs
,
245 struct virtqueue
*vqs
[],
246 vq_callback_t
*callbacks
[],
247 const char * const names
[],
249 struct irq_affinity
*desc
)
251 struct virtio_vdpa_device
*vd_dev
= to_virtio_vdpa_device(vdev
);
252 struct vdpa_device
*vdpa
= vd_get_vdpa(vdev
);
253 const struct vdpa_config_ops
*ops
= vdpa
->config
;
254 struct vdpa_callback cb
;
255 int i
, err
, queue_idx
= 0;
257 for (i
= 0; i
< nvqs
; ++i
) {
263 vqs
[i
] = virtio_vdpa_setup_vq(vdev
, queue_idx
++,
264 callbacks
[i
], names
[i
], ctx
?
266 if (IS_ERR(vqs
[i
])) {
267 err
= PTR_ERR(vqs
[i
]);
272 cb
.callback
= virtio_vdpa_config_cb
;
274 ops
->set_config_cb(vdpa
, &cb
);
279 virtio_vdpa_del_vqs(vdev
);
283 static u64
virtio_vdpa_get_features(struct virtio_device
*vdev
)
285 struct vdpa_device
*vdpa
= vd_get_vdpa(vdev
);
286 const struct vdpa_config_ops
*ops
= vdpa
->config
;
288 return ops
->get_features(vdpa
);
291 static int virtio_vdpa_finalize_features(struct virtio_device
*vdev
)
293 struct vdpa_device
*vdpa
= vd_get_vdpa(vdev
);
295 /* Give virtio_ring a chance to accept features. */
296 vring_transport_features(vdev
);
298 return vdpa_set_features(vdpa
, vdev
->features
);
301 static const char *virtio_vdpa_bus_name(struct virtio_device
*vdev
)
303 struct virtio_vdpa_device
*vd_dev
= to_virtio_vdpa_device(vdev
);
304 struct vdpa_device
*vdpa
= vd_dev
->vdpa
;
306 return dev_name(&vdpa
->dev
);
309 static const struct virtio_config_ops virtio_vdpa_config_ops
= {
310 .get
= virtio_vdpa_get
,
311 .set
= virtio_vdpa_set
,
312 .generation
= virtio_vdpa_generation
,
313 .get_status
= virtio_vdpa_get_status
,
314 .set_status
= virtio_vdpa_set_status
,
315 .reset
= virtio_vdpa_reset
,
316 .find_vqs
= virtio_vdpa_find_vqs
,
317 .del_vqs
= virtio_vdpa_del_vqs
,
318 .get_features
= virtio_vdpa_get_features
,
319 .finalize_features
= virtio_vdpa_finalize_features
,
320 .bus_name
= virtio_vdpa_bus_name
,
323 static void virtio_vdpa_release_dev(struct device
*_d
)
325 struct virtio_device
*vdev
=
326 container_of(_d
, struct virtio_device
, dev
);
327 struct virtio_vdpa_device
*vd_dev
=
328 container_of(vdev
, struct virtio_vdpa_device
, vdev
);
333 static int virtio_vdpa_probe(struct vdpa_device
*vdpa
)
335 const struct vdpa_config_ops
*ops
= vdpa
->config
;
336 struct virtio_vdpa_device
*vd_dev
, *reg_dev
= NULL
;
339 vd_dev
= kzalloc(sizeof(*vd_dev
), GFP_KERNEL
);
343 vd_dev
->vdev
.dev
.parent
= vdpa_get_dma_dev(vdpa
);
344 vd_dev
->vdev
.dev
.release
= virtio_vdpa_release_dev
;
345 vd_dev
->vdev
.config
= &virtio_vdpa_config_ops
;
347 INIT_LIST_HEAD(&vd_dev
->virtqueues
);
348 spin_lock_init(&vd_dev
->lock
);
350 vd_dev
->vdev
.id
.device
= ops
->get_device_id(vdpa
);
351 if (vd_dev
->vdev
.id
.device
== 0)
354 vd_dev
->vdev
.id
.vendor
= ops
->get_vendor_id(vdpa
);
355 ret
= register_virtio_device(&vd_dev
->vdev
);
360 vdpa_set_drvdata(vdpa
, vd_dev
);
366 put_device(&vd_dev
->vdev
.dev
);
372 static void virtio_vdpa_remove(struct vdpa_device
*vdpa
)
374 struct virtio_vdpa_device
*vd_dev
= vdpa_get_drvdata(vdpa
);
376 unregister_virtio_device(&vd_dev
->vdev
);
379 static struct vdpa_driver virtio_vdpa_driver
= {
381 .name
= "virtio_vdpa",
383 .probe
= virtio_vdpa_probe
,
384 .remove
= virtio_vdpa_remove
,
387 module_vdpa_driver(virtio_vdpa_driver
);
/* Module metadata, from the MOD_* macros defined at the top of this file. */
MODULE_VERSION(MOD_VERSION);
MODULE_LICENSE(MOD_LICENSE);
MODULE_AUTHOR(MOD_AUTHOR);
MODULE_DESCRIPTION(MOD_DESC);