// SPDX-License-Identifier: GPL-2.0-only
/*
 * VIRTIO based driver for vDPA device
 *
 * Copyright (c) 2020, Red Hat. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */
10 #include <linux/init.h>
11 #include <linux/module.h>
12 #include <linux/device.h>
13 #include <linux/kernel.h>
14 #include <linux/slab.h>
15 #include <linux/uuid.h>
16 #include <linux/virtio.h>
17 #include <linux/vdpa.h>
18 #include <linux/virtio_config.h>
19 #include <linux/virtio_ring.h>
21 #define MOD_VERSION "0.1"
22 #define MOD_AUTHOR "Jason Wang <jasowang@redhat.com>"
23 #define MOD_DESC "vDPA bus driver for virtio devices"
24 #define MOD_LICENSE "GPL v2"
26 struct virtio_vdpa_device
{
27 struct virtio_device vdev
;
28 struct vdpa_device
*vdpa
;
31 /* The lock to protect virtqueue list */
33 /* List of virtio_vdpa_vq_info */
34 struct list_head virtqueues
;
37 struct virtio_vdpa_vq_info
{
38 /* the actual virtqueue */
41 /* the list node for the virtqueues list */
42 struct list_head node
;
45 static inline struct virtio_vdpa_device
*
46 to_virtio_vdpa_device(struct virtio_device
*dev
)
48 return container_of(dev
, struct virtio_vdpa_device
, vdev
);
51 static struct vdpa_device
*vd_get_vdpa(struct virtio_device
*vdev
)
53 return to_virtio_vdpa_device(vdev
)->vdpa
;
/* virtio config op: read @len bytes of config space at @offset into @buf. */
static void virtio_vdpa_get(struct virtio_device *vdev, unsigned offset,
			    void *buf, unsigned len)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);

	vdpa_get_config(vdpa, offset, buf, len);
}
64 static void virtio_vdpa_set(struct virtio_device
*vdev
, unsigned offset
,
65 const void *buf
, unsigned len
)
67 struct vdpa_device
*vdpa
= vd_get_vdpa(vdev
);
68 const struct vdpa_config_ops
*ops
= vdpa
->config
;
70 ops
->set_config(vdpa
, offset
, buf
, len
);
73 static u32
virtio_vdpa_generation(struct virtio_device
*vdev
)
75 struct vdpa_device
*vdpa
= vd_get_vdpa(vdev
);
76 const struct vdpa_config_ops
*ops
= vdpa
->config
;
78 if (ops
->get_generation
)
79 return ops
->get_generation(vdpa
);
84 static u8
virtio_vdpa_get_status(struct virtio_device
*vdev
)
86 struct vdpa_device
*vdpa
= vd_get_vdpa(vdev
);
87 const struct vdpa_config_ops
*ops
= vdpa
->config
;
89 return ops
->get_status(vdpa
);
92 static void virtio_vdpa_set_status(struct virtio_device
*vdev
, u8 status
)
94 struct vdpa_device
*vdpa
= vd_get_vdpa(vdev
);
95 const struct vdpa_config_ops
*ops
= vdpa
->config
;
97 return ops
->set_status(vdpa
, status
);
/*
 * virtio config op: reset the device.
 * NOTE(review): the reset call itself was lost in this copy; restored as
 * vdpa_reset() per the upstream driver — confirm against your kernel tree.
 */
static void virtio_vdpa_reset(struct virtio_device *vdev)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);

	vdpa_reset(vdpa);
}
107 static bool virtio_vdpa_notify(struct virtqueue
*vq
)
109 struct vdpa_device
*vdpa
= vd_get_vdpa(vq
->vdev
);
110 const struct vdpa_config_ops
*ops
= vdpa
->config
;
112 ops
->kick_vq(vdpa
, vq
->index
);
117 static irqreturn_t
virtio_vdpa_config_cb(void *private)
119 struct virtio_vdpa_device
*vd_dev
= private;
121 virtio_config_changed(&vd_dev
->vdev
);
126 static irqreturn_t
virtio_vdpa_virtqueue_cb(void *private)
128 struct virtio_vdpa_vq_info
*info
= private;
130 return vring_interrupt(0, info
->vq
);
133 static struct virtqueue
*
134 virtio_vdpa_setup_vq(struct virtio_device
*vdev
, unsigned int index
,
135 void (*callback
)(struct virtqueue
*vq
),
136 const char *name
, bool ctx
)
138 struct virtio_vdpa_device
*vd_dev
= to_virtio_vdpa_device(vdev
);
139 struct vdpa_device
*vdpa
= vd_get_vdpa(vdev
);
140 const struct vdpa_config_ops
*ops
= vdpa
->config
;
141 struct virtio_vdpa_vq_info
*info
;
142 struct vdpa_callback cb
;
143 struct virtqueue
*vq
;
144 u64 desc_addr
, driver_addr
, device_addr
;
145 /* Assume split virtqueue, switch to packed if necessary */
146 struct vdpa_vq_state state
= {0};
154 if (index
>= vdpa
->nvqs
)
155 return ERR_PTR(-ENOENT
);
157 /* Queue shouldn't already be set up. */
158 if (ops
->get_vq_ready(vdpa
, index
))
159 return ERR_PTR(-ENOENT
);
161 /* Allocate and fill out our active queue description */
162 info
= kmalloc(sizeof(*info
), GFP_KERNEL
);
164 return ERR_PTR(-ENOMEM
);
166 num
= ops
->get_vq_num_max(vdpa
);
169 goto error_new_virtqueue
;
172 /* Create the vring */
173 align
= ops
->get_vq_align(vdpa
);
174 vq
= vring_create_virtqueue(index
, num
, align
, vdev
,
176 virtio_vdpa_notify
, callback
, name
);
179 goto error_new_virtqueue
;
182 /* Setup virtqueue callback */
183 cb
.callback
= virtio_vdpa_virtqueue_cb
;
185 ops
->set_vq_cb(vdpa
, index
, &cb
);
186 ops
->set_vq_num(vdpa
, index
, virtqueue_get_vring_size(vq
));
188 desc_addr
= virtqueue_get_desc_addr(vq
);
189 driver_addr
= virtqueue_get_avail_addr(vq
);
190 device_addr
= virtqueue_get_used_addr(vq
);
192 if (ops
->set_vq_address(vdpa
, index
,
193 desc_addr
, driver_addr
,
199 /* reset virtqueue state index */
200 if (virtio_has_feature(vdev
, VIRTIO_F_RING_PACKED
)) {
201 struct vdpa_vq_state_packed
*s
= &state
.packed
;
203 s
->last_avail_counter
= 1;
204 s
->last_avail_idx
= 0;
205 s
->last_used_counter
= 1;
206 s
->last_used_idx
= 0;
208 err
= ops
->set_vq_state(vdpa
, index
, &state
);
212 ops
->set_vq_ready(vdpa
, index
, 1);
217 spin_lock_irqsave(&vd_dev
->lock
, flags
);
218 list_add(&info
->node
, &vd_dev
->virtqueues
);
219 spin_unlock_irqrestore(&vd_dev
->lock
, flags
);
224 vring_del_virtqueue(vq
);
226 ops
->set_vq_ready(vdpa
, index
, 0);
227 /* VDPA driver should make sure vq is stopeed here */
228 WARN_ON(ops
->get_vq_ready(vdpa
, index
));
233 static void virtio_vdpa_del_vq(struct virtqueue
*vq
)
235 struct virtio_vdpa_device
*vd_dev
= to_virtio_vdpa_device(vq
->vdev
);
236 struct vdpa_device
*vdpa
= vd_dev
->vdpa
;
237 const struct vdpa_config_ops
*ops
= vdpa
->config
;
238 struct virtio_vdpa_vq_info
*info
= vq
->priv
;
239 unsigned int index
= vq
->index
;
242 spin_lock_irqsave(&vd_dev
->lock
, flags
);
243 list_del(&info
->node
);
244 spin_unlock_irqrestore(&vd_dev
->lock
, flags
);
246 /* Select and deactivate the queue (best effort) */
247 ops
->set_vq_ready(vdpa
, index
, 0);
249 vring_del_virtqueue(vq
);
254 static void virtio_vdpa_del_vqs(struct virtio_device
*vdev
)
256 struct virtqueue
*vq
, *n
;
258 list_for_each_entry_safe(vq
, n
, &vdev
->vqs
, list
)
259 virtio_vdpa_del_vq(vq
);
262 static int virtio_vdpa_find_vqs(struct virtio_device
*vdev
, unsigned nvqs
,
263 struct virtqueue
*vqs
[],
264 vq_callback_t
*callbacks
[],
265 const char * const names
[],
267 struct irq_affinity
*desc
)
269 struct virtio_vdpa_device
*vd_dev
= to_virtio_vdpa_device(vdev
);
270 struct vdpa_device
*vdpa
= vd_get_vdpa(vdev
);
271 const struct vdpa_config_ops
*ops
= vdpa
->config
;
272 struct vdpa_callback cb
;
273 int i
, err
, queue_idx
= 0;
275 for (i
= 0; i
< nvqs
; ++i
) {
281 vqs
[i
] = virtio_vdpa_setup_vq(vdev
, queue_idx
++,
282 callbacks
[i
], names
[i
], ctx
?
284 if (IS_ERR(vqs
[i
])) {
285 err
= PTR_ERR(vqs
[i
]);
290 cb
.callback
= virtio_vdpa_config_cb
;
292 ops
->set_config_cb(vdpa
, &cb
);
297 virtio_vdpa_del_vqs(vdev
);
301 static u64
virtio_vdpa_get_features(struct virtio_device
*vdev
)
303 struct vdpa_device
*vdpa
= vd_get_vdpa(vdev
);
304 const struct vdpa_config_ops
*ops
= vdpa
->config
;
306 return ops
->get_features(vdpa
);
309 static int virtio_vdpa_finalize_features(struct virtio_device
*vdev
)
311 struct vdpa_device
*vdpa
= vd_get_vdpa(vdev
);
313 /* Give virtio_ring a chance to accept features. */
314 vring_transport_features(vdev
);
316 return vdpa_set_features(vdpa
, vdev
->features
);
319 static const char *virtio_vdpa_bus_name(struct virtio_device
*vdev
)
321 struct virtio_vdpa_device
*vd_dev
= to_virtio_vdpa_device(vdev
);
322 struct vdpa_device
*vdpa
= vd_dev
->vdpa
;
324 return dev_name(&vdpa
->dev
);
327 static const struct virtio_config_ops virtio_vdpa_config_ops
= {
328 .get
= virtio_vdpa_get
,
329 .set
= virtio_vdpa_set
,
330 .generation
= virtio_vdpa_generation
,
331 .get_status
= virtio_vdpa_get_status
,
332 .set_status
= virtio_vdpa_set_status
,
333 .reset
= virtio_vdpa_reset
,
334 .find_vqs
= virtio_vdpa_find_vqs
,
335 .del_vqs
= virtio_vdpa_del_vqs
,
336 .get_features
= virtio_vdpa_get_features
,
337 .finalize_features
= virtio_vdpa_finalize_features
,
338 .bus_name
= virtio_vdpa_bus_name
,
341 static void virtio_vdpa_release_dev(struct device
*_d
)
343 struct virtio_device
*vdev
=
344 container_of(_d
, struct virtio_device
, dev
);
345 struct virtio_vdpa_device
*vd_dev
=
346 container_of(vdev
, struct virtio_vdpa_device
, vdev
);
351 static int virtio_vdpa_probe(struct vdpa_device
*vdpa
)
353 const struct vdpa_config_ops
*ops
= vdpa
->config
;
354 struct virtio_vdpa_device
*vd_dev
, *reg_dev
= NULL
;
357 vd_dev
= kzalloc(sizeof(*vd_dev
), GFP_KERNEL
);
361 vd_dev
->vdev
.dev
.parent
= vdpa_get_dma_dev(vdpa
);
362 vd_dev
->vdev
.dev
.release
= virtio_vdpa_release_dev
;
363 vd_dev
->vdev
.config
= &virtio_vdpa_config_ops
;
365 INIT_LIST_HEAD(&vd_dev
->virtqueues
);
366 spin_lock_init(&vd_dev
->lock
);
368 vd_dev
->vdev
.id
.device
= ops
->get_device_id(vdpa
);
369 if (vd_dev
->vdev
.id
.device
== 0)
372 vd_dev
->vdev
.id
.vendor
= ops
->get_vendor_id(vdpa
);
373 ret
= register_virtio_device(&vd_dev
->vdev
);
378 vdpa_set_drvdata(vdpa
, vd_dev
);
384 put_device(&vd_dev
->vdev
.dev
);
390 static void virtio_vdpa_remove(struct vdpa_device
*vdpa
)
392 struct virtio_vdpa_device
*vd_dev
= vdpa_get_drvdata(vdpa
);
394 unregister_virtio_device(&vd_dev
->vdev
);
397 static struct vdpa_driver virtio_vdpa_driver
= {
399 .name
= "virtio_vdpa",
401 .probe
= virtio_vdpa_probe
,
402 .remove
= virtio_vdpa_remove
,
module_vdpa_driver(virtio_vdpa_driver);

MODULE_VERSION(MOD_VERSION);
MODULE_LICENSE(MOD_LICENSE);
MODULE_AUTHOR(MOD_AUTHOR);
MODULE_DESCRIPTION(MOD_DESC);