/*
 * drivers/virtio/virtio_ring.c
 * (recovered from a git-blame web export, commit 0a8a69dd; blame
 * annotations and page navigation removed)
 */
/* Virtio ring implementation.
 *
 * Copyright 2007 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
19#include <linux/virtio.h>
20#include <linux/virtio_ring.h>
21#include <linux/device.h>
22
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(vq, fmt...) \
	do { dev_err(&vq->vq.vdev->dev, fmt); BUG(); } while(0)
/* Mark entry into ring code: records the caller's __LINE__ so a nested
 * (unlocked) entry panics with the location of the first user. */
#define START_USE(vq) \
	do { if ((vq)->in_use) panic("in_use = %i\n", (vq)->in_use); (vq)->in_use = __LINE__; mb(); } while(0)
/* Mark exit from ring code; pairs with START_USE. */
#define END_USE(vq) \
	do { BUG_ON(!(vq)->in_use); (vq)->in_use = 0; mb(); } while(0)
#else
/* Production build: don't crash, just log and wedge the queue. */
#define BAD_RING(vq, fmt...) \
	do { dev_err(&vq->vq.vdev->dev, fmt); (vq)->broken = true; } while(0)
#define START_USE(vq)
#define END_USE(vq)
#endif
37
/* Per-queue state wrapping the driver-visible struct virtqueue. */
struct vring_virtqueue
{
	/* Public handle; embedded first member so to_vvq() can container_of(). */
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Number of free buffers */
	unsigned int num_free;
	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;
#endif

	/* Tokens for callbacks.  One slot per descriptor; a non-NULL entry
	 * marks a descriptor-chain head currently owned by the device. */
	void *data[];
};

/* Map the public struct virtqueue back to our private wrapper. */
#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
71
/* Expose a buffer to the other side: chains 'out' readable then 'in'
 * writable scatterlist entries into free descriptors and places the head
 * in the available ring.  The new entry is not visible to the host until
 * vring_kick() updates avail->idx.  Returns 0, or -ENOSPC if fewer than
 * out+in descriptors are free.  'data' is the token later returned by
 * vring_get_buf(). */
static int vring_add_buf(struct virtqueue *_vq,
			 struct scatterlist sg[],
			 unsigned int out,
			 unsigned int in,
			 void *data)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	/* prev is always written before use when out+in > 0 (BUG_ON below). */
	unsigned int i, avail, head, uninitialized_var(prev);

	BUG_ON(data == NULL);
	BUG_ON(out + in > vq->vring.num);
	BUG_ON(out + in == 0);

	START_USE(vq);

	if (vq->num_free < out + in) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 out + in, vq->num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->num_free -= out + in;

	head = vq->free_head;
	/* Device-readable descriptors first, each flagged as continuing. */
	for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	/* Then the device-writable descriptors. */
	for (; in; i = vq->vring.desc[i].next, in--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). FIXME: avoid modulus here? */
	avail = (vq->vring.avail->idx + vq->num_added++) % vq->vring.num;
	vq->vring.avail->ring[avail] = head;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);
	return 0;
}
135
/* Publish all buffers queued since the last kick by advancing avail->idx,
 * then notify the host unless it asked us not to. */
static void vring_kick(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	START_USE(vq);
	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	wmb();

	vq->vring.avail->idx += vq->num_added;
	vq->num_added = 0;

	/* Need to update avail index before checking if we should notify */
	mb();

	if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
		/* Prod other side to tell it about changes. */
		vq->notify(&vq->vq);

	END_USE(vq);
}
156
157static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
158{
159 unsigned int i;
160
161 /* Clear data ptr. */
162 vq->data[head] = NULL;
163
164 /* Put back on free list: find end */
165 i = head;
166 while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
167 i = vq->vring.desc[i].next;
168 vq->num_free++;
169 }
170
171 vq->vring.desc[i].next = vq->free_head;
172 vq->free_head = head;
173 /* Plus final descriptor */
174 vq->num_free++;
175}
176
0a8a69dd
RR
177static inline bool more_used(const struct vring_virtqueue *vq)
178{
179 return vq->last_used_idx != vq->vring.used->idx;
180}
181
182static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
183{
184 struct vring_virtqueue *vq = to_vvq(_vq);
185 void *ret;
186 unsigned int i;
187
188 START_USE(vq);
189
5ef82752
RR
190 if (unlikely(vq->broken)) {
191 END_USE(vq);
192 return NULL;
193 }
194
0a8a69dd
RR
195 if (!more_used(vq)) {
196 pr_debug("No more buffers in queue\n");
197 END_USE(vq);
198 return NULL;
199 }
200
201 i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id;
202 *len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len;
203
204 if (unlikely(i >= vq->vring.num)) {
205 BAD_RING(vq, "id %u out of range\n", i);
206 return NULL;
207 }
208 if (unlikely(!vq->data[i])) {
209 BAD_RING(vq, "id %u is not a head!\n", i);
210 return NULL;
211 }
212
213 /* detach_buf clears data, so grab it now. */
214 ret = vq->data[i];
215 detach_buf(vq, i);
216 vq->last_used_idx++;
217 END_USE(vq);
218 return ret;
219}
220
18445c4d
RR
221static void vring_disable_cb(struct virtqueue *_vq)
222{
223 struct vring_virtqueue *vq = to_vvq(_vq);
224
18445c4d 225 vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
18445c4d
RR
226}
227
/* Re-enable used-buffer interrupts.  Returns false if completions were
 * already pending (caller should poll instead of sleeping), true if it
 * is safe to wait for the callback. */
static bool vring_enable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	/* The flag clear must reach the host before we re-read the used ring,
	 * or we could miss a completion and sleep forever. */
	mb();
	if (unlikely(more_used(vq))) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
246
247irqreturn_t vring_interrupt(int irq, void *_vq)
248{
249 struct vring_virtqueue *vq = to_vvq(_vq);
250
251 if (!more_used(vq)) {
252 pr_debug("virtqueue interrupt with no work for %p\n", vq);
253 return IRQ_NONE;
254 }
255
256 if (unlikely(vq->broken))
257 return IRQ_HANDLED;
258
259 pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
18445c4d
RR
260 if (vq->vq.callback)
261 vq->vq.callback(&vq->vq);
0a8a69dd
RR
262
263 return IRQ_HANDLED;
264}
c6fd4701 265EXPORT_SYMBOL_GPL(vring_interrupt);
0a8a69dd
RR
266
/* Operations table installed into every queue's vq->vq_ops; this is the
 * interface drivers use to drive the ring. */
static struct virtqueue_ops vring_vq_ops = {
	.add_buf = vring_add_buf,
	.get_buf = vring_get_buf,
	.kick = vring_kick,
	.disable_cb = vring_disable_cb,
	.enable_cb = vring_enable_cb,
};
274
275struct virtqueue *vring_new_virtqueue(unsigned int num,
276 struct virtio_device *vdev,
277 void *pages,
278 void (*notify)(struct virtqueue *),
18445c4d 279 void (*callback)(struct virtqueue *))
0a8a69dd
RR
280{
281 struct vring_virtqueue *vq;
282 unsigned int i;
283
42b36cc0
RR
284 /* We assume num is a power of 2. */
285 if (num & (num - 1)) {
286 dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
287 return NULL;
288 }
289
0a8a69dd
RR
290 vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
291 if (!vq)
292 return NULL;
293
42b36cc0 294 vring_init(&vq->vring, num, pages, PAGE_SIZE);
0a8a69dd
RR
295 vq->vq.callback = callback;
296 vq->vq.vdev = vdev;
297 vq->vq.vq_ops = &vring_vq_ops;
298 vq->notify = notify;
299 vq->broken = false;
300 vq->last_used_idx = 0;
301 vq->num_added = 0;
302#ifdef DEBUG
303 vq->in_use = false;
304#endif
305
306 /* No callback? Tell other side not to bother us. */
307 if (!callback)
308 vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
309
310 /* Put everything in free lists. */
311 vq->num_free = num;
312 vq->free_head = 0;
313 for (i = 0; i < num-1; i++)
314 vq->vring.desc[i].next = i+1;
315
316 return &vq->vq;
317}
c6fd4701 318EXPORT_SYMBOL_GPL(vring_new_virtqueue);
0a8a69dd
RR
319
320void vring_del_virtqueue(struct virtqueue *vq)
321{
322 kfree(to_vvq(vq));
323}
c6fd4701 324EXPORT_SYMBOL_GPL(vring_del_virtqueue);
0a8a69dd 325
c6fd4701 326MODULE_LICENSE("GPL");