/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>

/* virtio guest is communicating with a virtual "device" that actually runs on
 * a host processor.  Memory barriers are used to control SMP effects. */
#ifdef CONFIG_SMP
/* Where possible, use SMP barriers which are more lightweight than mandatory
 * barriers, because mandatory barriers control MMIO effects on accesses
 * through relaxed memory I/O windows (which virtio does not use). */
#define virtio_mb() smp_mb()
#define virtio_rmb() smp_rmb()
#define virtio_wmb() smp_wmb()
#else
/* We must force memory ordering even if guest is UP since host could be
 * running on another CPU, but SMP barriers are defined to barrier() in that
 * configuration.  So fall back to mandatory barriers instead. */
#define virtio_mb() mb()
#define virtio_rmb() rmb()
#define virtio_wmb() wmb()
#endif

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)                             \
        do {                                                    \
                dev_err(&(_vq)->vq.vdev->dev,                   \
                        "%s:"fmt, (_vq)->vq.name, ##args);      \
                BUG();                                          \
        } while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)                                          \
        do {                                                    \
                if ((_vq)->in_use)                              \
                        panic("%s:in_use = %i\n",               \
                              (_vq)->vq.name, (_vq)->in_use);   \
                (_vq)->in_use = __LINE__;                       \
        } while (0)
#define END_USE(_vq) \
        do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)                             \
        do {                                                    \
                dev_err(&_vq->vq.vdev->dev,                     \
                        "%s:"fmt, (_vq)->vq.name, ##args);      \
                (_vq)->broken = true;                           \
        } while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif
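/* Note: outside DEBUG builds BAD_RING() only marks the queue broken; that
 * flag is checked in vring_get_buf() and vring_interrupt() below, so a
 * corrupted ring quietly stops returning buffers instead of crashing. */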

struct vring_virtqueue
{
        struct virtqueue vq;

        /* Actual memory layout for this queue */
        struct vring vring;

        /* Other side has made a mess, don't try any more. */
        bool broken;

        /* Host supports indirect buffers */
        bool indirect;

        /* Number of free buffers */
        unsigned int num_free;
        /* Head of free buffer list. */
        unsigned int free_head;
        /* Number we've added since last sync. */
        unsigned int num_added;

        /* Last used index we've seen. */
        u16 last_used_idx;

        /* How to notify other side. FIXME: commonalize hcalls! */
        void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
        /* They're supposed to lock for us. */
        unsigned int in_use;
#endif

        /* Tokens for callbacks. */
        void *data[];
};
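/* data[] is a flexible array holding one opaque token per descriptor; it is
 * allocated together with the struct in vring_new_virtqueue() below
 * (a single kmalloc of sizeof(*vq) + sizeof(void *) * num). */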

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/* Set up an indirect table of descriptors and add it to the queue. */
static int vring_add_indirect(struct vring_virtqueue *vq,
                              struct scatterlist sg[],
                              unsigned int out,
                              unsigned int in)
{
        struct vring_desc *desc;
        unsigned head;
        int i;

        desc = kmalloc((out + in) * sizeof(struct vring_desc), GFP_ATOMIC);
        if (!desc)
                return vq->vring.num;

        /* Transfer entries from the sg list into the indirect page */
        for (i = 0; i < out; i++) {
                desc[i].flags = VRING_DESC_F_NEXT;
                desc[i].addr = sg_phys(sg);
                desc[i].len = sg->length;
                desc[i].next = i+1;
                sg++;
        }
        for (; i < (out + in); i++) {
                desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
                desc[i].addr = sg_phys(sg);
                desc[i].len = sg->length;
                desc[i].next = i+1;
                sg++;
        }

        /* Last one doesn't continue. */
        desc[i-1].flags &= ~VRING_DESC_F_NEXT;
        desc[i-1].next = 0;

        /* We're about to use a buffer */
        vq->num_free--;

        /* Use a single buffer which doesn't continue */
        head = vq->free_head;
        vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
        vq->vring.desc[head].addr = virt_to_phys(desc);
        vq->vring.desc[head].len = i * sizeof(struct vring_desc);

        /* Update free pointer */
        vq->free_head = vq->vring.desc[head].next;

        return head;
}
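/* Note: kmalloc failure is signalled by returning vq->vring.num, which can
 * never be a valid head index; vring_add_buf() uses that sentinel to fall
 * back to ordinary (direct) descriptors. */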

static int vring_add_buf(struct virtqueue *_vq,
                         struct scatterlist sg[],
                         unsigned int out,
                         unsigned int in,
                         void *data)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        unsigned int i, avail, head, uninitialized_var(prev);

        START_USE(vq);

        BUG_ON(data == NULL);

        /* If the host supports indirect descriptor tables, and we have multiple
         * buffers, then go indirect. FIXME: tune this threshold */
        if (vq->indirect && (out + in) > 1 && vq->num_free) {
                head = vring_add_indirect(vq, sg, out, in);
                if (head != vq->vring.num)
                        goto add_head;
        }

        BUG_ON(out + in > vq->vring.num);
        BUG_ON(out + in == 0);

        if (vq->num_free < out + in) {
                pr_debug("Can't add buf len %i - avail = %i\n",
                         out + in, vq->num_free);
                /* FIXME: for historical reasons, we force a notify here if
                 * there are outgoing parts to the buffer.  Presumably the
                 * host should service the ring ASAP. */
                if (out)
                        vq->notify(&vq->vq);
                END_USE(vq);
                return -ENOSPC;
        }

        /* We're about to use some buffers from the free list. */
        vq->num_free -= out + in;

        head = vq->free_head;
        for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
                vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
                vq->vring.desc[i].addr = sg_phys(sg);
                vq->vring.desc[i].len = sg->length;
                prev = i;
                sg++;
        }
        for (; in; i = vq->vring.desc[i].next, in--) {
                vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
                vq->vring.desc[i].addr = sg_phys(sg);
                vq->vring.desc[i].len = sg->length;
                prev = i;
                sg++;
        }
        /* Last one doesn't continue. */
        vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

        /* Update free pointer */
        vq->free_head = i;

add_head:
        /* Set token. */
        vq->data[head] = data;

        /* Put entry in available array (but don't update avail->idx until they
         * do sync).  FIXME: avoid modulus here? */
        avail = (vq->vring.avail->idx + vq->num_added++) % vq->vring.num;
        vq->vring.avail->ring[avail] = head;

        pr_debug("Added buffer head %i to %p\n", head, vq);
        END_USE(vq);

        /* If we're indirect, we can fit many (assuming not OOM). */
        if (vq->indirect)
                return vq->num_free ? vq->vring.num : 0;
        return vq->num_free;
}
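/* The non-negative return value above is a capacity hint: the number of free
 * descriptors left (or vq->vring.num for an indirect queue that still has a
 * free slot), so callers can keep adding buffers until it reaches 0. */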

static void vring_kick(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        START_USE(vq);
        /* Descriptors and available array need to be set before we expose the
         * new available array entries. */
        virtio_wmb();

        vq->vring.avail->idx += vq->num_added;
        vq->num_added = 0;

        /* Need to update avail index before checking if we should notify */
        virtio_mb();

        if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
                /* Prod other side to tell it about changes. */
                vq->notify(&vq->vq);

        END_USE(vq);
}

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
        unsigned int i;

        /* Clear data ptr. */
        vq->data[head] = NULL;

        /* Put back on free list: find end */
        i = head;

        /* Free the indirect table */
        if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
                kfree(phys_to_virt(vq->vring.desc[i].addr));

        while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
                i = vq->vring.desc[i].next;
                vq->num_free++;
        }

        vq->vring.desc[i].next = vq->free_head;
        vq->free_head = head;
        /* Plus final descriptor */
        vq->num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
        return vq->last_used_idx != vq->vring.used->idx;
}
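/* Both indices are free-running u16s, so the comparison above keeps working
 * across wraparound; they are only reduced modulo vring.num when indexing
 * into the used ring. */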

static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        void *ret;
        unsigned int i;

        START_USE(vq);

        if (unlikely(vq->broken)) {
                END_USE(vq);
                return NULL;
        }

        if (!more_used(vq)) {
                pr_debug("No more buffers in queue\n");
                END_USE(vq);
                return NULL;
        }

        /* Only get used array entries after they have been exposed by host. */
        virtio_rmb();

        i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id;
        *len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len;

        if (unlikely(i >= vq->vring.num)) {
                BAD_RING(vq, "id %u out of range\n", i);
                return NULL;
        }
        if (unlikely(!vq->data[i])) {
                BAD_RING(vq, "id %u is not a head!\n", i);
                return NULL;
        }

        /* detach_buf clears data, so grab it now. */
        ret = vq->data[i];
        detach_buf(vq, i);
        vq->last_used_idx++;
        END_USE(vq);
        return ret;
}
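/* Note: on success vring_get_buf() fills *len from the used ring entry, i.e.
 * the number of bytes the host wrote into the writable (in) part of the
 * buffer. */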

static void vring_disable_cb(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
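/* VRING_AVAIL_F_NO_INTERRUPT is merely a hint to the host and is not
 * synchronous, so a callback can still arrive after vring_disable_cb();
 * callers must tolerate that. */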

static bool vring_enable_cb(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        START_USE(vq);

        /* We optimistically turn back on interrupts, then check if there was
         * more to do. */
        vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
        virtio_mb();
        if (unlikely(more_used(vq))) {
                END_USE(vq);
                return false;
        }

        END_USE(vq);
        return true;
}
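/* A false return from vring_enable_cb() means buffers were used while
 * callbacks were off; the caller should go back to polling with get_buf
 * rather than wait for an interrupt. */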

static void *vring_detach_unused_buf(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        unsigned int i;
        void *buf;

        START_USE(vq);

        for (i = 0; i < vq->vring.num; i++) {
                if (!vq->data[i])
                        continue;
                /* detach_buf clears data, so grab it now. */
                buf = vq->data[i];
                detach_buf(vq, i);
                END_USE(vq);
                return buf;
        }
        /* That should have freed everything. */
        BUG_ON(vq->num_free != vq->vring.num);

        END_USE(vq);
        return NULL;
}
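/* vring_detach_unused_buf() hands back one still-queued buffer per call and
 * NULL once none remain, so a driver can reclaim outstanding buffers (e.g.
 * when the device is being removed) by calling it in a loop. */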

irqreturn_t vring_interrupt(int irq, void *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        if (!more_used(vq)) {
                pr_debug("virtqueue interrupt with no work for %p\n", vq);
                return IRQ_NONE;
        }

        if (unlikely(vq->broken))
                return IRQ_HANDLED;

        pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
        if (vq->vq.callback)
                vq->vq.callback(&vq->vq);

        return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

static struct virtqueue_ops vring_vq_ops = {
        .add_buf = vring_add_buf,
        .get_buf = vring_get_buf,
        .kick = vring_kick,
        .disable_cb = vring_disable_cb,
        .enable_cb = vring_enable_cb,
        .detach_unused_buf = vring_detach_unused_buf,
};

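/*
 * vring_new_virtqueue - wrap a caller-provided vring in a struct virtqueue.
 *
 * The transport is expected to pass in 'pages': physically contiguous,
 * zeroed memory of at least vring_size(num, vring_align) bytes (see
 * linux/virtio_ring.h), with 'num' a power of 2 (checked below).  A minimal
 * sketch of how a driver then talks to the returned queue through the ops
 * above ('buf' and 'buf_len' are placeholders for a driver-owned buffer):
 *
 *	struct scatterlist sg;
 *	sg_init_one(&sg, buf, buf_len);
 *	if (vq->vq_ops->add_buf(vq, &sg, 1, 0, buf) >= 0)
 *		vq->vq_ops->kick(vq);
 */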
struct virtqueue *vring_new_virtqueue(unsigned int num,
                                      unsigned int vring_align,
                                      struct virtio_device *vdev,
                                      void *pages,
                                      void (*notify)(struct virtqueue *),
                                      void (*callback)(struct virtqueue *),
                                      const char *name)
{
        struct vring_virtqueue *vq;
        unsigned int i;

        /* We assume num is a power of 2. */
        if (num & (num - 1)) {
                dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
                return NULL;
        }

        vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
        if (!vq)
                return NULL;

        vring_init(&vq->vring, num, pages, vring_align);
        vq->vq.callback = callback;
        vq->vq.vdev = vdev;
        vq->vq.vq_ops = &vring_vq_ops;
        vq->vq.name = name;
        vq->notify = notify;
        vq->broken = false;
        vq->last_used_idx = 0;
        vq->num_added = 0;
        list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
        vq->in_use = false;
#endif

        vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);

        /* No callback?  Tell other side not to bother us. */
        if (!callback)
                vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

        /* Put everything in free lists. */
        vq->num_free = num;
        vq->free_head = 0;
        for (i = 0; i < num-1; i++) {
                vq->vring.desc[i].next = i+1;
                vq->data[i] = NULL;
        }
        vq->data[i] = NULL;

        return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *vq)
{
        list_del(&vq->list);
        kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
        unsigned int i;

        for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
                switch (i) {
                case VIRTIO_RING_F_INDIRECT_DESC:
                        break;
                default:
                        /* We don't understand this bit. */
                        clear_bit(i, vdev->features);
                }
        }
}
EXPORT_SYMBOL_GPL(vring_transport_features);

MODULE_LICENSE("GPL");