1 /* Virtio ring implementation.
2 *
3 * Copyright 2007 Rusty Russell IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19 #include <linux/virtio.h>
20 #include <linux/virtio_ring.h>
21 #include <linux/virtio_config.h>
22 #include <linux/device.h>
23 #include <linux/slab.h>
24 #include <linux/module.h>
25 #include <linux/hrtimer.h>
26 #include <linux/kmemleak.h>
27 #include <linux/dma-mapping.h>
28 #include <xen/xen.h>
29
30 #ifdef DEBUG
31 /* For development, we want to crash whenever the ring is screwed. */
32 #define BAD_RING(_vq, fmt, args...) \
33 do { \
34 dev_err(&(_vq)->vq.vdev->dev, \
35 "%s:"fmt, (_vq)->vq.name, ##args); \
36 BUG(); \
37 } while (0)
38 /* Caller is supposed to guarantee no reentry. */
39 #define START_USE(_vq) \
40 do { \
41 if ((_vq)->in_use) \
42 panic("%s:in_use = %i\n", \
43 (_vq)->vq.name, (_vq)->in_use); \
44 (_vq)->in_use = __LINE__; \
45 } while (0)
46 #define END_USE(_vq) \
47 do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
48 #else
49 #define BAD_RING(_vq, fmt, args...) \
50 do { \
51 dev_err(&_vq->vq.vdev->dev, \
52 "%s:"fmt, (_vq)->vq.name, ##args); \
53 (_vq)->broken = true; \
54 } while (0)
55 #define START_USE(vq)
56 #define END_USE(vq)
57 #endif
58
59 struct vring_desc_state {
60 void *data; /* Data for callback. */
61 struct vring_desc *indir_desc; /* Indirect descriptor, if any. */
62 };
63
64 struct vring_virtqueue {
65 struct virtqueue vq;
66
67 /* Actual memory layout for this queue */
68 struct vring vring;
69
70 /* Can we use weak barriers? */
71 bool weak_barriers;
72
73 /* Other side has made a mess, don't try any more. */
74 bool broken;
75
76 /* Host supports indirect buffers */
77 bool indirect;
78
79 /* Host publishes avail event idx */
80 bool event;
81
82 /* Head of free buffer list. */
83 unsigned int free_head;
84 /* Number we've added since last sync. */
85 unsigned int num_added;
86
87 /* Last used index we've seen. */
88 u16 last_used_idx;
89
90 /* Last written value to avail->flags */
91 u16 avail_flags_shadow;
92
93 /* Last written value to avail->idx in guest byte order */
94 u16 avail_idx_shadow;
95
96 /* How to notify other side. FIXME: commonalize hcalls! */
97 bool (*notify)(struct virtqueue *vq);
98
99 /* DMA, allocation, and size information */
100 bool we_own_ring;
101 size_t queue_size_in_bytes;
102 dma_addr_t queue_dma_addr;
103
104 #ifdef DEBUG
105 /* They're supposed to lock for us. */
106 unsigned int in_use;
107
108 /* Figure out if their kicks are too delayed. */
109 bool last_add_time_valid;
110 ktime_t last_add_time;
111 #endif
112
113 /* Per-descriptor state. */
114 struct vring_desc_state desc_state[];
115 };
116
117 #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
118
119 /*
120 * Modern virtio devices have feature bits to specify whether they need a
121 * quirk to bypass the IOMMU. If the quirk is absent, just use the DMA API.
122 *
123 * If it is present, the interaction between virtio and the DMA API is messy.
124 *
125 * On most systems with virtio, physical addresses match bus addresses,
126 * and it doesn't particularly matter whether we use the DMA API.
127 *
128 * On some systems, including Xen and any system with a physical device
129 * that speaks virtio behind a physical IOMMU, we must use the DMA API
130 * for virtio DMA to work at all.
131 *
132 * On other systems, including SPARC and PPC64, virtio-pci devices are
133 * enumerated as though they are behind an IOMMU, but the virtio host
134 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
135 * there or somehow map everything as the identity.
136 *
137 * For the time being, we preserve historic behavior and bypass the DMA
138 * API.
139 *
140 * TODO: install a per-device DMA ops structure that does the right thing
141 * taking into account all the above quirks, and use the DMA API
142 * unconditionally on data path.
143 */
144
145 static bool vring_use_dma_api(struct virtio_device *vdev)
146 {
147 if (!virtio_has_iommu_quirk(vdev))
148 return true;
149
150 /* Otherwise, we are left to guess. */
151 /*
152 * In theory, it's possible to have a buggy QEMU-supplied
153 * emulated Q35 IOMMU and Xen enabled at the same time. On
154 * such a configuration, virtio has never worked and will
155 * not work without an even larger kludge. Instead, enable
156 * the DMA API if we're a Xen guest, which at least allows
157 * all of the sensible Xen configurations to work correctly.
158 */
159 if (xen_domain())
160 return true;
161
162 return false;
163 }
164
165 /*
166 * The DMA ops on various arches are rather gnarly right now, and
167 * making all of the arch DMA ops work on the vring device itself
168 * is a mess. For now, we use the parent device for DMA ops.
169 */
170 static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
171 {
172 return vq->vq.vdev->dev.parent;
173 }
174
175 /* Map one sg entry. */
176 static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
177 struct scatterlist *sg,
178 enum dma_data_direction direction)
179 {
180 if (!vring_use_dma_api(vq->vq.vdev))
181 return (dma_addr_t)sg_phys(sg);
182
183 /*
184 * We can't use dma_map_sg, because we don't use scatterlists in
185 * the way it expects (we don't guarantee that the scatterlist
186 * will exist for the lifetime of the mapping).
187 */
188 return dma_map_page(vring_dma_dev(vq),
189 sg_page(sg), sg->offset, sg->length,
190 direction);
191 }
192
193 static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
194 void *cpu_addr, size_t size,
195 enum dma_data_direction direction)
196 {
197 if (!vring_use_dma_api(vq->vq.vdev))
198 return (dma_addr_t)virt_to_phys(cpu_addr);
199
200 return dma_map_single(vring_dma_dev(vq),
201 cpu_addr, size, direction);
202 }
203
204 static void vring_unmap_one(const struct vring_virtqueue *vq,
205 struct vring_desc *desc)
206 {
207 u16 flags;
208
209 if (!vring_use_dma_api(vq->vq.vdev))
210 return;
211
212 flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
213
214 if (flags & VRING_DESC_F_INDIRECT) {
215 dma_unmap_single(vring_dma_dev(vq),
216 virtio64_to_cpu(vq->vq.vdev, desc->addr),
217 virtio32_to_cpu(vq->vq.vdev, desc->len),
218 (flags & VRING_DESC_F_WRITE) ?
219 DMA_FROM_DEVICE : DMA_TO_DEVICE);
220 } else {
221 dma_unmap_page(vring_dma_dev(vq),
222 virtio64_to_cpu(vq->vq.vdev, desc->addr),
223 virtio32_to_cpu(vq->vq.vdev, desc->len),
224 (flags & VRING_DESC_F_WRITE) ?
225 DMA_FROM_DEVICE : DMA_TO_DEVICE);
226 }
227 }
228
229 static int vring_mapping_error(const struct vring_virtqueue *vq,
230 dma_addr_t addr)
231 {
232 if (!vring_use_dma_api(vq->vq.vdev))
233 return 0;
234
235 return dma_mapping_error(vring_dma_dev(vq), addr);
236 }
237
238 static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
239 unsigned int total_sg, gfp_t gfp)
240 {
241 struct vring_desc *desc;
242 unsigned int i;
243
244 /*
245 * We require lowmem mappings for the descriptors because
246 * otherwise virt_to_phys will give us bogus addresses in the
247 * virtqueue.
248 */
249 gfp &= ~__GFP_HIGHMEM;
250
251 desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
252 if (!desc)
253 return NULL;
254
255 for (i = 0; i < total_sg; i++)
256 desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
257 return desc;
258 }
259
260 static inline int virtqueue_add(struct virtqueue *_vq,
261 struct scatterlist *sgs[],
262 unsigned int total_sg,
263 unsigned int out_sgs,
264 unsigned int in_sgs,
265 void *data,
266 void *ctx,
267 gfp_t gfp)
268 {
269 struct vring_virtqueue *vq = to_vvq(_vq);
270 struct scatterlist *sg;
271 struct vring_desc *desc;
272 unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
273 int head;
274 bool indirect;
275
276 START_USE(vq);
277
278 BUG_ON(data == NULL);
279 BUG_ON(ctx && vq->indirect);
280
281 if (unlikely(vq->broken)) {
282 END_USE(vq);
283 return -EIO;
284 }
285
286 #ifdef DEBUG
287 {
288 ktime_t now = ktime_get();
289
290 /* No kick or get, with .1 second between? Warn. */
291 if (vq->last_add_time_valid)
292 WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
293 > 100);
294 vq->last_add_time = now;
295 vq->last_add_time_valid = true;
296 }
297 #endif
298
299 BUG_ON(total_sg > vq->vring.num);
300 BUG_ON(total_sg == 0);
301
302 head = vq->free_head;
303
304 /* If the host supports indirect descriptor tables, and we have multiple
305 * buffers, then go indirect. FIXME: tune this threshold */
306 if (vq->indirect && total_sg > 1 && vq->vq.num_free)
307 desc = alloc_indirect(_vq, total_sg, gfp);
308 else
309 desc = NULL;
310
311 if (desc) {
312 /* Use a single buffer which doesn't continue */
313 indirect = true;
314 /* Set up rest to use this indirect table. */
315 i = 0;
316 descs_used = 1;
317 } else {
318 indirect = false;
319 desc = vq->vring.desc;
320 i = head;
321 descs_used = total_sg;
322 }
323
324 if (vq->vq.num_free < descs_used) {
325 pr_debug("Can't add buf len %i - avail = %i\n",
326 descs_used, vq->vq.num_free);
327 /* FIXME: for historical reasons, we force a notify here if
328 * there are outgoing parts to the buffer. Presumably the
329 * host should service the ring ASAP. */
330 if (out_sgs)
331 vq->notify(&vq->vq);
332 if (indirect)
333 kfree(desc);
334 END_USE(vq);
335 return -ENOSPC;
336 }
337
338 for (n = 0; n < out_sgs; n++) {
339 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
340 dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
341 if (vring_mapping_error(vq, addr))
342 goto unmap_release;
343
344 desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
345 desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
346 desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
347 prev = i;
348 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
349 }
350 }
351 for (; n < (out_sgs + in_sgs); n++) {
352 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
353 dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
354 if (vring_mapping_error(vq, addr))
355 goto unmap_release;
356
357 desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
358 desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
359 desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
360 prev = i;
361 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
362 }
363 }
364 /* Last one doesn't continue. */
365 desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
366
367 if (indirect) {
368 /* Now that the indirect table is filled in, map it. */
369 dma_addr_t addr = vring_map_single(
370 vq, desc, total_sg * sizeof(struct vring_desc),
371 DMA_TO_DEVICE);
372 if (vring_mapping_error(vq, addr))
373 goto unmap_release;
374
375 vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
376 vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, addr);
377
378 vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
379 }
380
381 /* We're using some buffers from the free list. */
382 vq->vq.num_free -= descs_used;
383
384 /* Update free pointer */
385 if (indirect)
386 vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
387 else
388 vq->free_head = i;
389
390 /* Store token and indirect buffer state. */
391 vq->desc_state[head].data = data;
392 if (indirect)
393 vq->desc_state[head].indir_desc = desc;
394 if (ctx)
395 vq->desc_state[head].indir_desc = ctx;
396
397 /* Put entry in available array (but don't update avail->idx until they
398 * do sync). */
399 avail = vq->avail_idx_shadow & (vq->vring.num - 1);
400 vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
401
402 /* Descriptors and available array need to be set before we expose the
403 * new available array entries. */
404 virtio_wmb(vq->weak_barriers);
405 vq->avail_idx_shadow++;
406 vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
407 vq->num_added++;
408
409 pr_debug("Added buffer head %i to %p\n", head, vq);
410 END_USE(vq);
411
412 /* This is very unlikely, but theoretically possible. Kick
413 * just in case. */
414 if (unlikely(vq->num_added == (1 << 16) - 1))
415 virtqueue_kick(_vq);
416
417 return 0;
418
419 unmap_release:
420 err_idx = i;
421 i = head;
422
423 for (n = 0; n < total_sg; n++) {
424 if (i == err_idx)
425 break;
426 vring_unmap_one(vq, &desc[i]);
427 i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next);
428 }
429
430 vq->vq.num_free += total_sg;
431
432 if (indirect)
433 kfree(desc);
434
435 END_USE(vq);
436 return -EIO;
437 }
438
439 /**
440 * virtqueue_add_sgs - expose buffers to other end
441 * @vq: the struct virtqueue we're talking about.
442 * @sgs: array of terminated scatterlists.
443 * @out_sgs: the number of scatterlists readable by other side
444 * @in_sgs: the number of scatterlists which are writable (after readable ones)
445 * @data: the token identifying the buffer.
446 * @gfp: how to do memory allocations (if necessary).
447 *
448 * Caller must ensure we don't call this with other virtqueue operations
449 * at the same time (except where noted).
450 *
451 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
452 */
453 int virtqueue_add_sgs(struct virtqueue *_vq,
454 struct scatterlist *sgs[],
455 unsigned int out_sgs,
456 unsigned int in_sgs,
457 void *data,
458 gfp_t gfp)
459 {
460 unsigned int i, total_sg = 0;
461
462 /* Count them first. */
463 for (i = 0; i < out_sgs + in_sgs; i++) {
464 struct scatterlist *sg;
465 for (sg = sgs[i]; sg; sg = sg_next(sg))
466 total_sg++;
467 }
468 return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
469 data, NULL, gfp);
470 }
471 EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
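
/*
 * A minimal usage sketch, assuming the calling driver owns "vq" and the
 * "req"/"resp" structures: queue one device-readable buffer (the request)
 * and one device-writable buffer (the response) with virtqueue_add_sgs().
 *
 *	struct scatterlist hdr, status, *sgs[2];
 *
 *	sg_init_one(&hdr, req, sizeof(*req));
 *	sg_init_one(&status, resp, sizeof(*resp));
 *	sgs[0] = &hdr;
 *	sgs[1] = &status;
 *
 *	if (virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC) == 0)
 *		virtqueue_kick(vq);
 */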
472
473 /**
474 * virtqueue_add_outbuf - expose output buffers to other end
475 * @vq: the struct virtqueue we're talking about.
476 * @sg: scatterlist (must be well-formed and terminated!)
477 * @num: the number of entries in @sg readable by other side
478 * @data: the token identifying the buffer.
479 * @gfp: how to do memory allocations (if necessary).
480 *
481 * Caller must ensure we don't call this with other virtqueue operations
482 * at the same time (except where noted).
483 *
484 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
485 */
486 int virtqueue_add_outbuf(struct virtqueue *vq,
487 struct scatterlist *sg, unsigned int num,
488 void *data,
489 gfp_t gfp)
490 {
491 return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
492 }
493 EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
494
495 /**
496 * virtqueue_add_inbuf - expose input buffers to other end
497 * @vq: the struct virtqueue we're talking about.
498 * @sg: scatterlist (must be well-formed and terminated!)
499 * @num: the number of entries in @sg writable by other side
500 * @data: the token identifying the buffer.
501 * @gfp: how to do memory allocations (if necessary).
502 *
503 * Caller must ensure we don't call this with other virtqueue operations
504 * at the same time (except where noted).
505 *
506 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
507 */
508 int virtqueue_add_inbuf(struct virtqueue *vq,
509 struct scatterlist *sg, unsigned int num,
510 void *data,
511 gfp_t gfp)
512 {
513 return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
514 }
515 EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
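
/*
 * A minimal usage sketch, assuming the calling driver owns "vq", "buf" and
 * "buf_len": post an empty receive buffer, virtio-net style.
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, buf_len);
 *	if (virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL) == 0)
 *		virtqueue_kick(vq);
 */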
516
517 /**
518 * virtqueue_add_inbuf_ctx - expose input buffers to other end
519 * @vq: the struct virtqueue we're talking about.
520 * @sg: scatterlist (must be well-formed and terminated!)
521 * @num: the number of entries in @sg writable by other side
522 * @data: the token identifying the buffer.
523 * @ctx: extra context for the token
524 * @gfp: how to do memory allocations (if necessary).
525 *
526 * Caller must ensure we don't call this with other virtqueue operations
527 * at the same time (except where noted).
528 *
529 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
530 */
531 int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
532 struct scatterlist *sg, unsigned int num,
533 void *data,
534 void *ctx,
535 gfp_t gfp)
536 {
537 return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
538 }
539 EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
540
541 /**
542 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
543 * @vq: the struct virtqueue
544 *
545 * Instead of virtqueue_kick(), you can do:
546 * if (virtqueue_kick_prepare(vq))
547 * virtqueue_notify(vq);
548 *
549 * This is sometimes useful because virtqueue_kick_prepare() needs
550 * to be serialized, but the actual virtqueue_notify() call does not.
551 */
552 bool virtqueue_kick_prepare(struct virtqueue *_vq)
553 {
554 struct vring_virtqueue *vq = to_vvq(_vq);
555 u16 new, old;
556 bool needs_kick;
557
558 START_USE(vq);
559 /* We need to expose available array entries before checking avail
560 * event. */
561 virtio_mb(vq->weak_barriers);
562
563 old = vq->avail_idx_shadow - vq->num_added;
564 new = vq->avail_idx_shadow;
565 vq->num_added = 0;
566
567 #ifdef DEBUG
568 if (vq->last_add_time_valid) {
569 WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
570 vq->last_add_time)) > 100);
571 }
572 vq->last_add_time_valid = false;
573 #endif
574
575 if (vq->event) {
576 needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
577 new, old);
578 } else {
579 needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
580 }
581 END_USE(vq);
582 return needs_kick;
583 }
584 EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
585
586 /**
587 * virtqueue_notify - second half of split virtqueue_kick call.
588 * @vq: the struct virtqueue
589 *
590 * This does not need to be serialized.
591 *
592 * Returns false if host notify failed or queue is broken, otherwise true.
593 */
594 bool virtqueue_notify(struct virtqueue *_vq)
595 {
596 struct vring_virtqueue *vq = to_vvq(_vq);
597
598 if (unlikely(vq->broken))
599 return false;
600
601 /* Prod other side to tell it about changes. */
602 if (!vq->notify(_vq)) {
603 vq->broken = true;
604 return false;
605 }
606 return true;
607 }
608 EXPORT_SYMBOL_GPL(virtqueue_notify);
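
/*
 * A minimal usage sketch of the split kick, assuming a driver-owned "vq",
 * "lock" and "flags": the serialized half runs under the driver's lock,
 * while the potentially expensive notification runs outside of it.
 *
 *	bool kick;
 *
 *	spin_lock_irqsave(&lock, flags);
 *	... one or more virtqueue_add_*() calls ...
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&lock, flags);
 *
 *	if (kick)
 *		virtqueue_notify(vq);
 */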
609
610 /**
611 * virtqueue_kick - update after add_buf
612 * @vq: the struct virtqueue
613 *
614 * After one or more virtqueue_add_* calls, invoke this to kick
615 * the other side.
616 *
617 * Caller must ensure we don't call this with other virtqueue
618 * operations at the same time (except where noted).
619 *
620 * Returns false if kick failed, otherwise true.
621 */
622 bool virtqueue_kick(struct virtqueue *vq)
623 {
624 if (virtqueue_kick_prepare(vq))
625 return virtqueue_notify(vq);
626 return true;
627 }
628 EXPORT_SYMBOL_GPL(virtqueue_kick);
629
630 static void detach_buf(struct vring_virtqueue *vq, unsigned int head,
631 void **ctx)
632 {
633 unsigned int i, j;
634 __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
635
636 /* Clear data ptr. */
637 vq->desc_state[head].data = NULL;
638
639 /* Put back on free list: unmap first-level descriptors and find end */
640 i = head;
641
642 while (vq->vring.desc[i].flags & nextflag) {
643 vring_unmap_one(vq, &vq->vring.desc[i]);
644 i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
645 vq->vq.num_free++;
646 }
647
648 vring_unmap_one(vq, &vq->vring.desc[i]);
649 vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
650 vq->free_head = head;
651
652 /* Plus final descriptor */
653 vq->vq.num_free++;
654
655 if (vq->indirect) {
656 struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
657 u32 len;
658
659 /* Free the indirect table, if any, now that it's unmapped. */
660 if (!indir_desc)
661 return;
662
663 len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len);
664
665 BUG_ON(!(vq->vring.desc[head].flags &
666 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
667 BUG_ON(len == 0 || len % sizeof(struct vring_desc));
668
669 for (j = 0; j < len / sizeof(struct vring_desc); j++)
670 vring_unmap_one(vq, &indir_desc[j]);
671
672 kfree(indir_desc);
673 vq->desc_state[head].indir_desc = NULL;
674 } else if (ctx) {
675 *ctx = vq->desc_state[head].indir_desc;
676 }
677 }
678
679 static inline bool more_used(const struct vring_virtqueue *vq)
680 {
681 return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
682 }
683
684 /**
685 * virtqueue_get_buf - get the next used buffer
686 * @vq: the struct virtqueue we're talking about.
687 * @len: the length written into the buffer
688 *
689 * If the device wrote data into the buffer, @len will be set to the
690 * amount written. This means you don't need to clear the buffer
691 * beforehand to ensure there's no data leakage in the case of short
692 * writes.
693 *
694 * Caller must ensure we don't call this with other virtqueue
695 * operations at the same time (except where noted).
696 *
697 * Returns NULL if there are no used buffers, or the "data" token
698 * handed to virtqueue_add_*().
699 */
700 void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
701 void **ctx)
702 {
703 struct vring_virtqueue *vq = to_vvq(_vq);
704 void *ret;
705 unsigned int i;
706 u16 last_used;
707
708 START_USE(vq);
709
710 if (unlikely(vq->broken)) {
711 END_USE(vq);
712 return NULL;
713 }
714
715 if (!more_used(vq)) {
716 pr_debug("No more buffers in queue\n");
717 END_USE(vq);
718 return NULL;
719 }
720
721 /* Only get used array entries after they have been exposed by host. */
722 virtio_rmb(vq->weak_barriers);
723
724 last_used = (vq->last_used_idx & (vq->vring.num - 1));
725 i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
726 *len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);
727
728 if (unlikely(i >= vq->vring.num)) {
729 BAD_RING(vq, "id %u out of range\n", i);
730 return NULL;
731 }
732 if (unlikely(!vq->desc_state[i].data)) {
733 BAD_RING(vq, "id %u is not a head!\n", i);
734 return NULL;
735 }
736
737 /* detach_buf clears data, so grab it now. */
738 ret = vq->desc_state[i].data;
739 detach_buf(vq, i, ctx);
740 vq->last_used_idx++;
741 /* If we expect an interrupt for the next entry, tell host
742 * by writing event index and flush out the write before
743 * the read in the next get_buf call. */
744 if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
745 virtio_store_mb(vq->weak_barriers,
746 &vring_used_event(&vq->vring),
747 cpu_to_virtio16(_vq->vdev, vq->last_used_idx));
748
749 #ifdef DEBUG
750 vq->last_add_time_valid = false;
751 #endif
752
753 END_USE(vq);
754 return ret;
755 }
756 EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
757
758 void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
759 {
760 return virtqueue_get_buf_ctx(_vq, len, NULL);
761 }
762 EXPORT_SYMBOL_GPL(virtqueue_get_buf);
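
/*
 * A minimal usage sketch, assuming a driver-owned "vq" and a hypothetical
 * complete_request() helper: a completion path usually drains every used
 * buffer in a loop, using the returned token to find the original request.
 *
 *	unsigned int len;
 *	void *token;
 *
 *	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *		complete_request(token, len);
 */
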
763 /**
764 * virtqueue_disable_cb - disable callbacks
765 * @vq: the struct virtqueue we're talking about.
766 *
767 * Note that this is not necessarily synchronous, hence unreliable and only
768 * useful as an optimization.
769 *
770 * Unlike other operations, this need not be serialized.
771 */
772 void virtqueue_disable_cb(struct virtqueue *_vq)
773 {
774 struct vring_virtqueue *vq = to_vvq(_vq);
775
776 if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
777 vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
778 if (!vq->event)
779 vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
780 }
781
782 }
783 EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
784
785 /**
786 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
787 * @vq: the struct virtqueue we're talking about.
788 *
789 * This re-enables callbacks; it returns current queue state
790 * in an opaque unsigned value. This value should be later tested by
791 * virtqueue_poll, to detect a possible race between the driver checking for
792 * more work, and enabling callbacks.
793 *
794 * Caller must ensure we don't call this with other virtqueue
795 * operations at the same time (except where noted).
796 */
797 unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
798 {
799 struct vring_virtqueue *vq = to_vvq(_vq);
800 u16 last_used_idx;
801
802 START_USE(vq);
803
804 /* We optimistically turn back on interrupts, then check if there was
805 * more to do. */
806 /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
807 * either clear the flags bit or point the event index at the next
808 * entry. Always do both to keep code simple. */
809 if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
810 vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
811 if (!vq->event)
812 vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
813 }
814 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
815 END_USE(vq);
816 return last_used_idx;
817 }
818 EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
819
820 /**
821 * virtqueue_poll - query pending used buffers
822 * @vq: the struct virtqueue we're talking about.
823 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
824 *
825 * Returns "true" if there are pending used buffers in the queue.
826 *
827 * This does not need to be serialized.
828 */
829 bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
830 {
831 struct vring_virtqueue *vq = to_vvq(_vq);
832
833 virtio_mb(vq->weak_barriers);
834 return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
835 }
836 EXPORT_SYMBOL_GPL(virtqueue_poll);
837
838 /**
839 * virtqueue_enable_cb - restart callbacks after disable_cb.
840 * @vq: the struct virtqueue we're talking about.
841 *
842 * This re-enables callbacks; it returns "false" if there are pending
843 * buffers in the queue, to detect a possible race between the driver
844 * checking for more work, and enabling callbacks.
845 *
846 * Caller must ensure we don't call this with other virtqueue
847 * operations at the same time (except where noted).
848 */
849 bool virtqueue_enable_cb(struct virtqueue *_vq)
850 {
851 unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
852 return !virtqueue_poll(_vq, last_used_idx);
853 }
854 EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
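
/*
 * A minimal usage sketch, assuming a driver-owned "vq" and a hypothetical
 * process_used_buffers() helper wrapping a virtqueue_get_buf() loop:
 * callbacks stay disabled while the queue is drained, and processing only
 * stops once virtqueue_enable_cb() reports that no buffers were pending.
 *
 *	for (;;) {
 *		virtqueue_disable_cb(vq);
 *		process_used_buffers(vq);
 *		if (virtqueue_enable_cb(vq))
 *			break;
 *	}
 */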
855
856 /**
857 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
858 * @vq: the struct virtqueue we're talking about.
859 *
860 * This re-enables callbacks but hints to the other side to delay
861 * interrupts until most of the available buffers have been processed;
862 * it returns "false" if there are many pending buffers in the queue,
863 * to detect a possible race between the driver checking for more work,
864 * and enabling callbacks.
865 *
866 * Caller must ensure we don't call this with other virtqueue
867 * operations at the same time (except where noted).
868 */
869 bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
870 {
871 struct vring_virtqueue *vq = to_vvq(_vq);
872 u16 bufs;
873
874 START_USE(vq);
875
876 /* We optimistically turn back on interrupts, then check if there was
877 * more to do. */
878 * Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
879 * either clear the flags bit or point the event index at the next
880 * entry. Always update the event index to keep code simple. */
881 if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
882 vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
883 if (!vq->event)
884 vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
885 }
886 /* TODO: tune this threshold */
887 bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
888
889 virtio_store_mb(vq->weak_barriers,
890 &vring_used_event(&vq->vring),
891 cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
892
893 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
894 END_USE(vq);
895 return false;
896 }
897
898 END_USE(vq);
899 return true;
900 }
901 EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
902
903 /**
904 * virtqueue_detach_unused_buf - detach first unused buffer
905 * @vq: the struct virtqueue we're talking about.
906 *
907 * Returns NULL or the "data" token handed to virtqueue_add_*().
908 * This is not valid on an active queue; it is useful only for device
909 * shutdown.
910 */
911 void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
912 {
913 struct vring_virtqueue *vq = to_vvq(_vq);
914 unsigned int i;
915 void *buf;
916
917 START_USE(vq);
918
919 for (i = 0; i < vq->vring.num; i++) {
920 if (!vq->desc_state[i].data)
921 continue;
922 /* detach_buf clears data, so grab it now. */
923 buf = vq->desc_state[i].data;
924 detach_buf(vq, i, NULL);
925 vq->avail_idx_shadow--;
926 vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
927 END_USE(vq);
928 return buf;
929 }
930 /* That should have freed everything. */
931 BUG_ON(vq->vq.num_free != vq->vring.num);
932
933 END_USE(vq);
934 return NULL;
935 }
936 EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
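
/*
 * A minimal usage sketch, assuming a driver-owned "vq" and a hypothetical
 * free_request() helper: during device shutdown, reclaim any buffers the
 * driver still owns before deleting the queue.
 *
 *	void *buf;
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		free_request(buf);
 */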
937
938 irqreturn_t vring_interrupt(int irq, void *_vq)
939 {
940 struct vring_virtqueue *vq = to_vvq(_vq);
941
942 if (!more_used(vq)) {
943 pr_debug("virtqueue interrupt with no work for %p\n", vq);
944 return IRQ_NONE;
945 }
946
947 if (unlikely(vq->broken))
948 return IRQ_HANDLED;
949
950 pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
951 if (vq->vq.callback)
952 vq->vq.callback(&vq->vq);
953
954 return IRQ_HANDLED;
955 }
956 EXPORT_SYMBOL_GPL(vring_interrupt);
957
958 struct virtqueue *__vring_new_virtqueue(unsigned int index,
959 struct vring vring,
960 struct virtio_device *vdev,
961 bool weak_barriers,
962 bool context,
963 bool (*notify)(struct virtqueue *),
964 void (*callback)(struct virtqueue *),
965 const char *name)
966 {
967 unsigned int i;
968 struct vring_virtqueue *vq;
969
970 vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state),
971 GFP_KERNEL);
972 if (!vq)
973 return NULL;
974
975 vq->vring = vring;
976 vq->vq.callback = callback;
977 vq->vq.vdev = vdev;
978 vq->vq.name = name;
979 vq->vq.num_free = vring.num;
980 vq->vq.index = index;
981 vq->we_own_ring = false;
982 vq->queue_dma_addr = 0;
983 vq->queue_size_in_bytes = 0;
984 vq->notify = notify;
985 vq->weak_barriers = weak_barriers;
986 vq->broken = false;
987 vq->last_used_idx = 0;
988 vq->avail_flags_shadow = 0;
989 vq->avail_idx_shadow = 0;
990 vq->num_added = 0;
991 list_add_tail(&vq->vq.list, &vdev->vqs);
992 #ifdef DEBUG
993 vq->in_use = false;
994 vq->last_add_time_valid = false;
995 #endif
996
997 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
998 !context;
999 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
1000
1001 /* No callback? Tell other side not to bother us. */
1002 if (!callback) {
1003 vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
1004 if (!vq->event)
1005 vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
1006 }
1007
1008 /* Put everything in free lists. */
1009 vq->free_head = 0;
1010 for (i = 0; i < vring.num-1; i++)
1011 vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
1012 memset(vq->desc_state, 0, vring.num * sizeof(struct vring_desc_state));
1013
1014 return &vq->vq;
1015 }
1016 EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
1017
1018 static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
1019 dma_addr_t *dma_handle, gfp_t flag)
1020 {
1021 if (vring_use_dma_api(vdev)) {
1022 return dma_alloc_coherent(vdev->dev.parent, size,
1023 dma_handle, flag);
1024 } else {
1025 void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
1026 if (queue) {
1027 phys_addr_t phys_addr = virt_to_phys(queue);
1028 *dma_handle = (dma_addr_t)phys_addr;
1029
1030 /*
1031 * Sanity check: make sure we didn't truncate
1032 * the address. The only arches I can find that
1033 * have 64-bit phys_addr_t but 32-bit dma_addr_t
1034 * are certain non-highmem MIPS and x86
1035 * configurations, but these configurations
1036 * should never allocate physical pages above 32
1037 * bits, so this is fine. Just in case, throw a
1038 * warning and abort if we end up with an
1039 * unrepresentable address.
1040 */
1041 if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
1042 free_pages_exact(queue, PAGE_ALIGN(size));
1043 return NULL;
1044 }
1045 }
1046 return queue;
1047 }
1048 }
1049
1050 static void vring_free_queue(struct virtio_device *vdev, size_t size,
1051 void *queue, dma_addr_t dma_handle)
1052 {
1053 if (vring_use_dma_api(vdev)) {
1054 dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
1055 } else {
1056 free_pages_exact(queue, PAGE_ALIGN(size));
1057 }
1058 }
1059
1060 struct virtqueue *vring_create_virtqueue(
1061 unsigned int index,
1062 unsigned int num,
1063 unsigned int vring_align,
1064 struct virtio_device *vdev,
1065 bool weak_barriers,
1066 bool may_reduce_num,
1067 bool context,
1068 bool (*notify)(struct virtqueue *),
1069 void (*callback)(struct virtqueue *),
1070 const char *name)
1071 {
1072 struct virtqueue *vq;
1073 void *queue = NULL;
1074 dma_addr_t dma_addr;
1075 size_t queue_size_in_bytes;
1076 struct vring vring;
1077
1078 /* We assume num is a power of 2. */
1079 if (num & (num - 1)) {
1080 dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
1081 return NULL;
1082 }
1083
1084 /* TODO: allocate each queue chunk individually */
1085 for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
1086 queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
1087 &dma_addr,
1088 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
1089 if (queue)
1090 break;
1091 }
1092
1093 if (!num)
1094 return NULL;
1095
1096 if (!queue) {
1097 /* Try to get a single page. You are my only hope! */
1098 queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
1099 &dma_addr, GFP_KERNEL|__GFP_ZERO);
1100 }
1101 if (!queue)
1102 return NULL;
1103
1104 queue_size_in_bytes = vring_size(num, vring_align);
1105 vring_init(&vring, num, queue, vring_align);
1106
1107 vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
1108 notify, callback, name);
1109 if (!vq) {
1110 vring_free_queue(vdev, queue_size_in_bytes, queue,
1111 dma_addr);
1112 return NULL;
1113 }
1114
1115 to_vvq(vq)->queue_dma_addr = dma_addr;
1116 to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
1117 to_vvq(vq)->we_own_ring = true;
1118
1119 return vq;
1120 }
1121 EXPORT_SYMBOL_GPL(vring_create_virtqueue);
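
/*
 * A minimal usage sketch, assuming a transport-owned "vdev" and
 * my_notify()/my_callback() hooks; the queue size of 256 and the queue
 * name are arbitrary example values.
 *
 *	vq = vring_create_virtqueue(0, 256, SMP_CACHE_BYTES, vdev,
 *				    true, true, false,
 *				    my_notify, my_callback, "requests");
 */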
1122
1123 struct virtqueue *vring_new_virtqueue(unsigned int index,
1124 unsigned int num,
1125 unsigned int vring_align,
1126 struct virtio_device *vdev,
1127 bool weak_barriers,
1128 bool context,
1129 void *pages,
1130 bool (*notify)(struct virtqueue *vq),
1131 void (*callback)(struct virtqueue *vq),
1132 const char *name)
1133 {
1134 struct vring vring;
1135 vring_init(&vring, num, pages, vring_align);
1136 return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
1137 notify, callback, name);
1138 }
1139 EXPORT_SYMBOL_GPL(vring_new_virtqueue);
1140
1141 void vring_del_virtqueue(struct virtqueue *_vq)
1142 {
1143 struct vring_virtqueue *vq = to_vvq(_vq);
1144
1145 if (vq->we_own_ring) {
1146 vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
1147 vq->vring.desc, vq->queue_dma_addr);
1148 }
1149 list_del(&_vq->list);
1150 kfree(vq);
1151 }
1152 EXPORT_SYMBOL_GPL(vring_del_virtqueue);
1153
1154 /* Manipulates transport-specific feature bits. */
1155 void vring_transport_features(struct virtio_device *vdev)
1156 {
1157 unsigned int i;
1158
1159 for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
1160 switch (i) {
1161 case VIRTIO_RING_F_INDIRECT_DESC:
1162 break;
1163 case VIRTIO_RING_F_EVENT_IDX:
1164 break;
1165 case VIRTIO_F_VERSION_1:
1166 break;
1167 case VIRTIO_F_IOMMU_PLATFORM:
1168 break;
1169 default:
1170 /* We don't understand this bit. */
1171 __virtio_clear_bit(vdev, i);
1172 }
1173 }
1174 }
1175 EXPORT_SYMBOL_GPL(vring_transport_features);
1176
1177 /**
1178 * virtqueue_get_vring_size - return the size of the virtqueue's vring
1179 * @vq: the struct virtqueue containing the vring of interest.
1180 *
1181 * Returns the size of the vring. This is mainly used for boasting to
1182 * userspace. Unlike other operations, this need not be serialized.
1183 */
1184 unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
1185 {
1186
1187 struct vring_virtqueue *vq = to_vvq(_vq);
1188
1189 return vq->vring.num;
1190 }
1191 EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
1192
1193 bool virtqueue_is_broken(struct virtqueue *_vq)
1194 {
1195 struct vring_virtqueue *vq = to_vvq(_vq);
1196
1197 return vq->broken;
1198 }
1199 EXPORT_SYMBOL_GPL(virtqueue_is_broken);
1200
1201 /*
1202 * This should prevent the device from being used, allowing drivers to
1203 * recover. You may need to grab appropriate locks to flush.
1204 */
1205 void virtio_break_device(struct virtio_device *dev)
1206 {
1207 struct virtqueue *_vq;
1208
1209 list_for_each_entry(_vq, &dev->vqs, list) {
1210 struct vring_virtqueue *vq = to_vvq(_vq);
1211 vq->broken = true;
1212 }
1213 }
1214 EXPORT_SYMBOL_GPL(virtio_break_device);
1215
1216 dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
1217 {
1218 struct vring_virtqueue *vq = to_vvq(_vq);
1219
1220 BUG_ON(!vq->we_own_ring);
1221
1222 return vq->queue_dma_addr;
1223 }
1224 EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
1225
1226 dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
1227 {
1228 struct vring_virtqueue *vq = to_vvq(_vq);
1229
1230 BUG_ON(!vq->we_own_ring);
1231
1232 return vq->queue_dma_addr +
1233 ((char *)vq->vring.avail - (char *)vq->vring.desc);
1234 }
1235 EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
1236
1237 dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
1238 {
1239 struct vring_virtqueue *vq = to_vvq(_vq);
1240
1241 BUG_ON(!vq->we_own_ring);
1242
1243 return vq->queue_dma_addr +
1244 ((char *)vq->vring.used - (char *)vq->vring.desc);
1245 }
1246 EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
1247
1248 const struct vring *virtqueue_get_vring(struct virtqueue *vq)
1249 {
1250 return &to_vvq(vq)->vring;
1251 }
1252 EXPORT_SYMBOL_GPL(virtqueue_get_vring);
1253
1254 MODULE_LICENSE("GPL");