#ifndef _LINUX_VIRTIO_RING_H
#define _LINUX_VIRTIO_RING_H

#include <asm/barrier.h>
#include <linux/irqreturn.h>
#include <uapi/linux/virtio_ring.h>

/*
 * Barriers in virtio are tricky.  Non-SMP virtio guests can't assume
 * they're not on an SMP host system, so they need to assume real
 * barriers.  Non-SMP virtio hosts could skip the barriers, but does
 * anyone care?
 *
 * For virtio_pci on SMP, we don't need to order with respect to MMIO
 * accesses through relaxed memory I/O windows, so virt_mb() et al are
 * sufficient.
 *
 * For using virtio to talk to real devices (eg. other heterogeneous
 * CPUs) we do need real barriers.  In theory, we could be using both
 * kinds of virtio, so it's a runtime decision, and the branch is
 * actually quite cheap.
 */

static inline void virtio_mb(bool weak_barriers)
{
	if (weak_barriers)
		virt_mb();
	else
		mb();
}

static inline void virtio_rmb(bool weak_barriers)
{
	if (weak_barriers)
		virt_rmb();
	else
		rmb();
}

static inline void virtio_wmb(bool weak_barriers)
{
	if (weak_barriers)
		virt_wmb();
	else
		wmb();
}

static inline void virtio_store_mb(bool weak_barriers,
				   __virtio16 *p, __virtio16 v)
{
	if (weak_barriers) {
		virt_store_mb(*p, v);
	} else {
		WRITE_ONCE(*p, v);
		mb();
	}
}

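/*
 * A minimal usage sketch, editor-added and hypothetical (the helper
 * name and parameters are not part of this header): publishing a
 * descriptor chain on a split ring.  The avail slot must be visible to
 * the device before the avail index, hence virtio_wmb(); the index
 * itself is written with virtio_store_mb() so the store is followed by
 * a full barrier before the driver checks whether it needs to kick.
 * Endian conversion to __virtio16 is assumed to be done by the caller.
 */
static inline void virtio_example_publish_avail(struct vring *vr,
						bool weak_barriers,
						u16 slot, __virtio16 head,
						__virtio16 new_idx)
{
	/* Expose the chain head in the chosen avail ring slot. */
	vr->avail->ring[slot & (vr->num - 1)] = head;

	/* Order the slot write before the index update. */
	virtio_wmb(weak_barriers);

	/* Publish the new avail index with a store + full barrier. */
	virtio_store_mb(weak_barriers, &vr->avail->idx, new_idx);
}
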
struct virtio_device;
struct virtqueue;

/*
 * Creates a virtqueue and allocates the descriptor ring.  If
 * may_reduce_num is set, then this may allocate a smaller ring than
 * expected.  The caller should query virtqueue_get_ring_size to learn
 * the actual size of the ring.
 */
struct virtqueue *vring_create_virtqueue(unsigned int index,
					 unsigned int num,
					 unsigned int vring_align,
					 struct virtio_device *vdev,
					 bool weak_barriers,
					 bool may_reduce_num,
					 bool ctx,
					 bool (*notify)(struct virtqueue *vq),
					 void (*callback)(struct virtqueue *vq),
					 const char *name);

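/*
 * A minimal usage sketch, editor-added and hypothetical (the helper
 * name and the chosen numbers are illustrative only): a transport
 * would typically pass its own notify hook and the driver's callback.
 * Because may_reduce_num is set, the actual ring size must be read
 * back with virtqueue_get_ring_size() (declared in <linux/virtio.h>).
 * A queue created this way is freed, ring included, by
 * vring_del_virtqueue() below.
 */
static inline struct virtqueue *
virtio_example_create_vq(struct virtio_device *vdev, unsigned int index,
			 bool (*notify)(struct virtqueue *vq),
			 void (*callback)(struct virtqueue *vq))
{
	return vring_create_virtqueue(index, 128 /* num */,
				      4096 /* vring_align, e.g. PAGE_SIZE */,
				      vdev,
				      true  /* weak_barriers */,
				      true  /* may_reduce_num */,
				      false /* ctx */,
				      notify, callback, "example-vq");
}
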
/* Creates a virtqueue with a custom layout. */
struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool ctx,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name);

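/*
 * A minimal usage sketch, editor-added and hypothetical: with the
 * custom-layout variant the caller owns the ring memory.  Here it is
 * simply described with vring_init() from <uapi/linux/virtio_ring.h>
 * over zeroed memory the caller already allocated; freeing that memory
 * stays the caller's responsibility, since vring_del_virtqueue() only
 * frees rings allocated by vring_create_virtqueue().
 */
static inline struct virtqueue *
virtio_example_new_custom_vq(struct virtio_device *vdev, unsigned int index,
			     void *ring_mem, unsigned int num,
			     bool (*notify)(struct virtqueue *vq),
			     void (*callback)(struct virtqueue *vq))
{
	struct vring vring;

	/* Describe the caller-provided, zeroed ring memory. */
	vring_init(&vring, num, ring_mem, 4096 /* align, e.g. PAGE_SIZE */);

	return __vring_new_virtqueue(index, vring, vdev,
				     true  /* weak_barriers */,
				     false /* ctx */,
				     notify, callback, "example-custom-vq");
}
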
/*
 * Creates a virtqueue with a standard layout but a caller-allocated
 * ring.
 */
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool ctx,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name);

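/*
 * A minimal usage sketch, editor-added and hypothetical: the
 * standard-layout variant expects "pages" to point at zeroed,
 * physically contiguous memory of at least
 * vring_size(num, vring_align) bytes (vring_size() comes from
 * <uapi/linux/virtio_ring.h>), laid out in the standard split-ring
 * format.
 */
static inline struct virtqueue *
virtio_example_new_legacy_vq(struct virtio_device *vdev, unsigned int index,
			     void *pages, unsigned int num,
			     bool (*notify)(struct virtqueue *vq),
			     void (*callback)(struct virtqueue *vq))
{
	return vring_new_virtqueue(index, num,
				   4096 /* vring_align, e.g. PAGE_SIZE */,
				   vdev,
				   true  /* weak_barriers */,
				   false /* ctx */,
				   pages, notify, callback,
				   "example-legacy-vq");
}
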
/*
 * Destroys a virtqueue.  If created with vring_create_virtqueue, this
 * also frees the ring.
 */
void vring_del_virtqueue(struct virtqueue *vq);

/* Filter out transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev);

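/*
 * Editor-added note (an assumption about typical use, not stated in
 * this header): transports generally call vring_transport_features()
 * from their finalize_features hook, so that ring-related transport
 * feature bits survive negotiation while bits the ring code does not
 * understand are cleared, for example:
 *
 *	static int example_finalize_features(struct virtio_device *vdev)
 *	{
 *		vring_transport_features(vdev);
 *		return 0;
 *	}
 */
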
irqreturn_t vring_interrupt(int irq, void *_vq);
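
/*
 * A minimal usage sketch, editor-added and hypothetical: a transport's
 * interrupt handler hands the interrupt to vring_interrupt() for each
 * virtqueue that may have pending used buffers.  vring_interrupt()
 * runs the virtqueue's callback and returns IRQ_HANDLED, or IRQ_NONE
 * if the ring had nothing new.
 */
static inline irqreturn_t virtio_example_vq_irq(int irq, struct virtqueue *vq)
{
	return vring_interrupt(irq, vq);
}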
#endif /* _LINUX_VIRTIO_RING_H */