/* drivers/vhost/vhost.h */
#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/skbuff.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <asm/atomic.h>

struct vhost_device;

enum {
	/* Enough space for all fragments, the head, and the virtio net header. */
	VHOST_NET_MAX_SG = MAX_SKB_FRAGS + 2,
};

struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

/* Deferred work item, queued to and run by the per-device worker thread. */
struct vhost_work {
	struct list_head node;
	vhost_work_fn_t fn;
	wait_queue_head_t done;
	int flushing;
	unsigned queue_seq;
	unsigned done_seq;
};

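/*
 * Illustrative, non-compiled sketch (not part of the original header):
 * backends queue deferred work as an embedded vhost_work and recover their
 * own context in the handler with container_of().  The struct and function
 * names below are hypothetical.
 */
#if 0
struct example_backend {
	struct vhost_work work;
	/* ... backend state ... */
};

static void example_work_fn(struct vhost_work *work)
{
	struct example_backend *be = container_of(work, struct example_backend,
						  work);
	/* Runs in the vhost worker thread; 'be' is valid here. */
}
#endif
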
/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table table;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct vhost_work work;
	unsigned long mask;
	struct vhost_dev *dev;
};

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     unsigned long mask, struct vhost_dev *dev);
void vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);

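/*
 * Illustrative, non-compiled sketch (not part of the original header):
 * how a backend might arm a vhost_poll so that a work handler runs in the
 * worker thread whenever a file (socket or eventfd) becomes readable,
 * roughly the way drivers/vhost/net.c polls its sockets.  The example_*
 * names are hypothetical.
 */
#if 0
static void example_handle_readable(struct vhost_work *work)
{
	/* Called from the per-device worker thread. */
}

static void example_arm_poll(struct vhost_dev *dev, struct vhost_poll *poll,
			     struct file *file)
{
	vhost_poll_init(poll, example_handle_readable, POLLIN, dev);
	vhost_poll_start(poll, file);	/* queue work when 'file' is readable */
	/* ... */
	vhost_poll_stop(poll);		/* stop watching the file */
	vhost_poll_flush(poll);		/* wait for queued work to complete */
}
#endif
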
struct vhost_log {
	u64 addr;
	u64 len;
};

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	struct vring_desc __user *desc;
	struct vring_avail __user *avail;
	struct vring_used __user *used;
	struct file *kick;
	struct file *call;
	struct file *error;
	struct eventfd_ctx *call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec indirect[VHOST_NET_MAX_SG];
	struct iovec iov[VHOST_NET_MAX_SG];
	struct iovec hdr[VHOST_NET_MAX_SG];
	size_t hdr_size;
	/* We use a kind of RCU to access the private pointer.
	 * All readers access it from the worker, which makes it possible to
	 * flush the vhost_work instead of calling synchronize_rcu.  Readers
	 * therefore do not need rcu_read_lock/rcu_read_unlock: the beginning
	 * of vhost_work execution acts instead of rcu_read_lock() and the end
	 * of vhost_work execution acts instead of rcu_read_unlock().
	 * Writers use the virtqueue mutex. */
	void *private_data;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log log[VHOST_NET_MAX_SG];
};

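/*
 * Illustrative, non-compiled sketch (not part of the original header) of the
 * private_data convention described above: a writer publishes a new pointer
 * under the virtqueue mutex and then flushes the virtqueue work so no handler
 * can still be using the old value; readers simply load it from the worker.
 * example_set_backend() and its 'sock' argument are hypothetical.
 */
#if 0
static void example_set_backend(struct vhost_virtqueue *vq, void *sock)
{
	void *old;

	mutex_lock(&vq->mutex);
	old = vq->private_data;
	rcu_assign_pointer(vq->private_data, sock);	/* publish new pointer */
	mutex_unlock(&vq->mutex);

	/* Acts as the grace period: wait for in-flight handle_kick work. */
	vhost_poll_flush(&vq->poll);

	/* 'old' can now be released safely. */
}
#endif
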
struct vhost_dev {
	/* Readers use RCU to access the memory table pointer,
	 * log base pointer and features.
	 * Writers use the mutex below. */
	struct vhost_memory *memory;
	struct mm_struct *mm;
	struct mutex mutex;
	unsigned acked_features;
	struct vhost_virtqueue *vqs;
	int nvqs;
	struct file *log_file;
	struct eventfd_ctx *log_ctx;
	spinlock_t work_lock;
	struct list_head work_list;
	struct task_struct *worker;
};

long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue *vqs, int nvqs);
long vhost_dev_check_owner(struct vhost_dev *);
long vhost_dev_reset_owner(struct vhost_dev *);
void vhost_dev_cleanup(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, unsigned long arg);
int vhost_vq_access_ok(struct vhost_virtqueue *vq);
int vhost_log_access_ok(struct vhost_dev *);

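/*
 * Illustrative, non-compiled sketch (not part of the original header):
 * typical device bring-up as seen from a backend's open() path.  A backend
 * embeds a vhost_dev plus an array of virtqueues, fills in each vq's
 * handle_kick, and hands them to vhost_dev_init(); ioctls are then routed
 * through vhost_dev_ioctl().  The example_* names are hypothetical and
 * error handling is omitted.
 */
#if 0
#define EXAMPLE_VQ_NUM 2

struct example_dev {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[EXAMPLE_VQ_NUM];
};

static void example_handle_kick(struct vhost_work *work);	/* as in the sketches above */

static long example_open(struct example_dev *d)
{
	d->vqs[0].handle_kick = example_handle_kick;
	d->vqs[1].handle_kick = example_handle_kick;
	return vhost_dev_init(&d->dev, d->vqs, EXAMPLE_VQ_NUM);
}
#endif
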
int vhost_get_vq_desc(struct vhost_dev *, struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_count,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *);

int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int head, int len);
void vhost_disable_notify(struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_virtqueue *);

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len);

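/*
 * Illustrative, non-compiled sketch (not part of the original header):
 * the descriptor-processing loop a handle_kick handler typically runs,
 * modelled on drivers/vhost/net.c.  The return convention assumed here is
 * the one used by the in-tree callers: vhost_get_vq_desc() returns the
 * descriptor head, vq->num if the ring is empty, or a negative error.
 * example_consume() is hypothetical.
 */
#if 0
static void example_drain_ring(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	unsigned int out, in;
	int head, len;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(vq);	/* we will poll the ring ourselves */

	for (;;) {
		head = vhost_get_vq_desc(dev, vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;			/* malformed ring */
		if (head == vq->num) {		/* ring empty */
			if (unlikely(vhost_enable_notify(vq))) {
				/* More buffers raced in: keep draining. */
				vhost_disable_notify(vq);
				continue;
			}
			break;
		}
		len = example_consume(vq->iov, out, in);	/* hypothetical */
		vhost_add_used_and_signal(dev, vq, head, len);
	}

	mutex_unlock(&vq->mutex);
}
#endif
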
#define vq_err(vq, fmt, ...) do {                                  \
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);              \
		if ((vq)->error_ctx)                               \
			eventfd_signal((vq)->error_ctx, 1);        \
	} while (0)

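/*
 * Illustrative, non-compiled fragment (not part of the original header):
 * vq_err() both logs via pr_debug() and, when the guest supplied an error
 * eventfd, signals it so userspace can notice the failure.  A hypothetical
 * call site:
 */
#if 0
	if (out + in > ARRAY_SIZE(vq->iov))
		vq_err(vq, "Too many descriptors: %u out, %u in\n", out, in);
#endif
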
enum {
	VHOST_FEATURES = (1 << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1 << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1 << VHOST_F_LOG_ALL) |
			 (1 << VHOST_NET_F_VIRTIO_NET_HDR),
};

static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
{
	unsigned acked_features = rcu_dereference(dev->acked_features);
	return acked_features & (1 << bit);
}

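/*
 * Illustrative, non-compiled sketch (not part of the original header):
 * backends test negotiated features with vhost_has_feature(); for example,
 * dirty logging is only performed when VHOST_F_LOG_ALL was acked by
 * userspace.  The surrounding function is hypothetical.
 */
#if 0
static bool example_logging_enabled(struct vhost_dev *dev)
{
	return vhost_has_feature(dev, VHOST_F_LOG_ALL);
}
#endif
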
#endif