#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>

struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

#define VHOST_WORK_QUEUED 1
struct vhost_work {
	struct llist_node	node;
	vhost_work_fn_t		fn;
	wait_queue_head_t	done;
	int			flags;
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table		table;
	wait_queue_head_t	*wqh;
	wait_queue_entry_t	wait;
	struct vhost_work	work;
	unsigned long		mask;
	struct vhost_dev	*dev;
};

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
bool vhost_has_work(struct vhost_dev *dev);
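
/*
 * Example (editor's sketch, not part of the original header): a caller
 * defines a handler, initializes a work item with it, and queues the item
 * on the device's worker thread. "my_work_fn" and "work"/"dev" are
 * hypothetical names used only for illustration:
 *
 *	static void my_work_fn(struct vhost_work *work)
 *	{
 *		// runs on the device's worker kthread
 *	}
 *
 *	vhost_work_init(&work, my_work_fn);
 *	vhost_work_queue(dev, &work);
 */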

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     unsigned long mask, struct vhost_dev *dev);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
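
/*
 * Example (editor's sketch): vhost_poll ties a file's wait queue to a
 * vhost_work, so readiness on e.g. a kick eventfd or a socket runs the
 * handler in the worker. "handle_kick" and "eventfd_file" are
 * hypothetical:
 *
 *	vhost_poll_init(&poll, handle_kick, POLLIN, dev);
 *	vhost_poll_start(&poll, eventfd_file);	// arm on the file
 *	...
 *	vhost_poll_stop(&poll);			// disarm
 *	vhost_poll_flush(&poll);		// wait out queued work
 */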

void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work);
long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp);

struct vhost_log {
	u64 addr;
	u64 len;
};

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

struct vhost_umem_node {
	struct rb_node rb;
	struct list_head link;
	__u64 start;
	__u64 last;
	__u64 size;
	__u64 userspace_addr;
	__u32 perm;
	__u32 flags_padding;
	__u64 __subtree_last;
};

struct vhost_umem {
	struct rb_root umem_tree;
	struct list_head umem_list;
	int numem;
};

enum vhost_uaddr_type {
	VHOST_ADDR_DESC = 0,
	VHOST_ADDR_AVAIL = 1,
	VHOST_ADDR_USED = 2,
	VHOST_NUM_ADDRS = 3,
};

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	struct vring_desc __user *desc;
	struct vring_avail __user *avail;
	struct vring_used __user *used;
	const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS];
	struct file *kick;
	struct file *call;
	struct file *error;
	struct eventfd_ctx *call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;

	/* Whether signalled_used above is valid */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	struct iovec iotlb_iov[64];
	struct iovec *indirect;
	struct vring_used_elem *heads;
	/* Protected by virtqueue mutex. */
	struct vhost_umem *umem;
	struct vhost_umem *iotlb;
	void *private_data;
	u64 acked_features;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;

	/* Ring endianness. Defaults to legacy native endianness.
	 * Set to true when starting a modern virtio device. */
	bool is_le;
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
	/* Ring endianness requested by userspace for cross-endian support. */
	bool user_be;
#endif
	u32 busyloop_timeout;
};
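
/*
 * Example (editor's sketch, modeled on vhost-net): handle_kick runs as a
 * vhost_work, so a device recovers its virtqueue via container_of() on
 * the embedded poll.work:
 *
 *	static void handle_tx_kick(struct vhost_work *work)
 *	{
 *		struct vhost_virtqueue *vq =
 *			container_of(work, struct vhost_virtqueue, poll.work);
 *		...
 *	}
 */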

struct vhost_msg_node {
	struct vhost_msg msg;
	struct vhost_virtqueue *vq;
	struct list_head node;
};

struct vhost_dev {
	struct mm_struct *mm;
	struct mutex mutex;
	struct vhost_virtqueue **vqs;
	int nvqs;
	struct file *log_file;
	struct eventfd_ctx *log_ctx;
	struct llist_head work_list;
	struct task_struct *worker;
	struct vhost_umem *umem;
	struct vhost_umem *iotlb;
	spinlock_t iotlb_lock;
	struct list_head read_list;
	struct list_head pending_list;
	wait_queue_head_t wait;
};

void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_umem *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_umem *);
void vhost_dev_cleanup(struct vhost_dev *, bool locked);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp);
int vhost_vq_access_ok(struct vhost_virtqueue *vq);
int vhost_log_access_ok(struct vhost_dev *);
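
/*
 * Example (editor's sketch of the usual lifecycle, simplified from
 * vhost-net's open/ioctl/release paths; "d", "vqs", "nvqs", "ioctl" and
 * "argp" are hypothetical):
 *
 *	vhost_dev_init(&d->dev, vqs, nvqs);		// on open()
 *	...
 *	r = vhost_dev_ioctl(&d->dev, ioctl, argp);	// generic ioctls,
 *	if (r == -ENOIOCTLCMD)				// incl. VHOST_SET_OWNER
 *		r = vhost_vring_ioctl(&d->dev, ioctl, argp);
 *	...
 *	vhost_dev_stop(&d->dev);			// on release()
 *	vhost_dev_cleanup(&d->dev, false);
 */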

int vhost_get_vq_desc(struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_count,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
				 struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
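
/*
 * Example (editor's sketch of the canonical worker loop built from the
 * calls above; error handling elided, "len" is filled in by the device):
 *
 *	for (;;) {
 *		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *					 &out, &in, NULL, NULL);
 *		if (head == vq->num) {		// ring drained
 *			if (unlikely(vhost_enable_notify(dev, vq))) {
 *				vhost_disable_notify(dev, vq);
 *				continue;	// more buffers raced in
 *			}
 *			break;
 *		}
 *		// consume iov[0..out), then fill iov[out..out + in)
 *		vhost_add_used_and_signal(dev, vq, head, len);
 *	}
 */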

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len);
int vq_iotlb_prefetch(struct vhost_virtqueue *vq);
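
/*
 * Example (editor's sketch): with VHOST_F_LOG_ALL negotiated, a device
 * mirrors its guest-memory writes into the dirty log after updating the
 * used ring; vq->log and "log_num" here were filled by
 * vhost_get_vq_desc(), roughly:
 *
 *	if (unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)))
 *		vhost_log_write(vq, vq->log, log_num, len);
 */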

struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
void vhost_enqueue_msg(struct vhost_dev *dev,
		       struct list_head *head,
		       struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head);
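
/*
 * Example (editor's sketch, modeled on vhost_iotlb_miss() in vhost.c): a
 * translation miss is reported to userspace through dev->read_list:
 *
 *	struct vhost_msg_node *node = vhost_new_msg(vq, VHOST_IOTLB_MISS);
 *	if (node) {
 *		struct vhost_iotlb_msg *msg = &node->msg.iotlb;
 *
 *		msg->type = VHOST_IOTLB_MISS;
 *		msg->iova = iova;
 *		msg->perm = perm;
 *		vhost_enqueue_msg(dev, &dev->read_list, node);
 *	}
 */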

unsigned int vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			    poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock);
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from);
int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled);
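
/*
 * Example (editor's sketch, modeled on vhost-net): a device exposes the
 * IOTLB channel by forwarding its file_operations to these helpers;
 * "my_chr_read_iter" and "struct my_dev" are hypothetical:
 *
 *	static ssize_t my_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct my_dev *n = iocb->ki_filp->private_data;
 *
 *		return vhost_chr_read_iter(&n->dev, to,
 *					   iocb->ki_filp->f_flags & O_NONBLOCK);
 *	}
 */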

#define vq_err(vq, fmt, ...) do {				\
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);		\
		if ((vq)->error_ctx)				\
			eventfd_signal((vq)->error_ctx, 1);	\
	} while (0)
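
/*
 * Example use (sketch): vq_err(vq, "Failed to get vring base: %d\n", r);
 */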

enum {
	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
			 (1ULL << VHOST_F_LOG_ALL) |
			 (1ULL << VIRTIO_F_ANY_LAYOUT) |
			 (1ULL << VIRTIO_F_VERSION_1)
};
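
/*
 * Example (editor's sketch): devices extend this base mask with their own
 * bits; "MY_DEV_FEATURES" is a hypothetical net-style composition:
 *
 *	enum {
 *		MY_DEV_FEATURES = VHOST_FEATURES |
 *				  (1ULL << VIRTIO_NET_F_MRG_RXBUF),
 *	};
 */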

static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_features & (1ULL << bit);
}
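
/*
 * Example use (sketch): gate optional ring behavior on negotiated bits:
 *
 *	if (vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX))
 *		use_event_idx = true;	// used_event/avail_event suppression
 */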

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return vq->is_le;
}
#else
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return virtio_legacy_is_little_endian() || vq->is_le;
}
#endif

/* Memory accessors */
static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
{
	return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
{
	return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
}

static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
{
	return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
{
	return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
}

static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
{
	return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
{
	return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
}
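
/*
 * Example (editor's sketch): ring fields are stored in virtio (possibly
 * legacy) endianness, so accesses go through these helpers, e.g. reading
 * the available index:
 *
 *	__virtio16 idx;
 *
 *	if (__get_user(idx, &vq->avail->idx))
 *		return -EFAULT;
 *	vq->avail_idx = vhost16_to_cpu(vq, idx);
 */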