/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Generic code for virtio server in host kernel.
 */

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/uio.h>
#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/interval_tree_generic.h>
#include <linux/nospec.h>

#include "vhost.h"

static ushort max_mem_regions = 64;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(max_mem_regions,
        "Maximum number of memory regions in memory map. (default: 64)");
static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
        "Maximum number of iotlb entries. (default: 2048)");

enum {
        VHOST_MEMORY_F_LOG = 0x1,
};

#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])

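/* With VIRTIO_RING_F_EVENT_IDX, the driver's used_event field lives
 * immediately after the available ring (avail->ring[num]) and the device's
 * avail_event field immediately after the used ring (used->ring[num]);
 * the two macros above compute those trailing slots.
 */
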
INTERVAL_TREE_DEFINE(struct vhost_umem_node,
                     rb, __u64, __subtree_last,
                     START, LAST, static inline, vhost_umem_interval_tree);

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
        vq->user_be = !virtio_legacy_is_little_endian();

static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)

static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
        struct vhost_vring_state s;

        if (copy_from_user(&s, argp, sizeof(s)))
        if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
            s.num != VHOST_VRING_BIG_ENDIAN)
        if (s.num == VHOST_VRING_BIG_ENDIAN)
                vhost_enable_cross_endian_big(vq);
                vhost_enable_cross_endian_little(vq);

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
        struct vhost_vring_state s = {
        if (copy_to_user(argp, &s, sizeof(s)))

static void vhost_init_is_le(struct vhost_virtqueue *vq)
        /* Note for legacy virtio: user_be is initialized at reset time
         * according to the host endianness. If userspace does not set an
         * explicit endianness, the default behavior is native endian, as
         * expected by legacy virtio.
         */
        vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
#else
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,

static void vhost_init_is_le(struct vhost_virtqueue *vq)
        vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
                || virtio_legacy_is_little_endian();
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */

static void vhost_reset_is_le(struct vhost_virtqueue *vq)
        vhost_init_is_le(vq);

struct vhost_flush_struct {
        struct vhost_work work;
        struct completion wait_event;

static void vhost_flush_work(struct vhost_work *work)
        struct vhost_flush_struct *s;

        s = container_of(work, struct vhost_flush_struct, work);
        complete(&s->wait_event);

static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
        struct vhost_poll *poll;

        poll = container_of(pt, struct vhost_poll, table);
        add_wait_queue(wqh, &poll->wait);

static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
        struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);

        if (!(key_to_poll(key) & poll->mask))
        vhost_poll_queue(poll);

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
        clear_bit(VHOST_WORK_QUEUED, &work->flags);
EXPORT_SYMBOL_GPL(vhost_work_init);

/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
                     __poll_t mask, struct vhost_dev *dev)
        init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
        init_poll_funcptr(&poll->table, vhost_poll_func);
        vhost_work_init(&poll->work, fn);
EXPORT_SYMBOL_GPL(vhost_poll_init);

/* Start polling a file. We add ourselves to file's wait queue. The caller must
 * keep a reference to a file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
        mask = vfs_poll(file, &poll->table);
                vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
        if (mask & EPOLLERR) {
                vhost_poll_stop(poll);
EXPORT_SYMBOL_GPL(vhost_poll_start);

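/*
 * Illustrative sketch (not part of this file): a backend such as vhost-net
 * typically pairs these helpers roughly as follows, with handle_kick_fn
 * standing in for its own vhost_work_fn_t handler:
 *
 *      vhost_poll_init(&poll, handle_kick_fn, EPOLLIN, dev);
 *      err = vhost_poll_start(&poll, kick_eventfd_file);
 *      ...
 *      vhost_poll_stop(&poll);
 *      vhost_poll_flush(&poll);
 *      fput(kick_eventfd_file);
 */
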
/* Stop polling a file. After this function returns, it becomes safe to drop the
 * file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
        remove_wait_queue(poll->wqh, &poll->wait);
EXPORT_SYMBOL_GPL(vhost_poll_stop);

void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
        struct vhost_flush_struct flush;

        init_completion(&flush.wait_event);
        vhost_work_init(&flush.work, vhost_flush_work);

        vhost_work_queue(dev, &flush.work);
        wait_for_completion(&flush.wait_event);
EXPORT_SYMBOL_GPL(vhost_work_flush);

/* Flush any work that has been scheduled. When calling this, don't hold any
 * locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
        vhost_work_flush(poll->dev, &poll->work);
EXPORT_SYMBOL_GPL(vhost_poll_flush);

void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
        if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
                /* We can only add the work to the list after we're
                 * sure it was not in the list.
                 * test_and_set_bit() implies a memory barrier.
                 */
                llist_add(&work->node, &dev->work_list);
                wake_up_process(dev->worker);
EXPORT_SYMBOL_GPL(vhost_work_queue);

/* A lockless hint for busy polling code to exit the loop */
bool vhost_has_work(struct vhost_dev *dev)
        return !llist_empty(&dev->work_list);
EXPORT_SYMBOL_GPL(vhost_has_work);

void vhost_poll_queue(struct vhost_poll *poll)
        vhost_work_queue(poll->dev, &poll->work);
EXPORT_SYMBOL_GPL(vhost_poll_queue);

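/*
 * Illustrative sketch (not part of this file): queueing work and then
 * waiting synchronously for it to finish pairs the helpers above, e.g.
 *
 *      vhost_work_init(&my_work, my_fn);
 *      vhost_work_queue(dev, &my_work);
 *      vhost_work_flush(dev, &my_work);   // returns once my_fn has run
 *
 * vhost_work_flush() achieves this by queueing a vhost_flush_struct behind
 * any already-queued work and sleeping on its completion.
 */
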
static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
        for (j = 0; j < VHOST_NUM_ADDRS; j++)
                vq->meta_iotlb[j] = NULL;

static void vhost_vq_meta_reset(struct vhost_dev *d)
        for (i = 0; i < d->nvqs; ++i)
                __vhost_vq_meta_reset(d->vqs[i]);

static void vhost_vq_reset(struct vhost_dev *dev,
                           struct vhost_virtqueue *vq)
        vq->last_avail_idx = 0;
        vq->last_used_idx = 0;
        vq->signalled_used = 0;
        vq->signalled_used_valid = false;
        vq->log_used = false;
        vq->log_addr = -1ull;
        vq->private_data = NULL;
        vq->acked_features = 0;
        vq->acked_backend_features = 0;
        vq->error_ctx = NULL;
        vhost_reset_is_le(vq);
        vhost_disable_cross_endian(vq);
        vq->busyloop_timeout = 0;
        __vhost_vq_meta_reset(vq);

static int vhost_worker(void *data)
        struct vhost_dev *dev = data;
        struct vhost_work *work, *work_next;
        struct llist_node *node;
        mm_segment_t oldfs = get_fs();

                /* mb paired w/ kthread_stop */
                set_current_state(TASK_INTERRUPTIBLE);

                if (kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);

                node = llist_del_all(&dev->work_list);
                node = llist_reverse_order(node);
                /* make sure flag is seen after deletion */
                llist_for_each_entry_safe(work, work_next, node, node) {
                        clear_bit(VHOST_WORK_QUEUED, &work->flags);
                        __set_current_state(TASK_RUNNING);

static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)

/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
        struct vhost_virtqueue *vq;

        for (i = 0; i < dev->nvqs; ++i) {
                vq->indirect = kmalloc_array(UIO_MAXIOV,
                                             sizeof(*vq->indirect),
                vq->log = kmalloc_array(UIO_MAXIOV, sizeof(*vq->log),
                vq->heads = kmalloc_array(UIO_MAXIOV, sizeof(*vq->heads),
                if (!vq->indirect || !vq->log || !vq->heads)
                vhost_vq_free_iovecs(dev->vqs[i]);

static void vhost_dev_free_iovecs(struct vhost_dev *dev)
        for (i = 0; i < dev->nvqs; ++i)
                vhost_vq_free_iovecs(dev->vqs[i]);

void vhost_dev_init(struct vhost_dev *dev,
                    struct vhost_virtqueue **vqs, int nvqs)
        struct vhost_virtqueue *vq;

        mutex_init(&dev->mutex);
        init_llist_head(&dev->work_list);
        init_waitqueue_head(&dev->wait);
        INIT_LIST_HEAD(&dev->read_list);
        INIT_LIST_HEAD(&dev->pending_list);
        spin_lock_init(&dev->iotlb_lock);

        for (i = 0; i < dev->nvqs; ++i) {
                mutex_init(&vq->mutex);
                vhost_vq_reset(dev, vq);
                        vhost_poll_init(&vq->poll, vq->handle_kick,
EXPORT_SYMBOL_GPL(vhost_dev_init);

/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
        /* Are you the owner? If not, I don't think you mean to do that */
        return dev->mm == current->mm ? 0 : -EPERM;
EXPORT_SYMBOL_GPL(vhost_dev_check_owner);

struct vhost_attach_cgroups_struct {
        struct vhost_work work;
        struct task_struct *owner;

static void vhost_attach_cgroups_work(struct vhost_work *work)
        struct vhost_attach_cgroups_struct *s;

        s = container_of(work, struct vhost_attach_cgroups_struct, work);
        s->ret = cgroup_attach_task_all(s->owner, current);

static int vhost_attach_cgroups(struct vhost_dev *dev)
        struct vhost_attach_cgroups_struct attach;

        attach.owner = current;
        vhost_work_init(&attach.work, vhost_attach_cgroups_work);
        vhost_work_queue(dev, &attach.work);
        vhost_work_flush(dev, &attach.work);

/* Caller should have device mutex */
bool vhost_dev_has_owner(struct vhost_dev *dev)
EXPORT_SYMBOL_GPL(vhost_dev_has_owner);

/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
        struct task_struct *worker;

        /* Is there an owner already? */
        if (vhost_dev_has_owner(dev)) {

        /* No owner, become one */
        dev->mm = get_task_mm(current);
        worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
        if (IS_ERR(worker)) {
                err = PTR_ERR(worker);

        dev->worker = worker;
        wake_up_process(worker); /* avoid contributing to loadavg */

        err = vhost_attach_cgroups(dev);
        err = vhost_dev_alloc_iovecs(dev);

        kthread_stop(worker);
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);

struct vhost_umem *vhost_dev_reset_owner_prepare(void)
        return kvzalloc(sizeof(struct vhost_umem), GFP_KERNEL);
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);

/* Caller should have device mutex */
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_umem *umem)
        vhost_dev_cleanup(dev);

        /* Restore memory to default empty mapping. */
        INIT_LIST_HEAD(&umem->umem_list);

        /* We don't need VQ locks below since vhost_dev_cleanup makes sure
         * VQs aren't running.
         */
        for (i = 0; i < dev->nvqs; ++i)
                dev->vqs[i]->umem = umem;
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);

void vhost_dev_stop(struct vhost_dev *dev)
        for (i = 0; i < dev->nvqs; ++i) {
                if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
                        vhost_poll_stop(&dev->vqs[i]->poll);
                        vhost_poll_flush(&dev->vqs[i]->poll);
EXPORT_SYMBOL_GPL(vhost_dev_stop);

static void vhost_umem_free(struct vhost_umem *umem,
                            struct vhost_umem_node *node)
        vhost_umem_interval_tree_remove(node, &umem->umem_tree);
        list_del(&node->link);

static void vhost_umem_clean(struct vhost_umem *umem)
        struct vhost_umem_node *node, *tmp;

        list_for_each_entry_safe(node, tmp, &umem->umem_list, link)
                vhost_umem_free(umem, node);

static void vhost_clear_msg(struct vhost_dev *dev)
        struct vhost_msg_node *node, *n;

        spin_lock(&dev->iotlb_lock);

        list_for_each_entry_safe(node, n, &dev->read_list, node) {
                list_del(&node->node);

        list_for_each_entry_safe(node, n, &dev->pending_list, node) {
                list_del(&node->node);

        spin_unlock(&dev->iotlb_lock);

void vhost_dev_cleanup(struct vhost_dev *dev)
        for (i = 0; i < dev->nvqs; ++i) {
                if (dev->vqs[i]->error_ctx)
                        eventfd_ctx_put(dev->vqs[i]->error_ctx);
                if (dev->vqs[i]->kick)
                        fput(dev->vqs[i]->kick);
                if (dev->vqs[i]->call_ctx)
                        eventfd_ctx_put(dev->vqs[i]->call_ctx);
                vhost_vq_reset(dev, dev->vqs[i]);

        vhost_dev_free_iovecs(dev);
                eventfd_ctx_put(dev->log_ctx);
        /* No one will access memory at this point */
        vhost_umem_clean(dev->umem);
        vhost_umem_clean(dev->iotlb);
        vhost_clear_msg(dev);
        wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
        WARN_ON(!llist_empty(&dev->work_list));
                kthread_stop(dev->worker);
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);

static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
        u64 a = addr / VHOST_PAGE_SIZE / 8;

        /* Make sure 64 bit math will not overflow. */
        if (a > ULONG_MAX - (unsigned long)log_base ||
            a + (unsigned long)log_base > ULONG_MAX)

        return access_ok(log_base + a,
                         (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);

static bool vhost_overflow(u64 uaddr, u64 size)
        /* Make sure 64 bit math will not overflow. */
        return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;

/* Caller should have vq mutex and device mutex. */
static bool vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem,
        struct vhost_umem_node *node;

        list_for_each_entry(node, &umem->umem_list, link) {
                unsigned long a = node->userspace_addr;

                if (vhost_overflow(node->userspace_addr, node->size))
                if (!access_ok((void __user *)a,
                else if (log_all && !log_access_ok(log_base,

static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
                                               u64 addr, unsigned int size,
        const struct vhost_umem_node *node = vq->meta_iotlb[type];

        return (void *)(uintptr_t)(node->userspace_addr + addr - node->start);

/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static bool memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
        for (i = 0; i < d->nvqs; ++i) {
                mutex_lock(&d->vqs[i]->mutex);
                log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
                /* If ring is inactive, will check when it's enabled. */
                if (d->vqs[i]->private_data)
                        ok = vq_memory_access_ok(d->vqs[i]->log_base,
                mutex_unlock(&d->vqs[i]->mutex);

static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
                          struct iovec iov[], int iov_size, int access);

static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
                              const void *from, unsigned size)
                return __copy_to_user(to, from, size);
                /* This function should be called after iotlb
                 * prefetch, which means we're sure that all vq
                 * could be access through iotlb. So -EAGAIN should
                 * not happen in this case.
                 */
                void __user *uaddr = vhost_vq_meta_fetch(vq,
                                     (u64)(uintptr_t)to, size,
                        return __copy_to_user(uaddr, from, size);

                ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
                                     ARRAY_SIZE(vq->iotlb_iov),
                iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
                ret = copy_to_iter(from, size, &t);

static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
                                void __user *from, unsigned size)
                return __copy_from_user(to, from, size);
                /* This function should be called after iotlb
                 * prefetch, which means we're sure that vq
                 * could be access through iotlb. So -EAGAIN should
                 * not happen in this case.
                 */
                void __user *uaddr = vhost_vq_meta_fetch(vq,
                                     (u64)(uintptr_t)from, size,
                        return __copy_from_user(to, uaddr, size);

                ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
                                     ARRAY_SIZE(vq->iotlb_iov),
                        vq_err(vq, "IOTLB translation failure: uaddr "
                               "%p size 0x%llx\n", from,
                               (unsigned long long) size);
                iov_iter_init(&f, READ, vq->iotlb_iov, ret, size);
                ret = copy_from_iter(to, size, &f);

static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
                                          void __user *addr, unsigned int size,
        ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
                             ARRAY_SIZE(vq->iotlb_iov),
                vq_err(vq, "IOTLB translation failure: uaddr "
                       "%p size 0x%llx\n", addr,
                       (unsigned long long) size);
        if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
                vq_err(vq, "Non atomic userspace memory access: uaddr "
                       "%p size 0x%llx\n", addr,
                       (unsigned long long) size);
        return vq->iotlb_iov[0].iov_base;

/* This function should be called after iotlb
 * prefetch, which means we're sure that vq
 * could be access through iotlb. So -EAGAIN should
 * not happen in this case.
 */
static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
                                            void *addr, unsigned int size,
        void __user *uaddr = vhost_vq_meta_fetch(vq,
                             (u64)(uintptr_t)addr, size, type);

        return __vhost_get_user_slow(vq, addr, size, type);

#define vhost_put_user(vq, x, ptr) \
        ret = __put_user(x, ptr); \
        __typeof__(ptr) to = \
                (__typeof__(ptr)) __vhost_get_user(vq, ptr, \
                                  sizeof(*ptr), VHOST_ADDR_USED); \
                ret = __put_user(x, to); \

#define vhost_get_user(vq, x, ptr, type) \
        ret = __get_user(x, ptr); \
        __typeof__(ptr) from = \
                (__typeof__(ptr)) __vhost_get_user(vq, ptr, \
                ret = __get_user(x, from); \

#define vhost_get_avail(vq, x, ptr) \
        vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)

#define vhost_get_used(vq, x, ptr) \
        vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)

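/*
 * When an IOTLB is in use, the accessors above first consult the per-ring
 * meta_iotlb cache (filled by vhost_vq_meta_update()); only on a miss do they
 * fall back to __vhost_get_user_slow(), which walks the interval tree via
 * translate_desc().
 */
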
static void vhost_dev_lock_vqs(struct vhost_dev *d)
        for (i = 0; i < d->nvqs; ++i)
                mutex_lock_nested(&d->vqs[i]->mutex, i);

static void vhost_dev_unlock_vqs(struct vhost_dev *d)
        for (i = 0; i < d->nvqs; ++i)
                mutex_unlock(&d->vqs[i]->mutex);

static int vhost_new_umem_range(struct vhost_umem *umem,
                                u64 start, u64 size, u64 end,
                                u64 userspace_addr, int perm)
        struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);

        if (umem->numem == max_iotlb_entries) {
                tmp = list_first_entry(&umem->umem_list, typeof(*tmp), link);
                vhost_umem_free(umem, tmp);

        node->userspace_addr = userspace_addr;
        INIT_LIST_HEAD(&node->link);
        list_add_tail(&node->link, &umem->umem_list);
        vhost_umem_interval_tree_insert(node, &umem->umem_tree);

static void vhost_del_umem_range(struct vhost_umem *umem,
        struct vhost_umem_node *node;

        while ((node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
                vhost_umem_free(umem, node);

static void vhost_iotlb_notify_vq(struct vhost_dev *d,
                                  struct vhost_iotlb_msg *msg)
        struct vhost_msg_node *node, *n;

        spin_lock(&d->iotlb_lock);

        list_for_each_entry_safe(node, n, &d->pending_list, node) {
                struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
                if (msg->iova <= vq_msg->iova &&
                    msg->iova + msg->size - 1 >= vq_msg->iova &&
                    vq_msg->type == VHOST_IOTLB_MISS) {
                        vhost_poll_queue(&node->vq->poll);
                        list_del(&node->node);

        spin_unlock(&d->iotlb_lock);

static bool umem_access_ok(u64 uaddr, u64 size, int access)
        unsigned long a = uaddr;

        /* Make sure 64 bit math will not overflow. */
        if (vhost_overflow(uaddr, size))
        if ((access & VHOST_ACCESS_RO) &&
            !access_ok((void __user *)a, size))
        if ((access & VHOST_ACCESS_WO) &&
            !access_ok((void __user *)a, size))

static int vhost_process_iotlb_msg(struct vhost_dev *dev,
                                   struct vhost_iotlb_msg *msg)
        mutex_lock(&dev->mutex);
        vhost_dev_lock_vqs(dev);
        case VHOST_IOTLB_UPDATE:
                if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
                vhost_vq_meta_reset(dev);
                if (vhost_new_umem_range(dev->iotlb, msg->iova, msg->size,
                                         msg->iova + msg->size - 1,
                                         msg->uaddr, msg->perm)) {
                vhost_iotlb_notify_vq(dev, msg);
        case VHOST_IOTLB_INVALIDATE:
                vhost_vq_meta_reset(dev);
                vhost_del_umem_range(dev->iotlb, msg->iova,
                                     msg->iova + msg->size - 1);

        vhost_dev_unlock_vqs(dev);
        mutex_unlock(&dev->mutex);

ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
                             struct iov_iter *from)
        struct vhost_iotlb_msg msg;

        ret = copy_from_iter(&type, sizeof(type), from);
        if (ret != sizeof(type))
        case VHOST_IOTLB_MSG:
                /* There may be a hole after type for V1 message type,
                 */
                offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
        case VHOST_IOTLB_MSG_V2:
                offset = sizeof(__u32);

        iov_iter_advance(from, offset);
        ret = copy_from_iter(&msg, sizeof(msg), from);
        if (ret != sizeof(msg))
        if (vhost_process_iotlb_msg(dev, &msg)) {
        ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) :
              sizeof(struct vhost_msg_v2);
EXPORT_SYMBOL(vhost_chr_write_iter);

vhost_chr_poll(struct file
*file
, struct vhost_dev
*dev
,
1076 poll_wait(file
, &dev
->wait
, wait
);
1078 if (!list_empty(&dev
->read_list
))
1079 mask
|= EPOLLIN
| EPOLLRDNORM
;
1083 EXPORT_SYMBOL(vhost_chr_poll
);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
        struct vhost_msg_node *node;
        unsigned size = sizeof(struct vhost_msg);

        if (iov_iter_count(to) < size)

                prepare_to_wait(&dev->wait, &wait,
                                TASK_INTERRUPTIBLE);

                node = vhost_dequeue_msg(dev, &dev->read_list);

                if (signal_pending(current)) {

        finish_wait(&dev->wait, &wait);

                struct vhost_iotlb_msg *msg;
                void *start = &node->msg;

                switch (node->msg.type) {
                case VHOST_IOTLB_MSG:
                        size = sizeof(node->msg);
                        msg = &node->msg.iotlb;
                case VHOST_IOTLB_MSG_V2:
                        size = sizeof(node->msg_v2);
                        msg = &node->msg_v2.iotlb;

                ret = copy_to_iter(start, size, to);
                if (ret != size || msg->type != VHOST_IOTLB_MISS) {
                vhost_enqueue_msg(dev, &dev->pending_list, node);
EXPORT_SYMBOL_GPL(vhost_chr_read_iter);

static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
        struct vhost_dev *dev = vq->dev;
        struct vhost_msg_node *node;
        struct vhost_iotlb_msg *msg;
        bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);

        node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
                node->msg_v2.type = VHOST_IOTLB_MSG_V2;
                msg = &node->msg_v2.iotlb;
                msg = &node->msg.iotlb;

        msg->type = VHOST_IOTLB_MISS;
        vhost_enqueue_msg(dev, &dev->read_list, node);

static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
                         struct vring_desc __user *desc,
                         struct vring_avail __user *avail,
                         struct vring_used __user *used)
        size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

        return access_ok(desc, num * sizeof *desc) &&
                         sizeof *avail + num * sizeof *avail->ring + s) &&
                         sizeof *used + num * sizeof *used->ring + s);

static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
                                 const struct vhost_umem_node *node,
        int access = (type == VHOST_ADDR_USED) ?
                     VHOST_ACCESS_WO : VHOST_ACCESS_RO;

        if (likely(node->perm & access))
                vq->meta_iotlb[type] = node;

static bool iotlb_access_ok(struct vhost_virtqueue *vq,
                            int access, u64 addr, u64 len, int type)
        const struct vhost_umem_node *node;
        struct vhost_umem *umem = vq->iotlb;
        u64 s = 0, size, orig_addr = addr, last = addr + len - 1;

        if (vhost_vq_meta_fetch(vq, addr, len, type))

                node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
                if (node == NULL || node->start > addr) {
                        vhost_iotlb_miss(vq, addr, access);
                } else if (!(node->perm & access)) {
                        /* Report the possible access violation by
                         * requesting another translation from userspace.
                         */
                size = node->size - addr + node->start;

                if (orig_addr == addr && size >= len)
                        vhost_vq_meta_update(vq, node, type);

int vq_iotlb_prefetch(struct vhost_virtqueue *vq)
        size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
        unsigned int num = vq->num;

        return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
                               num * sizeof(*vq->desc), VHOST_ADDR_DESC) &&
               iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->avail,
                               num * sizeof(*vq->avail->ring) + s,
                               VHOST_ADDR_AVAIL) &&
               iotlb_access_ok(vq, VHOST_ACCESS_WO, (u64)(uintptr_t)vq->used,
                               num * sizeof(*vq->used->ring) + s,
EXPORT_SYMBOL_GPL(vq_iotlb_prefetch);

/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
bool vhost_log_access_ok(struct vhost_dev *dev)
        return memory_access_ok(dev, dev->umem, 1);
EXPORT_SYMBOL_GPL(vhost_log_access_ok);

/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static bool vq_log_access_ok(struct vhost_virtqueue *vq,
                             void __user *log_base)
        size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

        return vq_memory_access_ok(log_base, vq->umem,
                                   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
                (!vq->log_used || log_access_ok(log_base, vq->log_addr,
                                        vq->num * sizeof *vq->used->ring + s));

/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
        if (!vq_log_access_ok(vq, vq->log_base))

        /* Access validation occurs at prefetch time with IOTLB */

        return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);

static struct vhost_umem *vhost_umem_alloc(void)
        struct vhost_umem *umem = kvzalloc(sizeof(*umem), GFP_KERNEL);

        umem->umem_tree = RB_ROOT_CACHED;
        INIT_LIST_HEAD(&umem->umem_list);

static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
        struct vhost_memory mem, *newmem;
        struct vhost_memory_region *region;
        struct vhost_umem *newumem, *oldumem;
        unsigned long size = offsetof(struct vhost_memory, regions);

        if (copy_from_user(&mem, m, size))
        if (mem.nregions > max_mem_regions)
        newmem = kvzalloc(struct_size(newmem, regions, mem.nregions),

        memcpy(newmem, &mem, size);
        if (copy_from_user(newmem->regions, m->regions,
                           mem.nregions * sizeof *m->regions)) {

        newumem = vhost_umem_alloc();

        for (region = newmem->regions;
             region < newmem->regions + mem.nregions;
                if (vhost_new_umem_range(newumem,
                                         region->guest_phys_addr,
                                         region->memory_size,
                                         region->guest_phys_addr +
                                         region->memory_size - 1,
                                         region->userspace_addr,

        if (!memory_access_ok(d, newumem, 0))

        /* All memory accesses are done under some VQ mutex. */
        for (i = 0; i < d->nvqs; ++i) {
                mutex_lock(&d->vqs[i]->mutex);
                d->vqs[i]->umem = newumem;
                mutex_unlock(&d->vqs[i]->mutex);

        vhost_umem_clean(oldumem);
        vhost_umem_clean(newumem);

long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
        struct file *eventfp, *filep = NULL;
        bool pollstart = false, pollstop = false;
        struct eventfd_ctx *ctx = NULL;
        u32 __user *idxp = argp;
        struct vhost_virtqueue *vq;
        struct vhost_vring_state s;
        struct vhost_vring_file f;
        struct vhost_vring_addr a;

        r = get_user(idx, idxp);
        idx = array_index_nospec(idx, d->nvqs);

        mutex_lock(&vq->mutex);

        case VHOST_SET_VRING_NUM:
                /* Resizing ring with an active backend?
                 * You don't want to do that. */
                if (vq->private_data) {
                if (copy_from_user(&s, argp, sizeof s)) {
                if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
        case VHOST_SET_VRING_BASE:
                /* Moving base with an active backend?
                 * You don't want to do that. */
                if (vq->private_data) {
                if (copy_from_user(&s, argp, sizeof s)) {
                if (s.num > 0xffff) {
                vq->last_avail_idx = s.num;
                /* Forget the cached index value. */
                vq->avail_idx = vq->last_avail_idx;
        case VHOST_GET_VRING_BASE:
                s.num = vq->last_avail_idx;
                if (copy_to_user(argp, &s, sizeof s))
        case VHOST_SET_VRING_ADDR:
                if (copy_from_user(&a, argp, sizeof a)) {
                if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
                /* For 32bit, verify that the top 32bits of the user
                   data are set to zero. */
                if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
                    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
                    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
                /* Make sure it's safe to cast pointers to vring types. */
                BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
                BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
                if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
                    (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
                    (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1))) {
                /* We only verify access here if backend is configured.
                 * If it is not, we don't as size might not have been setup.
                 * We will verify when backend is configured. */
                if (vq->private_data) {
                        if (!vq_access_ok(vq, vq->num,
                                (void __user *)(unsigned long)a.desc_user_addr,
                                (void __user *)(unsigned long)a.avail_user_addr,
                                (void __user *)(unsigned long)a.used_user_addr)) {
                        /* Also validate log access for used ring if enabled. */
                        if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
                            !log_access_ok(vq->log_base, a.log_guest_addr,
                                           vq->num * sizeof *vq->used->ring)) {
                vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
                vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
                vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
                vq->log_addr = a.log_guest_addr;
                vq->used = (void __user *)(unsigned long)a.used_user_addr;
        case VHOST_SET_VRING_KICK:
                if (copy_from_user(&f, argp, sizeof f)) {
                eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
                if (IS_ERR(eventfp)) {
                        r = PTR_ERR(eventfp);
                if (eventfp != vq->kick) {
                        pollstop = (filep = vq->kick) != NULL;
                        pollstart = (vq->kick = eventfp) != NULL;
        case VHOST_SET_VRING_CALL:
                if (copy_from_user(&f, argp, sizeof f)) {
                ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
                swap(ctx, vq->call_ctx);
        case VHOST_SET_VRING_ERR:
                if (copy_from_user(&f, argp, sizeof f)) {
                ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
                swap(ctx, vq->error_ctx);
        case VHOST_SET_VRING_ENDIAN:
                r = vhost_set_vring_endian(vq, argp);
        case VHOST_GET_VRING_ENDIAN:
                r = vhost_get_vring_endian(vq, idx, argp);
        case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
                if (copy_from_user(&s, argp, sizeof(s))) {
                vq->busyloop_timeout = s.num;
        case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
                s.num = vq->busyloop_timeout;
                if (copy_to_user(argp, &s, sizeof(s)))

        if (pollstop && vq->handle_kick)
                vhost_poll_stop(&vq->poll);

        if (!IS_ERR_OR_NULL(ctx))
                eventfd_ctx_put(ctx);

        if (pollstart && vq->handle_kick)
                r = vhost_poll_start(&vq->poll, vq->kick);

        mutex_unlock(&vq->mutex);

        if (pollstop && vq->handle_kick)
                vhost_poll_flush(&vq->poll);
EXPORT_SYMBOL_GPL(vhost_vring_ioctl);

int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
        struct vhost_umem *niotlb, *oiotlb;

        niotlb = vhost_umem_alloc();

        for (i = 0; i < d->nvqs; ++i) {
                struct vhost_virtqueue *vq = d->vqs[i];

                mutex_lock(&vq->mutex);
                __vhost_vq_meta_reset(vq);
                mutex_unlock(&vq->mutex);

        vhost_umem_clean(oiotlb);
EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);

/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
        struct eventfd_ctx *ctx;

        /* If you are not the owner, you can become one */
        if (ioctl == VHOST_SET_OWNER) {
                r = vhost_dev_set_owner(d);

        /* You must be the owner to do anything else */
        r = vhost_dev_check_owner(d);

        case VHOST_SET_MEM_TABLE:
                r = vhost_set_memory(d, argp);
        case VHOST_SET_LOG_BASE:
                if (copy_from_user(&p, argp, sizeof p)) {
                if ((u64)(unsigned long)p != p) {
                for (i = 0; i < d->nvqs; ++i) {
                        struct vhost_virtqueue *vq;
                        void __user *base = (void __user *)(unsigned long)p;

                        mutex_lock(&vq->mutex);
                        /* If ring is inactive, will check when it's enabled. */
                        if (vq->private_data && !vq_log_access_ok(vq, base))
                                vq->log_base = base;
                        mutex_unlock(&vq->mutex);
        case VHOST_SET_LOG_FD:
                r = get_user(fd, (int __user *)argp);
                ctx = fd == -1 ? NULL : eventfd_ctx_fdget(fd);
                swap(ctx, d->log_ctx);
                for (i = 0; i < d->nvqs; ++i) {
                        mutex_lock(&d->vqs[i]->mutex);
                        d->vqs[i]->log_ctx = d->log_ctx;
                        mutex_unlock(&d->vqs[i]->mutex);
                        eventfd_ctx_put(ctx);
EXPORT_SYMBOL_GPL(vhost_dev_ioctl);

/* TODO: This is really inefficient. We need something like get_user()
 * (instruction directly accesses the data, with an exception table entry
 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
 */
static int set_bit_to_user(int nr, void __user *addr)
        unsigned long log = (unsigned long)addr;
        int bit = nr + (log % PAGE_SIZE) * 8;

        r = get_user_pages_fast(log, 1, 1, &page);
        base = kmap_atomic(page);
        kunmap_atomic(base);
        set_page_dirty_lock(page);

static int log_write(void __user *log_base,
                     u64 write_address, u64 write_length)
        u64 write_page = write_address / VHOST_PAGE_SIZE;

        write_length += write_address % VHOST_PAGE_SIZE;
                u64 base = (u64)(unsigned long)log_base;
                u64 log = base + write_page / 8;
                int bit = write_page % 8;
                if ((u64)(unsigned long)log != log)
                r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
                if (write_length <= VHOST_PAGE_SIZE)
                write_length -= VHOST_PAGE_SIZE;

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
                    unsigned int log_num, u64 len)
        /* Make sure data written is seen before log. */
        for (i = 0; i < log_num; ++i) {
                u64 l = min(log[i].len, len);
                r = log_write(vq->log_base, log[i].addr, l);
                        eventfd_signal(vq->log_ctx, 1);
        /* Length written exceeds what we have stored. This is a bug. */
EXPORT_SYMBOL_GPL(vhost_log_write);

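/*
 * The dirty log is a bitmap with one bit per VHOST_PAGE_SIZE page, starting
 * at log_base. Worked example: for a guest-physical write address of 0x11000
 * with 4 KiB pages, write_page = 0x11000 / 0x1000 = 17, so log_write() sets
 * bit 17 % 8 = 1 in the byte at log_base + 17 / 8 = log_base + 2.
 */
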
static int vhost_update_used_flags(struct vhost_virtqueue *vq)
        if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
                           &vq->used->flags) < 0)
        if (unlikely(vq->log_used)) {
                /* Make sure the flag is seen before log. */
                /* Log used flag write. */
                used = &vq->used->flags;
                log_write(vq->log_base, vq->log_addr +
                          (used - (void __user *)vq->used),
                          sizeof vq->used->flags);
                        eventfd_signal(vq->log_ctx, 1);

static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
        if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
                           vhost_avail_event(vq)))
        if (unlikely(vq->log_used)) {
                /* Make sure the event is seen before log. */
                /* Log avail event write */
                used = vhost_avail_event(vq);
                log_write(vq->log_base, vq->log_addr +
                          (used - (void __user *)vq->used),
                          sizeof *vhost_avail_event(vq));
                        eventfd_signal(vq->log_ctx, 1);

int vhost_vq_init_access(struct vhost_virtqueue *vq)
        __virtio16 last_used_idx;
        bool is_le = vq->is_le;

        if (!vq->private_data)

        vhost_init_is_le(vq);

        r = vhost_update_used_flags(vq);
        vq->signalled_used_valid = false;
            !access_ok(&vq->used->idx, sizeof vq->used->idx)) {
        r = vhost_get_used(vq, last_used_idx, &vq->used->idx);
                vq_err(vq, "Can't access used idx at %p\n",
        vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
EXPORT_SYMBOL_GPL(vhost_vq_init_access);

static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
                          struct iovec iov[], int iov_size, int access)
        const struct vhost_umem_node *node;
        struct vhost_dev *dev = vq->dev;
        struct vhost_umem *umem = dev->iotlb ? dev->iotlb : dev->umem;

        while ((u64)len > s) {
                if (unlikely(ret >= iov_size)) {
                node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
                                                           addr, addr + len - 1);
                if (node == NULL || node->start > addr) {
                        if (umem != dev->iotlb) {
                } else if (!(node->perm & access)) {

                size = node->size - addr + node->start;
                _iov->iov_len = min((u64)len - s, size);
                _iov->iov_base = (void __user *)(unsigned long)
                        (node->userspace_addr + addr - node->start);

                vhost_iotlb_miss(vq, addr, access);

/* Each buffer in the virtqueues is actually a chain of descriptors.  This
 * function returns the next descriptor in the chain,
 * or -1U if we're at the end. */
static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
        /* If this descriptor says it doesn't chain, we're done. */
        if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))

        /* Check they're not leading us off end of descriptors. */
        next = vhost16_to_cpu(vq, READ_ONCE(desc->next));

static int get_indirect(struct vhost_virtqueue *vq,
                        struct iovec iov[], unsigned int iov_size,
                        unsigned int *out_num, unsigned int *in_num,
                        struct vhost_log *log, unsigned int *log_num,
                        struct vring_desc *indirect)
        struct vring_desc desc;
        unsigned int i = 0, count, found = 0;
        u32 len = vhost32_to_cpu(vq, indirect->len);
        struct iov_iter from;

        if (unlikely(len % sizeof desc)) {
                vq_err(vq, "Invalid length in indirect descriptor: "
                       "len 0x%llx not multiple of 0x%zx\n",
                       (unsigned long long)len,

        ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
                             UIO_MAXIOV, VHOST_ACCESS_RO);
        if (unlikely(ret < 0)) {
                        vq_err(vq, "Translation failure %d in indirect.\n", ret);
        iov_iter_init(&from, READ, vq->indirect, ret, len);

        /* We will use the result as an address to read from, so most
         * architectures only need a compiler barrier here. */
        read_barrier_depends();

        count = len / sizeof desc;
        /* Buffers are chained via a 16 bit next field, so
         * we can have at most 2^16 of these. */
        if (unlikely(count > USHRT_MAX + 1)) {
                vq_err(vq, "Indirect buffer length too big: %d\n",

                unsigned iov_count = *in_num + *out_num;
                if (unlikely(++found > count)) {
                        vq_err(vq, "Loop detected: last one at %u "
                               "indirect size %u\n",
                if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
                        vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
                               i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
                if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
                        vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
                               i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);

                if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
                        access = VHOST_ACCESS_WO;
                        access = VHOST_ACCESS_RO;

                ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
                                     vhost32_to_cpu(vq, desc.len), iov + iov_count,
                                     iov_size - iov_count, access);
                if (unlikely(ret < 0)) {
                        vq_err(vq, "Translation failure %d indirect idx %d\n",

                /* If this is an input descriptor, increment that count. */
                if (access == VHOST_ACCESS_WO) {
                        if (unlikely(log)) {
                                log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
                                log[*log_num].len = vhost32_to_cpu(vq, desc.len);
                        /* If it's an output descriptor, they're all supposed
                         * to come before any input descriptors. */
                        if (unlikely(*in_num)) {
                                vq_err(vq, "Indirect descriptor "
                                       "has out after in: idx %d\n", i);
        } while ((i = next_desc(vq, &desc)) != -1);

/* This looks in the virtqueue and for the first available buffer, and converts
 * it to an iovec for convenient access.  Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function returns the descriptor number found, or vq->num (which is
 * never a valid descriptor number) if none was found.  A negative code is
 * returned on error. */
int vhost_get_vq_desc(struct vhost_virtqueue *vq,
                      struct iovec iov[], unsigned int iov_size,
                      unsigned int *out_num, unsigned int *in_num,
                      struct vhost_log *log, unsigned int *log_num)
        struct vring_desc desc;
        unsigned int i, head, found = 0;
        __virtio16 avail_idx;
        __virtio16 ring_head;

        /* Check it isn't doing very strange things with descriptor numbers. */
        last_avail_idx = vq->last_avail_idx;

        if (vq->avail_idx == vq->last_avail_idx) {
                if (unlikely(vhost_get_avail(vq, avail_idx, &vq->avail->idx))) {
                        vq_err(vq, "Failed to access avail idx at %p\n",
                vq->avail_idx = vhost16_to_cpu(vq, avail_idx);

                if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
                        vq_err(vq, "Guest moved used index from %u to %u",
                               last_avail_idx, vq->avail_idx);

                /* If there's nothing new since last we looked, return
                 */
                if (vq->avail_idx == last_avail_idx)

                /* Only get avail ring entries after they have been
                 */

        /* Grab the next descriptor number they're advertising, and increment
         * the index we've seen. */
        if (unlikely(vhost_get_avail(vq, ring_head,
             &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
                vq_err(vq, "Failed to read head: idx %d address %p\n",
                       &vq->avail->ring[last_avail_idx % vq->num]);

        head = vhost16_to_cpu(vq, ring_head);

        /* If their number is silly, that's an error. */
        if (unlikely(head >= vq->num)) {
                vq_err(vq, "Guest says index %u > %u is available",

        /* When we start there are none of either input nor output. */
        *out_num = *in_num = 0;

                unsigned iov_count = *in_num + *out_num;
                if (unlikely(i >= vq->num)) {
                        vq_err(vq, "Desc index is %u > %u, head = %u",
                if (unlikely(++found > vq->num)) {
                        vq_err(vq, "Loop detected: last one at %u "
                               "vq size %u head %u\n",
                ret = vhost_copy_from_user(vq, &desc, vq->desc + i,
                if (unlikely(ret)) {
                        vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
                if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
                        ret = get_indirect(vq, iov, iov_size,
                                           log, log_num, &desc);
                        if (unlikely(ret < 0)) {
                                vq_err(vq, "Failure detected "
                                       "in indirect descriptor at idx %d\n", i);

                if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
                        access = VHOST_ACCESS_WO;
                        access = VHOST_ACCESS_RO;
                ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
                                     vhost32_to_cpu(vq, desc.len), iov + iov_count,
                                     iov_size - iov_count, access);
                if (unlikely(ret < 0)) {
                        vq_err(vq, "Translation failure %d descriptor idx %d\n",
                if (access == VHOST_ACCESS_WO) {
                        /* If this is an input descriptor,
                         * increment that count. */
                        if (unlikely(log)) {
                                log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
                                log[*log_num].len = vhost32_to_cpu(vq, desc.len);
                        /* If it's an output descriptor, they're all supposed
                         * to come before any input descriptors. */
                        if (unlikely(*in_num)) {
                                vq_err(vq, "Descriptor has out after in: "
        } while ((i = next_desc(vq, &desc)) != -1);

        /* On success, increment avail index. */
        vq->last_avail_idx++;

        /* Assume notifications from guest are disabled at this point,
         * if they aren't we would need to update avail_event index. */
        BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
EXPORT_SYMBOL_GPL(vhost_get_vq_desc);

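/*
 * Illustrative sketch (not part of this file): a typical backend kick handler
 * drains the ring roughly like this, with process() standing in for the
 * backend's own I/O:
 *
 *      int head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *                                   &out, &in, NULL, NULL);
 *      if (head < 0)
 *              break;                  // error
 *      if (head == vq->num)
 *              break;                  // ring is empty for now
 *      len = process(vq->iov, out, in);        // hypothetical backend work
 *      vhost_add_used_and_signal(dev, vq, head, len);
 */
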
/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
        vq->last_avail_idx -= n;
EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);

/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
        struct vring_used_elem heads = {
                cpu_to_vhost32(vq, head),
                cpu_to_vhost32(vq, len)

        return vhost_add_used_n(vq, &heads, 1);
EXPORT_SYMBOL_GPL(vhost_add_used);

static int __vhost_add_used_n(struct vhost_virtqueue *vq,
                              struct vring_used_elem *heads,
        struct vring_used_elem __user *used;

        start = vq->last_used_idx & (vq->num - 1);
        used = vq->used->ring + start;
                if (vhost_put_user(vq, heads[0].id, &used->id)) {
                        vq_err(vq, "Failed to write used id");
                if (vhost_put_user(vq, heads[0].len, &used->len)) {
                        vq_err(vq, "Failed to write used len");
        } else if (vhost_copy_to_user(vq, used, heads, count * sizeof *used)) {
                vq_err(vq, "Failed to write used");
        if (unlikely(vq->log_used)) {
                /* Make sure data is seen before log. */
                /* Log used ring entry write. */
                log_write(vq->log_base,
                           ((void __user *)used - (void __user *)vq->used),
                          count * sizeof *used);
        old = vq->last_used_idx;
        new = (vq->last_used_idx += count);
        /* If the driver never bothers to signal in a very long while,
         * used index might wrap around. If that happens, invalidate
         * signalled_used index we stored. TODO: make sure driver
         * signals at least once in 2^16 and remove this. */
        if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
                vq->signalled_used_valid = false;

/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
        start = vq->last_used_idx & (vq->num - 1);
        n = vq->num - start;
                r = __vhost_add_used_n(vq, heads, n);
        r = __vhost_add_used_n(vq, heads, count);

        /* Make sure buffer is written before we update index. */
        if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
                vq_err(vq, "Failed to increment used idx");
        if (unlikely(vq->log_used)) {
                /* Make sure used idx is seen before log. */
                /* Log used index update. */
                log_write(vq->log_base,
                          vq->log_addr + offsetof(struct vring_used, idx),
                          sizeof vq->used->idx);
                        eventfd_signal(vq->log_ctx, 1);
EXPORT_SYMBOL_GPL(vhost_add_used_n);

static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
        /* Flush out used index updates. This is paired
         * with the barrier that the Guest executes when enabling
         * interrupts.
         */
        if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
            unlikely(vq->avail_idx == vq->last_avail_idx))

        if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
                if (vhost_get_avail(vq, flags, &vq->avail->flags)) {
                        vq_err(vq, "Failed to get flags");
                return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));

        old = vq->signalled_used;
        v = vq->signalled_used_valid;
        new = vq->signalled_used = vq->last_used_idx;
        vq->signalled_used_valid = true;

        if (vhost_get_avail(vq, event, vhost_used_event(vq))) {
                vq_err(vq, "Failed to get used event idx");
        return vring_need_event(vhost16_to_cpu(vq, event), new, old);

/* This actually signals the guest, using eventfd. */
void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
        /* Signal the Guest to tell them we used something up. */
        if (vq->call_ctx && vhost_notify(dev, vq))
                eventfd_signal(vq->call_ctx, 1);
EXPORT_SYMBOL_GPL(vhost_signal);

/* And here's the combo meal deal.  Supersize me! */
void vhost_add_used_and_signal(struct vhost_dev *dev,
                               struct vhost_virtqueue *vq,
                               unsigned int head, int len)
        vhost_add_used(vq, head, len);
        vhost_signal(dev, vq);
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);

/* multi-buffer version of vhost_add_used_and_signal */
void vhost_add_used_and_signal_n(struct vhost_dev *dev,
                                 struct vhost_virtqueue *vq,
                                 struct vring_used_elem *heads, unsigned count)
        vhost_add_used_n(vq, heads, count);
        vhost_signal(dev, vq);
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);

/* return true if we're sure that available ring is empty */
bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
        __virtio16 avail_idx;

        if (vq->avail_idx != vq->last_avail_idx)

        r = vhost_get_avail(vq, avail_idx, &vq->avail->idx);
        vq->avail_idx = vhost16_to_cpu(vq, avail_idx);

        return vq->avail_idx == vq->last_avail_idx;
EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);

/* OK, now we need to know about added descriptors. */
bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
        __virtio16 avail_idx;

        if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
        vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
        if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
                r = vhost_update_used_flags(vq);
                        vq_err(vq, "Failed to enable notification at %p: %d\n",
                               &vq->used->flags, r);
                r = vhost_update_avail_event(vq, vq->avail_idx);
                        vq_err(vq, "Failed to update avail event index at %p: %d\n",
                               vhost_avail_event(vq), r);

        /* They could have slipped one in as we were doing that: make
         * sure it's written, then check again. */
        r = vhost_get_avail(vq, avail_idx, &vq->avail->idx);
                vq_err(vq, "Failed to check avail idx at %p: %d\n",
                       &vq->avail->idx, r);

        return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
EXPORT_SYMBOL_GPL(vhost_enable_notify);

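/*
 * Worked example of the event-idx decision in vhost_notify(): with
 * VIRTIO_RING_F_EVENT_IDX, the guest publishes used_event and the host
 * signals only when the used index crosses it. vring_need_event(event, new,
 * old) is (u16)(new - event - 1) < (u16)(new - old), so with old = 10,
 * new = 14 and used_event = 12 the interrupt is raised, while used_event = 20
 * suppresses it until more buffers have been consumed.
 */
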
/* We don't need to be notified again. */
void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
        if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
        vq->used_flags |= VRING_USED_F_NO_NOTIFY;
        if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
                r = vhost_update_used_flags(vq);
                        vq_err(vq, "Failed to enable notification at %p: %d\n",
                               &vq->used->flags, r);
EXPORT_SYMBOL_GPL(vhost_disable_notify);

/* Create a new message. */
struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
        struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);

        /* Make sure all padding within the structure is initialized. */
        memset(&node->msg, 0, sizeof node->msg);
        node->msg.type = type;
EXPORT_SYMBOL_GPL(vhost_new_msg);

void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
                       struct vhost_msg_node *node)
        spin_lock(&dev->iotlb_lock);
        list_add_tail(&node->node, head);
        spin_unlock(&dev->iotlb_lock);

        wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
EXPORT_SYMBOL_GPL(vhost_enqueue_msg);

struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
                                         struct list_head *head)
        struct vhost_msg_node *node = NULL;

        spin_lock(&dev->iotlb_lock);
        if (!list_empty(head)) {
                node = list_first_entry(head, struct vhost_msg_node,
                list_del(&node->node);
        spin_unlock(&dev->iotlb_lock);
EXPORT_SYMBOL_GPL(vhost_dequeue_msg);

static int __init vhost_init(void)

static void __exit vhost_exit(void)

module_init(vhost_init);
module_exit(vhost_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio");