/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Generic code for virtio server in host kernel.
 */
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/interval_tree_generic.h>

#include "vhost.h"
static ushort max_mem_regions = 64;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(max_mem_regions,
	"Maximum number of memory regions in memory map. (default: 64)");
enum {
	VHOST_MEMORY_F_LOG = 0x1,
};
#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
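
/*
 * Layout note: with VIRTIO_RING_F_EVENT_IDX negotiated, used_event lives in
 * the last entry of the avail ring and avail_event in the last entry of the
 * used ring, which is why the two macros above cross-index into the
 * "other" ring.
 */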
INTERVAL_TREE_DEFINE(struct vhost_umem_node,
		     rb, __u64, __subtree_last,
		     START, LAST, , vhost_umem_interval_tree);
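
/*
 * INTERVAL_TREE_DEFINE() above generates vhost_umem_interval_tree_insert(),
 * _remove() and _iter_first()/_iter_next() over vhost_umem_node, keyed by
 * the [start, last] guest-physical range; translate_desc() below relies on
 * _iter_first() for address lookups.
 */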
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
	vq->user_be = !virtio_legacy_is_little_endian();
}

static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
{
	vq->user_be = true;
}

static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
{
	vq->user_be = false;
}
static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	struct vhost_vring_state s;

	if (vq->private_data)
		return -EBUSY;

	if (copy_from_user(&s, argp, sizeof(s)))
		return -EFAULT;

	if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
	    s.num != VHOST_VRING_BIG_ENDIAN)
		return -EINVAL;

	if (s.num == VHOST_VRING_BIG_ENDIAN)
		vhost_enable_cross_endian_big(vq);
	else
		vhost_enable_cross_endian_little(vq);

	return 0;
}
static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	struct vhost_vring_state s = {
		.index = idx,
		.num = vq->user_be
	};

	if (copy_to_user(argp, &s, sizeof(s)))
		return -EFAULT;

	return 0;
}
static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	/* Note for legacy virtio: user_be is initialized at reset time
	 * according to the host endianness. If userspace does not set an
	 * explicit endianness, the default behavior is native endian, as
	 * expected by legacy virtio.
	 */
	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
}
#else
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	return -ENOIOCTLCMD;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	return -ENOIOCTLCMD;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	if (vhost_has_feature(vq, VIRTIO_F_VERSION_1))
		vq->is_le = true;
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
static void vhost_reset_is_le(struct vhost_virtqueue *vq)
{
	vq->is_le = virtio_legacy_is_little_endian();
}
struct vhost_flush_struct {
	struct vhost_work work;
	struct completion wait_event;
};
static void vhost_flush_work(struct vhost_work *work)
{
	struct vhost_flush_struct *s;

	s = container_of(work, struct vhost_flush_struct, work);
	complete(&s->wait_event);
}
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
			    poll_table *pt)
{
	struct vhost_poll *poll;

	poll = container_of(pt, struct vhost_poll, table);
	poll->wqh = wqh;
	add_wait_queue(wqh, &poll->wait);
}
static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
			     void *key)
{
	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);

	if (!((unsigned long)key & poll->mask))
		return 0;

	vhost_poll_queue(poll);
	return 0;
}
void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
	clear_bit(VHOST_WORK_QUEUED, &work->flags);
	work->fn = fn;
	init_waitqueue_head(&work->done);
}
EXPORT_SYMBOL_GPL(vhost_work_init);
/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     unsigned long mask, struct vhost_dev *dev)
{
	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
	init_poll_funcptr(&poll->table, vhost_poll_func);
	poll->mask = mask;
	poll->dev = dev;
	poll->wqh = NULL;

	vhost_work_init(&poll->work, fn);
}
EXPORT_SYMBOL_GPL(vhost_poll_init);
/* Start polling a file. We add ourselves to file's wait queue. The caller must
 * keep a reference to a file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
	unsigned long mask;
	int ret = 0;

	if (poll->wqh)
		return 0;

	mask = file->f_op->poll(file, &poll->table);
	if (mask)
		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
	if (mask & POLLERR) {
		if (poll->wqh)
			remove_wait_queue(poll->wqh, &poll->wait);
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_poll_start);
/* Stop polling a file. After this function returns, it becomes safe to drop
 * the file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
	if (poll->wqh) {
		remove_wait_queue(poll->wqh, &poll->wait);
		poll->wqh = NULL;
	}
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);
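
/*
 * Typical lifecycle (illustrative sketch only; "handle_kick" and "backend"
 * are hypothetical names, not defined in this file):
 *
 *	vhost_poll_init(&poll, handle_kick, POLLIN, dev);
 *	r = vhost_poll_start(&poll, backend);	wakeups now queue poll->work
 *	...
 *	vhost_poll_stop(&poll);			backend ref may be dropped after this
 *	vhost_poll_flush(&poll);		wait out in-flight callbacks
 */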
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
	struct vhost_flush_struct flush;

	if (dev->worker) {
		init_completion(&flush.wait_event);
		vhost_work_init(&flush.work, vhost_flush_work);

		vhost_work_queue(dev, &flush.work);
		wait_for_completion(&flush.wait_event);
	}
}
EXPORT_SYMBOL_GPL(vhost_work_flush);
/* Flush any work that has been scheduled. When calling this, don't hold any
 * locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
	vhost_work_flush(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_flush);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
	if (!dev->worker)
		return;

	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
		/* We can only add the work to the list after we're
		 * sure it was not in the list.
		 * test_and_set_bit() implies a memory barrier.
		 */
		llist_add(&work->node, &dev->work_list);
		wake_up_process(dev->worker);
	}
}
EXPORT_SYMBOL_GPL(vhost_work_queue);
/* A lockless hint for busy polling code to exit the loop */
bool vhost_has_work(struct vhost_dev *dev)
{
	return !llist_empty(&dev->work_list);
}
EXPORT_SYMBOL_GPL(vhost_has_work);
void vhost_poll_queue(struct vhost_poll *poll)
{
	vhost_work_queue(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_queue);
static void vhost_vq_reset(struct vhost_dev *dev,
			   struct vhost_virtqueue *vq)
{
	vq->num = 1;
	vq->desc = NULL;
	vq->avail = NULL;
	vq->used = NULL;
	vq->last_avail_idx = 0;
	vq->avail_idx = 0;
	vq->last_used_idx = 0;
	vq->signalled_used = 0;
	vq->signalled_used_valid = false;
	vq->used_flags = 0;
	vq->log_used = false;
	vq->log_addr = -1ull;
	vq->private_data = NULL;
	vq->acked_features = 0;
	vq->log_base = NULL;
	vq->error_ctx = NULL;
	vq->error = NULL;
	vq->kick = NULL;
	vq->call_ctx = NULL;
	vq->call = NULL;
	vq->log_ctx = NULL;
	vhost_reset_is_le(vq);
	vhost_disable_cross_endian(vq);
	vq->busyloop_timeout = 0;
	vq->umem = NULL;
}
static int vhost_worker(void *data)
{
	struct vhost_dev *dev = data;
	struct vhost_work *work, *work_next;
	struct llist_node *node;
	mm_segment_t oldfs = get_fs();

	set_fs(USER_DS);
	use_mm(dev->mm);

	for (;;) {
		/* mb paired w/ kthread_stop */
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}

		node = llist_del_all(&dev->work_list);
		if (!node)
			schedule();

		node = llist_reverse_order(node);
		/* make sure flag is seen after deletion */
		smp_wmb();
		llist_for_each_entry_safe(work, work_next, node, node) {
			clear_bit(VHOST_WORK_QUEUED, &work->flags);
			__set_current_state(TASK_RUNNING);
			work->fn(work);
			if (need_resched())
				schedule();
		}
	}
	unuse_mm(dev->mm);
	set_fs(oldfs);
	return 0;
}
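
/* llist_del_all() hands back the queued work in LIFO order; the
 * llist_reverse_order() call in vhost_worker() restores FIFO so work items
 * run in the order they were queued. */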
static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
	kfree(vq->indirect);
	vq->indirect = NULL;
	kfree(vq->log);
	vq->log = NULL;
	kfree(vq->heads);
	vq->heads = NULL;
}
/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
	struct vhost_virtqueue *vq;
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->indirect = kmalloc(sizeof *vq->indirect * UIO_MAXIOV,
				       GFP_KERNEL);
		vq->log = kmalloc(sizeof *vq->log * UIO_MAXIOV, GFP_KERNEL);
		vq->heads = kmalloc(sizeof *vq->heads * UIO_MAXIOV, GFP_KERNEL);
		if (!vq->indirect || !vq->log || !vq->heads)
			goto err_nomem;
	}
	return 0;

err_nomem:
	for (; i >= 0; --i)
		vhost_vq_free_iovecs(dev->vqs[i]);
	return -ENOMEM;
}
static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i)
		vhost_vq_free_iovecs(dev->vqs[i]);
}
void vhost_dev_init(struct vhost_dev *dev,
		    struct vhost_virtqueue **vqs, int nvqs)
{
	struct vhost_virtqueue *vq;
	int i;

	dev->vqs = vqs;
	dev->nvqs = nvqs;
	mutex_init(&dev->mutex);
	dev->log_ctx = NULL;
	dev->log_file = NULL;
	dev->umem = NULL;
	dev->mm = NULL;
	dev->worker = NULL;
	init_llist_head(&dev->work_list);

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->log = NULL;
		vq->indirect = NULL;
		vq->heads = NULL;
		vq->dev = dev;
		mutex_init(&vq->mutex);
		vhost_vq_reset(dev, vq);
		if (vq->handle_kick)
			vhost_poll_init(&vq->poll, vq->handle_kick,
					POLLIN, dev);
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_init);
/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
	/* Are you the owner? If not, I don't think you mean to do that */
	return dev->mm == current->mm ? 0 : -EPERM;
}
EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
struct vhost_attach_cgroups_struct {
	struct vhost_work work;
	struct task_struct *owner;
	int ret;
};
static void vhost_attach_cgroups_work(struct vhost_work *work)
{
	struct vhost_attach_cgroups_struct *s;

	s = container_of(work, struct vhost_attach_cgroups_struct, work);
	s->ret = cgroup_attach_task_all(s->owner, current);
}
static int vhost_attach_cgroups(struct vhost_dev *dev)
{
	struct vhost_attach_cgroups_struct attach;

	attach.owner = current;
	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
	vhost_work_queue(dev, &attach.work);
	vhost_work_flush(dev, &attach.work);
	return attach.ret;
}
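
/* cgroup_attach_task_all() moves "current" into the owner's cgroups, so the
 * attach is queued and then flushed to run synchronously on the worker
 * thread: it is the worker, not the ioctl caller, that must join them. */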
/* Caller should have device mutex */
bool vhost_dev_has_owner(struct vhost_dev *dev)
{
	return dev->mm;
}
EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
	struct task_struct *worker;
	int err;

	/* Is there an owner already? */
	if (vhost_dev_has_owner(dev)) {
		err = -EBUSY;
		goto err_mm;
	}

	/* No owner, become one */
	dev->mm = get_task_mm(current);
	worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
	if (IS_ERR(worker)) {
		err = PTR_ERR(worker);
		goto err_worker;
	}

	dev->worker = worker;
	wake_up_process(worker);	/* avoid contributing to loadavg */

	err = vhost_attach_cgroups(dev);
	if (err)
		goto err_cgroup;

	err = vhost_dev_alloc_iovecs(dev);
	if (err)
		goto err_cgroup;

	return 0;
err_cgroup:
	kthread_stop(worker);
	dev->worker = NULL;
err_worker:
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
err_mm:
	return err;
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
static void *vhost_kvzalloc(unsigned long size)
{
	void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);

	if (!n)
		n = vzalloc(size);
	return n;
}
struct vhost_umem *vhost_dev_reset_owner_prepare(void)
{
	return vhost_kvzalloc(sizeof(struct vhost_umem));
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
/* Caller should have device mutex */
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_umem *umem)
{
	int i;

	vhost_dev_cleanup(dev, true);

	/* Restore memory to default empty mapping. */
	INIT_LIST_HEAD(&umem->umem_list);
	dev->umem = umem;
	/* We don't need VQ locks below since vhost_dev_cleanup makes sure
	 * VQs aren't running.
	 */
	for (i = 0; i < dev->nvqs; ++i)
		dev->vqs[i]->umem = umem;
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
void vhost_dev_stop(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
			vhost_poll_stop(&dev->vqs[i]->poll);
			vhost_poll_flush(&dev->vqs[i]->poll);
		}
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);
static void vhost_umem_clean(struct vhost_umem *umem)
{
	struct vhost_umem_node *node, *tmp;

	if (!umem)
		return;

	list_for_each_entry_safe(node, tmp, &umem->umem_list, link) {
		vhost_umem_interval_tree_remove(node, &umem->umem_tree);
		list_del(&node->link);
		kvfree(node);
	}
	kvfree(umem);
}
/* Caller should have device mutex if and only if locked is set */
void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->error_ctx)
			eventfd_ctx_put(dev->vqs[i]->error_ctx);
		if (dev->vqs[i]->error)
			fput(dev->vqs[i]->error);
		if (dev->vqs[i]->kick)
			fput(dev->vqs[i]->kick);
		if (dev->vqs[i]->call_ctx)
			eventfd_ctx_put(dev->vqs[i]->call_ctx);
		if (dev->vqs[i]->call)
			fput(dev->vqs[i]->call);
		vhost_vq_reset(dev, dev->vqs[i]);
	}
	vhost_dev_free_iovecs(dev);
	if (dev->log_ctx)
		eventfd_ctx_put(dev->log_ctx);
	dev->log_ctx = NULL;
	if (dev->log_file)
		fput(dev->log_file);
	dev->log_file = NULL;
	/* No one will access memory at this point */
	vhost_umem_clean(dev->umem);
	dev->umem = NULL;
	WARN_ON(!llist_empty(&dev->work_list));
	if (dev->worker) {
		kthread_stop(dev->worker);
		dev->worker = NULL;
	}
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
	u64 a = addr / VHOST_PAGE_SIZE / 8;

	/* Make sure 64 bit math will not overflow. */
	if (a > ULONG_MAX - (unsigned long)log_base ||
	    a + (unsigned long)log_base > ULONG_MAX)
		return 0;

	return access_ok(VERIFY_WRITE, log_base + a,
			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}
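
/*
 * The log is a userspace bitmap with one bit per VHOST_PAGE_SIZE page of
 * guest memory: an address maps to bitmap byte addr / VHOST_PAGE_SIZE / 8,
 * and sz bytes need (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8
 * bitmap bytes, as computed above.
 */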
/* Caller should have vq mutex and device mutex. */
static int vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem,
			       int log_all)
{
	struct vhost_umem_node *node;

	if (!umem)
		return 0;

	list_for_each_entry(node, &umem->umem_list, link) {
		unsigned long a = node->userspace_addr;

		if (node->size > ULONG_MAX)
			return 0;
		else if (!access_ok(VERIFY_WRITE, (void __user *)a,
				    node->size))
			return 0;
		else if (log_all && !log_access_ok(log_base,
						   node->start,
						   node->size))
			return 0;
	}
	return 1;
}
/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
			    int log_all)
{
	int i;

	for (i = 0; i < d->nvqs; ++i) {
		int ok;
		bool log;

		mutex_lock(&d->vqs[i]->mutex);
		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
		/* If ring is inactive, will check when it's enabled. */
		if (d->vqs[i]->private_data)
			ok = vq_memory_access_ok(d->vqs[i]->log_base,
						 umem, log);
		else
			ok = 1;
		mutex_unlock(&d->vqs[i]->mutex);
		if (!ok)
			return 0;
	}
	return 1;
}
#define vhost_put_user(vq, x, ptr) __put_user(x, ptr)

static int vhost_copy_to_user(struct vhost_virtqueue *vq, void *to,
			      const void *from, unsigned size)
{
	return __copy_to_user(to, from, size);
}

#define vhost_get_user(vq, x, ptr) __get_user(x, ptr)

static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
				void *from, unsigned size)
{
	return __copy_from_user(to, from, size);
}
static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
			struct vring_desc __user *desc,
			struct vring_avail __user *avail,
			struct vring_used __user *used)
{
	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
	       access_ok(VERIFY_READ, avail,
			 sizeof *avail + num * sizeof *avail->ring + s) &&
	       access_ok(VERIFY_WRITE, used,
			 sizeof *used + num * sizeof *used->ring + s);
}
/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
int vhost_log_access_ok(struct vhost_dev *dev)
{
	return memory_access_ok(dev, dev->umem, 1);
}
EXPORT_SYMBOL_GPL(vhost_log_access_ok);
/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static int vq_log_access_ok(struct vhost_virtqueue *vq,
			    void __user *log_base)
{
	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return vq_memory_access_ok(log_base, vq->umem,
				   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
					sizeof *vq->used +
					vq->num * sizeof *vq->used->ring + s));
}
/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
int vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) &&
		vq_log_access_ok(vq, vq->log_base);
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
	struct vhost_memory mem, *newmem;
	struct vhost_memory_region *region;
	struct vhost_umem_node *node;
	struct vhost_umem *newumem, *oldumem;
	unsigned long size = offsetof(struct vhost_memory, regions);
	int i;

	if (copy_from_user(&mem, m, size))
		return -EFAULT;
	if (mem.padding)
		return -EOPNOTSUPP;
	if (mem.nregions > max_mem_regions)
		return -E2BIG;
	newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions));
	if (!newmem)
		return -ENOMEM;

	memcpy(newmem, &mem, size);
	if (copy_from_user(newmem->regions, m->regions,
			   mem.nregions * sizeof *m->regions)) {
		kvfree(newmem);
		return -EFAULT;
	}

	newumem = vhost_kvzalloc(sizeof(*newumem));
	if (!newumem) {
		kvfree(newmem);
		return -ENOMEM;
	}

	newumem->umem_tree = RB_ROOT;
	INIT_LIST_HEAD(&newumem->umem_list);

	for (region = newmem->regions;
	     region < newmem->regions + mem.nregions;
	     region++) {
		node = vhost_kvzalloc(sizeof(*node));
		if (!node)
			goto err;
		node->start = region->guest_phys_addr;
		node->size = region->memory_size;
		node->last = node->start + node->size - 1;
		node->userspace_addr = region->userspace_addr;
		INIT_LIST_HEAD(&node->link);
		list_add_tail(&node->link, &newumem->umem_list);
		vhost_umem_interval_tree_insert(node, &newumem->umem_tree);
	}

	if (!memory_access_ok(d, newumem, 0))
		goto err;

	oldumem = d->umem;
	d->umem = newumem;

	/* All memory accesses are done under some VQ mutex. */
	for (i = 0; i < d->nvqs; ++i) {
		mutex_lock(&d->vqs[i]->mutex);
		d->vqs[i]->umem = newumem;
		mutex_unlock(&d->vqs[i]->mutex);
	}

	kvfree(newmem);
	vhost_umem_clean(oldumem);
	return 0;

err:
	vhost_umem_clean(newumem);
	kvfree(newmem);
	return -EFAULT;
}
long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
{
	struct file *eventfp, *filep = NULL;
	bool pollstart = false, pollstop = false;
	struct eventfd_ctx *ctx = NULL;
	u32 __user *idxp = argp;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	struct vhost_vring_file f;
	struct vhost_vring_addr a;
	u32 idx;
	long r;

	r = get_user(idx, idxp);
	if (r < 0)
		return r;
	if (idx >= d->nvqs)
		return -ENOBUFS;

	vq = d->vqs[idx];

	mutex_lock(&vq->mutex);

	switch (ioctl) {
	case VHOST_SET_VRING_NUM:
		/* Resizing ring with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
			r = -EINVAL;
			break;
		}
		vq->num = s.num;
		break;
	case VHOST_SET_VRING_BASE:
		/* Moving base with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (s.num > 0xffff) {
			r = -EINVAL;
			break;
		}
		vq->last_avail_idx = s.num;
		/* Forget the cached index value. */
		vq->avail_idx = vq->last_avail_idx;
		break;
	case VHOST_GET_VRING_BASE:
		s.index = idx;
		s.num = vq->last_avail_idx;
		if (copy_to_user(argp, &s, sizeof s))
			r = -EFAULT;
		break;
	case VHOST_SET_VRING_ADDR:
		if (copy_from_user(&a, argp, sizeof a)) {
			r = -EFAULT;
			break;
		}
		if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
			r = -EOPNOTSUPP;
			break;
		}
		/* For 32bit, verify that the top 32bits of the user
		   data are set to zero. */
		if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
		    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
		    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
			r = -EFAULT;
			break;
		}

		/* Make sure it's safe to cast pointers to vring types. */
		BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
		BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
		if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
		    (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
		    (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1))) {
			r = -EINVAL;
			break;
		}

		/* We only verify access here if backend is configured.
		 * If it is not, we don't as size might not have been setup.
		 * We will verify when backend is configured. */
		if (vq->private_data) {
			if (!vq_access_ok(vq, vq->num,
				(void __user *)(unsigned long)a.desc_user_addr,
				(void __user *)(unsigned long)a.avail_user_addr,
				(void __user *)(unsigned long)a.used_user_addr)) {
				r = -EINVAL;
				break;
			}

			/* Also validate log access for used ring if enabled. */
			if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
			    !log_access_ok(vq->log_base, a.log_guest_addr,
					   sizeof *vq->used +
					   vq->num * sizeof *vq->used->ring)) {
				r = -EINVAL;
				break;
			}
		}

		vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
		vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
		vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
		vq->log_addr = a.log_guest_addr;
		vq->used = (void __user *)(unsigned long)a.used_user_addr;
		break;
	case VHOST_SET_VRING_KICK:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->kick) {
			pollstop = (filep = vq->kick) != NULL;
			pollstart = (vq->kick = eventfp) != NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_CALL:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->call) {
			filep = vq->call;
			ctx = vq->call_ctx;
			vq->call = eventfp;
			vq->call_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_ERR:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->error) {
			filep = vq->error;
			vq->error = eventfp;
			ctx = vq->error_ctx;
			vq->error_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_ENDIAN:
		r = vhost_set_vring_endian(vq, argp);
		break;
	case VHOST_GET_VRING_ENDIAN:
		r = vhost_get_vring_endian(vq, idx, argp);
		break;
	case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
		if (copy_from_user(&s, argp, sizeof(s))) {
			r = -EFAULT;
			break;
		}
		vq->busyloop_timeout = s.num;
		break;
	case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
		s.index = idx;
		s.num = vq->busyloop_timeout;
		if (copy_to_user(argp, &s, sizeof(s)))
			r = -EFAULT;
		break;
	default:
		r = -ENOIOCTLCMD;
	}

	if (pollstop && vq->handle_kick)
		vhost_poll_stop(&vq->poll);

	if (ctx)
		eventfd_ctx_put(ctx);
	if (filep)
		fput(filep);

	if (pollstart && vq->handle_kick)
		r = vhost_poll_start(&vq->poll, vq->kick);

	mutex_unlock(&vq->mutex);

	if (pollstop && vq->handle_kick)
		vhost_poll_flush(&vq->poll);
	return r;
}
EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
	struct file *eventfp, *filep = NULL;
	struct eventfd_ctx *ctx = NULL;
	u64 p;
	long r;
	int i, fd;

	/* If you are not the owner, you can become one */
	if (ioctl == VHOST_SET_OWNER) {
		r = vhost_dev_set_owner(d);
		goto done;
	}

	/* You must be the owner to do anything else */
	r = vhost_dev_check_owner(d);
	if (r)
		goto done;

	switch (ioctl) {
	case VHOST_SET_MEM_TABLE:
		r = vhost_set_memory(d, argp);
		break;
	case VHOST_SET_LOG_BASE:
		if (copy_from_user(&p, argp, sizeof p)) {
			r = -EFAULT;
			break;
		}
		if ((u64)(unsigned long)p != p) {
			r = -EFAULT;
			break;
		}
		for (i = 0; i < d->nvqs; ++i) {
			struct vhost_virtqueue *vq;
			void __user *base = (void __user *)(unsigned long)p;
			vq = d->vqs[i];
			mutex_lock(&vq->mutex);
			/* If ring is inactive, will check when it's enabled. */
			if (vq->private_data && !vq_log_access_ok(vq, base))
				r = -EFAULT;
			else
				vq->log_base = base;
			mutex_unlock(&vq->mutex);
		}
		break;
	case VHOST_SET_LOG_FD:
		r = get_user(fd, (int __user *)argp);
		if (r < 0)
			break;
		eventfp = fd == -1 ? NULL : eventfd_fget(fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != d->log_file) {
			filep = d->log_file;
			d->log_file = eventfp;
			ctx = d->log_ctx;
			d->log_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		for (i = 0; i < d->nvqs; ++i) {
			mutex_lock(&d->vqs[i]->mutex);
			d->vqs[i]->log_ctx = d->log_ctx;
			mutex_unlock(&d->vqs[i]->mutex);
		}
		if (ctx)
			eventfd_ctx_put(ctx);
		if (filep)
			fput(filep);
		break;
	default:
		r = vhost_vring_ioctl(d, ioctl, argp);
		break;
	}
done:
	return r;
}
EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
/* TODO: This is really inefficient. We need something like get_user()
 * (instruction directly accesses the data, with an exception table entry
 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
 */
static int set_bit_to_user(int nr, void __user *addr)
{
	unsigned long log = (unsigned long)addr;
	struct page *page;
	void *base;
	int bit = nr + (log % PAGE_SIZE) * 8;
	int r;

	r = get_user_pages_fast(log, 1, 1, &page);
	if (r < 0)
		return r;
	BUG_ON(r != 1);
	base = kmap_atomic(page);
	set_bit(bit, base);
	kunmap_atomic(base);
	set_page_dirty_lock(page);
	put_page(page);
	return 0;
}
static int log_write(void __user *log_base,
		     u64 write_address, u64 write_length)
{
	u64 write_page = write_address / VHOST_PAGE_SIZE;
	int r;

	if (!write_length)
		return 0;
	write_length += write_address % VHOST_PAGE_SIZE;
	for (;;) {
		u64 base = (u64)(unsigned long)log_base;
		u64 log = base + write_page / 8;
		int bit = write_page % 8;
		if ((u64)(unsigned long)log != log)
			return -EFAULT;
		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
		if (r < 0)
			return r;
		if (write_length <= VHOST_PAGE_SIZE)
			break;
		write_length -= VHOST_PAGE_SIZE;
		write_page += 1;
	}
	return r;
}
*vq
, struct vhost_log
*log
,
1155 unsigned int log_num
, u64 len
)
1159 /* Make sure data written is seen before log. */
1161 for (i
= 0; i
< log_num
; ++i
) {
1162 u64 l
= min(log
[i
].len
, len
);
1163 r
= log_write(vq
->log_base
, log
[i
].addr
, l
);
1169 eventfd_signal(vq
->log_ctx
, 1);
1173 /* Length written exceeds what we have stored. This is a bug. */
1177 EXPORT_SYMBOL_GPL(vhost_log_write
);
static int vhost_update_used_flags(struct vhost_virtqueue *vq)
{
	void __user *used;
	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
			   &vq->used->flags) < 0)
		return -EFAULT;
	if (unlikely(vq->log_used)) {
		/* Make sure the flag is seen before log. */
		smp_wmb();
		/* Log used flag write. */
		used = &vq->used->flags;
		log_write(vq->log_base, vq->log_addr +
			  (used - (void __user *)vq->used),
			  sizeof vq->used->flags);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return 0;
}
static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
{
	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
			   vhost_avail_event(vq)))
		return -EFAULT;
	if (unlikely(vq->log_used)) {
		void __user *used;
		/* Make sure the event is seen before log. */
		smp_wmb();
		/* Log avail event write */
		used = vhost_avail_event(vq);
		log_write(vq->log_base, vq->log_addr +
			  (used - (void __user *)vq->used),
			  sizeof *vhost_avail_event(vq));
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return 0;
}
int vhost_vq_init_access(struct vhost_virtqueue *vq)
{
	__virtio16 last_used_idx;
	int r;
	bool is_le = vq->is_le;

	if (!vq->private_data) {
		vhost_reset_is_le(vq);
		return 0;
	}

	vhost_init_is_le(vq);

	r = vhost_update_used_flags(vq);
	if (r)
		goto err;
	vq->signalled_used_valid = false;
	if (!access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) {
		r = -EFAULT;
		goto err;
	}
	r = vhost_get_user(vq, last_used_idx, &vq->used->idx);
	if (r)
		goto err;
	vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
	return 0;
err:
	vq->is_le = is_le;
	return r;
}
EXPORT_SYMBOL_GPL(vhost_vq_init_access);
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
			  struct iovec iov[], int iov_size)
{
	const struct vhost_umem_node *node;
	struct vhost_umem *umem = vq->umem;
	struct iovec *_iov;
	u64 s = 0;
	int ret = 0;

	while ((u64)len > s) {
		u64 size;
		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}
		node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
							   addr, addr + len - 1);
		if (node == NULL || node->start > addr) {
			ret = -EFAULT;
			break;
		}
		_iov = iov + ret;
		size = node->size - addr + node->start;
		_iov->iov_len = min((u64)len - s, size);
		_iov->iov_base = (void __user *)(unsigned long)
			(node->userspace_addr + addr - node->start);
		s += size;
		addr += size;
		++ret;
	}

	return ret;
}
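
/*
 * Illustrative example (hypothetical numbers): a region with
 * guest_phys_addr 0x40000000, memory_size 0x100000 and userspace_addr
 * 0x7f0000000000 translates guest address 0x40001000 to user pointer
 * 0x7f0000001000; a request crossing the region's end is split across
 * successive iovec entries by the loop above.
 */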
/* Each buffer in the virtqueues is actually a chain of descriptors. This
 * function returns the next descriptor in the chain,
 * or -1U if we're at the end. */
static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
		return -1U;

	/* Check they're not leading us off end of descriptors. */
	next = vhost16_to_cpu(vq, desc->next);
	/* Make sure compiler knows to grab that: we don't want it changing! */
	/* We will use the result as an index in an array, so most
	 * architectures only need a compiler barrier here. */
	read_barrier_depends();

	return next;
}
static int get_indirect(struct vhost_virtqueue *vq,
			struct iovec iov[], unsigned int iov_size,
			unsigned int *out_num, unsigned int *in_num,
			struct vhost_log *log, unsigned int *log_num,
			struct vring_desc *indirect)
{
	struct vring_desc desc;
	unsigned int i = 0, count, found = 0;
	u32 len = vhost32_to_cpu(vq, indirect->len);
	struct iov_iter from;
	int ret;

	/* Sanity check */
	if (unlikely(len % sizeof desc)) {
		vq_err(vq, "Invalid length in indirect descriptor: "
		       "len 0x%llx not multiple of 0x%zx\n",
		       (unsigned long long)len,
		       sizeof desc);
		return -EINVAL;
	}

	ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
			     UIO_MAXIOV);
	if (unlikely(ret < 0)) {
		vq_err(vq, "Translation failure %d in indirect.\n", ret);
		return ret;
	}
	iov_iter_init(&from, READ, vq->indirect, ret, len);

	/* We will use the result as an address to read from, so most
	 * architectures only need a compiler barrier here. */
	read_barrier_depends();

	count = len / sizeof desc;
	/* Buffers are chained via a 16 bit next field, so
	 * we can have at most 2^16 of these. */
	if (unlikely(count > USHRT_MAX + 1)) {
		vq_err(vq, "Indirect buffer length too big: %d\n",
		       indirect->len);
		return -E2BIG;
	}

	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(++found > count)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "indirect size %u\n",
			       i, count);
			return -EINVAL;
		}
		if (unlikely(copy_from_iter(&desc, sizeof(desc), &from) !=
			     sizeof(desc))) {
			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
			return -EINVAL;
		}
		if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
			return -EINVAL;
		}

		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
				     iov_size - iov_count);
		if (unlikely(ret < 0)) {
			vq_err(vq, "Translation failure %d indirect idx %d\n",
			       ret, i);
			return ret;
		}
		/* If this is an input descriptor, increment that count. */
		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) {
			*in_num += ret;
			if (unlikely(log)) {
				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Indirect descriptor "
				       "has out after in: idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(vq, &desc)) != -1);

	return 0;
}
/* This looks in the virtqueue and for the first available buffer, and converts
 * it to an iovec for convenient access. Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function returns the descriptor number found, or vq->num (which is
 * never a valid descriptor number) if none was found. A negative code is
 * returned on error. */
int vhost_get_vq_desc(struct vhost_virtqueue *vq,
		      struct iovec iov[], unsigned int iov_size,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num)
{
	struct vring_desc desc;
	unsigned int i, head, found = 0;
	u16 last_avail_idx;
	__virtio16 avail_idx;
	__virtio16 ring_head;
	int ret;

	/* Check it isn't doing very strange things with descriptor numbers. */
	last_avail_idx = vq->last_avail_idx;
	if (unlikely(vhost_get_user(vq, avail_idx, &vq->avail->idx))) {
		vq_err(vq, "Failed to access avail idx at %p\n",
		       &vq->avail->idx);
		return -EFAULT;
	}
	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);

	if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
		vq_err(vq, "Guest moved used index from %u to %u",
		       last_avail_idx, vq->avail_idx);
		return -EFAULT;
	}

	/* If there's nothing new since last we looked, return invalid. */
	if (vq->avail_idx == last_avail_idx)
		return vq->num;

	/* Only get avail ring entries after they have been exposed by guest. */
	smp_rmb();

	/* Grab the next descriptor number they're advertising, and increment
	 * the index we've seen. */
	if (unlikely(vhost_get_user(vq, ring_head,
		     &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
		vq_err(vq, "Failed to read head: idx %d address %p\n",
		       last_avail_idx,
		       &vq->avail->ring[last_avail_idx % vq->num]);
		return -EFAULT;
	}

	head = vhost16_to_cpu(vq, ring_head);

	/* If their number is silly, that's an error. */
	if (unlikely(head >= vq->num)) {
		vq_err(vq, "Guest says index %u > %u is available",
		       head, vq->num);
		return -EINVAL;
	}

	/* When we start there are none of either input nor output. */
	*out_num = *in_num = 0;
	if (unlikely(log))
		*log_num = 0;

	i = head;
	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(i >= vq->num)) {
			vq_err(vq, "Desc index is %u > %u, head = %u",
			       i, vq->num, head);
			return -EINVAL;
		}
		if (unlikely(++found > vq->num)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "vq size %u head %u\n",
			       i, vq->num, head);
			return -EINVAL;
		}
		ret = vhost_copy_from_user(vq, &desc, vq->desc + i,
					   sizeof desc);
		if (unlikely(ret)) {
			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
			       i, vq->desc + i);
			return -EFAULT;
		}
		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
			ret = get_indirect(vq, iov, iov_size,
					   out_num, in_num,
					   log, log_num, &desc);
			if (unlikely(ret < 0)) {
				vq_err(vq, "Failure detected "
				       "in indirect descriptor at idx %d\n", i);
				return ret;
			}
			continue;
		}

		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
				     iov_size - iov_count);
		if (unlikely(ret < 0)) {
			vq_err(vq, "Translation failure %d descriptor idx %d\n",
			       ret, i);
			return ret;
		}
		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) {
			/* If this is an input descriptor,
			 * increment that count. */
			*in_num += ret;
			if (unlikely(log)) {
				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Descriptor has out after in: "
				       "idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(vq, &desc)) != -1);

	/* On success, increment avail index. */
	vq->last_avail_idx++;

	/* Assume notifications from guest are disabled at this point,
	 * if they aren't we would need to update avail_event index. */
	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
	return head;
}
EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
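
/*
 * On return, *out_num and *in_num describe how the single iov array is
 * split: iov[0..*out_num-1] are guest-readable (out) buffers and the next
 * *in_num entries are guest-writable (in) buffers. E.g. out_num = 2,
 * in_num = 1 means iov[0] and iov[1] feed data to the device and iov[2]
 * receives the result.
 */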
/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
{
	vq->last_avail_idx -= n;
}
EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
/* After we've used one of their buffers, we tell them about it. We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
	struct vring_used_elem heads = {
		cpu_to_vhost32(vq, head),
		cpu_to_vhost32(vq, len)
	};

	return vhost_add_used_n(vq, &heads, 1);
}
EXPORT_SYMBOL_GPL(vhost_add_used);
static int __vhost_add_used_n(struct vhost_virtqueue *vq,
			      struct vring_used_elem *heads,
			      unsigned count)
{
	struct vring_used_elem __user *used;
	u16 old, new;
	int start;

	start = vq->last_used_idx & (vq->num - 1);
	used = vq->used->ring + start;
	if (count == 1) {
		if (vhost_put_user(vq, heads[0].id, &used->id)) {
			vq_err(vq, "Failed to write used id");
			return -EFAULT;
		}
		if (vhost_put_user(vq, heads[0].len, &used->len)) {
			vq_err(vq, "Failed to write used len");
			return -EFAULT;
		}
	} else if (vhost_copy_to_user(vq, used, heads, count * sizeof *used)) {
		vq_err(vq, "Failed to write used");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Make sure data is seen before log. */
		smp_wmb();
		/* Log used ring entry write. */
		log_write(vq->log_base,
			  vq->log_addr +
			   ((void __user *)used - (void __user *)vq->used),
			  count * sizeof *used);
	}
	old = vq->last_used_idx;
	new = (vq->last_used_idx += count);
	/* If the driver never bothers to signal in a very long while,
	 * used index might wrap around. If that happens, invalidate
	 * signalled_used index we stored. TODO: make sure driver
	 * signals at least once in 2^16 and remove this. */
	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
		vq->signalled_used_valid = false;
	return 0;
}
/* After we've used one of their buffers, we tell them about it. We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
		     unsigned count)
{
	int start, n, r;

	start = vq->last_used_idx & (vq->num - 1);
	n = vq->num - start;
	if (n < count) {
		r = __vhost_add_used_n(vq, heads, n);
		if (r < 0)
			return r;
		heads += n;
		count -= n;
	}
	r = __vhost_add_used_n(vq, heads, count);

	/* Make sure buffer is written before we update index. */
	smp_wmb();
	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
			   &vq->used->idx)) {
		vq_err(vq, "Failed to increment used idx");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Log used index update. */
		log_write(vq->log_base,
			  vq->log_addr + offsetof(struct vring_used, idx),
			  sizeof vq->used->idx);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return r;
}
EXPORT_SYMBOL_GPL(vhost_add_used_n);
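
/*
 * Example of the wrap handling above (illustrative numbers): with
 * vq->num == 256, last_used_idx == 250 and count == 10, the first
 * __vhost_add_used_n() writes ring entries 250..255 (n == 6) and the
 * second writes entries 0..3.
 */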
static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__u16 old, new;
	__virtio16 event;
	bool v;

	/* Flush out used index updates. This is paired
	 * with the barrier that the Guest executes when enabling
	 * interrupts. */
	smp_mb();

	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
	    unlikely(vq->avail_idx == vq->last_avail_idx))
		return true;

	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		__virtio16 flags;
		if (vhost_get_user(vq, flags, &vq->avail->flags)) {
			vq_err(vq, "Failed to get flags");
			return true;
		}
		return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
	}
	old = vq->signalled_used;
	v = vq->signalled_used_valid;
	new = vq->signalled_used = vq->last_used_idx;
	vq->signalled_used_valid = true;

	if (unlikely(!v))
		return true;

	if (vhost_get_user(vq, event, vhost_used_event(vq))) {
		vq_err(vq, "Failed to get used event idx");
		return true;
	}
	return vring_need_event(vhost16_to_cpu(vq, event), new, old);
}
/* This actually signals the guest, using eventfd. */
void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	/* Signal the Guest, telling them we used something up. */
	if (vq->call_ctx && vhost_notify(dev, vq))
		eventfd_signal(vq->call_ctx, 1);
}
EXPORT_SYMBOL_GPL(vhost_signal);
/* And here's the combo meal deal. Supersize me! */
void vhost_add_used_and_signal(struct vhost_dev *dev,
			       struct vhost_virtqueue *vq,
			       unsigned int head, int len)
{
	vhost_add_used(vq, head, len);
	vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
/* multi-buffer version of vhost_add_used_and_signal */
void vhost_add_used_and_signal_n(struct vhost_dev *dev,
				 struct vhost_virtqueue *vq,
				 struct vring_used_elem *heads, unsigned count)
{
	vhost_add_used_n(vq, heads, count);
	vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
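
/*
 * Sketch of a typical completion path in a device handler (illustrative;
 * "out", "in" and "len" belong to the surrounding handler, not this file):
 *
 *	head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *				 &out, &in, NULL, NULL);
 *	if (head < 0 || head == vq->num)
 *		bail out;
 *	... perform I/O on vq->iov ...
 *	vhost_add_used_and_signal(dev, vq, head, len);
 */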
/* return true if we're sure that available ring is empty */
bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__virtio16 avail_idx;
	int r;

	r = vhost_get_user(vq, avail_idx, &vq->avail->idx);
	if (r)
		return false;

	return vhost16_to_cpu(vq, avail_idx) == vq->avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
/* OK, now we need to know about added descriptors. */
bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__virtio16 avail_idx;
	int r;

	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
		return false;
	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		r = vhost_update_used_flags(vq);
		if (r) {
			vq_err(vq, "Failed to enable notification at %p: %d\n",
			       &vq->used->flags, r);
			return false;
		}
	} else {
		r = vhost_update_avail_event(vq, vq->avail_idx);
		if (r) {
			vq_err(vq, "Failed to update avail event index at %p: %d\n",
			       vhost_avail_event(vq), r);
			return false;
		}
	}
	/* They could have slipped one in as we were doing that: make
	 * sure it's written, then check again. */
	smp_mb();
	r = vhost_get_user(vq, avail_idx, &vq->avail->idx);
	if (r) {
		vq_err(vq, "Failed to check avail idx at %p: %d\n",
		       &vq->avail->idx, r);
		return false;
	}

	return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_enable_notify);
/* We don't need to be notified again. */
void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	int r;

	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
		return;
	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		r = vhost_update_used_flags(vq);
		if (r)
			vq_err(vq, "Failed to disable notification at %p: %d\n",
			       &vq->used->flags, r);
	}
}
EXPORT_SYMBOL_GPL(vhost_disable_notify);
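
/*
 * The usual notification dance in a handler (illustrative sketch): disable
 * notifications while draining the ring, then re-enable and re-check so a
 * buffer that slipped in concurrently is not missed:
 *
 *	vhost_disable_notify(dev, vq);
 *	while ((head = vhost_get_vq_desc(...)) != vq->num)
 *		process the buffer, then vhost_add_used_and_signal(...);
 *	if (unlikely(vhost_enable_notify(dev, vq))) {
 *		vhost_disable_notify(dev, vq);
 *		drain again;
 *	}
 */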
static int __init vhost_init(void)
{
	return 0;
}

static void __exit vhost_exit(void)
{
}

module_init(vhost_init);
module_exit(vhost_exit);
MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio");